internal/gocore: remove Flags and make Process cacheable

Flags controlled the amount of initialization work Core had to perform
and determined the kind of information available in the returned
Process object. This API prevented reusing a Process object across
different types of analysis.

This change removes the Flags type, has Core perform only the basic
initialization work, and makes Process gather the more expensive
information (e.g. type and reverse reference info) on demand.
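Roughly, the new lazy pattern looks like this (a condensed sketch of
the process.go and object.go hunks below; the elided parts stand in
for the full computation):

	type Process struct {
		// ...
		initTypeHeap sync.Once
		types        []typeInfo
	}

	// typeHeap labels heap objects with types. Safe to call from any
	// accessor; the expensive work runs at most once.
	func (p *Process) typeHeap() {
		p.initTypeHeap.Do(func() {
			p.types = make([]typeInfo, p.nObj)
			// ... propagate typings from roots through the heap ...
		})
	}

	func (p *Process) Type(x Object) (*Type, int64) {
		p.typeHeap() // triggered on demand
		i, _ := p.findObjectIndex(core.Address(x))
		return p.types[i].t, p.types[i].r
	}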

This allows the viewcore command to cache both the core.Process and
the gocore.Process and reuse them in interactive mode.
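The cache keys on the config used to produce the process objects, so
repeated commands against the same corefile skip reparsing (condensed
from the main.go hunk below; this sketch assumes coreCache is a
package-level pointer, which the mutation through cc requires):

	var coreCache = &struct {
		cfg     config
		coreP   *core.Process
		gocoreP *gocore.Process
		err     error
	}{}

	func readCore() (*core.Process, *gocore.Process, error) {
		cc := coreCache
		if cc.cfg == cfg { // same corefile/base/exe: reuse
			return cc.coreP, cc.gocoreP, cc.err
		}
		// ... parse, then fill cc and return ...
	}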

Change-Id: Ifd4f40f240e4a7f10286f966aafc38abe58877ee
Reviewed-on: https://go-review.googlesource.com/122481
Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
diff --git a/cmd/viewcore/main.go b/cmd/viewcore/main.go
index 91ffebd..050d0ab 100644
--- a/cmd/viewcore/main.go
+++ b/cmd/viewcore/main.go
@@ -233,33 +233,33 @@
-	// copy of params used to generate p.
+	// copy of params used to generate the process objects below.
 	cfg config
 
-	p   *core.Process
-	err error
+	coreP   *core.Process
+	gocoreP *gocore.Process
+	err     error
 }{}
 
 // readCore reads corefile and returns core and gocore process states.
-func readCore(flags gocore.Flags) (*core.Process, *gocore.Process, error) {
+func readCore() (*core.Process, *gocore.Process, error) {
 	cc := coreCache
-	if cc.cfg != cfg {
-		cc.cfg = cfg
-		cc.p, cc.err = core.Core(cfg.corefile, cfg.base, cfg.exePath)
+	if cc.cfg == cfg {
+		return cc.coreP, cc.gocoreP, cc.err
 	}
-	if cc.err != nil {
-		return nil, nil, cc.err
-	}
-	for _, w := range cc.p.Warnings() {
-		fmt.Fprintf(os.Stderr, "WARNING: %s\n", w)
-	}
-	// TODO: Cache gocore.Core object too.
-	// The tricky part of gocore is the flags. Change gocore
-	// API to initialize parts on first use so the processed
-	// results, which are expensive, can be cached.
-	c, err := gocore.Core(cc.p, flags)
+	c, err := core.Core(cfg.corefile, cfg.base, cfg.exePath)
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "%v\n", err)
 		return nil, nil, err
 	}
-	return cc.p, c, nil
+	p, err := gocore.Core(c)
+	if err != nil {
+		return nil, nil, err
+	}
+	for _, w := range c.Warnings() {
+		fmt.Fprintf(os.Stderr, "WARNING: %s\n", w)
+	}
+	cc.cfg = cfg
+	cc.coreP = c
+	cc.gocoreP = p
+	cc.err = nil
+	return c, p, nil
 }
 
 func runRoot(cmd *cobra.Command, args []string) {
@@ -267,7 +267,7 @@
 		cmd.Usage()
 		return
 	}
-	p, _, err := readCore(0)
+	p, _, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -346,7 +346,7 @@
 }
 
 func runOverview(cmd *cobra.Command, args []string) {
-	p, c, err := readCore(0)
+	p, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -363,7 +363,7 @@
 }
 
 func runMappings(cmd *cobra.Command, args []string) {
-	p, _, err := readCore(0)
+	p, _, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -398,7 +398,7 @@
 }
 
 func runGoroutines(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(0)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -422,7 +422,7 @@
 }
 
 func runHistogram(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(gocore.FlagTypes)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -458,7 +458,7 @@
 }
 
 func runBreakdown(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(0)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -488,7 +488,7 @@
 }
 
 func runObjgraph(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(gocore.FlagTypes)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -552,7 +552,7 @@
 }
 
 func runObjects(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(gocore.FlagTypes)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -564,7 +564,7 @@
 }
 
 func runReachable(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(gocore.FlagTypes | gocore.FlagReverse)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -648,7 +648,7 @@
 }
 
 func runHTML(cmd *cobra.Command, args []string) {
-	_, c, err := readCore(gocore.FlagTypes | gocore.FlagReverse)
+	_, c, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
@@ -656,7 +656,7 @@
 }
 
 func runRead(cmd *cobra.Command, args []string) {
-	p, _, err := readCore(0)
+	p, _, err := readCore()
 	if err != nil {
 		exitf("%v\n", err)
 	}
diff --git a/internal/gocore/dominator.go b/internal/gocore/dominator.go
index 59fac42..ca2709f 100644
--- a/internal/gocore/dominator.go
+++ b/internal/gocore/dominator.go
@@ -86,6 +86,9 @@
 }
 
 func runLT(p *Process) ltDom {
+	p.typeHeap()
+	p.reverseEdges()
+
 	nVertices := 1 + len(p.rootIdx) + p.nObj
 	lt := ltDom{
 		p:         p,
diff --git a/internal/gocore/gocore_test.go b/internal/gocore/gocore_test.go
index 8d4b96f..d85da17 100644
--- a/internal/gocore/gocore_test.go
+++ b/internal/gocore/gocore_test.go
@@ -25,7 +25,7 @@
 	if err != nil {
 		t.Fatalf("can't load test core file: %s", err)
 	}
-	p, err := Core(c, FlagTypes|FlagReverse)
+	p, err := Core(c)
 	if err != nil {
 		t.Fatalf("can't parse Go core: %s", err)
 	}
@@ -40,7 +40,7 @@
 	if err != nil {
 		t.Fatalf("can't load test core file: %s", err)
 	}
-	p, err := Core(c, FlagTypes|FlagReverse)
+	p, err := Core(c)
 	if err != nil {
 		t.Fatalf("can't parse Go core: %s", err)
 	}
diff --git a/internal/gocore/object.go b/internal/gocore/object.go
index 5765959..15d1c38 100644
--- a/internal/gocore/object.go
+++ b/internal/gocore/object.go
@@ -238,8 +238,9 @@
 
 // Type returns the type and repeat count for the object x.
 // x contains at least repeat copies of the returned type.
-// FlagTypes must have been passed to Core when p was constructed.
 func (p *Process) Type(x Object) (*Type, int64) {
+	p.typeHeap()
+
 	i, _ := p.findObjectIndex(core.Address(x))
 	return p.types[i].t, p.types[i].r
 }
diff --git a/internal/gocore/process.go b/internal/gocore/process.go
index d12feed..d4e368a 100644
--- a/internal/gocore/process.go
+++ b/internal/gocore/process.go
@@ -8,6 +8,7 @@
 	"debug/dwarf"
 	"fmt"
 	"strings"
+	"sync"
 
 	"golang.org/x/debug/internal/core"
 )
@@ -56,16 +57,16 @@
 	globals []*Root
 
 	// Types of each object, indexed by object index.
-	// Only initialized if FlagTypes is passed to Core.
-	types []typeInfo
+	initTypeHeap sync.Once
+	types        []typeInfo
 
 	// Reverse edges.
 	// The reverse edges for object #i are redge[ridx[i]:ridx[i+1]].
 	// A "reverse edge" for object #i is a location in memory where a pointer
 	// to object #i lives.
-	// Only initialized if FlagReverse is passed to Core.
-	redge []core.Address
-	ridx  []int64
+	initReverseEdges sync.Once
+	redge            []core.Address
+	ridx             []int64
 	// Sorted list of all roots.
-	// Only initialized if FlagReverse is passed to Core.
+	// Initialized lazily by reverseEdges.
 	rootIdx []*Root
@@ -107,23 +108,8 @@
 	return s[0]
 }
 
-// A Flags indicates optional analyses for Core to compute.
-type Flags uint8
-
-const (
-	// FlagTypes requests that Core compute type information for all Go objects,
-	// required to use the Type function.
-	// Setting this flag will require more initialization time and use more memory.
-	FlagTypes Flags = 1 << iota
-	// FlagReverse requests that Core compute reverse edge information,
-	// required to use ForEachReversePtr.
-	// Setting this flag will require more initialization time and use more memory.
-	FlagReverse
-)
-
 // Core takes a loaded core file and extracts Go information from it.
-// flags is a bitmask of data that should be extracted from the core.
-func Core(proc *core.Process, flags Flags) (p *Process, err error) {
+func Core(proc *core.Process) (p *Process, err error) {
 	// Make sure we have DWARF info.
 	if _, err := proc.DWARF(); err != nil {
 		return nil, err
@@ -171,12 +157,6 @@
 	p.readGs()
 	p.readStackVars() // needs to be after readGs.
 	p.markObjects()   // needs to be after readGlobals, readStackVars.
-	if flags&FlagTypes != 0 {
-		p.typeHeap() // needs to be after markObjects.
-	}
-	if flags&FlagReverse != 0 {
-		p.reverseEdges() // needs to be after markObjects.
-	}
 
 	return p, nil
 }
diff --git a/internal/gocore/reverse.go b/internal/gocore/reverse.go
index 21cea17..40768a3 100644
--- a/internal/gocore/reverse.go
+++ b/internal/gocore/reverse.go
@@ -11,69 +11,71 @@
 )
 
 func (p *Process) reverseEdges() {
-	// First, count the number of edges into each object.
-	// This allows for efficient packing of the reverse edge storage.
-	cnt := make([]int64, p.nObj+1)
-	p.ForEachObject(func(x Object) bool {
-		p.ForEachPtr(x, func(_ int64, y Object, _ int64) bool {
-			idx, _ := p.findObjectIndex(p.Addr(y))
-			cnt[idx]++
+	p.initReverseEdges.Do(func() {
+		// First, count the number of edges into each object.
+		// This allows for efficient packing of the reverse edge storage.
+		cnt := make([]int64, p.nObj+1)
+		p.ForEachObject(func(x Object) bool {
+			p.ForEachPtr(x, func(_ int64, y Object, _ int64) bool {
+				idx, _ := p.findObjectIndex(p.Addr(y))
+				cnt[idx]++
+				return true
+			})
 			return true
 		})
-		return true
-	})
-	p.ForEachRoot(func(r *Root) bool {
-		p.ForEachRootPtr(r, func(_ int64, y Object, _ int64) bool {
-			idx, _ := p.findObjectIndex(p.Addr(y))
-			cnt[idx]++
+		p.ForEachRoot(func(r *Root) bool {
+			p.ForEachRootPtr(r, func(_ int64, y Object, _ int64) bool {
+				idx, _ := p.findObjectIndex(p.Addr(y))
+				cnt[idx]++
+				return true
+			})
 			return true
 		})
-		return true
-	})
 
-	// Compute cumulative count of all incoming edges up to and including each object.
-	var n int64
-	for idx, c := range cnt {
-		n += c
-		cnt[idx] = n
-	}
+		// Compute cumulative count of all incoming edges up to and including each object.
+		var n int64
+		for idx, c := range cnt {
+			n += c
+			cnt[idx] = n
+		}
 
-	// Allocate all the storage for the reverse edges.
-	p.redge = make([]core.Address, n)
+		// Allocate all the storage for the reverse edges.
+		p.redge = make([]core.Address, n)
 
-	// Add edges to the lists.
-	p.ForEachObject(func(x Object) bool {
-		p.ForEachPtr(x, func(i int64, y Object, _ int64) bool {
-			idx, _ := p.findObjectIndex(p.Addr(y))
-			e := cnt[idx]
-			e--
-			cnt[idx] = e
-			p.redge[e] = p.Addr(x).Add(i)
+		// Add edges to the lists.
+		p.ForEachObject(func(x Object) bool {
+			p.ForEachPtr(x, func(i int64, y Object, _ int64) bool {
+				idx, _ := p.findObjectIndex(p.Addr(y))
+				e := cnt[idx]
+				e--
+				cnt[idx] = e
+				p.redge[e] = p.Addr(x).Add(i)
+				return true
+			})
 			return true
 		})
-		return true
-	})
-	p.ForEachRoot(func(r *Root) bool {
-		p.ForEachRootPtr(r, func(i int64, y Object, _ int64) bool {
-			idx, _ := p.findObjectIndex(p.Addr(y))
-			e := cnt[idx]
-			e--
-			cnt[idx] = e
-			p.redge[e] = r.Addr.Add(i)
+		p.ForEachRoot(func(r *Root) bool {
+			p.ForEachRootPtr(r, func(i int64, y Object, _ int64) bool {
+				idx, _ := p.findObjectIndex(p.Addr(y))
+				e := cnt[idx]
+				e--
+				cnt[idx] = e
+				p.redge[e] = r.Addr.Add(i)
+				return true
+			})
 			return true
 		})
-		return true
-	})
-	// At this point, cnt contains the cumulative count of all edges up to
-	// but *not* including each object.
-	p.ridx = cnt
+		// At this point, cnt contains the cumulative count of all edges up to
+		// but *not* including each object.
+		p.ridx = cnt
 
-	// Make root index.
-	p.ForEachRoot(func(r *Root) bool {
-		p.rootIdx = append(p.rootIdx, r)
-		return true
+		// Make root index.
+		p.ForEachRoot(func(r *Root) bool {
+			p.rootIdx = append(p.rootIdx, r)
+			return true
+		})
+		sort.Slice(p.rootIdx, func(i, j int) bool { return p.rootIdx[i].Addr < p.rootIdx[j].Addr })
 	})
-	sort.Slice(p.rootIdx, func(i, j int) bool { return p.rootIdx[i].Addr < p.rootIdx[j].Addr })
 }
 
 // ForEachReversePtr calls fn for all pointers it finds pointing to y.
@@ -82,8 +84,9 @@
 //   the offset i in that object or root where the pointer appears.
 //   the offset j in y where the pointer points.
 // If fn returns false, ForEachReversePtr returns immediately.
-// FlagReverse must have been passed to Core when p was constructed.
 func (p *Process) ForEachReversePtr(y Object, fn func(x Object, r *Root, i, j int64) bool) {
+	p.reverseEdges()
+
 	idx, _ := p.findObjectIndex(p.Addr(y))
 	for _, a := range p.redge[p.ridx[idx]:p.ridx[idx+1]] {
 		// Read pointer, compute offset in y.
diff --git a/internal/gocore/type.go b/internal/gocore/type.go
index 05ba59b..4f564fc 100644
--- a/internal/gocore/type.go
+++ b/internal/gocore/type.go
@@ -326,174 +326,176 @@
 
 // typeHeap tries to label all the heap objects with types.
 func (p *Process) typeHeap() {
-	// Type info for the start of each object. a.k.a. "0 offset" typings.
-	p.types = make([]typeInfo, p.nObj)
+	p.initTypeHeap.Do(func() {
+		// Type info for the start of each object. a.k.a. "0 offset" typings.
+		p.types = make([]typeInfo, p.nObj)
 
-	// Type info for the interior of objects, a.k.a. ">0 offset" typings.
-	// Type information is arranged in chunks. Chunks are stored in an
-	// arbitrary order, and are guaranteed to not overlap. If types are
-	// equal, chunks are also guaranteed not to abut.
-	// Interior typings are kept separate because they hopefully are rare.
-	// TODO: They aren't really that rare. On some large heaps I tried
-	// ~50% of objects have an interior pointer into them.
-	// Keyed by object index.
-	interior := map[int][]typeChunk{}
+		// Type info for the interior of objects, a.k.a. ">0 offset" typings.
+		// Type information is arranged in chunks. Chunks are stored in an
+		// arbitrary order, and are guaranteed to not overlap. If types are
+		// equal, chunks are also guaranteed not to abut.
+		// Interior typings are kept separate because they hopefully are rare.
+		// TODO: They aren't really that rare. On some large heaps I tried
+		// ~50% of objects have an interior pointer into them.
+		// Keyed by object index.
+		interior := map[int][]typeChunk{}
 
-	// Typings we know about but haven't scanned yet.
-	type workRecord struct {
-		a core.Address
-		t *Type
-		r int64
-	}
-	var work []workRecord
-
-	// add records the fact that we know the object at address a has
-	// r copies of type t.
-	add := func(a core.Address, t *Type, r int64) {
-		if a == 0 { // nil pointer
-			return
+		// Typings we know about but haven't scanned yet.
+		type workRecord struct {
+			a core.Address
+			t *Type
+			r int64
 		}
-		i, off := p.findObjectIndex(a)
-		if i < 0 { // pointer doesn't point to an object in the Go heap
-			return
-		}
-		if off == 0 {
-			// We have a 0-offset typing. Replace existing 0-offset typing
-			// if the new one is larger.
-			ot := p.types[i].t
-			or := p.types[i].r
-			if ot == nil || r*t.Size > or*ot.Size {
-				if t == ot {
-					// Scan just the new section.
-					work = append(work, workRecord{
-						a: a.Add(or * ot.Size),
-						t: t,
-						r: r - or,
-					})
-				} else {
-					// Rescan the whole typing using the updated type.
-					work = append(work, workRecord{
-						a: a,
-						t: t,
-						r: r,
-					})
-				}
-				p.types[i].t = t
-				p.types[i].r = r
+		var work []workRecord
+
+		// add records the fact that we know the object at address a has
+		// r copies of type t.
+		add := func(a core.Address, t *Type, r int64) {
+			if a == 0 { // nil pointer
+				return
 			}
-			return
-		}
+			i, off := p.findObjectIndex(a)
+			if i < 0 { // pointer doesn't point to an object in the Go heap
+				return
+			}
+			if off == 0 {
+				// We have a 0-offset typing. Replace existing 0-offset typing
+				// if the new one is larger.
+				ot := p.types[i].t
+				or := p.types[i].r
+				if ot == nil || r*t.Size > or*ot.Size {
+					if t == ot {
+						// Scan just the new section.
+						work = append(work, workRecord{
+							a: a.Add(or * ot.Size),
+							t: t,
+							r: r - or,
+						})
+					} else {
+						// Rescan the whole typing using the updated type.
+						work = append(work, workRecord{
+							a: a,
+							t: t,
+							r: r,
+						})
+					}
+					p.types[i].t = t
+					p.types[i].r = r
+				}
+				return
+			}
 
-		// Add an interior typing to object #i.
-		c := typeChunk{off: off, t: t, r: r}
+			// Add an interior typing to object #i.
+			c := typeChunk{off: off, t: t, r: r}
 
-		// Merge the given typing into the chunks we already know.
-		// TODO: this could be O(n) per insert if there are lots of internal pointers.
-		chunks := interior[i]
-		newchunks := chunks[:0]
-		addWork := true
-		for _, d := range chunks {
-			if c.max() <= d.min() || c.min() >= d.max() {
-				// c does not overlap with d.
-				if c.t == d.t && (c.max() == d.min() || c.min() == d.max()) {
-					// c and d abut and share the same base type. Merge them.
+			// Merge the given typing into the chunks we already know.
+			// TODO: this could be O(n) per insert if there are lots of internal pointers.
+			chunks := interior[i]
+			newchunks := chunks[:0]
+			addWork := true
+			for _, d := range chunks {
+				if c.max() <= d.min() || c.min() >= d.max() {
+					// c does not overlap with d.
+					if c.t == d.t && (c.max() == d.min() || c.min() == d.max()) {
+						// c and d abut and share the same base type. Merge them.
+						c = c.merge(d)
+						continue
+					}
+					// Keep existing chunk d.
+					// Overwrites chunks slice, but we're only merging chunks so it
+					// can't overwrite to-be-processed elements.
+					newchunks = append(newchunks, d)
+					continue
+				}
+				// There is some overlap. There are a few possibilities:
+				// 1) One is completely contained in the other.
+				// 2) Both are slices of a larger underlying array.
+				// 3) Some unsafe trickery has happened. Non-containing overlap
+				//    can only happen in safe Go via case 2.
+				if c.min() >= d.min() && c.max() <= d.max() {
+					// 1a: c is contained within the existing chunk d.
+					// Note that there can be a type mismatch between c and d,
+					// but we don't care. We use the larger chunk regardless.
+					c = d
+					addWork = false // We've already scanned all of c.
+					continue
+				}
+				if d.min() >= c.min() && d.max() <= c.max() {
+					// 1b: existing chunk d is completely covered by c.
+					continue
+				}
+				if c.t == d.t && c.matchingAlignment(d) {
+					// Union two regions of the same base type. Case 2 above.
 					c = c.merge(d)
 					continue
 				}
-				// Keep existing chunk d.
-				// Overwrites chunks slice, but we're only merging chunks so it
-				// can't overwrite to-be-processed elements.
-				newchunks = append(newchunks, d)
-				continue
-			}
-			// There is some overlap. There are a few possibilities:
-			// 1) One is completely contained in the other.
-			// 2) Both are slices of a larger underlying array.
-			// 3) Some unsafe trickery has happened. Non-containing overlap
-			//    can only happen in safe Go via case 2.
-			if c.min() >= d.min() && c.max() <= d.max() {
-				// 1a: c is contained within the existing chunk d.
-				// Note that there can be a type mismatch between c and d,
-				// but we don't care. We use the larger chunk regardless.
-				c = d
-				addWork = false // We've already scanned all of c.
-				continue
-			}
-			if d.min() >= c.min() && d.max() <= c.max() {
-				// 1b: existing chunk d is completely covered by c.
-				continue
-			}
-			if c.t == d.t && c.matchingAlignment(d) {
-				// Union two regions of the same base type. Case 2 above.
-				c = c.merge(d)
-				continue
-			}
-			if c.size() < d.size() {
-				// Keep the larger of the two chunks.
-				c = d
-				addWork = false
-			}
-		}
-		// Add new chunk to list of chunks for object.
-		newchunks = append(newchunks, c)
-		interior[i] = newchunks
-		// Also arrange to scan the new chunk. Note that if we merged
-		// with an existing chunk (or chunks), those will get rescanned.
-		// Duplicate work, but that's ok. TODO: but could be expensive.
-		if addWork {
-			work = append(work, workRecord{
-				a: a.Add(c.off - off),
-				t: c.t,
-				r: c.r,
-			})
-		}
-	}
-
-	// Get typings starting at roots.
-	fr := &frameReader{p: p}
-	p.ForEachRoot(func(r *Root) bool {
-		if r.Frame != nil {
-			fr.live = r.Frame.Live
-			p.typeObject(r.Addr, r.Type, fr, add)
-		} else {
-			p.typeObject(r.Addr, r.Type, p.proc, add)
-		}
-		return true
-	})
-
-	// Propagate typings through the heap.
-	for len(work) > 0 {
-		c := work[len(work)-1]
-		work = work[:len(work)-1]
-		for i := int64(0); i < c.r; i++ {
-			p.typeObject(c.a.Add(i*c.t.Size), c.t, p.proc, add)
-		}
-	}
-
-	// Merge any interior typings with the 0-offset typing.
-	for i, chunks := range interior {
-		t := p.types[i].t
-		r := p.types[i].r
-		if t == nil {
-			continue // We have no type info at offset 0.
-		}
-		for _, c := range chunks {
-			if c.max() <= r*t.Size {
-				// c is completely contained in the 0-offset typing. Ignore it.
-				continue
-			}
-			if c.min() <= r*t.Size {
-				// Typings overlap or abut. Extend if we can.
-				if c.t == t && c.min()%t.Size == 0 {
-					r = c.max() / t.Size
-					p.types[i].r = r
+				if c.size() < d.size() {
+					// Keep the larger of the two chunks.
+					c = d
+					addWork = false
 				}
-				continue
 			}
-			// Note: at this point we throw away any interior typings that weren't
-			// merged with the 0-offset typing.  TODO: make more use of this info.
+			// Add new chunk to list of chunks for object.
+			newchunks = append(newchunks, c)
+			interior[i] = newchunks
+			// Also arrange to scan the new chunk. Note that if we merged
+			// with an existing chunk (or chunks), those will get rescanned.
+			// Duplicate work, but that's ok. TODO: but could be expensive.
+			if addWork {
+				work = append(work, workRecord{
+					a: a.Add(c.off - off),
+					t: c.t,
+					r: c.r,
+				})
+			}
 		}
-	}
+
+		// Get typings starting at roots.
+		fr := &frameReader{p: p}
+		p.ForEachRoot(func(r *Root) bool {
+			if r.Frame != nil {
+				fr.live = r.Frame.Live
+				p.typeObject(r.Addr, r.Type, fr, add)
+			} else {
+				p.typeObject(r.Addr, r.Type, p.proc, add)
+			}
+			return true
+		})
+
+		// Propagate typings through the heap.
+		for len(work) > 0 {
+			c := work[len(work)-1]
+			work = work[:len(work)-1]
+			for i := int64(0); i < c.r; i++ {
+				p.typeObject(c.a.Add(i*c.t.Size), c.t, p.proc, add)
+			}
+		}
+
+		// Merge any interior typings with the 0-offset typing.
+		for i, chunks := range interior {
+			t := p.types[i].t
+			r := p.types[i].r
+			if t == nil {
+				continue // We have no type info at offset 0.
+			}
+			for _, c := range chunks {
+				if c.max() <= r*t.Size {
+					// c is completely contained in the 0-offset typing. Ignore it.
+					continue
+				}
+				if c.min() <= r*t.Size {
+					// Typings overlap or abut. Extend if we can.
+					if c.t == t && c.min()%t.Size == 0 {
+						r = c.max() / t.Size
+						p.types[i].r = r
+					}
+					continue
+				}
+				// Note: at this point we throw away any interior typings that weren't
+				// merged with the 0-offset typing.  TODO: make more use of this info.
+			}
+		}
+	})
 }
 
 type reader interface {