[gopls-release-branch.0.7] all: merge master into gopls-release-branch.0.7

4fe0d6c80 internal/lsp: skip signature help within a string literal
0c506a274 internal/lsp/source: evaluate bin/hex literal on hover
46d1522a5 internal/lsp: add extract to method code action
c740bfd9b internal/lsp: handle incorrect import with CRLF line endings
251092de1 internal/lsp/source: compute imports text edits from scratch
412ee174e all: add SliceToArrayPointer instruction
7f68387a4 internal/lsp/source: workspace symbol improvements for selectors
7aa829446 internal/lsp: handle panic in fix AST
6e9046bfc gopls/doc: fix imports function for Neovim LSP
0cf4e2708 internal/lsp/semantic: improve semantic token processing
5061c412c internal/lsp: signal diagnostic completion if modification failed
ef97713d9 gopls/doc: add vetted examples for Sublime Text
8e85a283c internal/lsp: adopt bcmills' suggestion for an improved debouncer API
ae0deb7a4 internal/lsp: fix variable reuse bug in code actions
d36a54b56 internal/lsp: improve package search in a couple places
384460091 gopls/internal/regtest: add a flag to profile didChange handling
de4477617 gopls/doc: Neovim v0.5 is now stable
ccff7327b internal/lsp/source: fix comment update during rename for short variable declarations
a7dfe3d2b internal/lsp: attempt to make TestDebouncer more robust
980829d8a internal/lsp/lsprpc: add an AutoDialer abstraction
cb1acef8b Revert "internal/lsp/semantic.go: repress useless messages and tighten logic"
5b540d349 internal/lsp/semantic.go: repress useless messages and tighten logic
e33c0f293 go/packages: skip tests that link binaries in short mode
8e32e9f14 internal/lsp/regtest: fix a panic TestResolveImportCycle
2583041a9 go/packages: fix data race in TestCgoNoSyntax
69948257b go/ssa: incorrect document in type Builtin struct
e688b9451 go/packages: parallelize most tests
64bd808b7 internal/lsp/cache: don't report a context error if load succeeded
cae92d5d6 go/packages: skip GOPATH tests in short mode
2f04284e7 internal/lsp/regtest: allow for unsent diagnostics in TestResolveImportCycle
41a65bdc1 internal/lsp: avoid flake in TestDebouncer
1c9019e64 internal/lsp/cache: fix raciness of updating the view workspace
febfa9d67 internal/lsp/source: move diagnosticsDelay out of experimental
71eae3a1b internal/lsp/cache: be consistent about using snapshot.FileSet
251f28368 internal/lsp: add a setting to batch didChangeWatchedFile notifications
c979f9254 internal/lsp/cache: invalidate packages in setMetadata
55cd4804d gopls/doc: Document how gopls generates semantic tokens.
77c1b7a4f internal/lsp/cmd: print flags when running gopls help
640c1dea8 go/ssa: support unsafe.Add and unsafe.Slice
fd0057433 internal/lsp/protocol: upgrade generated lsp code to beginning of July
7edcfe523 internal/lsp/lsprpc: add a goenv middleware
e3040f272 internal/lsp/lsprpc: add a CommandInterceptor middleware
ea370293d internal/lsp/lsprpc: use middleware for the V2 handshaking
20dafe5d6 go/ssa: allow conversion from slice to array pointer
f0847e0ce go/callgraph: change reflect.Call to reflect.Value.Call in comment
72e4d1bb8 go/internal/cgo: handle symlinks with $PWD, not -srcdir
100b22926 internal/lsp/cache: treat load timeouts as critical errors
12f8456a0 internal/testenv: actually Exit if small machine for netbsd-arm*-bsiegert
00129ffdb internal/lsp/lsprpc: update binder tests to handle forwarding
fe2294a1b internal/jsonrpc2_v2: initialize async before binding
636017e13 internal/lsp/cache: fix missing pkg error on Windows
48cad5ecb tools/gopls: small fixes to contributing.md
da404b52b internal/lsp: start parsing go.work file like gopls.mod file
4833ac519 internal/mod: add workfile parser
bfc167431 internal/lsp/cache: fix loading of std and cmd
d824a7481 gopls/doc: include instructions for compiling generic code
6d3e43932 gopls/doc: add instructions for working with generic code
4c651fc1f internal/lsp/source: add inferred types to generic function hover
d25f90668  internal/lsp: do not block on channel when there is an error
463a76b3d internal/lsp: only reload invalid metadata when necessary
116feaea4 internal/lsp: move the progress tracker to the session
3f7c32638 gopls/internal/regtest: skip the flaky TestResolveImportCycle
b12e6172d internal/lsp/cache: don't delete metadata until it's reloaded
4b484fb13 internal/lsp: exclude the module cache from the workspace
9a55cb1fb internal/lsp/command: minor clean-up of StartDebugging description
490eac872 internal/lsp/command: add missing doc and support for result parameters
9aa007ed1 internal/lsp/cache: invalidate broken packages when imports are deleted
f6327c5b2 vta: adds the VTA call graph construction
13cf84452 go/ast/astutil: fix panic when rewriting multi-argument type instances
937957b6d vta: adds VTA graph propagation functionality
e0b9cf74f lsp/completion: support completing to Elem() types
16e5f5500 lsp/completion: search deeper for candidate type mods
b57987414 lsp/completion: reorganize how we track candidate type mods
890984ba2 internal/lsp: change generated variable names to be more verbose
9f230b562 internal/lsp: fix extract bug choosing available identifiers
4e58f8f09 internal/lsp: handle out of range panic in template parsing
716319fe5 vta: finalizes VTA graph construction by adding support for function calls
234f954de vta: extends VTA graph construction to handle collections
126df1d64 cmd/getgo: determine current version via /dl/?mode=json API
b9b845e62 internal/lsp: fix folding range for block comments
1225b6f53 internal/lsp: memoize allKnownSubdirs instead of recomputing
7295a4e73 lsp/completion: don't offer untyped conversions
bf132055d vta: adds VTA graph construction for basic program statements
8f2cf6ccf gopls/internal/regtest: clean up TestFillReturnsPanic
4abb1e2f2 internal/lsp: handle empty buffers in template parsing
7ac129f24 internal/lsp: don't diagnose/analyze intermediate test variants
1c2154ae3 internal/lsp: address some staticcheck warning

Change-Id: Ie2238b8bad61a238b46d1e5ae34e0c5fd13950be
diff --git a/cmd/getgo/download.go b/cmd/getgo/download.go
index 1731131..86f0a2f 100644
--- a/cmd/getgo/download.go
+++ b/cmd/getgo/download.go
@@ -12,6 +12,7 @@
 	"archive/zip"
 	"compress/gzip"
 	"crypto/sha256"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -22,7 +23,6 @@
 )
 
 const (
-	currentVersionURL = "https://golang.org/VERSION?m=text"
 	downloadURLPrefix = "https://dl.google.com/go"
 )
 
@@ -168,18 +168,24 @@
 }
 
 func getLatestGoVersion() (string, error) {
-	resp, err := http.Get(currentVersionURL)
+	resp, err := http.Get("https://golang.org/dl/?mode=json")
 	if err != nil {
 		return "", fmt.Errorf("Getting current Go version failed: %v", err)
 	}
 	defer resp.Body.Close()
-	if resp.StatusCode > 299 {
+	if resp.StatusCode != http.StatusOK {
 		b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
-		return "", fmt.Errorf("Could not get current Go version: HTTP %d: %q", resp.StatusCode, b)
+		return "", fmt.Errorf("Could not get current Go release: HTTP %d: %q", resp.StatusCode, b)
 	}
-	version, err := ioutil.ReadAll(resp.Body)
+	var releases []struct {
+		Version string
+	}
+	err = json.NewDecoder(resp.Body).Decode(&releases)
 	if err != nil {
 		return "", err
 	}
-	return strings.TrimSpace(string(version)), nil
+	if len(releases) < 1 {
+		return "", fmt.Errorf("Could not get at least one Go release")
+	}
+	return releases[0].Version, nil
 }
diff --git a/go/ast/astutil/rewrite.go b/go/ast/astutil/rewrite.go
index cf72ea9..b949fc8 100644
--- a/go/ast/astutil/rewrite.go
+++ b/go/ast/astutil/rewrite.go
@@ -9,6 +9,8 @@
 	"go/ast"
 	"reflect"
 	"sort"
+
+	"golang.org/x/tools/internal/typeparams"
 )
 
 // An ApplyFunc is invoked by Apply for each node n, even if n is nil,
@@ -437,7 +439,11 @@
 		}
 
 	default:
-		panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+		if typeparams.IsListExpr(n) {
+			a.applyList(n, "ElemList")
+		} else {
+			panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+		}
 	}
 
 	if a.post != nil && !a.post(&a.cursor) {
diff --git a/go/ast/astutil/rewrite_test.go b/go/ast/astutil/rewrite_test.go
index 1c86970..3a74afa 100644
--- a/go/ast/astutil/rewrite_test.go
+++ b/go/ast/astutil/rewrite_test.go
@@ -13,13 +13,16 @@
 	"testing"
 
 	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/internal/typeparams"
 )
 
-var rewriteTests = [...]struct {
+type rewriteTest struct {
 	name       string
 	orig, want string
 	pre, post  astutil.ApplyFunc
-}{
+}
+
+var rewriteTests = []rewriteTest{
 	{name: "nop", orig: "package p\n", want: "package p\n"},
 
 	{name: "replace",
@@ -190,6 +193,34 @@
 	},
 }
 
+func init() {
+	if typeparams.Enabled {
+		rewriteTests = append(rewriteTests, rewriteTest{
+			name: "replace",
+			orig: `package p
+
+type T[P1, P2 any] int
+
+type R T[int, string]
+`,
+			want: `package p
+
+type T[P1, P2 any] int32
+
+type R T[int32, string]
+`,
+			post: func(c *astutil.Cursor) bool {
+				if ident, ok := c.Node().(*ast.Ident); ok {
+					if ident.Name == "int" {
+						c.Replace(ast.NewIdent("int32"))
+					}
+				}
+				return true
+			},
+		})
+	}
+}
+
 func valspec(name, typ string) *ast.ValueSpec {
 	return &ast.ValueSpec{Names: []*ast.Ident{ast.NewIdent(name)},
 		Type: ast.NewIdent(typ),
diff --git a/go/callgraph/callgraph.go b/go/callgraph/callgraph.go
index 707a319..2bcc3dc 100644
--- a/go/callgraph/callgraph.go
+++ b/go/callgraph/callgraph.go
@@ -89,7 +89,7 @@
 // A Edge represents an edge in the call graph.
 //
 // Site is nil for edges originating in synthetic or intrinsic
-// functions, e.g. reflect.Call or the root of the call graph.
+// functions, e.g. reflect.Value.Call or the root of the call graph.
 type Edge struct {
 	Caller *Node
 	Site   ssa.CallInstruction
diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go
index 6ca765b..1b7b105 100644
--- a/go/callgraph/vta/graph.go
+++ b/go/callgraph/vta/graph.go
@@ -6,9 +6,12 @@
 
 import (
 	"fmt"
+	"go/token"
 	"go/types"
 
+	"golang.org/x/tools/go/callgraph"
 	"golang.org/x/tools/go/ssa"
+	"golang.org/x/tools/go/types/typeutil"
 )
 
 // node interface for VTA nodes.
@@ -232,3 +235,451 @@
 	}
 	return succs
 }
+
+// typePropGraph builds a VTA graph for a set of `funcs` and initial
+// `callgraph` needed to establish interprocedural edges. Returns the
+// graph and a map for unique type representatives.
+func typePropGraph(funcs map[*ssa.Function]bool, callgraph *callgraph.Graph) (vtaGraph, *typeutil.Map) {
+	b := builder{graph: make(vtaGraph), callGraph: callgraph}
+	b.visit(funcs)
+	return b.graph, &b.canon
+}
+
+// Data structure responsible for linearly traversing the
+// code and building a VTA graph.
+type builder struct {
+	graph     vtaGraph
+	callGraph *callgraph.Graph // initial call graph for creating flows at unresolved call sites.
+
+	// Specialized type map for canonicalization of types.Type.
+	// Semantically equivalent types can have different implementations,
+	// i.e., they are different pointer values. The map allows us to
+	// have one unique representative. The keys are fixed and from the
+	// client perspective they are types. The values in our case are
+	// types too, in particular type representatives. Each value is a
+	// pointer so this map is not expected to take much memory.
+	canon typeutil.Map
+}
+
+func (b *builder) visit(funcs map[*ssa.Function]bool) {
+	// Add the fixed edge Panic -> Recover
+	b.graph.addEdge(panicArg{}, recoverReturn{})
+
+	for f, in := range funcs {
+		if in {
+			b.fun(f)
+		}
+	}
+}
+
+func (b *builder) fun(f *ssa.Function) {
+	for _, bl := range f.Blocks {
+		for _, instr := range bl.Instrs {
+			b.instr(instr)
+		}
+	}
+}
+
+func (b *builder) instr(instr ssa.Instruction) {
+	switch i := instr.(type) {
+	case *ssa.Store:
+		b.addInFlowAliasEdges(b.nodeFromVal(i.Addr), b.nodeFromVal(i.Val))
+	case *ssa.MakeInterface:
+		b.addInFlowEdge(b.nodeFromVal(i.X), b.nodeFromVal(i))
+	case *ssa.MakeClosure:
+		b.closure(i)
+	case *ssa.UnOp:
+		b.unop(i)
+	case *ssa.Phi:
+		b.phi(i)
+	case *ssa.ChangeInterface:
+		// Although in change interface a := A(b) command a and b are
+		// the same object, the only interesting flow happens when A
+		// is an interface. We create flow b -> a, but omit a -> b.
+		// The latter flow is not needed: if a gets assigned concrete
+		// type later on, that cannot be propagated back to b as b
+		// is a separate variable. The a -> b flow can happen when
+		// A is a pointer to interface, but then the command is of
+		// type ChangeType, handled below.
+		b.addInFlowEdge(b.nodeFromVal(i.X), b.nodeFromVal(i))
+	case *ssa.ChangeType:
+		// change type command a := A(b) results in a and b being the
+		// same value. For concrete type A, there is no interesting flow.
+		//
+		// Note: When A is an interface, most interface casts are handled
+		// by the ChangeInterface instruction. The relevant case here is
+		// when converting a pointer to an interface type. This can happen
+		// when the underlying interfaces have the same method set.
+		//   type I interface{ foo() }
+		//   type J interface{ foo() }
+		//   var b *I
+		//   a := (*J)(b)
+		// When this happens we add flows between a <--> b.
+		b.addInFlowAliasEdges(b.nodeFromVal(i), b.nodeFromVal(i.X))
+	case *ssa.TypeAssert:
+		b.tassert(i)
+	case *ssa.Extract:
+		b.extract(i)
+	case *ssa.Field:
+		b.field(i)
+	case *ssa.FieldAddr:
+		b.fieldAddr(i)
+	case *ssa.Send:
+		b.send(i)
+	case *ssa.Select:
+		b.selekt(i)
+	case *ssa.Index:
+		b.index(i)
+	case *ssa.IndexAddr:
+		b.indexAddr(i)
+	case *ssa.Lookup:
+		b.lookup(i)
+	case *ssa.MapUpdate:
+		b.mapUpdate(i)
+	case *ssa.Next:
+		b.next(i)
+	case ssa.CallInstruction:
+		b.call(i)
+	case *ssa.Panic:
+		b.panic(i)
+	case *ssa.Return:
+		b.rtrn(i)
+	case *ssa.MakeChan, *ssa.MakeMap, *ssa.MakeSlice, *ssa.BinOp,
+		*ssa.Alloc, *ssa.DebugRef, *ssa.Convert, *ssa.Jump, *ssa.If,
+		*ssa.Slice, *ssa.Range, *ssa.RunDefers:
+		// No interesting flow here.
+		return
+	default:
+		panic(fmt.Sprintf("unsupported instruction %v\n", instr))
+	}
+}
+
+func (b *builder) unop(u *ssa.UnOp) {
+	switch u.Op {
+	case token.MUL:
+		// Multiplication operator * is used here as a dereference operator.
+		b.addInFlowAliasEdges(b.nodeFromVal(u), b.nodeFromVal(u.X))
+	case token.ARROW:
+		t := u.X.Type().Underlying().(*types.Chan).Elem()
+		b.addInFlowAliasEdges(b.nodeFromVal(u), channelElem{typ: t})
+	default:
+		// There is no interesting type flow otherwise.
+	}
+}
+
+func (b *builder) phi(p *ssa.Phi) {
+	for _, edge := range p.Edges {
+		b.addInFlowAliasEdges(b.nodeFromVal(p), b.nodeFromVal(edge))
+	}
+}
+
+func (b *builder) tassert(a *ssa.TypeAssert) {
+	if !a.CommaOk {
+		b.addInFlowEdge(b.nodeFromVal(a.X), b.nodeFromVal(a))
+		return
+	}
+	// The case where a is <a.AssertedType, bool> register so there
+	// is a flow from a.X to a[0]. Here, a[0] is represented as an
+	// indexedLocal: an entry into local tuple register a at index 0.
+	tup := a.Type().Underlying().(*types.Tuple)
+	t := tup.At(0).Type()
+
+	local := indexedLocal{val: a, typ: t, index: 0}
+	b.addInFlowEdge(b.nodeFromVal(a.X), local)
+}
+
+// extract instruction t1 := t2[i] generates flows between t2[i]
+// and t1 where the source is indexed local representing a value
+// from tuple register t2 at index i and the target is t1.
+func (b *builder) extract(e *ssa.Extract) {
+	tup := e.Tuple.Type().Underlying().(*types.Tuple)
+	t := tup.At(e.Index).Type()
+
+	local := indexedLocal{val: e.Tuple, typ: t, index: e.Index}
+	b.addInFlowAliasEdges(b.nodeFromVal(e), local)
+}
+
+func (b *builder) field(f *ssa.Field) {
+	fnode := field{StructType: f.X.Type(), index: f.Field}
+	b.addInFlowEdge(fnode, b.nodeFromVal(f))
+}
+
+func (b *builder) fieldAddr(f *ssa.FieldAddr) {
+	t := f.X.Type().Underlying().(*types.Pointer).Elem()
+
+	// Since we are getting pointer to a field, make a bidirectional edge.
+	fnode := field{StructType: t, index: f.Field}
+	b.addInFlowEdge(fnode, b.nodeFromVal(f))
+	b.addInFlowEdge(b.nodeFromVal(f), fnode)
+}
+
+func (b *builder) send(s *ssa.Send) {
+	t := s.Chan.Type().Underlying().(*types.Chan).Elem()
+	b.addInFlowAliasEdges(channelElem{typ: t}, b.nodeFromVal(s.X))
+}
+
+// selekt generates flows for select statement
+//   a = select blocking/nonblocking [c_1 <- t_1, c_2 <- t_2, ..., <- o_1, <- o_2, ...]
+// between receiving channel registers c_i and corresponding input register t_i. Further,
+// flows are generated between o_i and a[2 + i]. Note that a is a tuple register of type
+// <int, bool, r_1, r_2, ...> where the type of r_i is the element type of channel o_i.
+func (b *builder) selekt(s *ssa.Select) {
+	recvIndex := 0
+	for _, state := range s.States {
+		t := state.Chan.Type().Underlying().(*types.Chan).Elem()
+
+		if state.Dir == types.SendOnly {
+			b.addInFlowAliasEdges(channelElem{typ: t}, b.nodeFromVal(state.Send))
+		} else {
+			// state.Dir == RecvOnly by definition of select instructions.
+			tupEntry := indexedLocal{val: s, typ: t, index: 2 + recvIndex}
+			b.addInFlowAliasEdges(tupEntry, channelElem{typ: t})
+			recvIndex++
+		}
+	}
+}
+
+// index instruction a := b[c] on slices creates flows between a and
+// SliceElem(t) flow where t is an interface type of c. Arrays and
+// slice elements are both modeled as SliceElem.
+func (b *builder) index(i *ssa.Index) {
+	et := sliceArrayElem(i.X.Type())
+	b.addInFlowAliasEdges(b.nodeFromVal(i), sliceElem{typ: et})
+}
+
+// indexAddr instruction a := &b[c] fetches the address of an index
+// into the field so we create bidirectional flow a <-> SliceElem(t)
+// where t is an interface type of c. Arrays and slice elements are
+// both modeled as SliceElem.
+func (b *builder) indexAddr(i *ssa.IndexAddr) {
+	et := sliceArrayElem(i.X.Type())
+	b.addInFlowEdge(sliceElem{typ: et}, b.nodeFromVal(i))
+	b.addInFlowEdge(b.nodeFromVal(i), sliceElem{typ: et})
+}
+
+// lookup handles map query commands a := m[b] where m is of type
+// map[...]V and V is an interface. It creates flows between `a`
+// and MapValue(V).
+func (b *builder) lookup(l *ssa.Lookup) {
+	t, ok := l.X.Type().Underlying().(*types.Map)
+	if !ok {
+		// No interesting flows for string lookups.
+		return
+	}
+	b.addInFlowAliasEdges(b.nodeFromVal(l), mapValue{typ: t.Elem()})
+}
+
+// mapUpdate handles map update commands m[b] = a where m is of type
+// map[K]V and K and V are interfaces. It creates flows between `a`
+// and MapValue(V) as well as between MapKey(K) and `b`.
+func (b *builder) mapUpdate(u *ssa.MapUpdate) {
+	t, ok := u.Map.Type().Underlying().(*types.Map)
+	if !ok {
+		// No interesting flows for string updates.
+		return
+	}
+
+	b.addInFlowAliasEdges(mapKey{typ: t.Key()}, b.nodeFromVal(u.Key))
+	b.addInFlowAliasEdges(mapValue{typ: t.Elem()}, b.nodeFromVal(u.Value))
+}
+
+// next instruction <ok, key, value> := next r, where r
+// is a range over map or string generates flow between
+// key and MapKey as well value and MapValue nodes.
+func (b *builder) next(n *ssa.Next) {
+	if n.IsString {
+		return
+	}
+	tup := n.Type().Underlying().(*types.Tuple)
+	kt := tup.At(1).Type()
+	vt := tup.At(2).Type()
+
+	b.addInFlowAliasEdges(indexedLocal{val: n, typ: kt, index: 1}, mapKey{typ: kt})
+	b.addInFlowAliasEdges(indexedLocal{val: n, typ: vt, index: 2}, mapValue{typ: vt})
+}
+
+// addInFlowAliasEdges adds an edge r -> l to b.graph if l is a node that can
+// have an inflow, i.e., a node that represents an interface or an unresolved
+// function value. Similarly for the edge l -> r with an additional condition
+// of that l and r can potentially alias.
+func (b *builder) addInFlowAliasEdges(l, r node) {
+	b.addInFlowEdge(r, l)
+
+	if canAlias(l, r) {
+		b.addInFlowEdge(l, r)
+	}
+}
+
+func (b *builder) closure(c *ssa.MakeClosure) {
+	f := c.Fn.(*ssa.Function)
+	b.addInFlowEdge(function{f: f}, b.nodeFromVal(c))
+
+	for i, fv := range f.FreeVars {
+		b.addInFlowAliasEdges(b.nodeFromVal(fv), b.nodeFromVal(c.Bindings[i]))
+	}
+}
+
+// panic creates a flow from arguments to panic instructions to return
+// registers of all recover statements in the program. Introduces a
+// global panic node Panic and
+//  1) for every panic statement p: add p -> Panic
+//  2) for every recover statement r: add Panic -> r (handled in call)
+// TODO(zpavlinovic): improve precision by explicitly modeling how panic
+// values flow from callees to callers and into deferred recover instructions.
+func (b *builder) panic(p *ssa.Panic) {
+	// Panics often have, for instance, strings as arguments which do
+	// not create interesting flows.
+	if !canHaveMethods(p.X.Type()) {
+		return
+	}
+
+	b.addInFlowEdge(b.nodeFromVal(p.X), panicArg{})
+}
+
+// call adds flows between arguments/parameters and return values/registers
+// for both static and dynamic calls, as well as go and defer calls.
+func (b *builder) call(c ssa.CallInstruction) {
+	// When c is r := recover() call register instruction, we add Recover -> r.
+	if bf, ok := c.Common().Value.(*ssa.Builtin); ok && bf.Name() == "recover" {
+		b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(c.(*ssa.Call)))
+		return
+	}
+
+	for _, f := range siteCallees(c, b.callGraph) {
+		addArgumentFlows(b, c, f)
+	}
+}
+
+func addArgumentFlows(b *builder, c ssa.CallInstruction, f *ssa.Function) {
+	cc := c.Common()
+	// When c is an unresolved method call (cc.Method != nil), cc.Value contains
+	// the receiver object rather than cc.Args[0].
+	if cc.Method != nil {
+		b.addInFlowAliasEdges(b.nodeFromVal(f.Params[0]), b.nodeFromVal(cc.Value))
+	}
+
+	offset := 0
+	if cc.Method != nil {
+		offset = 1
+	}
+	for i, v := range cc.Args {
+		b.addInFlowAliasEdges(b.nodeFromVal(f.Params[i+offset]), b.nodeFromVal(v))
+	}
+}
+
+// rtrn produces flows between values of r and c where
+// c is a call instruction that resolves to the enclosing
+// function of r based on b.callGraph.
+func (b *builder) rtrn(r *ssa.Return) {
+	n := b.callGraph.Nodes[r.Parent()]
+	// n != nil when b.callgraph is sound, but the client can
+	// pass any callgraph, including an underapproximate one.
+	if n == nil {
+		return
+	}
+
+	for _, e := range n.In {
+		if cv, ok := e.Site.(ssa.Value); ok {
+			addReturnFlows(b, r, cv)
+		}
+	}
+}
+
+func addReturnFlows(b *builder, r *ssa.Return, site ssa.Value) {
+	results := r.Results
+	if len(results) == 1 {
+		// When there is only one return value, the destination register does not
+		// have a tuple type.
+		b.addInFlowEdge(b.nodeFromVal(results[0]), b.nodeFromVal(site))
+		return
+	}
+
+	tup := site.Type().Underlying().(*types.Tuple)
+	for i, r := range results {
+		local := indexedLocal{val: site, typ: tup.At(i).Type(), index: i}
+		b.addInFlowEdge(b.nodeFromVal(r), local)
+	}
+}
+
+// addInFlowEdge adds s -> d to g if d is node that can have an inflow, i.e., a node
+// that represents an interface or an unresolved function value. Otherwise, there
+// is no interesting type flow so the edge is ommited.
+func (b *builder) addInFlowEdge(s, d node) {
+	if hasInFlow(d) {
+		b.graph.addEdge(b.representative(s), b.representative(d))
+	}
+}
+
+// Creates const, pointer, global, func, and local nodes based on register instructions.
+func (b *builder) nodeFromVal(val ssa.Value) node {
+	if p, ok := val.Type().(*types.Pointer); ok && !isInterface(p.Elem()) {
+		// Nested pointer to interfaces are modeled as a special
+		// nestedPtrInterface node.
+		if i := interfaceUnderPtr(p.Elem()); i != nil {
+			return nestedPtrInterface{typ: i}
+		}
+		return pointer{typ: p}
+	}
+
+	switch v := val.(type) {
+	case *ssa.Const:
+		return constant{typ: val.Type()}
+	case *ssa.Global:
+		return global{val: v}
+	case *ssa.Function:
+		return function{f: v}
+	case *ssa.Parameter, *ssa.FreeVar, ssa.Instruction:
+		// ssa.Param, ssa.FreeVar, and a specific set of "register" instructions,
+	// satisfying the ssa.Value interface, can serve as local variables.
+		return local{val: v}
+	default:
+		panic(fmt.Errorf("unsupported value %v in node creation", val))
+	}
+	return nil
+}
+
+// representative returns a unique representative for node `n`. Since
+// semantically equivalent types can have different implementations,
+// this method guarantees the same implementation is always used.
+func (b *builder) representative(n node) node {
+	if !hasInitialTypes(n) {
+		return n
+	}
+	t := canonicalize(n.Type(), &b.canon)
+
+	switch i := n.(type) {
+	case constant:
+		return constant{typ: t}
+	case pointer:
+		return pointer{typ: t.(*types.Pointer)}
+	case sliceElem:
+		return sliceElem{typ: t}
+	case mapKey:
+		return mapKey{typ: t}
+	case mapValue:
+		return mapValue{typ: t}
+	case channelElem:
+		return channelElem{typ: t}
+	case nestedPtrInterface:
+		return nestedPtrInterface{typ: t}
+	case field:
+		return field{StructType: canonicalize(i.StructType, &b.canon), index: i.index}
+	case indexedLocal:
+		return indexedLocal{typ: t, val: i.val, index: i.index}
+	case local, global, panicArg, recoverReturn, function:
+		return n
+	default:
+		panic(fmt.Errorf("canonicalizing unrecognized node %v", n))
+	}
+}
+
+// canonicalize returns a type representative of `t` unique subject
+// to type map `canon`.
+func canonicalize(t types.Type, canon *typeutil.Map) types.Type {
+	rep := canon.At(t)
+	if rep != nil {
+		return rep.(types.Type)
+	}
+	canon.Set(t, t)
+	return t
+}
diff --git a/go/callgraph/vta/graph_test.go b/go/callgraph/vta/graph_test.go
index 0b5ec7e..61bb05a 100644
--- a/go/callgraph/vta/graph_test.go
+++ b/go/callgraph/vta/graph_test.go
@@ -5,75 +5,17 @@
 package vta
 
 import (
-	"go/ast"
-	"go/parser"
+	"fmt"
 	"go/types"
-	"io/ioutil"
 	"reflect"
+	"sort"
 	"strings"
 	"testing"
 
-	"golang.org/x/tools/go/ssa"
+	"golang.org/x/tools/go/callgraph/cha"
 	"golang.org/x/tools/go/ssa/ssautil"
-
-	"golang.org/x/tools/go/loader"
 )
 
-// want extracts the contents of the first comment
-// section starting with "WANT:\n". The returned
-// content is split into lines without // prefix.
-func want(f *ast.File) []string {
-	for _, c := range f.Comments {
-		text := strings.TrimSpace(c.Text())
-		if t := strings.TrimPrefix(text, "WANT:\n"); t != text {
-			return strings.Split(t, "\n")
-		}
-	}
-	return nil
-}
-
-// testProg returns an ssa representation of a program at
-// `path`, assumed to define package "testdata," and the
-// test want result as list of strings.
-func testProg(path string) (*ssa.Program, []string, error) {
-	content, err := ioutil.ReadFile(path)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	conf := loader.Config{
-		ParserMode: parser.ParseComments,
-	}
-
-	f, err := conf.ParseFile(path, content)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	conf.CreateFromFiles("testdata", f)
-	iprog, err := conf.Load()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	prog := ssautil.CreateProgram(iprog, 0)
-	// Set debug mode to exercise DebugRef instructions.
-	prog.Package(iprog.Created[0].Pkg).SetDebugMode(true)
-	prog.Build()
-	return prog, want(f), nil
-}
-
-func firstRegInstr(f *ssa.Function) ssa.Value {
-	for _, b := range f.Blocks {
-		for _, i := range b.Instrs {
-			if v, ok := i.(ssa.Value); ok {
-				return v
-			}
-		}
-	}
-	return nil
-}
-
 func TestNodeInterface(t *testing.T) {
 	// Since ssa package does not allow explicit creation of ssa
 	// values, we use the values from the program testdata/simple.go:
@@ -187,3 +129,76 @@
 		}
 	}
 }
+
+// vtaGraphStr stringifies vtaGraph into a list of strings
+// where each string represents an edge set of the format
+// node -> succ_1, ..., succ_n. succ_1, ..., succ_n are
+// sorted in alphabetical order.
+func vtaGraphStr(g vtaGraph) []string {
+	var vgs []string
+	for n, succ := range g {
+		var succStr []string
+		for s := range succ {
+			succStr = append(succStr, s.String())
+		}
+		sort.Strings(succStr)
+		entry := fmt.Sprintf("%v -> %v", n.String(), strings.Join(succStr, ", "))
+		vgs = append(vgs, entry)
+	}
+	return vgs
+}
+
+// subGraph checks if a graph `g1` is a subgraph of graph `g2`.
+// Assumes that each element in `g1` and `g2` is an edge set
+// for a particular node in a fixed yet arbitrary format.
+func subGraph(g1, g2 []string) bool {
+	m := make(map[string]bool)
+	for _, s := range g2 {
+		m[s] = true
+	}
+
+	for _, s := range g1 {
+		if _, ok := m[s]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func TestVTAGraphConstruction(t *testing.T) {
+	for _, file := range []string{
+		"testdata/store.go",
+		"testdata/phi.go",
+		"testdata/type_conversions.go",
+		"testdata/type_assertions.go",
+		"testdata/fields.go",
+		"testdata/node_uniqueness.go",
+		"testdata/store_load_alias.go",
+		"testdata/phi_alias.go",
+		"testdata/channels.go",
+		"testdata/select.go",
+		"testdata/stores_arrays.go",
+		"testdata/maps.go",
+		"testdata/ranges.go",
+		"testdata/closures.go",
+		"testdata/static_calls.go",
+		"testdata/dynamic_calls.go",
+		"testdata/returns.go",
+		"testdata/panic.go",
+	} {
+		t.Run(file, func(t *testing.T) {
+			prog, want, err := testProg(file)
+			if err != nil {
+				t.Fatalf("couldn't load test file '%s': %s", file, err)
+			}
+			if len(want) == 0 {
+				t.Fatalf("couldn't find want in `%s`", file)
+			}
+
+			g, _ := typePropGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+			if gs := vtaGraphStr(g); !subGraph(want, gs) {
+				t.Errorf("`%s`: want superset of %v;\n got %v", file, want, gs)
+			}
+		})
+	}
+}
diff --git a/go/callgraph/vta/helpers_test.go b/go/callgraph/vta/helpers_test.go
new file mode 100644
index 0000000..4451f57
--- /dev/null
+++ b/go/callgraph/vta/helpers_test.go
@@ -0,0 +1,83 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vta
+
+import (
+	"go/ast"
+	"go/parser"
+	"io/ioutil"
+	"strings"
+
+	"golang.org/x/tools/go/ssa/ssautil"
+
+	"golang.org/x/tools/go/loader"
+	"golang.org/x/tools/go/ssa"
+)
+
+// want extracts the contents of the first comment
+// section starting with "WANT:\n". The returned
+// content is split into lines without // prefix.
+func want(f *ast.File) []string {
+	for _, c := range f.Comments {
+		text := strings.TrimSpace(c.Text())
+		if t := strings.TrimPrefix(text, "WANT:\n"); t != text {
+			return strings.Split(t, "\n")
+		}
+	}
+	return nil
+}
+
+// testProg returns an ssa representation of a program at
+// `path`, assumed to define package "testdata," and the
+// test want result as list of strings.
+func testProg(path string) (*ssa.Program, []string, error) {
+	content, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	conf := loader.Config{
+		ParserMode: parser.ParseComments,
+	}
+
+	f, err := conf.ParseFile(path, content)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	conf.CreateFromFiles("testdata", f)
+	iprog, err := conf.Load()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	prog := ssautil.CreateProgram(iprog, 0)
+	// Set debug mode to exercise DebugRef instructions.
+	prog.Package(iprog.Created[0].Pkg).SetDebugMode(true)
+	prog.Build()
+	return prog, want(f), nil
+}
+
+func firstRegInstr(f *ssa.Function) ssa.Value {
+	for _, b := range f.Blocks {
+		for _, i := range b.Instrs {
+			if v, ok := i.(ssa.Value); ok {
+				return v
+			}
+		}
+	}
+	return nil
+}
+
+// funcName returns a name of the function `f`
+// prefixed with the name of the receiver type.
+func funcName(f *ssa.Function) string {
+	recv := f.Signature.Recv()
+	if recv == nil {
+		return f.Name()
+	}
+	tp := recv.Type().String()
+	return tp[strings.LastIndex(tp, ".")+1:] + "." + f.Name()
+}
diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go
new file mode 100644
index 0000000..6c11801
--- /dev/null
+++ b/go/callgraph/vta/propagation.go
@@ -0,0 +1,181 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vta
+
+import (
+	"go/types"
+
+	"golang.org/x/tools/go/ssa"
+
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+// scc computes strongly connected components (SCCs) of `g` using the
+// classical Tarjan's algorithm for SCCs. The result is a pair <m, id>
+// where m is a map from nodes to unique id of their SCC in the range
+// [0, id). The SCCs are sorted in reverse topological order: for SCCs
+// with ids X and Y s.t. X < Y, Y comes before X in the topological order.
+func scc(g vtaGraph) (map[node]int, int) {
+	// standard data structures used by Tarjan's algorithm.
+	var index uint64
+	var stack []node
+	indexMap := make(map[node]uint64) // discovery index of each visited node
+	lowLink := make(map[node]uint64)  // smallest discovery index reachable from the node
+	onStack := make(map[node]bool)
+
+	nodeToSccID := make(map[node]int)
+	sccID := 0
+
+	// doSCC is the recursive strong-connect step of Tarjan's
+	// algorithm rooted at node n.
+	// NOTE(review): recursion depth grows with the depth of the
+	// graph; an extremely deep graph could exhaust the stack.
+	var doSCC func(node)
+	doSCC = func(n node) {
+		indexMap[n] = index
+		lowLink[n] = index
+		index = index + 1
+		onStack[n] = true
+		stack = append(stack, n)
+
+		for s := range g[n] {
+			if _, ok := indexMap[s]; !ok {
+				// Analyze successor s that has not been visited yet.
+				doSCC(s)
+				lowLink[n] = min(lowLink[n], lowLink[s])
+			} else if onStack[s] {
+				// The successor is on the stack, meaning it has to be
+				// in the current SCC.
+				lowLink[n] = min(lowLink[n], indexMap[s])
+			}
+		}
+
+		// if n is a root node, pop the stack and generate a new SCC.
+		if lowLink[n] == indexMap[n] {
+			for {
+				w := stack[len(stack)-1]
+				stack = stack[:len(stack)-1]
+				onStack[w] = false
+				nodeToSccID[w] = sccID
+				if w == n {
+					break
+				}
+			}
+			sccID++
+		}
+	}
+
+	index = 0
+	// Start from every not-yet-visited node so disconnected
+	// parts of the graph are covered too.
+	for n := range g {
+		if _, ok := indexMap[n]; !ok {
+			doSCC(n)
+		}
+	}
+
+	return nodeToSccID, sccID
+}
+
+// min returns the smaller of x and y.
+func min(x, y uint64) uint64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// propType represents type information being propagated
+// over the vta graph. f != nil only for function nodes
+// and nodes reachable from function nodes. There, we also
+// remember the actual *ssa.Function in order to more
+// precisely model higher-order flow.
+type propType struct {
+	typ types.Type    // the propagated type (canonicalized by the caller)
+	f   *ssa.Function // function behind a function node; nil otherwise
+}
+
+// propTypeMap is an auxiliary structure that serves
+// the role of a map from nodes to a set of propTypes.
+// The sets are stored per SCC id, since all nodes of an
+// SCC share the same set (see propTypes).
+type propTypeMap struct {
+	nodeToScc  map[node]int              // SCC id of each node
+	sccToTypes map[int]map[propType]bool // propTypes reaching each SCC
+}
+
+// propTypes returns a set of propTypes associated with
+// node `n`. If `n` is not in the map `ptm`, nil is returned.
+//
+// Note: for performance reasons, the returned set is a
+// reference to existing set in the map `ptm`, so any updates
+// to it will affect `ptm` as well.
+func (ptm propTypeMap) propTypes(n node) map[propType]bool {
+	// All nodes of an SCC share one set, keyed by the SCC id.
+	id, ok := ptm.nodeToScc[n]
+	if !ok {
+		return nil
+	}
+	return ptm.sccToTypes[id]
+}
+
+// propagate reduces the `graph` based on its SCCs and
+// then propagates type information through the reduced
+// graph. The result is a map from nodes to a set of types
+// and functions, stemming from higher-order data flow,
+// reaching the node. `canon` is used for type uniqueness.
+func propagate(graph vtaGraph, canon *typeutil.Map) propTypeMap {
+	nodeToScc, sccID := scc(graph)
+	// Initialize sccToTypes to avoid repeated check
+	// for initialization later.
+	// NOTE(review): ids returned by scc are in [0, sccID), so
+	// `i < sccID` would suffice; the entry for sccID itself is
+	// never used.
+	sccToTypes := make(map[int]map[propType]bool, sccID)
+	for i := 0; i <= sccID; i++ {
+		sccToTypes[i] = make(map[propType]bool)
+	}
+
+	// We also need the reverse map, from ids to SCCs.
+	sccs := make(map[int][]node, sccID)
+	for n, id := range nodeToScc {
+		sccs[id] = append(sccs[id], n)
+	}
+
+	// SCC ids are assigned in reverse topological order, so
+	// iterating from the largest id downward processes every
+	// SCC before any of its successors.
+	for i := len(sccs) - 1; i >= 0; i-- {
+		nodes := sccs[i]
+		// Save the types induced by the nodes of the SCC.
+		mergeTypes(sccToTypes[i], nodeTypes(nodes, canon))
+		nextSccs := make(map[int]bool)
+		for _, node := range nodes {
+			for succ := range graph[node] {
+				nextSccs[nodeToScc[succ]] = true
+			}
+		}
+		// Propagate types to all successor SCCs.
+		for nextScc := range nextSccs {
+			mergeTypes(sccToTypes[nextScc], sccToTypes[i])
+		}
+	}
+
+	return propTypeMap{nodeToScc: nodeToScc, sccToTypes: sccToTypes}
+}
+
+// nodeTypes returns a set of propTypes for `nodes`. These are the
+// propTypes stemming from the type of each node in `nodes` that
+// carries initial type information (see hasInitialTypes).
+func nodeTypes(nodes []node, canon *typeutil.Map) map[propType]bool {
+	types := make(map[propType]bool)
+	for _, n := range nodes {
+		if hasInitialTypes(n) {
+			types[getPropType(n, canon)] = true
+		}
+	}
+	return types
+}
+
+// getPropType creates a propType for `node` based on its type.
+// propType.typ is always the canonicalized node.Type(). If node
+// is a function, then propType.f is the underlying function;
+// nil otherwise.
+func getPropType(node node, canon *typeutil.Map) propType {
+	t := canonicalize(node.Type(), canon)
+	if fn, ok := node.(function); ok {
+		return propType{f: fn.f, typ: t}
+	}
+	return propType{f: nil, typ: t}
+}
+
+// mergeTypes merges propTypes in `rhs` to `lhs`, mutating `lhs`.
+func mergeTypes(lhs, rhs map[propType]bool) {
+	for typ := range rhs {
+		lhs[typ] = true
+	}
+}
diff --git a/go/callgraph/vta/propagation_test.go b/go/callgraph/vta/propagation_test.go
new file mode 100644
index 0000000..219fd70
--- /dev/null
+++ b/go/callgraph/vta/propagation_test.go
@@ -0,0 +1,336 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vta
+
+import (
+	"go/token"
+	"go/types"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+	"unsafe"
+
+	"golang.org/x/tools/go/ssa"
+
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+// val is a test data structure for creating ssa.Value
+// outside of the ssa package. Needed for manual creation
+// of vta graph nodes in testing.
+type val struct {
+	name string
+	typ  types.Type
+}
+
+// String returns the name of the value.
+func (v val) String() string {
+	return v.name
+}
+
+// Name returns the name of the value.
+func (v val) Name() string {
+	return v.name
+}
+
+// Type returns the type of the value.
+func (v val) Type() types.Type {
+	return v.typ
+}
+
+// Parent always returns nil; a test val belongs to no function.
+func (v val) Parent() *ssa.Function {
+	return nil
+}
+
+// Referrers always returns nil; referrers are not tracked.
+func (v val) Referrers() *[]ssa.Instruction {
+	return nil
+}
+
+// Pos always returns token.NoPos; a test val has no source position.
+func (v val) Pos() token.Pos {
+	return token.NoPos
+}
+
+// newLocal creates a new local node with ssa.Value
+// named `name` and type `t`. The node is backed by the
+// test-only val implementation of ssa.Value.
+func newLocal(name string, t types.Type) local {
+	return local{val: val{name: name, typ: t}}
+}
+
+// newNamedType creates a bogus type named `name`.
+// The type has nil underlying type and no methods; it only
+// needs to be distinguishable by name in these tests.
+func newNamedType(name string) *types.Named {
+	return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), nil, nil)
+}
+
+// sccString is a utility for stringifying `nodeToScc`. Every
+// scc is represented as a string where string representation
+// of scc nodes are sorted and concatenated using `;`.
+func sccString(nodeToScc map[node]int) []string {
+	// Group nodes by their SCC id.
+	sccs := make(map[int][]node)
+	for n, id := range nodeToScc {
+		sccs[id] = append(sccs[id], n)
+	}
+
+	var sccsStr []string
+	for _, scc := range sccs {
+		var nodesStr []string
+		for _, node := range scc {
+			nodesStr = append(nodesStr, node.String())
+		}
+		// Sort within the SCC so the representation is deterministic.
+		sort.Strings(nodesStr)
+		sccsStr = append(sccsStr, strings.Join(nodesStr, ";"))
+	}
+	return sccsStr
+}
+
+// nodeToTypeString is testing utility for stringifying results
+// of type propagation: propTypeMap `pMap` is converted to a map
+// from node strings to a string consisting of type stringifications
+// concatenated with `;`. We stringify reachable type information
+// that also has an accompanying function by the function name.
+func nodeToTypeString(pMap propTypeMap) map[string]string {
+	// Convert propType to a string. If propType has
+	// an attached function, return the function name.
+	// Otherwise, return the type name.
+	propTypeString := func(p propType) string {
+		if p.f != nil {
+			return p.f.Name()
+		}
+		return p.typ.String()
+	}
+
+	nodeToTypeStr := make(map[string]string)
+	for node := range pMap.nodeToScc {
+		var propStrings []string
+		for prop := range pMap.propTypes(node) {
+			propStrings = append(propStrings, propTypeString(prop))
+		}
+		// Sort so the stringification is deterministic.
+		sort.Strings(propStrings)
+		nodeToTypeStr[node.String()] = strings.Join(propStrings, ";")
+	}
+
+	return nodeToTypeStr
+}
+
+// sccEqual compares two sets of SCC stringifications.
+// Note: both input slices are sorted in place.
+func sccEqual(sccs1 []string, sccs2 []string) bool {
+	if len(sccs1) != len(sccs2) {
+		return false
+	}
+	sort.Strings(sccs1)
+	sort.Strings(sccs2)
+	return reflect.DeepEqual(sccs1, sccs2)
+}
+
+// isRevTopSorted checks if sccs of `g` are sorted in reverse
+// topological order:
+//  for every edge x -> y in g, nodeToScc[x] > nodeToScc[y]
+func isRevTopSorted(g vtaGraph, nodeToScc map[node]int) bool {
+	for n, succs := range g {
+		for s := range succs {
+			// An edge must never go from a lower SCC id to a higher one.
+			if nodeToScc[n] < nodeToScc[s] {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// setName sets name of the function `f` to `name`
+// using reflection since setting the name otherwise
+// is only possible within the ssa package.
+func setName(f *ssa.Function, name string) {
+	fi := reflect.ValueOf(f).Elem().FieldByName("name")
+	// Rebuild the field value via unsafe so the unexported
+	// field becomes settable.
+	fi = reflect.NewAt(fi.Type(), unsafe.Pointer(fi.UnsafeAddr())).Elem()
+	fi.SetString(name)
+}
+
+// testSuite produces a named set of graphs as follows, where
+// parentheses contain node types and F nodes stand for function
+// nodes whose content is function named F:
+//
+//  no-cycles:
+//	t0 (A) -> t1 (B) -> t2 (C)
+//
+//  trivial-cycle:
+//      <--------    <--------
+//      |       |    |       |
+//      t0 (A) ->    t1 (B) ->
+//
+//  circle-cycle:
+//	t0 (A) -> t1 (A) -> t2 (B)
+//      |                   |
+//      <--------------------
+//
+//  fully-connected:
+//	t0 (A) <-> t1 (B)
+//           \    /
+//            t2(C)
+//
+//  subsumed-scc:
+//	t0 (A) -> t1 (B) -> t2(B) -> t3 (A)
+//      |          |         |        |
+//      |          <---------         |
+//      <-----------------------------
+//
+//  more-realistic:
+//      <--------
+//      |        |
+//      t0 (A) -->
+//                            ---------->
+//                           |           |
+//      t1 (A) -> t2 (B) -> F1 -> F2 -> F3 -> F4
+//       |        |          |           |
+//        <-------           <------------
+func testSuite() map[string]vtaGraph {
+	a := newNamedType("A")
+	b := newNamedType("B")
+	c := newNamedType("C")
+	// All test functions share the same empty func() signature.
+	sig := types.NewSignature(nil, types.NewTuple(), types.NewTuple(), false)
+
+	f1 := &ssa.Function{Signature: sig}
+	setName(f1, "F1")
+	f2 := &ssa.Function{Signature: sig}
+	setName(f2, "F2")
+	f3 := &ssa.Function{Signature: sig}
+	setName(f3, "F3")
+	f4 := &ssa.Function{Signature: sig}
+	setName(f4, "F4")
+
+	graphs := make(map[string]vtaGraph)
+	graphs["no-cycles"] = map[node]map[node]bool{
+		newLocal("t0", a): {newLocal("t1", b): true},
+		newLocal("t1", b): {newLocal("t2", c): true},
+	}
+
+	graphs["trivial-cycle"] = map[node]map[node]bool{
+		newLocal("t0", a): {newLocal("t0", a): true},
+		newLocal("t1", b): {newLocal("t1", b): true},
+	}
+
+	graphs["circle-cycle"] = map[node]map[node]bool{
+		newLocal("t0", a): {newLocal("t1", a): true},
+		newLocal("t1", a): {newLocal("t2", b): true},
+		newLocal("t2", b): {newLocal("t0", a): true},
+	}
+
+	graphs["fully-connected"] = map[node]map[node]bool{
+		newLocal("t0", a): {newLocal("t1", b): true, newLocal("t2", c): true},
+		newLocal("t1", b): {newLocal("t0", a): true, newLocal("t2", c): true},
+		newLocal("t2", c): {newLocal("t0", a): true, newLocal("t1", b): true},
+	}
+
+	graphs["subsumed-scc"] = map[node]map[node]bool{
+		newLocal("t0", a): {newLocal("t1", b): true},
+		newLocal("t1", b): {newLocal("t2", b): true},
+		newLocal("t2", b): {newLocal("t1", b): true, newLocal("t3", a): true},
+		newLocal("t3", a): {newLocal("t0", a): true},
+	}
+
+	graphs["more-realistic"] = map[node]map[node]bool{
+		newLocal("t0", a): {newLocal("t0", a): true},
+		newLocal("t1", a): {newLocal("t2", b): true},
+		newLocal("t2", b): {newLocal("t1", a): true, function{f1}: true},
+		function{f1}:      {function{f2}: true, function{f3}: true},
+		function{f2}:      {function{f3}: true},
+		function{f3}:      {function{f1}: true, function{f4}: true},
+	}
+
+	return graphs
+}
+
+func TestSCC(t *testing.T) {
+	suite := testSuite()
+	for _, test := range []struct {
+		name  string
+		graph vtaGraph
+		want  []string
+	}{
+		// No cycles results in three separate SCCs: {t0}	{t1}	{t2}
+		{name: "no-cycles", graph: suite["no-cycles"], want: []string{"Local(t0)", "Local(t1)", "Local(t2)"}},
+		// The two trivial self-loop cycles result in: {t0}	{t1}
+		{name: "trivial-cycle", graph: suite["trivial-cycle"], want: []string{"Local(t0)", "Local(t1)"}},
+		// The circle cycle produces a single SCC: {t0, t1, t2}
+		{name: "circle-cycle", graph: suite["circle-cycle"], want: []string{"Local(t0);Local(t1);Local(t2)"}},
+		// Similar holds for fully connected SCC: {t0, t1, t2}
+		{name: "fully-connected", graph: suite["fully-connected"], want: []string{"Local(t0);Local(t1);Local(t2)"}},
+		// Subsumed SCC also has a single SCC: {t0, t1, t2, t3}
+		{name: "subsumed-scc", graph: suite["subsumed-scc"], want: []string{"Local(t0);Local(t1);Local(t2);Local(t3)"}},
+		// The more realistic example has the following SCCs: {t0}	{t1, t2}	{F1, F2, F3}	{F4}
+		{name: "more-realistic", graph: suite["more-realistic"], want: []string{"Local(t0)", "Local(t1);Local(t2)", "Function(F1);Function(F2);Function(F3)", "Function(F4)"}},
+	} {
+		sccs, _ := scc(test.graph)
+		if got := sccString(sccs); !sccEqual(test.want, got) {
+			t.Errorf("want %v for graph %v; got %v", test.want, test.name, got)
+		}
+		// The ids must also respect reverse topological order.
+		if !isRevTopSorted(test.graph, sccs) {
+			t.Errorf("%v not topologically sorted", test.name)
+		}
+	}
+}
+
+func TestPropagation(t *testing.T) {
+	suite := testSuite()
+	// canon is shared across test cases for type uniqueness.
+	var canon typeutil.Map
+	for _, test := range []struct {
+		name  string
+		graph vtaGraph
+		want  map[string]string
+	}{
+		// No cycles graph pushes type information forward.
+		{name: "no-cycles", graph: suite["no-cycles"],
+			want: map[string]string{
+				"Local(t0)": "A",
+				"Local(t1)": "A;B",
+				"Local(t2)": "A;B;C",
+			},
+		},
+		// No interesting type flow in trivial cycle graph.
+		{name: "trivial-cycle", graph: suite["trivial-cycle"],
+			want: map[string]string{
+				"Local(t0)": "A",
+				"Local(t1)": "B",
+			},
+		},
+		// Circle cycle makes type A and B get propagated everywhere.
+		{name: "circle-cycle", graph: suite["circle-cycle"],
+			want: map[string]string{
+				"Local(t0)": "A;B",
+				"Local(t1)": "A;B",
+				"Local(t2)": "A;B",
+			},
+		},
+		// Similarly for fully connected graph.
+		{name: "fully-connected", graph: suite["fully-connected"],
+			want: map[string]string{
+				"Local(t0)": "A;B;C",
+				"Local(t1)": "A;B;C",
+				"Local(t2)": "A;B;C",
+			},
+		},
+		// The outer loop of subsumed-scc pushes A and B through the graph.
+		{name: "subsumed-scc", graph: suite["subsumed-scc"],
+			want: map[string]string{
+				"Local(t0)": "A;B",
+				"Local(t1)": "A;B",
+				"Local(t2)": "A;B",
+				"Local(t3)": "A;B",
+			},
+		},
+		// More realistic graph has a more fine-grained flow.
+		{name: "more-realistic", graph: suite["more-realistic"],
+			want: map[string]string{
+				"Local(t0)":    "A",
+				"Local(t1)":    "A;B",
+				"Local(t2)":    "A;B",
+				"Function(F1)": "A;B;F1;F2;F3",
+				"Function(F2)": "A;B;F1;F2;F3",
+				"Function(F3)": "A;B;F1;F2;F3",
+				"Function(F4)": "A;B;F1;F2;F3;F4",
+			},
+		},
+	} {
+		if got := nodeToTypeString(propagate(test.graph, &canon)); !reflect.DeepEqual(got, test.want) {
+			t.Errorf("want %v for graph %v; got %v", test.want, test.name, got)
+		}
+	}
+}
diff --git a/go/callgraph/vta/testdata/callgraph_collections.go b/go/callgraph/vta/testdata/callgraph_collections.go
new file mode 100644
index 0000000..bc418e3
--- /dev/null
+++ b/go/callgraph/vta/testdata/callgraph_collections.go
@@ -0,0 +1,67 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+
+type B struct{}
+
+func (b B) Foo() {}
+
+func Do(a A, b B) map[I]I {
+	m := make(map[I]I)
+	m[a] = B{}
+	m[b] = b
+	return m
+}
+
+func Baz(a A, b B) {
+	var x []I
+	for k, v := range Do(a, b) {
+		k.Foo()
+		v.Foo()
+
+		x = append(x, k)
+	}
+
+	x[len(x)-1].Foo()
+}
+
+// Relevant SSA:
+// func Baz(a A, b B):
+//   ...
+//   t4 = Do(t2, t3)
+//   t5 = range t4
+//   jump 1
+//  1:
+//   t6 = phi [0: nil:[]I, 2: t16] #x
+//   t7 = next t5
+//   t8 = extract t7 #0
+//   if t8 goto 2 else 3
+//  2:
+//   t9 = extract t7 #1
+//   t10 = extract t7 #2
+//   t11 = invoke t9.Foo()
+//   t12 = invoke t10.Foo()
+//   ...
+//   jump 1
+//  3:
+//   t17 = len(t6)
+//   t18 = t17 - 1:int
+//   t19 = &t6[t18]
+//   t20 = *t19
+//   t21 = invoke t20.Foo()
+//   return
+
+// WANT:
+// Baz: Do(t2, t3) -> Do; invoke t10.Foo() -> B.Foo; invoke t20.Foo() -> A.Foo, B.Foo; invoke t9.Foo() -> A.Foo, B.Foo
diff --git a/go/callgraph/vta/testdata/callgraph_ho.go b/go/callgraph/vta/testdata/callgraph_ho.go
new file mode 100644
index 0000000..0e5fa0d
--- /dev/null
+++ b/go/callgraph/vta/testdata/callgraph_ho.go
@@ -0,0 +1,45 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+func Foo() {}
+
+func Do(b bool) func() {
+	if b {
+		return Foo
+	}
+	return func() {}
+}
+
+func Finish(h func()) {
+	h()
+}
+
+func Baz(b bool) {
+	Finish(Do(b))
+}
+
+// Relevant SSA:
+// func Baz(b bool):
+//   t0 = Do(b)
+//   t1 = Finish(t0)
+//   return
+
+// func Do(b bool) func():
+//   if b goto 1 else 2
+//  1:
+//   return Foo
+//  2:
+//   return Do$1
+
+// func Finish(h func()):
+//   t0 = h()
+//   return
+
+// WANT:
+// Baz: Do(b) -> Do; Finish(t0) -> Finish
+// Finish: h() -> Do$1, Foo
diff --git a/go/callgraph/vta/testdata/callgraph_interfaces.go b/go/callgraph/vta/testdata/callgraph_interfaces.go
new file mode 100644
index 0000000..123468c
--- /dev/null
+++ b/go/callgraph/vta/testdata/callgraph_interfaces.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+
+type B struct{}
+
+func (b B) Foo() {}
+
+type C struct{}
+
+func (c C) Foo() {}
+
+func NewB() B {
+	return B{}
+}
+
+func Do(b bool) I {
+	if b {
+		return A{}
+	}
+
+	c := C{}
+	c.Foo()
+
+	return NewB()
+}
+
+func Baz(b bool) {
+	Do(b).Foo()
+}
+
+// Relevant SSA:
+// func Baz(b bool):
+//   t0 = Do(b)
+//   t1 = invoke t0.Foo()
+//   return
+
+// func Do(b bool) I:
+//    ...
+//   t3 = local C (c)
+//   t4 = *t3
+//   t5 = (C).Foo(t4)
+//   t6 = NewB()
+//   t7 = make I <- B (t6)
+//   return t7
+
+// WANT:
+// Baz: Do(b) -> Do; invoke t0.Foo() -> A.Foo, B.Foo
+// Do: (C).Foo(t4) -> C.Foo; NewB() -> NewB
diff --git a/go/callgraph/vta/testdata/callgraph_nested_ptr.go b/go/callgraph/vta/testdata/callgraph_nested_ptr.go
new file mode 100644
index 0000000..a6afc3b
--- /dev/null
+++ b/go/callgraph/vta/testdata/callgraph_nested_ptr.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+
+type B struct{}
+
+func (b B) Foo() {}
+
+func Do(i **I) {
+	**i = A{}
+}
+
+func Bar(i **I) {
+	**i = B{}
+}
+
+func Baz(i **I) {
+	Do(i)
+	(**i).Foo()
+}
+
+// Relevant SSA:
+//  func Baz(i **I):
+//   t0 = Do(i)
+//   t1 = *i
+//   t2 = *t1
+//   t3 = invoke t2.Foo()
+//   return
+
+//  func Bar(i **I):
+//   t0 = *i
+//   t1 = local B (complit)
+//   t2 = *t1
+//   t3 = make I <- B (t2)
+//   *t0 = t3
+//   return
+
+// func Do(i **I):
+//   t0 = *i
+//   t1 = local A (complit)
+//   t2 = *t1
+//   t3 = make I <- A (t2)
+//   *t0 = t3
+//   return
+
+// WANT:
+// Baz: Do(i) -> Do; invoke t2.Foo() -> A.Foo, B.Foo
diff --git a/go/callgraph/vta/testdata/callgraph_pointers.go b/go/callgraph/vta/testdata/callgraph_pointers.go
new file mode 100644
index 0000000..e07f969
--- /dev/null
+++ b/go/callgraph/vta/testdata/callgraph_pointers.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type A struct {
+	f *I
+}
+
+func (a A) Foo() {}
+
+type B struct{}
+
+func (b B) Foo() {}
+
+func Do(a A, i I, c bool) *I {
+	if c {
+		*a.f = a
+	} else {
+		a.f = &i
+	}
+	(*a.f).Foo()
+	return &i
+}
+
+func Baz(a A, b B, c bool) {
+	x := Do(a, b, c)
+	(*x).Foo()
+}
+
+// Relevant SSA:
+// func Baz(a A, b B, c bool):
+//   t0 = local A (a)
+//   ...
+//   t5 = Do(t2, t4, c)
+//   t6 = *t5
+//   t7 = invoke t6.Foo()
+//   return
+
+// func Do(a A, i I, c bool) *I:
+//   t0 = local A (a)
+//   *t0 = a
+//   ...
+//   if c goto 1 else 3
+//  1:
+//   t2 = &t0.f [#0]
+//   ...
+//   jump 2
+//  2:
+//   t6 = &t0.f [#0]
+//   ...
+//   t9 = invoke t8.Foo()
+//   return t1
+//  3:
+//   t10 = &t0.f [#0]      alias between A.f and t10
+//   *t10 = t1             alias between t10 and t1
+//   jump 2
+
+// The command a.f = &i introduces aliasing that results in
+// A and B reaching both *A.f and return value of Do(a, b, c).
+
+// WANT:
+// Baz: Do(t2, t4, c) -> Do; invoke t6.Foo() -> A.Foo, B.Foo
+// Do: invoke t8.Foo() -> A.Foo, B.Foo
diff --git a/go/callgraph/vta/testdata/callgraph_static.go b/go/callgraph/vta/testdata/callgraph_static.go
new file mode 100644
index 0000000..1ed904f
--- /dev/null
+++ b/go/callgraph/vta/testdata/callgraph_static.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type A struct{}
+
+func (a A) foo() {}
+
+func Bar() {}
+
+func Baz(a A) {
+	a.foo()
+	Bar()
+	Baz(A{})
+}
+
+// Relevant SSA:
+// func Baz(a A):
+//   ...
+//   t2 = (A).foo(t1)
+//   t3 = Bar()
+//   ...
+//   t6 = Baz(t5)
+
+// WANT:
+// Baz: (A).foo(t1) -> A.foo; Bar() -> Bar; Baz(t5) -> Baz
diff --git a/go/callgraph/vta/testdata/channels.go b/go/callgraph/vta/testdata/channels.go
new file mode 100644
index 0000000..2888af6
--- /dev/null
+++ b/go/callgraph/vta/testdata/channels.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+func foo(c chan interface{}, j int) {
+	c <- j + 1
+}
+
+func Baz(i int) {
+	c := make(chan interface{})
+	go foo(c, i)
+	x := <-c
+	print(x)
+}
+
+// Relevant SSA:
+//  func foo(c chan interface{}, j int):
+//  t0 = j + 1:int
+//  t1 = make interface{} <- int (t0)
+//  send c <- t1                        // t1 -> chan {}interface
+//  return
+//
+// func Baz(i int):
+//  t0 = make chan interface{} 0:int
+//  go foo(t0, i)
+//  t1 = <-t0                           // chan {}interface -> t1
+//  t2 = print(t1)
+//  return
+
+// WANT:
+// Channel(chan interface{}) -> Local(t1)
+// Local(t1) -> Channel(chan interface{})
diff --git a/go/callgraph/vta/testdata/closures.go b/go/callgraph/vta/testdata/closures.go
new file mode 100644
index 0000000..6e6c0ac
--- /dev/null
+++ b/go/callgraph/vta/testdata/closures.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+func Do(i I) { i.Foo() }
+
+func Baz(b bool, h func(I)) {
+	var i I
+	a := func(g func(I)) {
+		g(i)
+	}
+
+	if b {
+		h = Do
+	}
+
+	a(h)
+}
+
+// Relevant SSA:
+//  func Baz(b bool, h func(I)):
+//    t0 = new I (i)
+//    t1 = make closure Baz$1 [t0]
+//    if b goto 1 else 2
+//   1:
+//         jump 2
+//   2:
+//    t2 = phi [0: h, 1: Do] #h
+//    t3 = t1(t2)
+//    return
+//
+// func Baz$1(g func(I)):
+//    t0 = *i
+//    t1 = g(t0)
+//    return
+
+// In the edge set Local(i) -> Local(t0), Local(t0) below,
+// two occurrences of t0 come from t0 in Baz and Baz$1.
+
+// WANT:
+// Function(Do) -> Local(t2)
+// Function(Baz$1) -> Local(t1)
+// Local(h) -> Local(t2)
+// Local(t0) -> Local(i)
+// Local(i) -> Local(t0), Local(t0)
diff --git a/go/callgraph/vta/testdata/dynamic_calls.go b/go/callgraph/vta/testdata/dynamic_calls.go
new file mode 100644
index 0000000..fa4270b
--- /dev/null
+++ b/go/callgraph/vta/testdata/dynamic_calls.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	foo(I)
+}
+
+type A struct{}
+
+func (a A) foo(ai I) {}
+
+type B struct{}
+
+func (b B) foo(bi I) {}
+
+func doWork() I { return nil }
+func close() I  { return nil }
+
+func Baz(x B, h func() I, i I) I {
+	i.foo(x)
+
+	return h()
+}
+
+// Relevant SSA:
+// func Baz(x B, h func() I, i I) I:
+//   t0 = local B (x)
+//   *t0 = x
+//   t1 = *t0
+//   t2 = make I <- B (t1)
+//   t3 = invoke i.foo(t2)
+//   t4 = h()
+//   return t4
+
+// WANT:
+// Local(t2) -> Local(ai), Local(bi)
+// Constant(testdata.I) -> Local(t4)
+// Local(t1) -> Local(t2)
diff --git a/go/callgraph/vta/testdata/fields.go b/go/callgraph/vta/testdata/fields.go
new file mode 100644
index 0000000..e539327
--- /dev/null
+++ b/go/callgraph/vta/testdata/fields.go
@@ -0,0 +1,65 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type J interface {
+	I
+	Bar()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+func (a A) Bar() {}
+
+type B struct {
+	a A
+	i I
+}
+
+func Do() B {
+	b := B{}
+	return b
+}
+
+func Baz(b B) {
+	var j J
+	j = b.a
+
+	j.Bar()
+
+	b.i = j
+
+	Do().i.Foo()
+}
+
+// Relevant SSA:
+// func Baz(b B):
+//   t0 = local B (b)
+//   *t0 = b
+//   t1 = &t0.a [#0]       // no flow here since a is of concrete type
+//   t2 = *t1
+//   t3 = make J <- A (t2)
+//   t4 = invoke t3.Bar()
+//   t5 = &t0.i [#1]
+//   t6 = change interface I <- J (t3)
+//   *t5 = t6
+//   t7 = Do()
+//   t8 = t7.i [#0]
+//   t9 = (A).Foo(t8)
+//   return
+
+// WANT:
+// Field(testdata.B:i) -> Local(t5), Local(t8)
+// Local(t5) -> Field(testdata.B:i)
+// Local(t2) -> Local(t3)
+// Local(t3) -> Local(t6)
+// Local(t6) -> Local(t5)
diff --git a/go/callgraph/vta/testdata/maps.go b/go/callgraph/vta/testdata/maps.go
new file mode 100644
index 0000000..b7354dc
--- /dev/null
+++ b/go/callgraph/vta/testdata/maps.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	Foo() string
+}
+
+type J interface {
+	Foo() string
+	Bar()
+}
+
+type B struct {
+	p string
+}
+
+func (b B) Foo() string { return b.p }
+func (b B) Bar()        {}
+
+func Baz(m map[I]I, b1, b2 B, n map[string]*J) *J {
+	m[b1] = b2
+
+	return n[b1.Foo()]
+}
+
+// Relevant SSA:
+// func Baz(m map[I]I, b1 B, b2 B, n map[string]*J) *J:
+//   t0 = local B (b1)
+//   *t0 = b1
+//   t1 = local B (b2)
+//   *t1 = b2
+//   t2 = *t0
+//   t3 = make I <- B (t2)
+//   t4 = *t1
+//   t5 = make I <- B (t4)
+//   m[t3] = t5
+//   t6 = *t0
+//   t7 = (B).Foo(t6)
+//   t8 = n[t7]
+//   return t8
+
+// WANT:
+// Local(t4) -> Local(t5)
+// Local(t5) -> MapValue(testdata.I)
+// Local(t3) -> MapKey(testdata.I)
+// Local(t8) -> MapValue(*testdata.J)
+// MapValue(*testdata.J) -> Local(t8)
diff --git a/go/callgraph/vta/testdata/node_uniqueness.go b/go/callgraph/vta/testdata/node_uniqueness.go
new file mode 100644
index 0000000..0c1dc07
--- /dev/null
+++ b/go/callgraph/vta/testdata/node_uniqueness.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+// TestNodeTypeUniqueness checks if semantically equivalent types
+// are being represented using the same pointer value in vta nodes.
+// If not, some edges become missing in the string representation
+// of the graph.
+
+type I interface {
+	Foo()
+}
+
+type A struct{}
+
+func (a A) Foo() {}
+
+func Baz(a *A) (I, I, interface{}, interface{}) {
+	var i I
+	i = a
+
+	var ii I
+	aa := &A{}
+	ii = aa
+
+	m := make(map[int]int)
+	var iii interface{}
+	iii = m
+
+	var iiii interface{}
+	iiii = m
+
+	return i, ii, iii, iiii
+}
+
+// Relevant SSA:
+// func Baz(a *A) (I, I, interface{}, interface{}):
+//   t0 = make I <- *A (a)
+//	 t1 = new A (complit)
+//   t2 = make I <- *A (t1)
+//   t3 = make map[int]int
+//   t4 = make interface{} <- map[int]int (t3)
+//   t5 = make interface{} <- map[int]int (t3)
+//   return t0, t2, t4, t5
+
+// Without canon approach, one of Pointer(*A) -> Local(t0) and Pointer(*A) -> Local(t2) edges is
+// missing in the graph string representation. The original graph has both of the edges but the
+// source node Pointer(*A) is not the same; two occurrences of Pointer(*A) are considered separate
+// nodes. Since they have the same string representation, one edge gets overridden by the other
+// during the graph stringification, instead of being joined together as in below.
+
+// WANT:
+// Pointer(*testdata.A) -> Local(t0), Local(t2)
+// Local(t3) -> Local(t4), Local(t5)
diff --git a/go/callgraph/vta/testdata/panic.go b/go/callgraph/vta/testdata/panic.go
new file mode 100644
index 0000000..2d39c70
--- /dev/null
+++ b/go/callgraph/vta/testdata/panic.go
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package testdata
+
+type I interface {
+	foo()
+}
+
+type A struct{}
+
+func (a A) foo() {}
+
+func recover1() {
+	print("only this recover should execute")
+	if r, ok := recover().(I); ok {
+		r.foo()
+	}
+}
+
+func recover2() {
+	recover()
+}
+
+func Baz(a A) {
+	defer recover1()
+	panic(a)
+}
+
+// Relevant SSA:
+// func recover1():
+// 	0:
+//   t0 = print("only this recover...":string)
+//   t1 = recover()
+//   t2 = typeassert,ok t1.(I)
+//   t3 = extract t2 #0
+//   t4 = extract t2 #1
+//   if t4 goto 1 else 2
+//  1:
+//   t5 = invoke t3.foo()
+//   jump 2
+//  2:
+//   return
+//
+// func recover2():
+//   t0 = recover()
+//   return
+//
+// func Baz(i I):
+//   t0 = local A (a)
+//   *t0 = a
+//   defer recover1()
+//   t1 = *t0
+//   t2 = make interface{} <- A (t1)
+//   panic t2
+
+// t2 argument to panic in Baz gets ultimately connected to recover
+// registers t1 in recover1() and t0 in recover2().
+
+// WANT:
+// Panic -> Recover
+// Local(t2) -> Panic
+// Recover -> Local(t0), Local(t1)
diff --git a/go/callgraph/vta/testdata/phi.go b/go/callgraph/vta/testdata/phi.go
new file mode 100644
index 0000000..2144a2c
--- /dev/null
+++ b/go/callgraph/vta/testdata/phi.go
@@ -0,0 +1,55 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type A struct{}
+type B struct{}
+
+type I interface{ foo() }
+
+func (a A) foo() {}
+func (b B) foo() {}
+
+func Baz(b B, c bool) {
+	var i I
+	if c {
+		i = b
+	} else {
+		a := A{}
+		i = a
+	}
+	i.foo()
+}
+
+// Relevant SSA:
+// func Baz(b B, c bool):
+// 0:
+//  t0 = local B (b)
+//  *t0 = b
+//  if c goto 1 else 3
+//
+// 1:
+//  t1 = *t0
+//  t2 = make I <- B (t1)
+//  jump 2
+//
+// 2:
+//  t3 = phi [1: t2, 3: t7] #i
+//  t4 = invoke t3.foo()
+//  return
+//
+// 3:
+//  t5 = local A (a)
+//  t6 = *t5
+//  t7 = make I <- A (t6)
+//  jump 2
+
+// WANT:
+// Local(t1) -> Local(t2)
+// Local(t2) -> Local(t3)
+// Local(t7) -> Local(t3)
+// Local(t6) -> Local(t7)
diff --git a/go/callgraph/vta/testdata/phi_alias.go b/go/callgraph/vta/testdata/phi_alias.go
new file mode 100644
index 0000000..d4c414d
--- /dev/null
+++ b/go/callgraph/vta/testdata/phi_alias.go
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type B struct {
+	p int
+}
+
+func (b B) Foo() {}
+
+func Baz(i, j *I, b, c bool) {
+	if b {
+		i = j
+	}
+	*i = B{9}
+	if c {
+		(*i).Foo()
+	} else {
+		(*j).Foo()
+	}
+}
+
+// Relevant SSA:
+// func Baz(i *I, j *I, b bool, c bool):
+//    if b goto 1 else 2
+//  1:
+//    jump 2
+//  2:
+//    t0 = phi [0: i, 1: j] #i
+//    t1 = local B (complit)
+//    t2 = &t1.p [#0]
+//    *t2 = 9:int
+//    t3 = *t1
+//    t4 = make I <- B (t3)
+//    *t0 = t4
+//    if c goto 3 else 5
+//  3:
+//    t5 = *t0
+//    t6 = invoke t5.Foo()
+//    jump 4
+//  4:
+//    return
+//  5:
+//    t7 = *j
+//    t8 = invoke t7.Foo()
+//    jump 4
+
+// Flow chain showing that B reaches (*i).foo():
+//   t3 (B) -> t4 -> t0 -> t5
+// Flow chain showing that B reaches (*j).foo():
+//   t3 (B) -> t4 -> t0 <--> j -> t7
+
+// WANT:
+// Local(t0) -> Local(i), Local(j), Local(t5)
+// Local(i) -> Local(t0)
+// Local(j) -> Local(t0), Local(t7)
+// Local(t3) -> Local(t4)
+// Local(t4) -> Local(t0)
diff --git a/go/callgraph/vta/testdata/ranges.go b/go/callgraph/vta/testdata/ranges.go
new file mode 100644
index 0000000..557bb4d
--- /dev/null
+++ b/go/callgraph/vta/testdata/ranges.go
@@ -0,0 +1,55 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface {
+	Foo() string
+}
+
+type B struct {
+	p string
+}
+
+func (b B) Foo() string { return b.p }
+
+func Baz(m map[I]*I) {
+	for i, v := range m {
+		*v = B{p: i.Foo()}
+	}
+}
+
+// Relevant SSA:
+//  func Baz(m map[I]*I):
+//   0:
+//    t0 = range m
+//         jump 1
+//   1:
+//    t1 = next t0
+//    t2 = extract t1 #0
+//    if t2 goto 2 else 3
+//   2:
+//    t3 = extract t1 #1
+//    t4 = extract t1 #2
+//    t5 = local B (complit)
+//    t6 = &t5.p [#0]
+//    t7 = invoke t3.Foo()
+//    *t6 = t7
+//    t8 = *t5
+//    t9 = make I <- B (t8)
+//    *t4 = t9
+//    jump 1
+//   3:
+//    return
+
+// WANT:
+// MapKey(testdata.I) -> Local(t1[1])
+// Local(t1[1]) -> Local(t3)
+// MapValue(*testdata.I) -> Local(t1[2])
+// Local(t1[2]) -> Local(t4), MapValue(*testdata.I)
+// Local(t8) -> Local(t9)
+// Local(t9) -> Local(t4)
+// Local(t4) -> Local(t1[2])
diff --git a/go/callgraph/vta/testdata/returns.go b/go/callgraph/vta/testdata/returns.go
new file mode 100644
index 0000000..b11b432
--- /dev/null
+++ b/go/callgraph/vta/testdata/returns.go
@@ -0,0 +1,57 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface{}
+
+func Bar(ii I) (I, I) {
+	return Foo(ii)
+}
+
+func Foo(iii I) (I, I) {
+	return iii, iii
+}
+
+func Do(j I) *I {
+	return &j
+}
+
+func Baz(i I) *I {
+	Bar(i)
+	return Do(i)
+}
+
+// Relevant SSA:
+// func Bar(ii I) (I, I):
+//   t0 = Foo(ii)
+//   t1 = extract t0 #0
+//   t2 = extract t0 #1
+//   return t1, t2
+//
+// func Foo(iii I) (I, I):
+//   return iii, iii
+//
+// func Do(j I) *I:
+//   t0 = new I (j)
+//   *t0 = j
+//   return t0
+//
+// func Baz(i I):
+//   t0 = Bar(i)
+//   t1 = Do(i)
+//   return t1
+
+// t0 and t1 in the last edge correspond to the nodes
+// of Do and Baz. This edge is induced by Do(i).
+
+// WANT:
+// Local(i) -> Local(ii), Local(j)
+// Local(ii) -> Local(iii)
+// Local(iii) -> Local(t0[0]), Local(t0[1])
+// Local(t1) -> Local(t0[0])
+// Local(t2) -> Local(t0[1])
+// Local(t0) -> Local(t1)
diff --git a/go/callgraph/vta/testdata/select.go b/go/callgraph/vta/testdata/select.go
new file mode 100644
index 0000000..50586d3
--- /dev/null
+++ b/go/callgraph/vta/testdata/select.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface {
+	Foo() string
+}
+
+type J interface {
+	I
+}
+
+type B struct {
+	p string
+}
+
+func (b B) Foo() string { return b.p }
+
+func Baz(b1, b2 B, c1 chan I, c2 chan J) {
+	for {
+		select {
+		case c1 <- b1:
+			print("b1")
+		case c2 <- b2:
+			print("b2")
+		case <-c1:
+			print("c1")
+		case k := <-c2:
+			print(k.Foo())
+			return
+		}
+	}
+}
+
+// Relevant SSA:
+// func Baz(b1 B, b2 B, c1 chan I, c2 chan J):
+//   ...
+//   t2 = *t0
+//   t3 = make I <- B (t2)
+//   t4 = *t1
+//   t5 = make J <- B (t4)
+//   t6 = select blocking [c1<-t3, c2<-t5, <-c1, <-c2] (index int, ok bool, I, J)
+//   t7 = extract t6 #0
+//   t8 = t7 == 0:int
+//   if t8 goto 2 else 3
+//         ...
+//  8:
+//   t15 = extract t6 #3
+//   t16 = invoke t15.Foo()
+//   t17 = print(t18)
+
+// WANT:
+// Local(t3) -> Channel(chan testdata.I)
+// Local(t5) -> Channel(chan testdata.J)
+// Channel(chan testdata.I) -> Local(t6[2])
+// Channel(chan testdata.J) -> Local(t6[3])
diff --git a/go/callgraph/vta/testdata/simple.go b/go/callgraph/vta/testdata/simple.go
index 9592021..d3bfbe7 100644
--- a/go/callgraph/vta/testdata/simple.go
+++ b/go/callgraph/vta/testdata/simple.go
@@ -1,3 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
 package testdata
 
 var gl int
diff --git a/go/callgraph/vta/testdata/static_calls.go b/go/callgraph/vta/testdata/static_calls.go
new file mode 100644
index 0000000..74a27c1
--- /dev/null
+++ b/go/callgraph/vta/testdata/static_calls.go
@@ -0,0 +1,41 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface{}
+
+func foo(i I) (I, I) {
+	return i, i
+}
+
+func doWork(ii I) {}
+
+func close(iii I) {}
+
+func Baz(inp I) {
+	a, b := foo(inp)
+	defer close(a)
+	go doWork(b)
+}
+
+// Relevant SSA:
+// func Baz(inp I):
+//   t0 = foo(inp)
+//   t1 = extract t0 #0
+//   t2 = extract t0 #1
+//   defer close(t1)
+//   go doWork(t2)
+//   rundefers
+//   ...
+// func foo(i I) (I, I):
+//   return i, i
+
+// WANT:
+// Local(inp) -> Local(i)
+// Local(t1) -> Local(iii)
+// Local(t2) -> Local(ii)
+// Local(i) -> Local(t0[0]), Local(t0[1])
diff --git a/go/callgraph/vta/testdata/store.go b/go/callgraph/vta/testdata/store.go
new file mode 100644
index 0000000..8a36d4e
--- /dev/null
+++ b/go/callgraph/vta/testdata/store.go
@@ -0,0 +1,41 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+// Tests graph creation for store/load and make instructions.
+// Note that ssa package does not have a load instruction per
+// se. Yet, one is encoded as a unary instruction with the
+// * operator.
+
+type A struct{}
+
+type I interface{ foo() }
+
+func (a A) foo() {}
+
+func main() {
+	a := A{}
+	var i I
+	i = a
+	ii := &i
+	(*ii).foo()
+}
+
+// Relevant SSA:
+//	t0 = local A (a)
+//	t1 = new I (i)
+//	t2 = *t0                 no interesting flow: concrete types
+//	t3 = make I <- A (t2)    t2 -> t3
+//	*t1 = t3                 t3 -> t1
+//	t4 = *t1                 t1 -> t4
+//	t5 = invoke t4.foo()
+//	return
+
+// WANT:
+// Local(t2) -> Local(t3)
+// Local(t3) -> Local(t1)
+// Local(t1) -> Local(t4)
diff --git a/go/callgraph/vta/testdata/store_load_alias.go b/go/callgraph/vta/testdata/store_load_alias.go
new file mode 100644
index 0000000..b9814f0
--- /dev/null
+++ b/go/callgraph/vta/testdata/store_load_alias.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type A struct{}
+
+func (a A) foo() {}
+
+type I interface{ foo() }
+
+func Baz(i I) {
+	j := &i
+	k := &j
+	**k = A{}
+	i.foo()
+	(**k).foo()
+}
+
+// Relevant SSA:
+// func Baz(i I):
+//   t0 = new I (i)
+//   *t0 = i
+//   t1 = new *I (j)
+//   *t1 = t0
+//   t2 = *t1
+//   t3 = local A (complit)
+//   t4 = *t3
+//   t5 = make I <- A (t4)
+//   *t2 = t5
+//   t6 = *t0
+//   t7 = invoke t6.foo()
+//   t8 = *t1
+//   t9 = *t8
+//   t10 = invoke t9.foo()
+
+// Flow chain showing that A reaches i.foo():
+//   t4 (A) -> t5 -> t2 <-> PtrInterface(I) <-> t0 -> t6
+// Flow chain showing that A reaches (**k).foo():
+//	 t4 (A) -> t5 -> t2 <-> PtrInterface(I) <-> t8 -> t9
+
+// WANT:
+// Local(i) -> Local(t0)
+// Local(t0) -> Local(t6), PtrInterface(testdata.I)
+// PtrInterface(testdata.I) -> Local(t0), Local(t2), Local(t8)
+// Local(t2) -> PtrInterface(testdata.I)
+// Local(t4) -> Local(t5)
+// Local(t5) -> Local(t2)
+// Local(t8) -> Local(t9), PtrInterface(testdata.I)
diff --git a/go/callgraph/vta/testdata/stores_arrays.go b/go/callgraph/vta/testdata/stores_arrays.go
new file mode 100644
index 0000000..80de2b0
--- /dev/null
+++ b/go/callgraph/vta/testdata/stores_arrays.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type I interface {
+	Foo()
+}
+
+type J interface {
+	Foo()
+	Bar()
+}
+
+type B struct {
+	p int
+}
+
+func (b B) Foo() {}
+func (b B) Bar() {}
+
+func Baz(b *B, S []*I, s []J) {
+	var x [3]I
+	x[1] = b
+
+	a := &s[2]
+	(*a).Bar()
+
+	print([3]*I{nil, nil, nil}[2])
+}
+
+// Relevant SSA:
+// func Baz(b *B, S []*I, s []J):
+//   t0 = local [3]I (x)
+//   t1 = &t0[1:int]
+//   ...
+//   t3 = &s[2:int]
+//   t4 = *t3
+//   ...
+//   t6 = local [3]*I (complit)
+//   t7 = &t6[0:int]
+//         ...
+//   t11 = t10[2:int]
+//   ...
+
+// WANT:
+// Slice([]testdata.I) -> Local(t1)
+// Local(t1) -> Slice([]testdata.I)
+// Slice([]testdata.J) -> Local(t3)
+// Local(t3) -> Local(t4), Slice([]testdata.J)
+// Local(t11) -> Slice([]*testdata.I)
+// Slice([]*testdata.I) -> Local(t11), PtrInterface(testdata.I)
+// Constant(*testdata.I) -> PtrInterface(testdata.I)
diff --git a/go/callgraph/vta/testdata/type_assertions.go b/go/callgraph/vta/testdata/type_assertions.go
new file mode 100644
index 0000000..d4e8e33
--- /dev/null
+++ b/go/callgraph/vta/testdata/type_assertions.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+// Test program for testing type assertions and extract instructions.
+// The latter are tested here too since extract instruction comes
+// naturally in type assertions.
+
+type I interface {
+	Foo()
+}
+
+type J interface {
+	Foo()
+	Bar()
+}
+
+type A struct {
+	c int
+}
+
+func (a A) Foo() {}
+func (a A) Bar() {}
+
+func Baz(i I) {
+	j, ok := i.(J)
+	if ok {
+		j.Foo()
+	}
+
+	a := i.(*A)
+	a.Bar()
+}
+
+// Relevant SSA:
+// 	func Baz(i I):
+//    t0 = typeassert,ok i.(J)
+//    t1 = extract t0 #0
+//    t2 = extract t0 #1
+//    if t2 goto 1 else 2
+//  1:
+//    t3 = invoke t1.Foo()
+//    jump 2
+//  2:
+//    t4 = typeassert i.(*A)  // no flow since t4 is of concrete type
+//    t5 = *t4
+//    t6 = (A).Bar(t5)
+//    return
+
+// WANT:
+// Local(i) -> Local(t0[0])
+// Local(t0[0]) -> Local(t1)
diff --git a/go/callgraph/vta/testdata/type_conversions.go b/go/callgraph/vta/testdata/type_conversions.go
new file mode 100644
index 0000000..e18077f
--- /dev/null
+++ b/go/callgraph/vta/testdata/type_conversions.go
@@ -0,0 +1,85 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go:build ignore
+
+package testdata
+
+type Y interface {
+	Foo()
+	Bar(float64)
+}
+
+type Z Y
+
+type W interface {
+	Y
+}
+
+type A struct{}
+
+func (a A) Foo()          { print("A:Foo") }
+func (a A) Bar(f float64) { print(uint(f)) }
+
+type B struct{}
+
+func (b B) Foo()          { print("B:Foo") }
+func (b B) Bar(f float64) { print(uint(f) + 1) }
+
+type X interface {
+	Foo()
+}
+
+func Baz(y Y) {
+	z := Z(y)
+	z.Foo()
+
+	x := X(y)
+	x.Foo()
+
+	y = A{}
+	var y_p *Y = &y
+
+	w_p := (*W)(y_p)
+	*w_p = B{}
+
+	(*y_p).Foo() // prints B:Foo
+	(*w_p).Foo() // prints B:Foo
+}
+
+// Relevant SSA:
+//  func Baz(y Y):
+//   t0 = new Y (y)
+//   *t0 = y
+//   t1 = *t0
+//   t2 = changetype Z <- Y (t1)
+//   t3 = invoke t2.Foo()
+//
+//   t4 = *t0
+//   t5 = change interface X <- Y (t4)
+//   t6 = invoke t5.Foo()
+//
+//   t7 = local A (complit)
+//   t8 = *t7
+//   t9 = make Y <- A (t8)
+//   *t0 = t9
+//   t10 = changetype *W <- *Y (t0)
+//   t11 = local B (complit)
+//   t12 = *t11
+//   t13 = make W <- B (t12)
+//   *t10 = t13
+//   t14 = *t0
+//   t15 = invoke t14.Foo()
+//   t16 = *t10
+//   t17 = invoke t16.Foo()
+//   return
+
+// WANT:
+// Local(t1) -> Local(t2)
+// Local(t4) -> Local(t5)
+// Local(t0) -> Local(t1), Local(t10), Local(t14), Local(t4)
+// Local(y) -> Local(t0)
+// Local(t8) -> Local(t9)
+// Local(t9) -> Local(t0)
+// Local(t13) -> Local(t10)
diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go
new file mode 100644
index 0000000..69361ab
--- /dev/null
+++ b/go/callgraph/vta/utils.go
@@ -0,0 +1,167 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vta
+
+import (
+	"go/types"
+
+	"golang.org/x/tools/go/callgraph"
+	"golang.org/x/tools/go/ssa"
+)
+
+func canAlias(n1, n2 node) bool {
+	return isReferenceNode(n1) && isReferenceNode(n2)
+}
+
+func isReferenceNode(n node) bool {
+	if _, ok := n.(nestedPtrInterface); ok {
+		return true
+	}
+
+	if _, ok := n.Type().(*types.Pointer); ok {
+		return true
+	}
+
+	return false
+}
+
+// hasInFlow checks if a concrete type can flow to node `n`.
+// Returns yes iff the type of `n` satisfies one of the following:
+//  1) is an interface
+//  2) is a (nested) pointer to interface (needed for, say,
+//     slice elements of nested pointers to interface type)
+//  3) is a function type (needed for higher-order type flow)
+//  4) is a global Recover or Panic node
+func hasInFlow(n node) bool {
+	if _, ok := n.(panicArg); ok {
+		return true
+	}
+	if _, ok := n.(recoverReturn); ok {
+		return true
+	}
+
+	t := n.Type()
+
+	if _, ok := t.Underlying().(*types.Signature); ok {
+		return true
+	}
+
+	if i := interfaceUnderPtr(t); i != nil {
+		return true
+	}
+
+	return isInterface(t)
+}
+
+// hasInitialTypes checks if a node can have initial types.
+// Returns true iff `n` is not a panic or recover node as
+// those are artificial.
+func hasInitialTypes(n node) bool {
+	switch n.(type) {
+	case panicArg, recoverReturn:
+		return false
+	default:
+		return true
+	}
+}
+
+func isInterface(t types.Type) bool {
+	_, ok := t.Underlying().(*types.Interface)
+	return ok
+}
+
+// interfaceUnderPtr checks if type `t` is a potentially nested
+// pointer to interface and if yes, returns the interface type.
+// Otherwise, returns nil.
+func interfaceUnderPtr(t types.Type) types.Type {
+	p, ok := t.Underlying().(*types.Pointer)
+	if !ok {
+		return nil
+	}
+
+	if isInterface(p.Elem()) {
+		return p.Elem()
+	}
+
+	return interfaceUnderPtr(p.Elem())
+}
+
+// sliceArrayElem returns the element type of type `t` that is
+// expected to be a (pointer to) array or slice, consistent with
+// the ssa.Index and ssa.IndexAddr instructions. Panics otherwise.
+func sliceArrayElem(t types.Type) types.Type {
+	u := t.Underlying()
+
+	if p, ok := u.(*types.Pointer); ok {
+		u = p.Elem().Underlying()
+	}
+
+	if a, ok := u.(*types.Array); ok {
+		return a.Elem()
+	}
+	return u.(*types.Slice).Elem()
+}
+
+// siteCallees computes a set of callees for call site `c` given program `callgraph`.
+func siteCallees(c ssa.CallInstruction, callgraph *callgraph.Graph) []*ssa.Function {
+	var matches []*ssa.Function
+
+	node := callgraph.Nodes[c.Parent()]
+	if node == nil {
+		return nil
+	}
+
+	for _, edge := range node.Out {
+		callee := edge.Callee.Func
+		// Skip synthetic functions wrapped around source functions.
+		if edge.Site == c && callee.Synthetic == "" {
+			matches = append(matches, callee)
+		}
+	}
+	return matches
+}
+
+func canHaveMethods(t types.Type) bool {
+	if _, ok := t.(*types.Named); ok {
+		return true
+	}
+
+	u := t.Underlying()
+	switch u.(type) {
+	case *types.Interface, *types.Signature, *types.Struct:
+		return true
+	default:
+		return false
+	}
+}
+
+// calls returns the set of call instructions in `f`.
+func calls(f *ssa.Function) []ssa.CallInstruction {
+	var calls []ssa.CallInstruction
+	for _, bl := range f.Blocks {
+		for _, instr := range bl.Instrs {
+			if c, ok := instr.(ssa.CallInstruction); ok {
+				calls = append(calls, c)
+			}
+		}
+	}
+	return calls
+}
+
+// intersect produces an intersection of functions in `fs1` and `fs2`.
+func intersect(fs1, fs2 []*ssa.Function) []*ssa.Function {
+	m := make(map[*ssa.Function]bool)
+	for _, f := range fs1 {
+		m[f] = true
+	}
+
+	var res []*ssa.Function
+	for _, f := range fs2 {
+		if m[f] {
+			res = append(res, f)
+		}
+	}
+	return res
+}
diff --git a/go/callgraph/vta/vta.go b/go/callgraph/vta/vta.go
index 2769df4..6a0e55d 100644
--- a/go/callgraph/vta/vta.go
+++ b/go/callgraph/vta/vta.go
@@ -54,4 +54,120 @@
 
 package vta
 
-// TODO(zpavlinovic): add exported VTA library functions.
+import (
+	"go/types"
+
+	"golang.org/x/tools/go/callgraph"
+	"golang.org/x/tools/go/ssa"
+)
+
+// CallGraph uses the VTA algorithm to compute the call graph for all functions
+// f such that f:true is in `funcs`. VTA refines the results of the `initial`
+// callgraph and uses it to establish interprocedural data flow. VTA is
+// sound if `initial` is sound modulo reflection and unsafe. The resulting
+// callgraph does not have a root node.
+func CallGraph(funcs map[*ssa.Function]bool, initial *callgraph.Graph) *callgraph.Graph {
+	vtaG, canon := typePropGraph(funcs, initial)
+	types := propagate(vtaG, canon)
+
+	c := &constructor{types: types, initial: initial, cache: make(methodCache)}
+	return c.construct(funcs)
+}
+
+// constructor type linearly traverses the input program
+// and constructs a callgraph based on the results of the
+// VTA type propagation phase.
+type constructor struct {
+	types   propTypeMap
+	cache   methodCache
+	initial *callgraph.Graph
+}
+
+func (c *constructor) construct(funcs map[*ssa.Function]bool) *callgraph.Graph {
+	cg := &callgraph.Graph{Nodes: make(map[*ssa.Function]*callgraph.Node)}
+	for f, in := range funcs {
+		if in {
+			c.constrct(cg, f)
+		}
+	}
+	return cg
+}
+
+func (c *constructor) constrct(g *callgraph.Graph, f *ssa.Function) {
+	caller := g.CreateNode(f)
+	for _, call := range calls(f) {
+		for _, c := range c.callees(call) {
+			callgraph.AddEdge(caller, call, g.CreateNode(c))
+		}
+	}
+}
+
+// callees computes the set of functions to which VTA resolves `c`. The resolved
+// functions are intersected with functions to which `initial` resolves `c`.
+func (c *constructor) callees(call ssa.CallInstruction) []*ssa.Function {
+	cc := call.Common()
+	if cc.StaticCallee() != nil {
+		return []*ssa.Function{cc.StaticCallee()}
+	}
+
+	// Skip builtins as they are not *ssa.Function.
+	if _, ok := cc.Value.(*ssa.Builtin); ok {
+		return nil
+	}
+
+	// Cover the case of dynamic higher-order and interface calls.
+	return intersect(resolve(call, c.types, c.cache), siteCallees(call, c.initial))
+}
+
+// resolve returns a set of functions `c` resolves to based on the
+// type propagation results in `types`.
+func resolve(c ssa.CallInstruction, types propTypeMap, cache methodCache) []*ssa.Function {
+	n := local{val: c.Common().Value}
+	var funcs []*ssa.Function
+	for p := range types.propTypes(n) {
+		funcs = append(funcs, propFunc(p, c, cache)...)
+	}
+	return funcs
+}
+
+// propFunc returns the functions modeled with the propagation type `p`
+// assigned to call site `c`. If no such function exists, nil is returned.
+func propFunc(p propType, c ssa.CallInstruction, cache methodCache) []*ssa.Function {
+	if p.f != nil {
+		return []*ssa.Function{p.f}
+	}
+
+	if c.Common().Method == nil {
+		return nil
+	}
+
+	return cache.methods(p.typ, c.Common().Method.Name(), c.Parent().Prog)
+}
+
+// methodCache serves as a type -> method name -> methods
+// cache when computing methods of a type using the
+// ssa.Program.MethodSets and ssa.Program.MethodValue
+// APIs. The cache is used to speed up querying of
+// methods of a type as the mentioned APIs are expensive.
+type methodCache map[types.Type]map[string][]*ssa.Function
+
+// methods returns methods of a type `t` named `name`. First consults
+// `mc` and otherwise queries `prog` for the method. If no such method
+// exists, nil is returned.
+func (mc methodCache) methods(t types.Type, name string, prog *ssa.Program) []*ssa.Function {
+	if ms, ok := mc[t]; ok {
+		return ms[name]
+	}
+
+	ms := make(map[string][]*ssa.Function)
+	mset := prog.MethodSets.MethodSet(t)
+	for i, n := 0, mset.Len(); i < n; i++ {
+		// f can be nil when t is an interface or some
+		// other type without any runtime methods.
+		if f := prog.MethodValue(mset.At(i)); f != nil {
+			ms[f.Name()] = append(ms[f.Name()], f)
+		}
+	}
+	mc[t] = ms
+	return ms[name]
+}
diff --git a/go/callgraph/vta/vta_test.go b/go/callgraph/vta/vta_test.go
new file mode 100644
index 0000000..79ab31c
--- /dev/null
+++ b/go/callgraph/vta/vta_test.go
@@ -0,0 +1,109 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vta
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+	"testing"
+
+	"golang.org/x/tools/go/callgraph"
+	"golang.org/x/tools/go/callgraph/cha"
+	"golang.org/x/tools/go/ssa"
+
+	"golang.org/x/tools/go/ssa/ssautil"
+)
+
+// callGraphStr stringifies `g` into a list of strings where
+// each entry is of the form
+//   f: cs1 -> f1, f2, ...; ...; csw -> fx, fy, ...
+// f is a function, cs1, ..., csw are call sites in f, and
+// f1, f2, ..., fx, fy, ... are the resolved callees.
+func callGraphStr(g *callgraph.Graph) []string {
+	var gs []string
+	for f, n := range g.Nodes {
+		c := make(map[string][]string)
+		for _, edge := range n.Out {
+			cs := edge.Site.String()
+			c[cs] = append(c[cs], funcName(edge.Callee.Func))
+		}
+
+		var cs []string
+		for site, fs := range c {
+			sort.Strings(fs)
+			entry := fmt.Sprintf("%v -> %v", site, strings.Join(fs, ", "))
+			cs = append(cs, entry)
+		}
+
+		sort.Strings(cs)
+		entry := fmt.Sprintf("%v: %v", funcName(f), strings.Join(cs, "; "))
+		gs = append(gs, entry)
+	}
+	return gs
+}
+
+func TestVTACallGraph(t *testing.T) {
+	for _, file := range []string{
+		"testdata/callgraph_static.go",
+		"testdata/callgraph_ho.go",
+		"testdata/callgraph_interfaces.go",
+		"testdata/callgraph_pointers.go",
+		"testdata/callgraph_collections.go",
+	} {
+		t.Run(file, func(t *testing.T) {
+			prog, want, err := testProg(file)
+			if err != nil {
+				t.Fatalf("couldn't load test file '%s': %s", file, err)
+			}
+			if len(want) == 0 {
+				t.Fatalf("couldn't find want in `%s`", file)
+			}
+
+			g := CallGraph(ssautil.AllFunctions(prog), cha.CallGraph(prog))
+			if got := callGraphStr(g); !subGraph(want, got) {
+				t.Errorf("computed callgraph %v should contain %v", got, want)
+			}
+		})
+	}
+}
+
+// TestVTAProgVsFuncSet exemplifies and tests different possibilities
+// enabled by having an arbitrary function set as input to CallGraph
+// instead of the whole program (i.e., ssautil.AllFunctions(prog)).
+func TestVTAProgVsFuncSet(t *testing.T) {
+	prog, want, err := testProg("testdata/callgraph_nested_ptr.go")
+	if err != nil {
+		t.Fatalf("couldn't load test `testdata/callgraph_nested_ptr.go`: %s", err)
+	}
+	if len(want) == 0 {
+		t.Fatal("couldn't find want in `testdata/callgraph_nested_ptr.go`")
+	}
+
+	allFuncs := ssautil.AllFunctions(prog)
+	g := CallGraph(allFuncs, cha.CallGraph(prog))
+	// VTA over the whole program will produce a call graph that
+	// includes Baz:(**i).Foo -> A.Foo, B.Foo.
+	if got := callGraphStr(g); !subGraph(want, got) {
+		t.Errorf("computed callgraph %v should contain %v", got, want)
+	}
+
+	// Prune the set of program functions to exclude Bar(). This should
+	// yield a call graph that includes different set of callees for Baz
+	// Baz:(**i).Foo -> A.Foo
+	//
+	// Note that the exclusion of Bar can happen, for instance, if Baz is
+	// considered an entry point of some data flow analysis and Bar is
+	// provably (e.g., using CHA forward reachability) unreachable from Baz.
+	noBarFuncs := make(map[*ssa.Function]bool)
+	for f, in := range allFuncs {
+		noBarFuncs[f] = in && (funcName(f) != "Bar")
+	}
+	want = []string{"Baz: Do(i) -> Do; invoke t2.Foo() -> A.Foo"}
+	g = CallGraph(noBarFuncs, cha.CallGraph(prog))
+	if got := callGraphStr(g); !subGraph(want, got) {
+		t.Errorf("pruned callgraph %v should contain %v", got, want)
+	}
+}
diff --git a/go/internal/cgo/cgo.go b/go/internal/cgo/cgo.go
index d9074ea..d01fb04 100644
--- a/go/internal/cgo/cgo.go
+++ b/go/internal/cgo/cgo.go
@@ -160,13 +160,15 @@
 	}
 
 	args := stringList(
-		"go", "tool", "cgo", "-srcdir", pkgdir, "-objdir", tmpdir, cgoflags, "--",
+		"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
 		cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
 	)
 	if false {
-		log.Printf("Running cgo for package %q: %s", bp.ImportPath, args)
+		log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
 	}
 	cmd := exec.Command(args[0], args[1:]...)
+	cmd.Dir = pkgdir
+	cmd.Env = append(os.Environ(), "PWD="+pkgdir)
 	cmd.Stdout = os.Stderr
 	cmd.Stderr = os.Stderr
 	if err := cmd.Run(); err != nil {
diff --git a/go/loader/loader_test.go b/go/loader/loader_test.go
index e39653c..956c01a 100644
--- a/go/loader/loader_test.go
+++ b/go/loader/loader_test.go
@@ -834,3 +834,11 @@
 		t.Fatal(err)
 	}
 }
+
+func TestCgoCwdIssue46877(t *testing.T) {
+	var conf loader.Config
+	conf.Import("golang.org/x/tools/go/loader/testdata/issue46877")
+	if _, err := conf.Load(); err != nil {
+		t.Errorf("Load failed: %v", err)
+	}
+}
diff --git a/go/loader/testdata/issue46877/x.go b/go/loader/testdata/issue46877/x.go
new file mode 100644
index 0000000..a1e6797
--- /dev/null
+++ b/go/loader/testdata/issue46877/x.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x
+
+// #include "x.h"
+import "C"
+
+var _ C.myint
diff --git a/go/loader/testdata/issue46877/x.h b/go/loader/testdata/issue46877/x.h
new file mode 100644
index 0000000..9fc115b
--- /dev/null
+++ b/go/loader/testdata/issue46877/x.h
@@ -0,0 +1,5 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+typedef int myint;
diff --git a/go/packages/overlay_test.go b/go/packages/overlay_test.go
index 5850a7f..f2164c2 100644
--- a/go/packages/overlay_test.go
+++ b/go/packages/overlay_test.go
@@ -27,7 +27,7 @@
 )
 
 func TestOverlayChangesPackageName(t *testing.T) {
-	packagestest.TestAll(t, testOverlayChangesPackageName)
+	testAllOrModulesParallel(t, testOverlayChangesPackageName)
 }
 func testOverlayChangesPackageName(t *testing.T, exporter packagestest.Exporter) {
 	log.SetFlags(log.Lshortfile)
@@ -57,7 +57,7 @@
 	log.SetFlags(0)
 }
 func TestOverlayChangesBothPackageNames(t *testing.T) {
-	packagestest.TestAll(t, testOverlayChangesBothPackageNames)
+	testAllOrModulesParallel(t, testOverlayChangesBothPackageNames)
 }
 func testOverlayChangesBothPackageNames(t *testing.T, exporter packagestest.Exporter) {
 	log.SetFlags(log.Lshortfile)
@@ -106,7 +106,7 @@
 	log.SetFlags(0)
 }
 func TestOverlayChangesTestPackageName(t *testing.T) {
-	packagestest.TestAll(t, testOverlayChangesTestPackageName)
+	testAllOrModulesParallel(t, testOverlayChangesTestPackageName)
 }
 func testOverlayChangesTestPackageName(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsGo1Point(t, 16)
@@ -163,7 +163,7 @@
 }
 
 func TestOverlayXTests(t *testing.T) {
-	packagestest.TestAll(t, testOverlayXTests)
+	testAllOrModulesParallel(t, testOverlayXTests)
 }
 
 // This test checks the behavior of go/packages.Load with an overlaid
@@ -247,7 +247,7 @@
 	}
 }
 
-func TestOverlay(t *testing.T) { packagestest.TestAll(t, testOverlay) }
+func TestOverlay(t *testing.T) { testAllOrModulesParallel(t, testOverlay) }
 func testOverlay(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -315,7 +315,7 @@
 	}
 }
 
-func TestOverlayDeps(t *testing.T) { packagestest.TestAll(t, testOverlayDeps) }
+func TestOverlayDeps(t *testing.T) { testAllOrModulesParallel(t, testOverlayDeps) }
 func testOverlayDeps(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -364,7 +364,7 @@
 
 }
 
-func TestNewPackagesInOverlay(t *testing.T) { packagestest.TestAll(t, testNewPackagesInOverlay) }
+func TestNewPackagesInOverlay(t *testing.T) { testAllOrModulesParallel(t, testNewPackagesInOverlay) }
 func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{
 		{
@@ -468,7 +468,7 @@
 
 // Test that we can create a package and its test package in an overlay.
 func TestOverlayNewPackageAndTest(t *testing.T) {
-	packagestest.TestAll(t, testOverlayNewPackageAndTest)
+	testAllOrModulesParallel(t, testOverlayNewPackageAndTest)
 }
 func testOverlayNewPackageAndTest(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{
@@ -496,6 +496,7 @@
 }
 
 func TestAdHocOverlays(t *testing.T) {
+	t.Parallel()
 	testenv.NeedsTool(t, "go")
 
 	// This test doesn't use packagestest because we are testing ad-hoc packages,
@@ -551,6 +552,7 @@
 // TestOverlayModFileChanges tests the behavior resulting from having files
 // from multiple modules in overlays.
 func TestOverlayModFileChanges(t *testing.T) {
+	t.Parallel()
 	testenv.NeedsTool(t, "go")
 
 	// Create two unrelated modules in a temporary directory.
@@ -620,6 +622,8 @@
 }
 
 func TestOverlayGOPATHVendoring(t *testing.T) {
+	t.Parallel()
+
 	exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{{
 		Name: "golang.org/fake",
 		Files: map[string]interface{}{
@@ -647,7 +651,7 @@
 	}
 }
 
-func TestContainsOverlay(t *testing.T) { packagestest.TestAll(t, testContainsOverlay) }
+func TestContainsOverlay(t *testing.T) { testAllOrModulesParallel(t, testContainsOverlay) }
 func testContainsOverlay(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -676,7 +680,7 @@
 	}
 }
 
-func TestContainsOverlayXTest(t *testing.T) { packagestest.TestAll(t, testContainsOverlayXTest) }
+func TestContainsOverlayXTest(t *testing.T) { testAllOrModulesParallel(t, testContainsOverlayXTest) }
 func testContainsOverlayXTest(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -709,7 +713,7 @@
 }
 
 func TestInvalidFilesBeforeOverlay(t *testing.T) {
-	packagestest.TestAll(t, testInvalidFilesBeforeOverlay)
+	testAllOrModulesParallel(t, testInvalidFilesBeforeOverlay)
 }
 
 func testInvalidFilesBeforeOverlay(t *testing.T, exporter packagestest.Exporter) {
@@ -749,7 +753,7 @@
 
 // Tests golang/go#35973, fixed in Go 1.14.
 func TestInvalidFilesBeforeOverlayContains(t *testing.T) {
-	packagestest.TestAll(t, testInvalidFilesBeforeOverlayContains)
+	testAllOrModulesParallel(t, testInvalidFilesBeforeOverlayContains)
 }
 func testInvalidFilesBeforeOverlayContains(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsGo1Point(t, 15)
@@ -856,7 +860,7 @@
 }
 
 func TestInvalidXTestInGOPATH(t *testing.T) {
-	packagestest.TestAll(t, testInvalidXTestInGOPATH)
+	testAllOrModulesParallel(t, testInvalidXTestInGOPATH)
 }
 func testInvalidXTestInGOPATH(t *testing.T, exporter packagestest.Exporter) {
 	t.Skip("Not fixed yet. See golang.org/issue/40825.")
@@ -889,7 +893,7 @@
 
 // Reproduces golang/go#40685.
 func TestAddImportInOverlay(t *testing.T) {
-	packagestest.TestAll(t, testAddImportInOverlay)
+	testAllOrModulesParallel(t, testAddImportInOverlay)
 }
 func testAddImportInOverlay(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{
@@ -958,7 +962,7 @@
 
 // Tests that overlays are applied for different kinds of load patterns.
 func TestLoadDifferentPatterns(t *testing.T) {
-	packagestest.TestAll(t, testLoadDifferentPatterns)
+	testAllOrModulesParallel(t, testLoadDifferentPatterns)
 }
 func testLoadDifferentPatterns(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{
@@ -1042,6 +1046,8 @@
 // This does not use go/packagestest because it needs to write a replace
 // directive with an absolute path in one of the module's go.mod files.
 func TestOverlaysInReplace(t *testing.T) {
+	t.Parallel()
+
 	// Create module b.com in a temporary directory. Do not add any Go files
 	// on disk.
 	tmpPkgs, err := ioutil.TempDir("", "modules")
diff --git a/go/packages/packages_test.go b/go/packages/packages_test.go
index 25866ec..6549fd6 100644
--- a/go/packages/packages_test.go
+++ b/go/packages/packages_test.go
@@ -54,6 +54,34 @@
 	os.Exit(m.Run())
 }
 
+func skipIfShort(t *testing.T, reason string) {
+	if testing.Short() {
+		t.Skipf("skipping slow test in short mode: %s", reason)
+	}
+}
+
+// testAllOrModulesParallel tests f, in parallel, against all packagestest
+// exporters in long mode, but only against the Modules exporter in short mode.
+func testAllOrModulesParallel(t *testing.T, f func(*testing.T, packagestest.Exporter)) {
+	t.Parallel()
+	packagestest.TestAll(t, func(t *testing.T, exporter packagestest.Exporter) {
+		t.Helper()
+
+		switch exporter.Name() {
+		case "Modules":
+		case "GOPATH":
+			if testing.Short() {
+				t.Skipf("skipping GOPATH test in short mode")
+			}
+		default:
+			t.Fatalf("unexpected exporter %q", exporter.Name())
+		}
+
+		t.Parallel()
+		f(t, exporter)
+	})
+}
+
 // TODO(adonovan): more test cases to write:
 //
 // - When the tests fail, make them print a 'cd & load' command
@@ -75,6 +103,7 @@
 // The zero-value of Config has LoadFiles mode.
 func TestLoadZeroConfig(t *testing.T) {
 	testenv.NeedsGoPackages(t)
+	t.Parallel()
 
 	initial, err := packages.Load(nil, "hash")
 	if err != nil {
@@ -93,7 +122,7 @@
 	}
 }
 
-func TestLoadImportsGraph(t *testing.T) { packagestest.TestAll(t, testLoadImportsGraph) }
+func TestLoadImportsGraph(t *testing.T) { testAllOrModulesParallel(t, testLoadImportsGraph) }
 func testLoadImportsGraph(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -267,7 +296,9 @@
 	}
 }
 
-func TestLoadImportsTestVariants(t *testing.T) { packagestest.TestAll(t, testLoadImportsTestVariants) }
+func TestLoadImportsTestVariants(t *testing.T) {
+	testAllOrModulesParallel(t, testLoadImportsTestVariants)
+}
 func testLoadImportsTestVariants(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -308,6 +339,8 @@
 }
 
 func TestLoadAbsolutePath(t *testing.T) {
+	t.Parallel()
+
 	exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{{
 		Name: "golang.org/gopatha",
 		Files: map[string]interface{}{
@@ -336,6 +369,8 @@
 }
 
 func TestVendorImports(t *testing.T) {
+	t.Parallel()
+
 	exported := packagestest.Export(t, packagestest.GOPATH, []packagestest.Module{{
 		Name: "golang.org/fake",
 		Files: map[string]interface{}{
@@ -395,7 +430,7 @@
 	return keys
 }
 
-func TestConfigDir(t *testing.T) { packagestest.TestAll(t, testConfigDir) }
+func TestConfigDir(t *testing.T) { testAllOrModulesParallel(t, testConfigDir) }
 func testConfigDir(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -458,7 +493,7 @@
 	}
 }
 
-func TestConfigFlags(t *testing.T) { packagestest.TestAll(t, testConfigFlags) }
+func TestConfigFlags(t *testing.T) { testAllOrModulesParallel(t, testConfigFlags) }
 func testConfigFlags(t *testing.T, exporter packagestest.Exporter) {
 	// Test satisfying +build line tags, with -tags flag.
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -519,7 +554,7 @@
 	}
 }
 
-func TestLoadTypes(t *testing.T) { packagestest.TestAll(t, testLoadTypes) }
+func TestLoadTypes(t *testing.T) { testAllOrModulesParallel(t, testLoadTypes) }
 func testLoadTypes(t *testing.T, exporter packagestest.Exporter) {
 	// In LoadTypes and LoadSyntax modes, the compiler will
 	// fail to generate an export data file for c, because it has
@@ -577,7 +612,7 @@
 
 // TestLoadTypesBits is equivalent to TestLoadTypes except that it only requests
 // the types using the NeedTypes bit.
-func TestLoadTypesBits(t *testing.T) { packagestest.TestAll(t, testLoadTypesBits) }
+func TestLoadTypesBits(t *testing.T) { testAllOrModulesParallel(t, testLoadTypesBits) }
 func testLoadTypesBits(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -653,7 +688,7 @@
 	}
 }
 
-func TestLoadSyntaxOK(t *testing.T) { packagestest.TestAll(t, testLoadSyntaxOK) }
+func TestLoadSyntaxOK(t *testing.T) { testAllOrModulesParallel(t, testLoadSyntaxOK) }
 func testLoadSyntaxOK(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -743,7 +778,7 @@
 	}
 }
 
-func TestLoadDiamondTypes(t *testing.T) { packagestest.TestAll(t, testLoadDiamondTypes) }
+func TestLoadDiamondTypes(t *testing.T) { testAllOrModulesParallel(t, testLoadDiamondTypes) }
 func testLoadDiamondTypes(t *testing.T, exporter packagestest.Exporter) {
 	// We make a diamond dependency and check the type d.D is the same through both paths
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -783,7 +818,7 @@
 	}
 }
 
-func TestLoadSyntaxError(t *testing.T) { packagestest.TestAll(t, testLoadSyntaxError) }
+func TestLoadSyntaxError(t *testing.T) { testAllOrModulesParallel(t, testLoadSyntaxError) }
 func testLoadSyntaxError(t *testing.T, exporter packagestest.Exporter) {
 	// A type error in a lower-level package (e) prevents go list
 	// from producing export data for all packages that depend on it
@@ -859,7 +894,7 @@
 
 // This function tests use of the ParseFile hook to modify
 // the AST after parsing.
-func TestParseFileModifyAST(t *testing.T) { packagestest.TestAll(t, testParseFileModifyAST) }
+func TestParseFileModifyAST(t *testing.T) { testAllOrModulesParallel(t, testParseFileModifyAST) }
 func testParseFileModifyAST(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -891,6 +926,8 @@
 }
 
 func TestAdHocPackagesBadImport(t *testing.T) {
+	t.Parallel()
+
 	// This test doesn't use packagestest because we are testing ad-hoc packages,
 	// which are outside of $GOPATH and outside of a module.
 	tmp, err := ioutil.TempDir("", "a")
@@ -939,7 +976,7 @@
 }
 
 func TestLoadAllSyntaxImportErrors(t *testing.T) {
-	packagestest.TestAll(t, testLoadAllSyntaxImportErrors)
+	testAllOrModulesParallel(t, testLoadAllSyntaxImportErrors)
 }
 func testLoadAllSyntaxImportErrors(t *testing.T, exporter packagestest.Exporter) {
 	// TODO(matloob): Remove this once go list -e -compiled is fixed.
@@ -1024,7 +1061,7 @@
 	}
 }
 
-func TestAbsoluteFilenames(t *testing.T) { packagestest.TestAll(t, testAbsoluteFilenames) }
+func TestAbsoluteFilenames(t *testing.T) { testAllOrModulesParallel(t, testAbsoluteFilenames) }
 func testAbsoluteFilenames(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1103,7 +1140,7 @@
 	}
 }
 
-func TestContains(t *testing.T) { packagestest.TestAll(t, testContains) }
+func TestContains(t *testing.T) { testAllOrModulesParallel(t, testContains) }
 func testContains(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1135,7 +1172,7 @@
 // application determines the Sizes function used by the type checker.
 // This behavior is a stop-gap until we make the build system's query
 // tool report the correct sizes function for the actual configuration.
-func TestSizes(t *testing.T) { packagestest.TestAll(t, testSizes) }
+func TestSizes(t *testing.T) { testAllOrModulesParallel(t, testSizes) }
 func testSizes(t *testing.T, exporter packagestest.Exporter) {
 	// Only run this test on operating systems that have both an amd64 and 386 port.
 	switch runtime.GOOS {
@@ -1172,7 +1209,9 @@
 // TestContainsFallbackSticks ensures that when there are both contains and non-contains queries
 // the decision whether to fallback to the pre-1.11 go list sticks across both sets of calls to
 // go list.
-func TestContainsFallbackSticks(t *testing.T) { packagestest.TestAll(t, testContainsFallbackSticks) }
+func TestContainsFallbackSticks(t *testing.T) {
+	testAllOrModulesParallel(t, testContainsFallbackSticks)
+}
 func testContainsFallbackSticks(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1205,7 +1244,7 @@
 
 // Test that Load with no patterns is equivalent to loading "." via the golist
 // driver.
-func TestNoPatterns(t *testing.T) { packagestest.TestAll(t, testNoPatterns) }
+func TestNoPatterns(t *testing.T) { testAllOrModulesParallel(t, testNoPatterns) }
 func testNoPatterns(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1227,7 +1266,7 @@
 	}
 }
 
-func TestJSON(t *testing.T) { packagestest.TestAll(t, testJSON) }
+func TestJSON(t *testing.T) { testAllOrModulesParallel(t, testJSON) }
 func testJSON(t *testing.T, exporter packagestest.Exporter) {
 	//TODO: add in some errors
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1379,6 +1418,8 @@
 }
 
 func TestRejectInvalidQueries(t *testing.T) {
+	t.Parallel()
+
 	queries := []string{"key=", "key=value"}
 	cfg := &packages.Config{
 		Mode: packages.LoadImports,
@@ -1393,7 +1434,7 @@
 	}
 }
 
-func TestPatternPassthrough(t *testing.T) { packagestest.TestAll(t, testPatternPassthrough) }
+func TestPatternPassthrough(t *testing.T) { testAllOrModulesParallel(t, testPatternPassthrough) }
 func testPatternPassthrough(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1417,7 +1458,11 @@
 
 }
 
-func TestConfigDefaultEnv(t *testing.T) { packagestest.TestAll(t, testConfigDefaultEnv) }
+func TestConfigDefaultEnv(t *testing.T) {
+	// packagestest.TestAll instead of testAllOrModulesParallel because this test
+	// can't be parallelized (it modifies the environment).
+	packagestest.TestAll(t, testConfigDefaultEnv)
+}
 func testConfigDefaultEnv(t *testing.T, exporter packagestest.Exporter) {
 	const driverJSON = `{
   "Roots": ["gopackagesdriver"],
@@ -1525,7 +1570,7 @@
 // list. This would then cause a nil pointer crash.
 // This bug was triggered by the simple package layout below, and thus this
 // test will make sure the bug remains fixed.
-func TestBasicXTest(t *testing.T) { packagestest.TestAll(t, testBasicXTest) }
+func TestBasicXTest(t *testing.T) { testAllOrModulesParallel(t, testBasicXTest) }
 func testBasicXTest(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1543,7 +1588,7 @@
 	}
 }
 
-func TestErrorMissingFile(t *testing.T) { packagestest.TestAll(t, testErrorMissingFile) }
+func TestErrorMissingFile(t *testing.T) { testAllOrModulesParallel(t, testErrorMissingFile) }
 func testErrorMissingFile(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1570,7 +1615,7 @@
 }
 
 func TestReturnErrorWhenUsingNonGoFiles(t *testing.T) {
-	packagestest.TestAll(t, testReturnErrorWhenUsingNonGoFiles)
+	testAllOrModulesParallel(t, testReturnErrorWhenUsingNonGoFiles)
 }
 func testReturnErrorWhenUsingNonGoFiles(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1598,7 +1643,7 @@
 }
 
 func TestReturnErrorWhenUsingGoFilesInMultipleDirectories(t *testing.T) {
-	packagestest.TestAll(t, testReturnErrorWhenUsingGoFilesInMultipleDirectories)
+	testAllOrModulesParallel(t, testReturnErrorWhenUsingGoFilesInMultipleDirectories)
 }
 func testReturnErrorWhenUsingGoFilesInMultipleDirectories(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1630,7 +1675,7 @@
 }
 
 func TestReturnErrorForUnexpectedDirectoryLayout(t *testing.T) {
-	packagestest.TestAll(t, testReturnErrorForUnexpectedDirectoryLayout)
+	testAllOrModulesParallel(t, testReturnErrorForUnexpectedDirectoryLayout)
 }
 func testReturnErrorForUnexpectedDirectoryLayout(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1660,7 +1705,7 @@
 	}
 }
 
-func TestMissingDependency(t *testing.T) { packagestest.TestAll(t, testMissingDependency) }
+func TestMissingDependency(t *testing.T) { testAllOrModulesParallel(t, testMissingDependency) }
 func testMissingDependency(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1682,7 +1727,7 @@
 	}
 }
 
-func TestAdHocContains(t *testing.T) { packagestest.TestAll(t, testAdHocContains) }
+func TestAdHocContains(t *testing.T) { testAllOrModulesParallel(t, testAdHocContains) }
 func testAdHocContains(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name: "golang.org/fake",
@@ -1724,7 +1769,7 @@
 	}
 }
 
-func TestCgoNoCcompiler(t *testing.T) { packagestest.TestAll(t, testCgoNoCcompiler) }
+func TestCgoNoCcompiler(t *testing.T) { testAllOrModulesParallel(t, testCgoNoCcompiler) }
 func testCgoNoCcompiler(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsTool(t, "cgo")
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1758,7 +1803,7 @@
 	}
 }
 
-func TestCgoMissingFile(t *testing.T) { packagestest.TestAll(t, testCgoMissingFile) }
+func TestCgoMissingFile(t *testing.T) { testAllOrModulesParallel(t, testCgoMissingFile) }
 func testCgoMissingFile(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsTool(t, "cgo")
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1809,6 +1854,7 @@
 		// See https://golang.org/issue/27100.
 		t.Skip(`skipping on plan9; for some reason "net [syscall.test]" is not loaded`)
 	}
+	t.Parallel()
 	testenv.NeedsGoPackages(t)
 
 	cfg := &packages.Config{
@@ -1844,7 +1890,7 @@
 }
 
 func TestCgoNoSyntax(t *testing.T) {
-	packagestest.TestAll(t, testCgoNoSyntax)
+	testAllOrModulesParallel(t, testCgoNoSyntax)
 }
 func testCgoNoSyntax(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsTool(t, "cgo")
@@ -1867,6 +1913,7 @@
 		packages.NeedName | packages.NeedImports,
 	}
 	for _, mode := range modes {
+		mode := mode
 		t.Run(fmt.Sprint(mode), func(t *testing.T) {
 			exported.Config.Mode = mode
 			pkgs, err := packages.Load(exported.Config, "golang.org/fake/c")
@@ -1885,9 +1932,10 @@
 }
 
 func TestCgoBadPkgConfig(t *testing.T) {
-	packagestest.TestAll(t, testCgoBadPkgConfig)
+	testAllOrModulesParallel(t, testCgoBadPkgConfig)
 }
 func testCgoBadPkgConfig(t *testing.T, exporter packagestest.Exporter) {
+	skipIfShort(t, "builds and links a fake pkgconfig binary")
 	testenv.NeedsTool(t, "cgo")
 
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -1957,7 +2005,7 @@
 	return tmpdir
 }
 
-func TestIssue32814(t *testing.T) { packagestest.TestAll(t, testIssue32814) }
+func TestIssue32814(t *testing.T) { testAllOrModulesParallel(t, testIssue32814) }
 func testIssue32814(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
 		Name:  "golang.org/fake",
@@ -1985,7 +2033,7 @@
 }
 
 func TestLoadTypesInfoWithoutNeedDeps(t *testing.T) {
-	packagestest.TestAll(t, testLoadTypesInfoWithoutNeedDeps)
+	testAllOrModulesParallel(t, testLoadTypesInfoWithoutNeedDeps)
 }
 func testLoadTypesInfoWithoutNeedDeps(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2012,7 +2060,7 @@
 }
 
 func TestLoadWithNeedDeps(t *testing.T) {
-	packagestest.TestAll(t, testLoadWithNeedDeps)
+	testAllOrModulesParallel(t, testLoadWithNeedDeps)
 }
 func testLoadWithNeedDeps(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2056,7 +2104,7 @@
 }
 
 func TestImpliedLoadMode(t *testing.T) {
-	packagestest.TestAll(t, testImpliedLoadMode)
+	testAllOrModulesParallel(t, testImpliedLoadMode)
 }
 func testImpliedLoadMode(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2094,7 +2142,7 @@
 }
 
 func TestIssue35331(t *testing.T) {
-	packagestest.TestAll(t, testIssue35331)
+	testAllOrModulesParallel(t, testIssue35331)
 }
 func testIssue35331(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2124,7 +2172,7 @@
 }
 
 func TestMultiplePackageVersionsIssue36188(t *testing.T) {
-	packagestest.TestAll(t, testMultiplePackageVersionsIssue36188)
+	testAllOrModulesParallel(t, testMultiplePackageVersionsIssue36188)
 }
 
 func testMultiplePackageVersionsIssue36188(t *testing.T, exporter packagestest.Exporter) {
@@ -2233,7 +2281,7 @@
 }
 
 func TestCycleImportStack(t *testing.T) {
-	packagestest.TestAll(t, testCycleImportStack)
+	testAllOrModulesParallel(t, testCycleImportStack)
 }
 func testCycleImportStack(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2263,7 +2311,7 @@
 }
 
 func TestForTestField(t *testing.T) {
-	packagestest.TestAll(t, testForTestField)
+	testAllOrModulesParallel(t, testForTestField)
 }
 func testForTestField(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2309,7 +2357,7 @@
 }
 
 func TestIssue37529(t *testing.T) {
-	packagestest.TestAll(t, testIssue37529)
+	testAllOrModulesParallel(t, testIssue37529)
 }
 func testIssue37529(t *testing.T, exporter packagestest.Exporter) {
 	// Tests #37529. When automatic vendoring is triggered, and we try to determine
@@ -2349,7 +2397,7 @@
 	}
 }
 
-func TestIssue37098(t *testing.T) { packagestest.TestAll(t, testIssue37098) }
+func TestIssue37098(t *testing.T) { testAllOrModulesParallel(t, testIssue37098) }
 func testIssue37098(t *testing.T, exporter packagestest.Exporter) {
 	// packages.Load should only return Go sources in
 	// (*Package).CompiledGoFiles.  This tests #37098, where using SWIG to
@@ -2413,7 +2461,7 @@
 }
 
 // TestInvalidFilesInXTest checks the fix for golang/go#37971 in Go 1.15.
-func TestInvalidFilesInXTest(t *testing.T) { packagestest.TestAll(t, testInvalidFilesInXTest) }
+func TestInvalidFilesInXTest(t *testing.T) { testAllOrModulesParallel(t, testInvalidFilesInXTest) }
 func testInvalidFilesInXTest(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsGo1Point(t, 15)
 	exported := packagestest.Export(t, exporter, []packagestest.Module{
@@ -2440,7 +2488,7 @@
 	}
 }
 
-func TestTypecheckCgo(t *testing.T) { packagestest.TestAll(t, testTypecheckCgo) }
+func TestTypecheckCgo(t *testing.T) { testAllOrModulesParallel(t, testTypecheckCgo) }
 func testTypecheckCgo(t *testing.T, exporter packagestest.Exporter) {
 	testenv.NeedsGo1Point(t, 15)
 	testenv.NeedsTool(t, "cgo")
@@ -2483,7 +2531,7 @@
 }
 
 func TestModule(t *testing.T) {
-	packagestest.TestAll(t, testModule)
+	testAllOrModulesParallel(t, testModule)
 }
 func testModule(t *testing.T, exporter packagestest.Exporter) {
 	exported := packagestest.Export(t, exporter, []packagestest.Module{{
@@ -2523,9 +2571,10 @@
 }
 
 func TestExternal_NotHandled(t *testing.T) {
-	packagestest.TestAll(t, testExternal_NotHandled)
+	testAllOrModulesParallel(t, testExternal_NotHandled)
 }
 func testExternal_NotHandled(t *testing.T, exporter packagestest.Exporter) {
+	skipIfShort(t, "builds and links fake driver binaries")
 	testenv.NeedsGoBuild(t)
 
 	tempdir, err := ioutil.TempDir("", "testexternal")
@@ -2609,7 +2658,7 @@
 }
 
 func TestInvalidPackageName(t *testing.T) {
-	packagestest.TestAll(t, testInvalidPackageName)
+	testAllOrModulesParallel(t, testInvalidPackageName)
 }
 
 func testInvalidPackageName(t *testing.T, exporter packagestest.Exporter) {
@@ -2638,6 +2687,8 @@
 }
 
 func TestEmptyEnvironment(t *testing.T) {
+	t.Parallel()
+
 	cfg := &packages.Config{
 		Env: []string{"FOO=BAR"},
 	}
diff --git a/go/packages/packagestest/export.go b/go/packages/packagestest/export.go
index 2b93d2c..5dea613 100644
--- a/go/packages/packagestest/export.go
+++ b/go/packages/packagestest/export.go
@@ -159,6 +159,7 @@
 func TestAll(t *testing.T, f func(*testing.T, Exporter)) {
 	t.Helper()
 	for _, e := range All {
+		e := e // in case f calls t.Parallel
 		t.Run(e.Name(), func(t *testing.T) {
 			t.Helper()
 			f(t, e)
@@ -172,6 +173,7 @@
 func BenchmarkAll(b *testing.B, f func(*testing.B, Exporter)) {
 	b.Helper()
 	for _, e := range All {
+		e := e // in case f calls t.Parallel
 		b.Run(e.Name(), func(b *testing.B) {
 			b.Helper()
 			f(b, e)
diff --git a/go/packages/stdlib_test.go b/go/packages/stdlib_test.go
index 254f459..f8b93df 100644
--- a/go/packages/stdlib_test.go
+++ b/go/packages/stdlib_test.go
@@ -19,11 +19,6 @@
 
 // This test loads the metadata for the standard library,
 func TestStdlibMetadata(t *testing.T) {
-	// TODO(adonovan): see if we can get away without this hack.
-	// if runtime.GOOS == "android" {
-	// 	t.Skipf("incomplete std lib on %s", runtime.GOOS)
-	// }
-
 	testenv.NeedsGoPackages(t)
 
 	runtime.GC()
@@ -61,9 +56,7 @@
 }
 
 func TestCgoOption(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping in short mode; uses tons of memory (https://golang.org/issue/14113)")
-	}
+	skipIfShort(t, "uses tons of memory (https://golang.org/issue/14113)")
 
 	testenv.NeedsGoPackages(t)
 
diff --git a/go/ssa/builder.go b/go/ssa/builder.go
index a13a884..e1540db 100644
--- a/go/ssa/builder.go
+++ b/go/ssa/builder.go
@@ -579,6 +579,8 @@
 					y.pos = e.Lparen
 				case *MakeInterface:
 					y.pos = e.Lparen
+				case *SliceToArrayPointer:
+					y.pos = e.Lparen
 				}
 			}
 			return y
@@ -693,6 +695,10 @@
 	case *ast.SelectorExpr:
 		sel, ok := fn.Pkg.info.Selections[e]
 		if !ok {
+			// builtin unsafe.{Add,Slice}
+			if obj, ok := fn.Pkg.info.Uses[e.Sel].(*types.Builtin); ok {
+				return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)}
+			}
 			// qualified identifier
 			return b.expr(fn, e.Sel)
 		}
diff --git a/go/ssa/builder_go117_test.go b/go/ssa/builder_go117_test.go
new file mode 100644
index 0000000..f6545e5
--- /dev/null
+++ b/go/ssa/builder_go117_test.go
@@ -0,0 +1,82 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.17
+// +build go1.17
+
+package ssa_test
+
+import (
+	"go/ast"
+	"go/importer"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"testing"
+
+	"golang.org/x/tools/go/ssa"
+	"golang.org/x/tools/go/ssa/ssautil"
+)
+
+func TestBuildPackageGo117(t *testing.T) {
+	tests := []struct {
+		name     string
+		src      string
+		importer types.Importer
+	}{
+		{"slice to array pointer", "package p; var s []byte; var _ = (*[4]byte)(s)", nil},
+		{"unsafe add", `package p; import "unsafe"; var _ = unsafe.Add(nil, 0)`, importer.Default()},
+		{"unsafe slice", `package p; import "unsafe"; var _ = unsafe.Slice((*int)(nil), 0)`, importer.Default()},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			fset := token.NewFileSet()
+			f, err := parser.ParseFile(fset, "p.go", tc.src, parser.ParseComments)
+			if err != nil {
+				t.Error(err)
+			}
+			files := []*ast.File{f}
+
+			pkg := types.NewPackage("p", "")
+			conf := &types.Config{Importer: tc.importer}
+			if _, _, err := ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+		})
+	}
+}
+
+func TestBuildPackageFailuresGo117(t *testing.T) {
+	tests := []struct {
+		name     string
+		src      string
+		importer types.Importer
+	}{
+		{"slice to array pointer - source is not a slice", "package p; var s [4]byte; var _ = (*[4]byte)(s)", nil},
+		{"slice to array pointer - dest is not a pointer", "package p; var s []byte; var _ = ([4]byte)(s)", nil},
+		{"slice to array pointer - dest pointer elem is not an array", "package p; var s []byte; var _ = (*byte)(s)", nil},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			fset := token.NewFileSet()
+			f, err := parser.ParseFile(fset, "p.go", tc.src, parser.ParseComments)
+			if err != nil {
+				t.Error(err)
+			}
+			files := []*ast.File{f}
+
+			pkg := types.NewPackage("p", "")
+			conf := &types.Config{Importer: tc.importer}
+			if _, _, err := ssautil.BuildPackage(conf, fset, pkg, files, ssa.SanityCheckFunctions); err == nil {
+				t.Error("want error, but got nil")
+			}
+		})
+	}
+}
diff --git a/go/ssa/doc.go b/go/ssa/doc.go
index 1a13640..fe0099b 100644
--- a/go/ssa/doc.go
+++ b/go/ssa/doc.go
@@ -50,50 +50,51 @@
 // Instruction interfaces.  The following table shows for each
 // concrete type which of these interfaces it implements.
 //
-//                      Value?          Instruction?    Member?
-//   *Alloc             ✔               ✔
-//   *BinOp             ✔               ✔
-//   *Builtin           ✔
-//   *Call              ✔               ✔
-//   *ChangeInterface   ✔               ✔
-//   *ChangeType        ✔               ✔
-//   *Const             ✔
-//   *Convert           ✔               ✔
-//   *DebugRef                          ✔
-//   *Defer                             ✔
-//   *Extract           ✔               ✔
-//   *Field             ✔               ✔
-//   *FieldAddr         ✔               ✔
-//   *FreeVar           ✔
-//   *Function          ✔                               ✔ (func)
-//   *Global            ✔                               ✔ (var)
-//   *Go                                ✔
-//   *If                                ✔
-//   *Index             ✔               ✔
-//   *IndexAddr         ✔               ✔
-//   *Jump                              ✔
-//   *Lookup            ✔               ✔
-//   *MakeChan          ✔               ✔
-//   *MakeClosure       ✔               ✔
-//   *MakeInterface     ✔               ✔
-//   *MakeMap           ✔               ✔
-//   *MakeSlice         ✔               ✔
-//   *MapUpdate                         ✔
-//   *NamedConst                                        ✔ (const)
-//   *Next              ✔               ✔
-//   *Panic                             ✔
-//   *Parameter         ✔
-//   *Phi               ✔               ✔
-//   *Range             ✔               ✔
-//   *Return                            ✔
-//   *RunDefers                         ✔
-//   *Select            ✔               ✔
-//   *Send                              ✔
-//   *Slice             ✔               ✔
-//   *Store                             ✔
-//   *Type                                              ✔ (type)
-//   *TypeAssert        ✔               ✔
-//   *UnOp              ✔               ✔
+//                      Value?          Instruction?      Member?
+//   *Alloc                ✔               ✔
+//   *BinOp                ✔               ✔
+//   *Builtin              ✔
+//   *Call                 ✔               ✔
+//   *ChangeInterface      ✔               ✔
+//   *ChangeType           ✔               ✔
+//   *Const                ✔
+//   *Convert              ✔               ✔
+//   *SliceToArrayPointer  ✔               ✔
+//   *DebugRef                             ✔
+//   *Defer                                ✔
+//   *Extract              ✔               ✔
+//   *Field                ✔               ✔
+//   *FieldAddr            ✔               ✔
+//   *FreeVar              ✔
+//   *Function             ✔                               ✔ (func)
+//   *Global               ✔                               ✔ (var)
+//   *Go                                   ✔
+//   *If                                   ✔
+//   *Index                ✔               ✔
+//   *IndexAddr            ✔               ✔
+//   *Jump                                 ✔
+//   *Lookup               ✔               ✔
+//   *MakeChan             ✔               ✔
+//   *MakeClosure          ✔               ✔
+//   *MakeInterface        ✔               ✔
+//   *MakeMap              ✔               ✔
+//   *MakeSlice            ✔               ✔
+//   *MapUpdate                            ✔
+//   *NamedConst                                           ✔ (const)
+//   *Next                 ✔               ✔
+//   *Panic                                ✔
+//   *Parameter            ✔
+//   *Phi                  ✔               ✔
+//   *Range                ✔               ✔
+//   *Return                               ✔
+//   *RunDefers                            ✔
+//   *Select               ✔               ✔
+//   *Send                                 ✔
+//   *Slice                ✔               ✔
+//   *Store                                ✔
+//   *Type                                                 ✔ (type)
+//   *TypeAssert           ✔               ✔
+//   *UnOp                 ✔               ✔
 //
 // Other key types in this package include: Program, Package, Function
 // and BasicBlock.
diff --git a/go/ssa/emit.go b/go/ssa/emit.go
index 13fe2aa..02d0e4b 100644
--- a/go/ssa/emit.go
+++ b/go/ssa/emit.go
@@ -168,7 +168,7 @@
 // emitConv emits to f code to convert Value val to exactly type typ,
 // and returns the converted value.  Implicit conversions are required
 // by language assignability rules in assignments, parameter passing,
-// etc.  Conversions cannot fail dynamically.
+// etc.
 //
 func emitConv(f *Function, val Value, typ types.Type) Value {
 	t_src := val.Type()
@@ -228,6 +228,16 @@
 		// e.g. string -> []byte/[]rune.
 	}
 
+	// Conversion from slice to array pointer?
+	if slice, ok := ut_src.(*types.Slice); ok {
+		if ptr, ok := ut_dst.(*types.Pointer); ok {
+			if arr, ok := ptr.Elem().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) {
+				c := &SliceToArrayPointer{X: val}
+				c.setType(ut_dst)
+				return f.emit(c)
+			}
+		}
+	}
 	// A representation-changing conversion?
 	// At least one of {ut_src,ut_dst} must be *Basic.
 	// (The other may be []byte or []rune.)
diff --git a/go/ssa/interp/interp.go b/go/ssa/interp/interp.go
index d776594..bf78622 100644
--- a/go/ssa/interp/interp.go
+++ b/go/ssa/interp/interp.go
@@ -210,6 +210,9 @@
 	case *ssa.Convert:
 		fr.env[instr] = conv(instr.Type(), instr.X.Type(), fr.get(instr.X))
 
+	case *ssa.SliceToArrayPointer:
+		fr.env[instr] = sliceToArrayPointer(instr.Type(), instr.X.Type(), fr.get(instr.X))
+
 	case *ssa.MakeInterface:
 		fr.env[instr] = iface{t: instr.X.Type(), v: fr.get(instr.X)}
 
diff --git a/go/ssa/interp/interp_go117_test.go b/go/ssa/interp/interp_go117_test.go
new file mode 100644
index 0000000..58bbaa3
--- /dev/null
+++ b/go/ssa/interp/interp_go117_test.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.17
+// +build go1.17
+
+package interp_test
+
+func init() {
+	testdataTests = append(testdataTests, "slice2arrayptr.go")
+}
diff --git a/go/ssa/interp/ops.go b/go/ssa/interp/ops.go
index 90d9452..9c12d4a 100644
--- a/go/ssa/interp/ops.go
+++ b/go/ssa/interp/ops.go
@@ -1357,6 +1357,29 @@
 	panic(fmt.Sprintf("unsupported conversion: %s  -> %s, dynamic type %T", t_src, t_dst, x))
 }
 
+// sliceToArrayPointer converts the value x of type slice to type t_dst
+// (a pointer to an array) and returns the result.
+func sliceToArrayPointer(t_dst, t_src types.Type, x value) value {
+	utSrc := t_src.Underlying()
+	utDst := t_dst.Underlying()
+
+	if _, ok := utSrc.(*types.Slice); ok {
+		if utSrc, ok := utDst.(*types.Pointer); ok {
+			if arr, ok := utSrc.Elem().(*types.Array); ok {
+				x := x.([]value)
+				a := make(array, arr.Len())
+				for i := range a {
+					a[i] = x[i]
+				}
+				v := value(a)
+				return &v
+			}
+		}
+	}
+
+	panic(fmt.Sprintf("unsupported conversion: %s  -> %s, dynamic type %T", t_src, t_dst, x))
+}
+
 // checkInterface checks that the method set of x implements the
 // interface itype.
 // On success it returns "", on failure, an error message.
diff --git a/go/ssa/interp/testdata/slice2arrayptr.go b/go/ssa/interp/testdata/slice2arrayptr.go
new file mode 100644
index 0000000..21f9906
--- /dev/null
+++ b/go/ssa/interp/testdata/slice2arrayptr.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Test for slice to array pointer conversion introduced in go1.17
+
+import "fmt"
+
+var s = []byte{1, 2, 3, 4}
+var a = (*[4]byte)(s)
+
+func main() {
+	for i := range s {
+		if a[i] != s[i] {
+			panic(fmt.Sprintf("value mismatched: %v - %v\n", a[i], s[i]))
+		}
+		if (*a)[i] != s[i] {
+			panic(fmt.Sprintf("value mismatched: %v - %v\n", (*a)[i], s[i]))
+		}
+	}
+}
diff --git a/go/ssa/print.go b/go/ssa/print.go
index 3333ba4..c1b6d22 100644
--- a/go/ssa/print.go
+++ b/go/ssa/print.go
@@ -159,10 +159,11 @@
 		relName(x, v.(Instruction)))
 }
 
-func (v *ChangeType) String() string      { return printConv("changetype", v, v.X) }
-func (v *Convert) String() string         { return printConv("convert", v, v.X) }
-func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) }
-func (v *MakeInterface) String() string   { return printConv("make", v, v.X) }
+func (v *ChangeType) String() string          { return printConv("changetype", v, v.X) }
+func (v *Convert) String() string             { return printConv("convert", v, v.X) }
+func (v *ChangeInterface) String() string     { return printConv("change interface", v, v.X) }
+func (v *SliceToArrayPointer) String() string { return printConv("slice to array pointer", v, v.X) }
+func (v *MakeInterface) String() string       { return printConv("make", v, v.X) }
 
 func (v *MakeClosure) String() string {
 	var b bytes.Buffer
diff --git a/go/ssa/sanity.go b/go/ssa/sanity.go
index 0a7abc5..1d4e20f 100644
--- a/go/ssa/sanity.go
+++ b/go/ssa/sanity.go
@@ -132,6 +132,7 @@
 	case *Call:
 	case *ChangeInterface:
 	case *ChangeType:
+	case *SliceToArrayPointer:
 	case *Convert:
 		if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
 			if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
diff --git a/go/ssa/ssa.go b/go/ssa/ssa.go
index 4dfdafd..8358681 100644
--- a/go/ssa/ssa.go
+++ b/go/ssa/ssa.go
@@ -437,7 +437,7 @@
 // A Builtin represents a specific use of a built-in function, e.g. len.
 //
 // Builtins are immutable values.  Builtins do not have addresses.
-// Builtins can only appear in CallCommon.Func.
+// Builtins can only appear in CallCommon.Value.
 //
 // Name() indicates the function: one of the built-in functions from the
 // Go spec (excluding "make" and "new") or one of these ssa-defined
@@ -650,6 +650,20 @@
 	X Value
 }
 
+// The SliceToArrayPointer instruction yields the conversion of slice X to
+// array pointer.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// 	t1 = slice to array pointer *[4]byte <- []byte (t0)
+//
+type SliceToArrayPointer struct {
+	register
+	X Value
+}
+
 // MakeInterface constructs an instance of an interface type from a
 // value of a concrete type.
 //
@@ -1567,6 +1581,10 @@
 	return append(rands, &v.X)
 }
 
+func (v *SliceToArrayPointer) Operands(rands []*Value) []*Value {
+	return append(rands, &v.X)
+}
+
 func (s *DebugRef) Operands(rands []*Value) []*Value {
 	return append(rands, &s.X)
 }
diff --git a/gopls/doc/advanced.md b/gopls/doc/advanced.md
index 93c6b8f..0fa1139 100644
--- a/gopls/doc/advanced.md
+++ b/gopls/doc/advanced.md
@@ -34,4 +34,40 @@
 (`export PATH=$HOME/go/bin:$PATH` on Unix systems) or by configuring your
 editor.
 
+## Working with generic code
+
+Gopls has experimental support for generic Go, as defined by the type
+parameters proposal ([golang/go#43651](https://golang.org/issues/43651)) and
+type set addendum ([golang/go#45346](https://golang.org/issues/45346)).
+
+To enable this support, you need to build gopls with a version of Go that
+supports type parameters: the
+[dev.typeparams branch](https://github.com/golang/go/tree/dev.typeparams). This
+can be done by checking out this branch in the Go repository, or by using
+`golang.org/dl/gotip`:
+
+```
+$ go get golang.org/dl/gotip
+$ gotip download dev.typeparams
+```
+
+For building gopls with type parameter support, it is recommended that you
+build gopls at tip. External APIs are under active development on the
+`dev.typeparams` branch, so building gopls at tip minimizes the chances of
+a build failure (though it is still possible). To get enhanced gopls features
+for generic code, build gopls with the `typeparams` build constraint (though
+this increases your chances of a build failure).
+
+```
+$ GO111MODULE=on gotip get -tags=typeparams golang.org/x/tools/gopls@master golang.org/x/tools@master
+```
+
+This will build a version of gopls that understands generic code. To actually
+run the generic code you develop, you must also tell the compiler to speak
+generics using the `-G=3` compiler flag. For example
+
+```
+$ gotip run -gcflags=-G=3 .
+```
+
 [Go project]: https://go.googlesource.com/go
diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md
index 9073d97..4c44f42 100644
--- a/gopls/doc/commands.md
+++ b/gopls/doc/commands.md
@@ -3,7 +3,7 @@
 This document describes the LSP-level commands supported by `gopls`. They cannot be invoked directly by users, and all the details are subject to change, so nobody should rely on this information.
 
 <!-- BEGIN Commands: DO NOT MANUALLY EDIT THIS SECTION -->
-### **Add dependency**
+### **Add a dependency**
 Identifier: `gopls.add_dependency`
 
 Adds a dependency to the go.mod file for a module.
@@ -21,11 +21,12 @@
 }
 ```
 
-### **asks the server to add an import path to a given Go file.**
+### **Add an import**
 Identifier: `gopls.add_import`
 
-The method will call applyEdit on the client so that clients don't have
-to apply the edit themselves.
+Ask the server to add an import path to a given Go file.  The method will
+call applyEdit on the client so that clients don't have to apply the edit
+themselves.
 
 Args:
 
@@ -124,7 +125,7 @@
 }
 ```
 
-### **go get package**
+### **go get a package**
 Identifier: `gopls.go_get_package`
 
 Runs `go get` to fetch a package.
@@ -141,10 +142,10 @@
 }
 ```
 
-### **retrieves a list of packages**
+### **List known packages**
 Identifier: `gopls.list_known_packages`
 
-that are importable from the given URI.
+Retrieve a list of packages that are importable from the given URI.
 
 Args:
 
@@ -155,6 +156,19 @@
 }
 ```
 
+Result:
+
+```
+{
+	// Packages is a list of packages relative
+	// to the URIArg passed by the command request.
+	// In other words, it omits paths that are already
+	// imported or cannot be imported due to compiler
+	// restrictions.
+	"Packages": []string,
+}
+```
+
 ### **Regenerate cgo**
 Identifier: `gopls.regenerate_cgo`
 
@@ -169,7 +183,7 @@
 }
 ```
 
-### **Remove dependency**
+### **Remove a dependency**
 Identifier: `gopls.remove_dependency`
 
 Removes a dependency from the go.mod file of a module.
@@ -204,10 +218,11 @@
 }
 ```
 
-### ****
+### **Start the gopls debug server**
 Identifier: `gopls.start_debugging`
 
-
+Start the gopls debug server if it isn't running, and return the debug
+address.
 
 Args:
 
@@ -230,6 +245,24 @@
 }
 ```
 
+Result:
+
+```
+{
+	// The URLs to use to access the debug servers, for all gopls instances in
+	// the serving path. For the common case of a single gopls instance (i.e. no
+	// daemon), this will be exactly one address.
+	// 
+	// In the case of one or more gopls instances forwarding the LSP to a daemon,
+	// URLs will contain debug addresses for each server in the serving path, in
+	// serving order. The daemon debug address will be the last entry in the
+	// slice. If any intermediate gopls instance fails to start debugging, no
+	// error will be returned but the debug URL for that server in the URLs slice
+	// will be empty.
+	"URLs": []string,
+}
+```
+
 ### **Run test(s) (legacy)**
 Identifier: `gopls.test`
 
@@ -285,7 +318,7 @@
 }
 ```
 
-### **Upgrade dependency**
+### **Upgrade a dependency**
 Identifier: `gopls.upgrade_dependency`
 
 Upgrades a dependency in the go.mod file for a module.
@@ -317,9 +350,21 @@
 }
 ```
 
-### ****
+### **Query workspace metadata**
 Identifier: `gopls.workspace_metadata`
 
+Query the server for information about active workspaces.
 
+Result:
+
+```
+{
+	// All workspaces for this session.
+	"Workspaces": []{
+		"Name": string,
+		"ModuleDir": string,
+	},
+}
+```
 
 <!-- END Commands: DO NOT MANUALLY EDIT THIS SECTION -->
diff --git a/gopls/doc/contributing.md b/gopls/doc/contributing.md
index 8229eca..307b601 100644
--- a/gopls/doc/contributing.md
+++ b/gopls/doc/contributing.md
@@ -109,8 +109,10 @@
 
 ## Debugging
 
-The easiest way to debug your change is to run can run a single `gopls` test
-with a debugger.
+The easiest way to debug your change is to run a single `gopls` test with a
+debugger.
+
+See also [Troubleshooting](troubleshooting.md#troubleshooting).
 
 <!--TODO(rstambler): Add more details about the debug server and viewing
 telemetry.-->
diff --git a/gopls/doc/generate.go b/gopls/doc/generate.go
index ed42647..91d45ba 100644
--- a/gopls/doc/generate.go
+++ b/gopls/doc/generate.go
@@ -379,12 +379,16 @@
 	}
 	// Parse the objects it contains.
 	for _, cmd := range cmds {
-		commands = append(commands, &source.CommandJSON{
+		cmdjson := &source.CommandJSON{
 			Command: cmd.Name,
 			Title:   cmd.Title,
 			Doc:     cmd.Doc,
 			ArgDoc:  argsDoc(cmd.Args),
-		})
+		}
+		if cmd.Result != nil {
+			cmdjson.ResultDoc = typeDoc(cmd.Result, 0)
+		}
+		commands = append(commands, cmdjson)
 	}
 	return commands, nil
 }
@@ -392,7 +396,7 @@
 func argsDoc(args []*commandmeta.Field) string {
 	var b strings.Builder
 	for i, arg := range args {
-		b.WriteString(argDoc(arg, 0))
+		b.WriteString(typeDoc(arg, 0))
 		if i != len(args)-1 {
 			b.WriteString(",\n")
 		}
@@ -400,12 +404,12 @@
 	return b.String()
 }
 
-func argDoc(arg *commandmeta.Field, level int) string {
+func typeDoc(arg *commandmeta.Field, level int) string {
 	// Max level to expand struct fields.
 	const maxLevel = 3
 	if len(arg.Fields) > 0 {
 		if level < maxLevel {
-			return structDoc(arg.Fields, level)
+			return arg.FieldMod + structDoc(arg.Fields, level)
 		}
 		return "{ ... }"
 	}
@@ -432,7 +436,7 @@
 		if tag == "" {
 			tag = fld.Name
 		}
-		fmt.Fprintf(&b, "%s\t%q: %s,\n", indent, tag, argDoc(fld, level+1))
+		fmt.Fprintf(&b, "%s\t%q: %s,\n", indent, tag, typeDoc(fld, level+1))
 	}
 	fmt.Fprintf(&b, "%s}", indent)
 	return b.String()
@@ -739,6 +743,9 @@
 		if command.ArgDoc != "" {
 			fmt.Fprintf(section, "Args:\n\n```\n%s\n```\n\n", command.ArgDoc)
 		}
+		if command.ResultDoc != "" {
+			fmt.Fprintf(section, "Result:\n\n```\n%s\n```\n\n", command.ResultDoc)
+		}
 	}
 	return replaceSection(doc, "Commands", section.Bytes())
 }
diff --git a/gopls/doc/semantictokens.md b/gopls/doc/semantictokens.md
new file mode 100644
index 0000000..fc541fb
--- /dev/null
+++ b/gopls/doc/semantictokens.md
@@ -0,0 +1,121 @@
+# Semantic Tokens
+
+The [LSP](https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#textDocument_semanticTokens)
+specifies semantic tokens as a way of telling clients about language-specific
+properties of pieces of code in a file being edited.
+
+The client asks for a set of semantic tokens and modifiers. This note describes which ones
+gopls will return, and under what circumstances. Gopls has no control over how the client
+converts semantic tokens into colors (or some other visible indication). In vscode it
+is possible to modify the color a theme uses by setting the `editor.semanticTokenColorCustomizations`
+object. We provide a little [guidance](#Colors) later.
+
+There are 22 semantic tokens, with 10 possible modifiers. The protocol allows each semantic
+token to be used with any of the 1024 subsets of possible modifiers, but most combinations
+don't make intuitive sense (although `async documentation` has a certain appeal).
+
+The 22 semantic tokens are `namespace`, `type`, `class`, `enum`, `interface`,
+		`struct`, `typeParameter`, `parameter`, `variable`, `property`, `enumMember`,
+		`event`, `function`, `member`, `macro`, `keyword`, `modifier`, `comment`,
+		`string`, `number`, `regexp`, `operator`.
+
+The 10 modifiers are `declaration`, `definition`, `readonly`, `static`,
+		`deprecated`, `abstract`, `async`, `modification`, `documentation`, `defaultLibrary`.
+
+The authoritative lists are in the [specification](https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#semanticTokenTypes)
+
+For the implementation to work correctly the client and server have to agree on the ordering
+of the tokens and of the modifiers. Gopls, therefore, will only send tokens and modifiers
+that the client has asked for. This document says what gopls would send if the client
+asked for everything. By default, vscode asks for everything.
+
+Gopls sends 11 token types for `.go` files and 1 for `.*tmpl` files.
+Nothing is sent for any other kind of file.
+This all could change. (When Go has generics, gopls will return `typeParameter`.)
+
+For `.*tmpl` files gopls sends `macro`, and no modifiers, for each `{{`...`}}` scope.
+
+## Semantic tokens for Go files
+
+There are two contrasting guiding principles that might be used to decide what to mark
+with semantic tokens. All clients already do some kind of syntax marking. E.g., vscode
+uses a TextMate grammar. The minimal principle would send semantic tokens only for those
+language features that cannot be reliably found without parsing Go and looking at types.
+The maximal principle would attempt to convey as much as possible about the Go code,
+using all available parsing and type information.
+
+There is much to be said for returning minimal information, but the minimal principle is
+not well-specified. Gopls has no way of knowing what the clients know about the Go program
+being edited. Even in vscode the TextMate grammars can be more or less elaborate
+and change over time. (Nonetheless, a minimal implementation would not return `keyword`,
+`number`, `comment`, or `string`.)
+
+The maximal position isn't particularly well-specified either. To choose one example, a
+format string might have formatting codes (`%[4]-3.6f`), escape sequences (`\U00010604`), and regular
+characters. Should these all be distinguished? One could even imagine distinguishing
+different runes by their Unicode language assignment, or some other Unicode property, such as
+being [confusable](http://www.unicode.org/Public/security/10.0.0/confusables.txt).
+
+Gopls does not come close to either of these principles.  Semantic tokens are returned for
+identifiers, keywords, operators, comments, and literals. (Semantic tokens do not
+cover the file. They are not returned for
+white space or punctuation, and there is no semantic token for labels.)
+The following describes more precisely what gopls
+does, with a few notes on possible alternative choices.
+The references to *object* refer to the
+```types.Object``` returned by the type checker. The references to *nodes* refer to the
+```ast.Node``` from the parser.
+
+1. __`keyword`__ All Go [keywords](https://golang.org/ref/spec#Keywords) are marked `keyword`.
+1. __`namespace`__ All package names are marked `namespace`. In an import, if there is an
+alias, it would be marked. Otherwise the last component of the import path is marked.
+1. __`type`__ Objects of type ```types.TypeName``` are marked `type`.
+If they are also ```types.Basic```
+the modifier is `defaultLibrary`. (And in ```type B struct{C}```, ```B``` has modifier `definition`.)
+1. __`parameter`__ The formal arguments in ```ast.FuncDecl``` nodes are marked `parameter`.
+1. __`variable`__  Identifiers in the
+scope of ```const``` are modified with `readonly`. ```nil``` is usually a `variable` modified with both
+`readonly` and `defaultLibrary`. (```nil``` is a predefined identifier; the user can redefine it,
+in which case it would just be a variable, or whatever.) Identifiers of type ```types.Variable``` are,
+not surprisingly, marked `variable`. Identifiers being defined (node ```ast.GenDecl```) are modified
+by `definition` and, if appropriate, `readonly`. Receivers (in method declarations) are
+`variable`.
+1. __`member`__ Members are marked at their definition (```func (x foo) bar() {}```) or declaration
+in an ```interface```. Members are not marked where they are used.
+In ```x.bar()```, ```x``` will be marked
+either as a `namespace` if it is a package name, or as a `variable` if it is an interface value,
+so distinguishing ```bar``` seemed superfluous.
+1. __`function`__ Builtins (```types.Builtin```) are modified with `defaultLibrary`
+(e.g., ```make```, ```len```, ```copy```). Identifiers whose
+object is ```types.Func``` or whose node is ```ast.FuncDecl``` are `function`.
+1. __`comment`__ Comments and struct tags. (Perhaps struct tags should be `property`?)
+1. __`string`__ Strings. Could add modifiers for e.g., escapes or format codes.
+1. __`number`__ Numbers. Should the ```i``` in ```23i``` be handled specially?
+1. __`operator`__ Assignment operators, binary operators, ellipses (```...```), increment/decrement
+operators, sends (```<-```), and unary operators.
+
+Gopls will send the modifier `deprecated` if it finds a comment
+```// deprecated``` in the godoc.
+
+The unused tokens for Go code are `class`, `enum`, `interface`,
+		`struct`, `typeParameter`, `property`, `enumMember`,
+		`event`, `macro`, `modifier`,
+		`regexp`
+
+## Colors
+
+These comments are about vscode.
+
+The documentation has a [helpful](https://code.visualstudio.com/api/language-extensions/semantic-highlight-guide#custom-textmate-scope-mappings)
+description of which semantic tokens correspond to scopes in TextMate grammars. Themes seem
+to use the TextMate scopes to decide on colors.
+
+Some examples of color customizations are [here](https://medium.com/@danromans/how-to-customize-semantic-token-colorization-with-visual-studio-code-ac3eab96141b).
+
+## Note
+
+While a file is being edited it may temporarily contain either
+parsing errors or type errors. In this case gopls cannot determine some (or maybe any)
+of the semantic tokens. To avoid weird flickering it is the responsibility
+of clients to maintain the semantic token information
+in the unedited part of the file, and they do.
\ No newline at end of file
diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md
index f2de5b1..5be569f 100644
--- a/gopls/doc/settings.md
+++ b/gopls/doc/settings.md
@@ -154,6 +154,17 @@
 
 Default: `false`.
 
+#### **experimentalUseInvalidMetadata** *bool*
+
+**This setting is experimental and may be deleted.**
+
+experimentalUseInvalidMetadata enables gopls to fall back on outdated
+package metadata to provide editor features if the go command fails to
+load packages for some reason (like an invalid go.mod file). This will
+eventually be the default behavior, and this setting will be removed.
+
+Default: `false`.
+
 ### Formatting
 
 #### **local** *string*
@@ -297,11 +308,11 @@
 
 Default: `{"bounds":true,"escape":true,"inline":true,"nil":true}`.
 
-##### **experimentalDiagnosticsDelay** *time.Duration*
+##### **diagnosticsDelay** *time.Duration*
 
-**This setting is experimental and may be deleted.**
+**This is an advanced setting and should not be configured by most `gopls` users.**
 
-experimentalDiagnosticsDelay controls the amount of time that gopls waits
+diagnosticsDelay controls the amount of time that gopls waits
 after the most recent file modification before computing deep diagnostics.
 Simple diagnostics (parsing and type-checking) are always run immediately
 on recently modified packages.
@@ -310,6 +321,20 @@
 
 Default: `"250ms"`.
 
+##### **experimentalWatchedFileDelay** *time.Duration*
+
+**This setting is experimental and may be deleted.**
+
+experimentalWatchedFileDelay controls the amount of time that gopls waits
+for additional workspace/didChangeWatchedFiles notifications to arrive,
+before processing all such notifications in a single batch. This is
+intended for use by LSP clients that don't support their own batching of
+file system notifications.
+
+This option must be set to a valid duration string, for example `"100ms"`.
+
+Default: `"0s"`.
+
 #### Documentation
 
 ##### **hoverKind** *enum*
@@ -448,7 +473,7 @@
 Identifier: `tidy`
 
 Runs `go mod tidy` for a module.
-### **Upgrade dependency**
+### **Upgrade a dependency**
 
 Identifier: `upgrade_dependency`
 
diff --git a/gopls/doc/subl.md b/gopls/doc/subl.md
index ad7e667..bd130ef 100644
--- a/gopls/doc/subl.md
+++ b/gopls/doc/subl.md
@@ -8,4 +8,70 @@
 
 Finally, you should familiarise yourself with the LSP package's *Settings* and *Key Bindings*. Find them under the menu item **Preferences > Package Settings > LSP**.
 
+## Examples
+Minimal global LSP settings, that assume **gopls** and **go** appear on the PATH seen by Sublime Text:<br>
+```
+{
+    "clients": {
+        "gopls": {
+             "enabled": true,
+         }
+    }
+}
+```
+
+Global LSP settings that supply a specific PATH for finding **gopls** and **go**, as well as some settings for Sublime LSP itself:
+```
+{
+    "clients": {
+        "gopls": {
+            "enabled": true,
+            "env": {
+                "PATH": "/path/to/your/go/bin",
+            }
+        }
+    },
+    // Recommended by https://agniva.me/gopls/2021/01/02/setting-up-gopls-sublime.html
+    // except log_stderr mentioned there is no longer recognized.
+    "show_references_in_quick_panel": true,
+    "log_debug": true,
+    // These two are recommended by LSP-json as replacement for deprecated only_show_lsp_completions
+    "inhibit_snippet_completions": true,
+    "inhibit_word_completions": true,
+ }
+ ```
+
+LSP and gopls settings can also be adjusted on a per-project basis to override global settings.
+```
+{
+    "folders": [
+        {
+            "path": "/path/to/a/folder/one"
+        },
+        {
+            // If you happen to be working on Go itself, this can be helpful; go-dev/bin should be on PATH.
+            "path": "/path/to/your/go-dev/src/cmd"
+        }
+     ],
+    "settings": {
+        "LSP": {
+            "gopls": {
+                "env": {
+                    "PATH": "/path/to/your/go-dev/bin:/path/to/your/go/bin",
+                    "GOPATH": "",
+                },
+                "settings": {
+                    "experimentalWorkspaceModule": true
+                }
+            }
+        },
+        // This will apply for all languages in this project that have
+        // LSP servers, not just Go, however cannot enable just for Go.
+        "lsp_format_on_save": true,
+    }
+}
+```
+
+Usually changes to these settings are recognized after saving the project file, but it may sometimes be necessary to either restart the server(s) (**Tools > LSP > Restart Servers**) or quit and restart Sublime Text itself.
+
 [LSP]: https://packagecontrol.io/packages/LSP
diff --git a/gopls/doc/vim.md b/gopls/doc/vim.md
index 4ee175d..a6b40a4 100644
--- a/gopls/doc/vim.md
+++ b/gopls/doc/vim.md
@@ -116,8 +116,8 @@
 
 ## <a href="#neovim" id="neovim">Neovim v0.5.0+</a>
 
-To use the new (still experimental) native LSP client in Neovim, make sure you
-[install][nvim-install] the prerelease v0.5.0 version of Neovim (aka “nightly”),
+To use the new native LSP client in Neovim, make sure you
+[install][nvim-install] Neovim v0.5.0+,
 the `nvim-lspconfig` configuration helper plugin, and check the
 [`gopls` configuration section][nvim-lspconfig] there.
 
@@ -165,7 +165,7 @@
   -- …
 
   function goimports(timeout_ms)
-    local context = { source = { organizeImports = true } }
+    local context = { only = { "source.organizeImports" } }
     vim.validate { context = { context, "t", true } }
 
     local params = vim.lsp.util.make_range_params()
diff --git a/gopls/go.mod b/gopls/go.mod
index 6367d7f..a79da5c 100644
--- a/gopls/go.mod
+++ b/gopls/go.mod
@@ -19,3 +19,5 @@
 	mvdan.cc/gofumpt v0.1.1
 	mvdan.cc/xurls/v2 v2.2.0
 )
+
+replace golang.org/x/tools => ../
diff --git a/gopls/internal/hooks/diff.go b/gopls/internal/hooks/diff.go
index 46d7dd7..a307ba7 100644
--- a/gopls/internal/hooks/diff.go
+++ b/gopls/internal/hooks/diff.go
@@ -14,7 +14,7 @@
 
 func ComputeEdits(uri span.URI, before, after string) (edits []diff.TextEdit, err error) {
 	// The go-diff library has an unresolved panic (see golang/go#278774).
-	// TOOD(rstambler): Remove the recover once the issue has been fixed
+	// TODO(rstambler): Remove the recover once the issue has been fixed
 	// upstream.
 	defer func() {
 		if r := recover(); r != nil {
diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go
index 0801ecf..360e956 100644
--- a/gopls/internal/regtest/bench/bench_test.go
+++ b/gopls/internal/regtest/bench/bench_test.go
@@ -7,6 +7,8 @@
 import (
 	"flag"
 	"fmt"
+	"os"
+	"runtime/pprof"
 	"testing"
 	"time"
 
@@ -131,8 +133,9 @@
 }
 
 var (
-	benchDir  = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set regtest_bench_file.")
-	benchFile = flag.String("didchange_file", "", "The file to modify")
+	benchDir     = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set regtest_bench_file.")
+	benchFile    = flag.String("didchange_file", "", "The file to modify")
+	benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
 )
 
 // TestBenchmarkDidChange benchmarks modifications of a single file by making
@@ -162,6 +165,17 @@
 		// Insert the text we'll be modifying at the top of the file.
 		env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
 		result := testing.Benchmark(func(b *testing.B) {
+			if *benchProfile != "" {
+				profile, err := os.Create(*benchProfile)
+				if err != nil {
+					t.Fatal(err)
+				}
+				defer profile.Close()
+				if err := pprof.StartCPUProfile(profile); err != nil {
+					t.Fatal(err)
+				}
+				defer pprof.StopCPUProfile()
+			}
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
 				env.EditBuffer(*benchFile, fake.Edit{
diff --git a/gopls/internal/regtest/codelens/codelens_test.go b/gopls/internal/regtest/codelens/codelens_test.go
index a2a1ae4..d89b8e0 100644
--- a/gopls/internal/regtest/codelens/codelens_test.go
+++ b/gopls/internal/regtest/codelens/codelens_test.go
@@ -166,11 +166,16 @@
 				if vendoring {
 					env.RunGoCommand("mod", "vendor")
 				}
+				env.Await(env.DoneWithChangeWatchedFiles())
 				env.OpenFile("go.mod")
 				env.ExecuteCodeLensCommand("go.mod", command.CheckUpgrades)
 				d := &protocol.PublishDiagnosticsParams{}
-				env.Await(OnceMet(env.DiagnosticAtRegexpWithMessage("go.mod", `require`, "can be upgraded"),
-					ReadDiagnostics("go.mod", d)))
+				env.Await(
+					OnceMet(
+						env.DiagnosticAtRegexpWithMessage("go.mod", `require`, "can be upgraded"),
+						ReadDiagnostics("go.mod", d),
+					),
+				)
 				env.ApplyQuickFixes("go.mod", d.Diagnostics)
 				env.Await(env.DoneWithChangeWatchedFiles())
 				if got := env.Editor.BufferText("go.mod"); got != wantGoMod {
diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go
index afdd494..795f7ae 100644
--- a/gopls/internal/regtest/completion/completion_test.go
+++ b/gopls/internal/regtest/completion/completion_test.go
@@ -322,7 +322,6 @@
 		env.AcceptCompletion("main.go", pos, item)
 
 		// Await the diagnostics to add example.com/blah to the go.mod file.
-		env.SaveBufferWithoutActions("main.go")
 		env.Await(
 			env.DiagnosticAtRegexp("main.go", `"example.com/blah"`),
 		)
@@ -443,9 +442,9 @@
 		}{
 			{`var _ a = aaaa()`, []string{"aaaa1", "aaaa2"}},
 			{`var _ b = bbbb()`, []string{"bbbb1", "bbbb2"}},
-			{`var _ c = xxxx()`, []string{"***xxxxd", "**xxxxe", "xxxxc"}},
-			{`var _ d = xxxx()`, []string{"***xxxxe", "*xxxxc", "xxxxd"}},
-			{`var _ e = xxxx()`, []string{"**xxxxc", "*xxxxd", "xxxxe"}},
+			{`var _ c = xxxx()`, []string{"xxxxc", "xxxxd", "xxxxe"}},
+			{`var _ d = xxxx()`, []string{"xxxxc", "xxxxd", "xxxxe"}},
+			{`var _ e = xxxx()`, []string{"xxxxc", "xxxxd", "xxxxe"}},
 		}
 		for _, tt := range tests {
 			completions := env.Completion("main.go", env.RegexpSearch("main.go", tt.re))
@@ -505,7 +504,6 @@
 }
 
 func TestUnimportedCompletion_VSCodeIssue1489(t *testing.T) {
-	t.Skip("broken due to golang/vscode-go#1489")
 	testenv.NeedsGo1Point(t, 14)
 
 	const src = `
@@ -525,8 +523,7 @@
 }
 `
 	WithOptions(
-		WindowsLineEndings,
-		ProxyFiles(proxy),
+		EditorConfig{WindowsLineEndings: true},
 	).Run(t, src, func(t *testing.T, env *Env) {
 		// Trigger unimported completions for the example.com/blah package.
 		env.OpenFile("main.go")
@@ -538,6 +535,10 @@
 		}
 		env.AcceptCompletion("main.go", pos, completions.Items[0])
 		env.Await(env.DoneWithChange())
-		t.Log(env.Editor.BufferText("main.go"))
+		got := env.Editor.BufferText("main.go")
+		want := "package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"math\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Println(\"a\")\r\n\tmath.Sqrt(${1:})\r\n}\r\n"
+		if got != want {
+			t.Errorf("unimported completion: got %q, want %q", got, want)
+		}
 	})
 }
diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go
index 019ba65..9ee102c 100644
--- a/gopls/internal/regtest/diagnostics/diagnostics_test.go
+++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go
@@ -7,7 +7,6 @@
 import (
 	"context"
 	"fmt"
-	"log"
 	"os/exec"
 	"testing"
 
@@ -147,12 +146,14 @@
 		env.Await(
 			env.DiagnosticAtRegexp("a.go", "a = 1"),
 			env.DiagnosticAtRegexp("b.go", "a = 2"),
-			env.DiagnosticAtRegexp("c.go", "a = 3"))
+			env.DiagnosticAtRegexp("c.go", "a = 3"),
+		)
 		env.CloseBuffer("c.go")
 		env.Await(
 			env.DiagnosticAtRegexp("a.go", "a = 1"),
 			env.DiagnosticAtRegexp("b.go", "a = 2"),
-			EmptyDiagnostics("c.go"))
+			EmptyDiagnostics("c.go"),
+		)
 	})
 }
 
@@ -225,7 +226,6 @@
 // Tests golang/go#38878: deleting a test file on disk while it's still open
 // should not clear its errors.
 func TestDeleteTestVariant_DiskOnly(t *testing.T) {
-	log.SetFlags(log.Lshortfile)
 	Run(t, test38878, func(t *testing.T, env *Env) {
 		env.OpenFile("a_test.go")
 		env.Await(DiagnosticAt("a_test.go", 5, 3))
@@ -438,7 +438,7 @@
 func TestMissingDependency(t *testing.T) {
 	Run(t, testPackageWithRequire, func(t *testing.T, env *Env) {
 		env.OpenFile("print.go")
-		env.Await(LogMatching(protocol.Error, "initial workspace load failed", 1))
+		env.Await(LogMatching(protocol.Error, "initial workspace load failed", 1, false))
 	})
 }
 
@@ -1294,7 +1294,6 @@
 func main() {}
 `
 	Run(t, dir, func(t *testing.T, env *Env) {
-		log.SetFlags(log.Lshortfile)
 		env.OpenFile("main.go")
 		env.OpenFile("other.go")
 		x := env.DiagnosticsFor("main.go")
@@ -1569,6 +1568,7 @@
 		env.Await(
 			env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`),
 			EmptyDiagnostics("bob/bob.go"),
+			RegistrationMatching("didChangeWatchedFiles"),
 		)
 	})
 }
@@ -1615,6 +1615,42 @@
 	})
 }
 
+// Tests golang/go#46667: deleting a problematic import path should resolve
+// import cycle errors.
+func TestResolveImportCycle(t *testing.T) {
+	const mod = `
+-- go.mod --
+module mod.test
+
+go 1.16
+-- a/a.go --
+package a
+
+import "mod.test/b"
+
+const A = b.A
+const B = 2
+-- b/b.go --
+package b
+
+import "mod.test/a"
+
+const A = 1
+const B = a.B
+	`
+	Run(t, mod, func(t *testing.T, env *Env) {
+		env.OpenFile("a/a.go")
+		env.OpenFile("b/b.go")
+		env.Await(env.DiagnosticAtRegexp("a/a.go", `"mod.test/b"`))
+		env.RegexpReplace("b/b.go", `const B = a\.B`, "")
+		env.SaveBuffer("b/b.go")
+		env.Await(
+			EmptyOrNoDiagnostics("a/a.go"),
+			EmptyOrNoDiagnostics("b/b.go"),
+		)
+	})
+}
+
 func TestBadImport(t *testing.T) {
 	testenv.NeedsGo1Point(t, 14)
 
@@ -1886,29 +1922,33 @@
 	})
 }
 
-// Tests golang/go#45075, a panic in fillreturns breaks diagnostics.
+// Tests golang/go#45075: A panic in fillreturns broke diagnostics.
+// Expect an error log indicating that fillreturns panicked, as well as type
+// errors for the broken code.
 func TestFillReturnsPanic(t *testing.T) {
 	// At tip, the panic no longer reproduces.
 	testenv.SkipAfterGo1Point(t, 16)
+
 	const files = `
 -- go.mod --
 module mod.com
 
-go 1.16
+go 1.15
 -- main.go --
 package main
 
-
 func foo() int {
 	return x, nil
 }
-
 `
 	Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		env.Await(
-			env.DiagnosticAtRegexpWithMessage("main.go", `return x`, "wrong number of return values"),
-			LogMatching(protocol.Error, `.*analysis fillreturns.*panicked.*`, 2),
+			OnceMet(
+				env.DoneWithOpen(),
+				LogMatching(protocol.Error, `.*analysis fillreturns.*panicked.*`, 1, true),
+				env.DiagnosticAtRegexpWithMessage("main.go", `return x`, "wrong number of return values"),
+			),
 		)
 	})
 }
@@ -1931,7 +1971,164 @@
 		env.Await(
 			OnceMet(
 				env.DoneWithOpen(),
-				LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1),
+				LogMatching(protocol.Info, `.*query=\[builtin mod.com/...\].*`, 1, false),
+			),
+		)
+	})
+}
+
+func TestUseOfInvalidMetadata(t *testing.T) {
+	testenv.NeedsGo1Point(t, 13)
+
+	const mod = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- main.go --
+package main
+
+import (
+	"mod.com/a"
+	//"os"
+)
+
+func _() {
+	a.Hello()
+	os.Getenv("")
+	//var x int
+}
+-- a/a.go --
+package a
+
+func Hello() {}
+`
+	WithOptions(
+		EditorConfig{
+			ExperimentalUseInvalidMetadata: true,
+		},
+		Modes(Singleton),
+	).Run(t, mod, func(t *testing.T, env *Env) {
+		env.OpenFile("go.mod")
+		env.RegexpReplace("go.mod", "module mod.com", "modul mod.com") // break the go.mod file
+		env.SaveBufferWithoutActions("go.mod")
+		env.Await(
+			env.DiagnosticAtRegexp("go.mod", "modul"),
+		)
+		// Confirm that language features work with invalid metadata.
+		env.OpenFile("main.go")
+		file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", "Hello"))
+		wantPos := env.RegexpSearch("a/a.go", "Hello")
+		if file != "a/a.go" && pos != wantPos {
+			t.Fatalf("expected a/a.go:%s, got %s:%s", wantPos, file, pos)
+		}
+		// Confirm that new diagnostics appear with invalid metadata by adding
+		// an unused variable to the body of the function.
+		env.RegexpReplace("main.go", "//var x int", "var x int")
+		env.Await(
+			env.DiagnosticAtRegexp("main.go", "x"),
+		)
+		// Add an import and confirm that we get a diagnostic for it, since the
+		// metadata will not have been updated.
+		env.RegexpReplace("main.go", "//\"os\"", "\"os\"")
+		env.Await(
+			env.DiagnosticAtRegexp("main.go", `"os"`),
+		)
+		// Fix the go.mod file and expect the diagnostic to resolve itself.
+		env.RegexpReplace("go.mod", "modul mod.com", "module mod.com")
+		env.SaveBuffer("go.mod")
+		env.Await(
+			env.DiagnosticAtRegexp("main.go", "x"),
+			env.NoDiagnosticAtRegexp("main.go", `"os"`),
+			EmptyDiagnostics("go.mod"),
+		)
+	})
+}
+
+func TestReloadInvalidMetadata(t *testing.T) {
+	// We only use invalid metadata for Go versions > 1.12.
+	testenv.NeedsGo1Point(t, 13)
+
+	const mod = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- main.go --
+package main
+
+func _() {}
+`
+	WithOptions(
+		EditorConfig{
+			ExperimentalUseInvalidMetadata: true,
+		},
+		// ExperimentalWorkspaceModule has a different failure mode for this
+		// case.
+		Modes(Singleton),
+	).Run(t, mod, func(t *testing.T, env *Env) {
+		env.Await(
+			OnceMet(
+				InitialWorkspaceLoad,
+				CompletedWork("Load", 1, false),
+			),
+		)
+
+		// Break the go.mod file on disk, expecting a reload.
+		env.WriteWorkspaceFile("go.mod", `modul mod.com
+
+go 1.12
+`)
+		env.Await(
+			OnceMet(
+				env.DoneWithChangeWatchedFiles(),
+				env.DiagnosticAtRegexp("go.mod", "modul"),
+				CompletedWork("Load", 1, false),
+			),
+		)
+
+		env.OpenFile("main.go")
+		env.Await(env.DoneWithOpen())
+		// The first edit after the go.mod file invalidation should cause a reload.
+		// Any subsequent simple edits should not.
+		content := `package main
+
+func main() {
+	_ = 1
+}
+`
+		env.EditBuffer("main.go", fake.NewEdit(0, 0, 3, 0, content))
+		env.Await(
+			OnceMet(
+				env.DoneWithChange(),
+				CompletedWork("Load", 2, false),
+				NoLogMatching(protocol.Error, "error loading file"),
+			),
+		)
+		env.RegexpReplace("main.go", "_ = 1", "_ = 2")
+		env.Await(
+			OnceMet(
+				env.DoneWithChange(),
+				CompletedWork("Load", 2, false),
+				NoLogMatching(protocol.Error, "error loading file"),
+			),
+		)
+		// Add an import to the main.go file and confirm that it does get
+		// reloaded, but the reload fails, so we see a diagnostic on the new
+		// "fmt" import.
+		env.EditBuffer("main.go", fake.NewEdit(0, 0, 5, 0, `package main
+
+import "fmt"
+
+func main() {
+	fmt.Println("")
+}
+`))
+		env.Await(
+			OnceMet(
+				env.DoneWithChange(),
+				env.DiagnosticAtRegexp("main.go", `"fmt"`),
+				CompletedWork("Load", 3, false),
 			),
 		)
 	})
diff --git a/gopls/internal/regtest/misc/formatting_test.go b/gopls/internal/regtest/misc/formatting_test.go
index 52d89e4..1e14237 100644
--- a/gopls/internal/regtest/misc/formatting_test.go
+++ b/gopls/internal/regtest/misc/formatting_test.go
@@ -171,7 +171,7 @@
 // Import organization in these files has historically been a source of bugs.
 func TestCRLFLineEndings(t *testing.T) {
 	for _, tt := range []struct {
-		issue, want string
+		issue, input, want string
 	}{
 		{
 			issue: "41057",
@@ -224,10 +224,38 @@
 }
 `,
 		},
+		{
+			issue: "47200",
+			input: `package main
+
+import "fmt"
+
+func main() {
+	math.Sqrt(9)
+	fmt.Println("hello")
+}
+`,
+			want: `package main
+
+import (
+	"fmt"
+	"math"
+)
+
+func main() {
+	math.Sqrt(9)
+	fmt.Println("hello")
+}
+`,
+		},
 	} {
 		t.Run(tt.issue, func(t *testing.T) {
 			Run(t, "-- main.go --", func(t *testing.T, env *Env) {
-				crlf := strings.ReplaceAll(tt.want, "\n", "\r\n")
+				input := tt.input
+				if input == "" {
+					input = tt.want
+				}
+				crlf := strings.ReplaceAll(input, "\n", "\r\n")
 				env.CreateBuffer("main.go", crlf)
 				env.Await(env.DoneWithOpen())
 				env.OrganizeImports("main.go")
diff --git a/gopls/internal/regtest/misc/generate_test.go b/gopls/internal/regtest/misc/generate_test.go
index 6904fe0..4478951 100644
--- a/gopls/internal/regtest/misc/generate_test.go
+++ b/gopls/internal/regtest/misc/generate_test.go
@@ -21,35 +21,53 @@
 module fake.test
 
 go 1.14
--- lib/generate.go --
+-- generate.go --
 // +build ignore
 
 package main
 
-import "io/ioutil"
+import (
+	"io/ioutil"
+	"os"
+)
 
 func main() {
-	ioutil.WriteFile("generated.go", []byte("package lib\n\nconst answer = 42"), 0644)
-}
--- lib/lib.go --
-package lib
-
-func GetAnswer() int {
-	return answer
+	ioutil.WriteFile("generated.go", []byte("package " + os.Args[1] + "\n\nconst Answer = 21"), 0644)
 }
 
-//go:generate go run generate.go
+-- lib1/lib.go --
+package lib1
+
+//go:generate go run ../generate.go lib1
+
+-- lib2/lib.go --
+package lib2
+
+//go:generate go run ../generate.go lib2
+
+-- main.go --
+package main
+
+import (
+	"fake.test/lib1"
+	"fake.test/lib2"
+)
+
+func main() {
+	println(lib1.Answer + lib2.Answer)
+}
 `
 
 	Run(t, generatedWorkspace, func(t *testing.T, env *Env) {
 		env.Await(
-			env.DiagnosticAtRegexp("lib/lib.go", "answer"),
+			env.DiagnosticAtRegexp("main.go", "lib1.(Answer)"),
 		)
-		env.RunGenerate("./lib")
+		env.RunGenerate("./lib1")
+		env.RunGenerate("./lib2")
 		env.Await(
 			OnceMet(
 				env.DoneWithChangeWatchedFiles(),
-				EmptyDiagnostics("lib/lib.go")),
+				EmptyDiagnostics("main.go")),
 		)
 	})
 }
diff --git a/gopls/internal/regtest/misc/hover_test.go b/gopls/internal/regtest/misc/hover_test.go
index 7a361f9..79e60e2 100644
--- a/gopls/internal/regtest/misc/hover_test.go
+++ b/gopls/internal/regtest/misc/hover_test.go
@@ -9,6 +9,7 @@
 	"testing"
 
 	. "golang.org/x/tools/internal/lsp/regtest"
+	"golang.org/x/tools/internal/testenv"
 )
 
 func TestHoverUnexported(t *testing.T) {
@@ -56,3 +57,34 @@
 		}
 	})
 }
+
+func TestHoverIntLiteral(t *testing.T) {
+	testenv.NeedsGo1Point(t, 13)
+	const source = `
+-- main.go --
+package main
+
+var (
+	bigBin = 0b1001001
+)
+
+var hex = 0xe34e
+
+func main() {
+}
+`
+	Run(t, source, func(t *testing.T, env *Env) {
+		env.OpenFile("main.go")
+		hexExpected := "58190"
+		got, _ := env.Hover("main.go", env.RegexpSearch("main.go", "hex"))
+		if got != nil && !strings.Contains(got.Value, hexExpected) {
+			t.Errorf("Hover: missing expected field '%s'. Got:\n%q", hexExpected, got.Value)
+		}
+
+		binExpected := "73"
+		got, _ = env.Hover("main.go", env.RegexpSearch("main.go", "bigBin"))
+		if got != nil && !strings.Contains(got.Value, binExpected) {
+			t.Errorf("Hover: missing expected field '%s'. Got:\n%q", binExpected, got.Value)
+		}
+	})
+}
diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go
index 129a5ff..6861743 100644
--- a/gopls/internal/regtest/misc/shared_test.go
+++ b/gopls/internal/regtest/misc/shared_test.go
@@ -31,6 +31,7 @@
 		// Create a second test session connected to the same workspace and server
 		// as the first.
 		env2 := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config, true)
+		env2.Await(InitialWorkspaceLoad)
 		testFunc(env1, env2)
 	})
 }
@@ -52,7 +53,9 @@
 
 func TestShutdown(t *testing.T) {
 	runShared(t, func(env1 *Env, env2 *Env) {
-		env1.CloseEditor()
+		if err := env1.Editor.Close(env1.Ctx); err != nil {
+			t.Errorf("closing first editor: %v", err)
+		}
 		// Now make an edit in editor #2 to trigger diagnostics.
 		env2.OpenFile("main.go")
 		env2.RegexpReplace("main.go", "\\)\n(})", "")
diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go
index 8d98539..5b432e1 100644
--- a/gopls/internal/regtest/watch/watch_test.go
+++ b/gopls/internal/regtest/watch/watch_test.go
@@ -395,7 +395,7 @@
 			env.Await(
 				OnceMet(
 					env.DoneWithOpen(),
-					LogMatching(protocol.Info, "a_unneeded.go", 1),
+					LogMatching(protocol.Info, "a_unneeded.go", 1, false),
 				),
 			)
 
@@ -413,7 +413,7 @@
 					// There should only be one log message containing
 					// a_unneeded.go, from the initial workspace load, which we
 					// check for earlier. If there are more, there's a bug.
-					LogMatching(protocol.Info, "a_unneeded.go", 1),
+					LogMatching(protocol.Info, "a_unneeded.go", 1, false),
 				),
 				EmptyDiagnostics("a/a.go"),
 			)
@@ -429,7 +429,7 @@
 			env.Await(
 				OnceMet(
 					env.DoneWithOpen(),
-					LogMatching(protocol.Info, "a_unneeded.go", 1),
+					LogMatching(protocol.Info, "a_unneeded.go", 1, false),
 				),
 			)
 
@@ -447,7 +447,7 @@
 					// There should only be one log message containing
 					// a_unneeded.go, from the initial workspace load, which we
 					// check for earlier. If there are more, there's a bug.
-					LogMatching(protocol.Info, "a_unneeded.go", 1),
+					LogMatching(protocol.Info, "a_unneeded.go", 1, false),
 				),
 				EmptyDiagnostics("a/a.go"),
 			)
diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go
index 56a7af8..4c5d1fc 100644
--- a/gopls/internal/regtest/workspace/workspace_test.go
+++ b/gopls/internal/regtest/workspace/workspace_test.go
@@ -341,12 +341,15 @@
 		Modes(Experimental),
 	).Run(t, multiModule, func(t *testing.T, env *Env) {
 		env.OpenFile("moda/a/a.go")
+		env.Await(env.DoneWithOpen())
 
 		original, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
 		if want := "modb/b/b.go"; !strings.HasSuffix(original, want) {
 			t.Errorf("expected %s, got %v", want, original)
 		}
 		env.CloseBuffer(original)
+		env.Await(env.DoneWithClose())
+
 		env.RemoveWorkspaceFile("modb/b/b.go")
 		env.RemoveWorkspaceFile("modb/go.mod")
 		env.Await(
@@ -361,6 +364,7 @@
 			),
 		)
 		env.ApplyQuickFixes("moda/a/go.mod", d.Diagnostics)
+		env.Await(env.DoneWithChangeWatchedFiles())
 		got, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
 		if want := "b.com@v1.2.3/b/b.go"; !strings.HasSuffix(got, want) {
 			t.Errorf("expected %s, got %v", want, got)
@@ -607,6 +611,132 @@
 	})
 }
 
+func TestUseGoWork(t *testing.T) {
+	// This test validates certain functionality related to using a go.work
+	// file to specify workspace modules.
+	testenv.NeedsGo1Point(t, 14)
+	const multiModule = `
+-- moda/a/go.mod --
+module a.com
+
+require b.com v1.2.3
+-- moda/a/go.sum --
+b.com v1.2.3 h1:tXrlXP0rnjRpKNmkbLYoWBdq0ikb3C3bKK9//moAWBI=
+b.com v1.2.3/go.mod h1:D+J7pfFBZK5vdIdZEFquR586vKKIkqG7Qjw9AxG5BQ8=
+-- moda/a/a.go --
+package a
+
+import (
+	"b.com/b"
+)
+
+func main() {
+	var x int
+	_ = b.Hello()
+}
+-- modb/go.mod --
+module b.com
+
+require example.com v1.2.3
+-- modb/go.sum --
+example.com v1.2.3 h1:Yryq11hF02fEf2JlOS2eph+ICE2/ceevGV3C9dl5V/c=
+example.com v1.2.3/go.mod h1:Y2Rc5rVWjWur0h3pd9aEvK5Pof8YKDANh9gHA2Maujo=
+-- modb/b/b.go --
+package b
+
+func Hello() int {
+	var x int
+}
+-- go.work --
+go 1.17
+
+directory (
+	./moda/a
+)
+`
+	WithOptions(
+		ProxyFiles(workspaceModuleProxy),
+		Modes(Experimental),
+	).Run(t, multiModule, func(t *testing.T, env *Env) {
+		// Initially, the go.work file should cause only the a.com module to be
+		// loaded. Validate this by jumping to a definition in b.com and ensuring
+		// that we go to the module cache.
+		env.OpenFile("moda/a/a.go")
+		env.Await(env.DoneWithOpen())
+
+		// To verify which modules are loaded, we'll jump to the definition of
+		// b.Hello.
+		checkHelloLocation := func(want string) error {
+			location, _ := env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
+			if !strings.HasSuffix(location, want) {
+				return fmt.Errorf("expected %s, got %v", want, location)
+			}
+			return nil
+		}
+
+		// Initially this should be in the module cache, as b.com is not replaced.
+		if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil {
+			t.Fatal(err)
+		}
+
+		// Now, modify the go.work file on disk to activate the b.com module in
+		// the workspace.
+		env.WriteWorkspaceFile("go.work", `
+go 1.17
+
+directory (
+	./moda/a
+	./modb
+)
+`)
+		env.Await(env.DoneWithChangeWatchedFiles())
+		// Check that go.mod diagnostics picked up the newly active mod file.
+		// The local version of modb has an extra dependency we need to download.
+		env.OpenFile("modb/go.mod")
+		env.Await(env.DoneWithOpen())
+
+		var d protocol.PublishDiagnosticsParams
+		env.Await(
+			OnceMet(
+				env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"),
+				ReadDiagnostics("modb/go.mod", &d),
+			),
+		)
+		env.ApplyQuickFixes("modb/go.mod", d.Diagnostics)
+		env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x"))
+		// Jumping to definition should now go to b.com in the workspace.
+		if err := checkHelloLocation("modb/b/b.go"); err != nil {
+			t.Fatal(err)
+		}
+
+		// Now, let's modify the go.work *overlay* (not on disk), and verify that
+		// this change is only picked up once it is saved.
+		env.OpenFile("go.work")
+		env.Await(env.DoneWithOpen())
+		env.SetBufferContent("go.work", `go 1.17
+
+directory (
+	./moda/a
+)`)
+
+		// Editing the go.work file removes modb from the workspace modules, and so
+		// should clear outstanding diagnostics...
+		env.Await(OnceMet(
+			env.DoneWithChange(),
+			EmptyDiagnostics("modb/go.mod"),
+		))
+		// ...but does not yet cause a workspace reload, so we should still jump to modb.
+		if err := checkHelloLocation("modb/b/b.go"); err != nil {
+			t.Fatal(err)
+		}
+		// Saving should reload the workspace.
+		env.SaveBufferWithoutActions("go.work")
+		if err := checkHelloLocation("b.com@v1.2.3/b/b.go"); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
 func TestNonWorkspaceFileCreation(t *testing.T) {
 	testenv.NeedsGo1Point(t, 13)
 
@@ -913,3 +1043,41 @@
 		)
 	})
 }
+
+// Sometimes users may have their module cache within the workspace.
+// We shouldn't consider any module in the module cache to be in the workspace.
+func TestGOMODCACHEInWorkspace(t *testing.T) {
+	const mod = `
+-- a/go.mod --
+module a.com
+
+go 1.12
+-- a/a.go --
+package a
+
+func _() {}
+-- a/c/c.go --
+package c
+-- gopath/src/b/b.go --
+package b
+-- gopath/pkg/mod/example.com/go.mod --
+module example.com
+
+go 1.12
+-- gopath/pkg/mod/example.com/main.go --
+package main
+`
+	WithOptions(
+		EditorConfig{Env: map[string]string{
+			"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath"),
+		}},
+		Modes(Singleton),
+	).Run(t, mod, func(t *testing.T, env *Env) {
+		env.Await(
+			// Confirm that the build configuration is seen as valid,
+			// even though there are technically multiple go.mod files in the
+			// workspace.
+			LogMatching(protocol.Info, ".*valid build configuration = true.*", 1, false),
+		)
+	})
+}
diff --git a/internal/jsonrpc2_v2/conn.go b/internal/jsonrpc2_v2/conn.go
index 6d92c0c..606c3f9 100644
--- a/internal/jsonrpc2_v2/conn.go
+++ b/internal/jsonrpc2_v2/conn.go
@@ -50,7 +50,7 @@
 	writerBox   chan Writer
 	outgoingBox chan map[ID]chan<- *Response
 	incomingBox chan map[ID]*incoming
-	async       async
+	async       *async
 }
 
 type AsyncCall struct {
@@ -87,6 +87,7 @@
 		writerBox:   make(chan Writer, 1),
 		outgoingBox: make(chan map[ID]chan<- *Response, 1),
 		incomingBox: make(chan map[ID]*incoming, 1),
+		async:       newAsync(),
 	}
 
 	options, err := binder.Bind(ctx, c)
@@ -104,7 +105,6 @@
 	}
 	c.outgoingBox <- make(map[ID]chan<- *Response)
 	c.incomingBox <- make(map[ID]*incoming)
-	c.async.init()
 	// the goroutines started here will continue until the underlying stream is closed
 	reader := options.Framer.Reader(rwc)
 	readToQueue := make(chan *incoming)
@@ -112,6 +112,7 @@
 	go c.readIncoming(ctx, reader, readToQueue)
 	go c.manageQueue(ctx, options.Preempter, readToQueue, queueToDeliver)
 	go c.deliverMessages(ctx, options.Handler, queueToDeliver)
+
 	// releaseing the writer must be the last thing we do in case any requests
 	// are blocked waiting for the connection to be ready
 	c.writerBox <- options.Framer.Writer(rwc)
diff --git a/internal/jsonrpc2_v2/jsonrpc2.go b/internal/jsonrpc2_v2/jsonrpc2.go
index 4e853d5..271f42c 100644
--- a/internal/jsonrpc2_v2/jsonrpc2.go
+++ b/internal/jsonrpc2_v2/jsonrpc2.go
@@ -57,17 +57,19 @@
 	return f(ctx, req)
 }
 
-// async is a small helper for things with an asynchronous result that you can
-// wait for.
+// async is a small helper for operations with an asynchronous result that you
+// can wait for.
 type async struct {
-	ready  chan struct{}
-	errBox chan error
+	ready  chan struct{} // signals that the operation has completed
+	errBox chan error    // guards the operation result
 }
 
-func (a *async) init() {
+func newAsync() *async {
+	var a async
 	a.ready = make(chan struct{})
 	a.errBox = make(chan error, 1)
 	a.errBox <- nil
+	return &a
 }
 
 func (a *async) done() {
diff --git a/internal/jsonrpc2_v2/jsonrpc2_test.go b/internal/jsonrpc2_v2/jsonrpc2_test.go
index 6d057b4..1157779 100644
--- a/internal/jsonrpc2_v2/jsonrpc2_test.go
+++ b/internal/jsonrpc2_v2/jsonrpc2_test.go
@@ -126,7 +126,7 @@
 func testConnection(t *testing.T, framer jsonrpc2.Framer) {
 	stacktest.NoLeak(t)
 	ctx := eventtest.NewContext(context.Background(), t)
-	listener, err := jsonrpc2.NetPipe(ctx)
+	listener, err := jsonrpc2.NetPipeListener(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/internal/jsonrpc2_v2/net.go b/internal/jsonrpc2_v2/net.go
index c8cfaab..0b413d8 100644
--- a/internal/jsonrpc2_v2/net.go
+++ b/internal/jsonrpc2_v2/net.go
@@ -80,11 +80,11 @@
 	return n.dialer.DialContext(ctx, n.network, n.address)
 }
 
-// NetPipe returns a new Listener that listens using net.Pipe.
+// NetPipeListener returns a new Listener that listens using net.Pipe.
 // It is only possibly to connect to it using the Dialier returned by the
 // Dialer method, each call to that method will generate a new pipe the other
 // side of which will be returnd from the Accept call.
-func NetPipe(ctx context.Context) (Listener, error) {
+func NetPipeListener(ctx context.Context) (Listener, error) {
 	return &netPiper{
 		done:   make(chan struct{}),
 		dialed: make(chan io.ReadWriteCloser),
diff --git a/internal/jsonrpc2_v2/serve.go b/internal/jsonrpc2_v2/serve.go
index f3b78f5..98e8894 100644
--- a/internal/jsonrpc2_v2/serve.go
+++ b/internal/jsonrpc2_v2/serve.go
@@ -42,7 +42,7 @@
 type Server struct {
 	listener Listener
 	binder   Binder
-	async    async
+	async    *async
 }
 
 // Dial uses the dialer to make a new connection, wraps the returned
@@ -68,8 +68,8 @@
 	server := &Server{
 		listener: listener,
 		binder:   binder,
+		async:    newAsync(),
 	}
-	server.async.init()
 	go server.run(ctx)
 	return server, nil
 }
diff --git a/internal/jsonrpc2_v2/serve_test.go b/internal/jsonrpc2_v2/serve_test.go
index 7f1dbc3..26cf6a5 100644
--- a/internal/jsonrpc2_v2/serve_test.go
+++ b/internal/jsonrpc2_v2/serve_test.go
@@ -89,7 +89,7 @@
 			return jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{})
 		}},
 		{"pipe", func(ctx context.Context) (jsonrpc2.Listener, error) {
-			return jsonrpc2.NetPipe(ctx)
+			return jsonrpc2.NetPipeListener(ctx)
 		}},
 	}
 
diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go
index c9c50f9..faf0306 100644
--- a/internal/lsp/cache/analysis.go
+++ b/internal/lsp/cache/analysis.go
@@ -26,9 +26,7 @@
 
 func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) {
 	var roots []*actionHandle
-
 	for _, a := range analyzers {
-
 		if !a.IsEnabled(s.view) {
 			continue
 		}
@@ -259,7 +257,7 @@
 	// Run the analysis.
 	pass := &analysis.Pass{
 		Analyzer:   analyzer,
-		Fset:       snapshot.view.session.cache.fset,
+		Fset:       snapshot.FileSet(),
 		Files:      syntax,
 		Pkg:        pkg.GetTypes(),
 		TypesInfo:  pkg.GetTypesInfo(),
diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go
index 00f24eb..89094b0 100644
--- a/internal/lsp/cache/check.go
+++ b/internal/lsp/cache/check.go
@@ -26,6 +26,7 @@
 	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/packagesinternal"
 	"golang.org/x/tools/internal/span"
+	"golang.org/x/tools/internal/typeparams"
 	"golang.org/x/tools/internal/typesinternal"
 	errors "golang.org/x/xerrors"
 )
@@ -41,7 +42,7 @@
 	mode source.ParseMode
 
 	// m is the metadata associated with the package.
-	m *metadata
+	m *knownMetadata
 
 	// key is the hashed key for the package.
 	key packageHandleKey
@@ -81,6 +82,9 @@
 }
 
 // buildPackageHandle returns a packageHandle for a given package and mode.
+// It assumes that the given ID already has metadata available, so it does not
+// attempt to reload missing or invalid metadata. The caller must reload
+// metadata if needed.
 func (s *snapshot) buildPackageHandle(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, error) {
 	if ph := s.getPackage(id, mode); ph != nil {
 		return ph, nil
@@ -117,7 +121,7 @@
 		}
 
 		data := &packageData{}
-		data.pkg, data.err = typeCheck(ctx, snapshot, m, mode, deps)
+		data.pkg, data.err = typeCheck(ctx, snapshot, m.metadata, mode, deps)
 		// Make sure that the workers above have finished before we return,
 		// especially in case of cancellation.
 		wg.Wait()
@@ -167,14 +171,22 @@
 	var depKeys []packageHandleKey
 	for _, depID := range depList {
 		depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
-		if err != nil {
-			event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
+		// Don't use invalid metadata for dependencies if the top-level
+		// metadata is valid. We only load top-level packages, so if the
+		// top-level is valid, all of its dependencies should be as well.
+		if err != nil || m.valid && !depHandle.m.valid {
+			if err != nil {
+				event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
+			} else {
+				event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id))
+			}
+
 			if ctx.Err() != nil {
 				return nil, nil, ctx.Err()
 			}
 			// One bad dependency should not prevent us from checking the entire package.
 			// Add a special key to mark a bad dependency.
-			depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", id)))
+			depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID)))
 			continue
 		}
 		deps[depHandle.m.pkgPath] = depHandle
@@ -332,7 +344,6 @@
 			}
 		}
 	}
-
 	// If this is a replaced module in the workspace, the version is
 	// meaningless, and we don't want clients to access it.
 	if m.module != nil {
@@ -436,6 +447,7 @@
 		},
 		typesSizes: m.typesSizes,
 	}
+	typeparams.InitInferred(pkg.typesInfo)
 
 	for _, gf := range pkg.m.goFiles {
 		// In the presence of line directives, we may need to report errors in
@@ -498,7 +510,7 @@
 			}
 			dep := resolveImportPath(pkgPath, pkg, deps)
 			if dep == nil {
-				return nil, snapshot.missingPkgError(pkgPath)
+				return nil, snapshot.missingPkgError(ctx, pkgPath)
 			}
 			if !source.IsValidImport(string(m.pkgPath), string(dep.m.pkgPath)) {
 				return nil, errors.Errorf("invalid use of internal package %s", pkgPath)
@@ -527,8 +539,10 @@
 	for _, cgf := range pkg.compiledGoFiles {
 		files = append(files, cgf.File)
 	}
+
 	// Type checking errors are handled via the config, so ignore them here.
 	_ = check.Files(files)
+
 	// If the context was cancelled, we may have returned a ton of transient
 	// errors to the type checker. Swallow them.
 	if ctx.Err() != nil {
@@ -713,18 +727,22 @@
 
 // missingPkgError returns an error message for a missing package that varies
 // based on the user's workspace mode.
-func (s *snapshot) missingPkgError(pkgPath string) error {
-	if s.workspaceMode()&moduleMode != 0 {
-		return fmt.Errorf("no required module provides package %q", pkgPath)
-	}
-	gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath))
-
+func (s *snapshot) missingPkgError(ctx context.Context, pkgPath string) error {
 	var b strings.Builder
-	b.WriteString(fmt.Sprintf("cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg))
+	if s.workspaceMode()&moduleMode == 0 {
+		gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath))
 
-	for _, gopath := range strings.Split(s.view.gopath, ":") {
-		gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath))
-		b.WriteString(fmt.Sprintf("\n\t%s (from $GOPATH)", gopathSrcPkg))
+		b.WriteString(fmt.Sprintf("cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg))
+
+		for _, gopath := range filepath.SplitList(s.view.gopath) {
+			gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath))
+			b.WriteString(fmt.Sprintf("\n\t%s (from $GOPATH)", gopathSrcPkg))
+		}
+	} else {
+		b.WriteString(fmt.Sprintf("no required module provides package %q", pkgPath))
+		if err := s.getInitializationError(ctx); err != nil {
+			b.WriteString(fmt.Sprintf("(workspace configuration error: %s)", err.MainError))
+		}
 	}
 	return errors.New(b.String())
 }
diff --git a/internal/lsp/cache/errors.go b/internal/lsp/cache/errors.go
index 6cc3e45..b866646 100644
--- a/internal/lsp/cache/errors.go
+++ b/internal/lsp/cache/errors.go
@@ -34,7 +34,7 @@
 			URI:      spn.URI(),
 			Range:    rng,
 			Severity: protocol.SeverityError,
-			Source:   source.TypeError,
+			Source:   source.ListError,
 			Message:  msg,
 		}}, nil
 	}
@@ -257,7 +257,7 @@
 	for _, fix := range diag.SuggestedFixes {
 		edits := make(map[span.URI][]protocol.TextEdit)
 		for _, e := range fix.TextEdits {
-			spn, err := span.NewRange(snapshot.view.session.cache.fset, e.Pos, e.End).Span()
+			spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
 			if err != nil {
 				return nil, err
 			}
@@ -373,7 +373,7 @@
 		// Search file imports for the import that is causing the import cycle.
 		for _, imp := range cgf.File.Imports {
 			if imp.Path.Value == circImp {
-				spn, err := span.NewRange(snapshot.view.session.cache.fset, imp.Pos(), imp.End()).Span()
+				spn, err := span.NewRange(snapshot.FileSet(), imp.Pos(), imp.End()).Span()
 				if err != nil {
 					return msg, span.Span{}, false
 				}
diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go
index 56a4e72..1baa3e5 100644
--- a/internal/lsp/cache/load.go
+++ b/internal/lsp/cache/load.go
@@ -45,14 +45,30 @@
 
 	// config is the *packages.Config associated with the loaded package.
 	config *packages.Config
+
+	// isIntermediateTestVariant reports whether the given package is an
+	// intermediate test variant, e.g.
+	// "golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test]".
+	isIntermediateTestVariant bool
 }
 
 // load calls packages.Load for the given scopes, updating package metadata,
 // import graph, and mapped files with the result.
-func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interface{}) error {
+func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interface{}) (err error) {
 	var query []string
 	var containsDir bool // for logging
 	for _, scope := range scopes {
+		if !s.shouldLoad(scope) {
+			continue
+		}
+		// Unless the context was canceled, set "shouldLoad" to false for all
+		// of the metadata we attempted to load.
+		defer func() {
+			if errors.Is(err, context.Canceled) {
+				return
+			}
+			s.clearShouldLoad(scope)
+		}()
 		switch scope := scope.(type) {
 		case packagePath:
 			if source.IsCommandLineArguments(string(scope)) {
@@ -71,7 +87,12 @@
 			}
 			query = append(query, fmt.Sprintf("file=%s", uri.Filename()))
 		case moduleLoadScope:
-			query = append(query, fmt.Sprintf("%s/...", scope))
+			switch scope {
+			case "std", "cmd":
+				query = append(query, string(scope))
+			default:
+				query = append(query, fmt.Sprintf("%s/...", scope))
+			}
 		case viewLoadScope:
 			// If we are outside of GOPATH, a module, or some other known
 			// build system, don't load subdirectories.
@@ -93,6 +114,15 @@
 	}
 	sort.Strings(query) // for determinism
 
+	if s.view.Options().VerboseWorkDoneProgress {
+		work := s.view.session.progress.Start(ctx, "Load", fmt.Sprintf("Loading query=%s", query), nil, nil)
+		defer func() {
+			go func() {
+				work.End("Done.")
+			}()
+		}()
+	}
+
 	ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query))
 	defer done()
 
@@ -110,7 +140,7 @@
 	// Set a last resort deadline on packages.Load since it calls the go
 	// command, which may hang indefinitely if it has a bug. golang/go#42132
 	// and golang/go#42255 have more context.
-	ctx, cancel := context.WithTimeout(ctx, 15*time.Minute)
+	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
 	defer cancel()
 
 	cfg := s.config(ctx, inv)
@@ -164,7 +194,9 @@
 			continue
 		}
 		// Set the metadata for this package.
-		m, err := s.setMetadata(ctx, packagePath(pkg.PkgPath), pkg, cfg, map[packageID]struct{}{})
+		s.mu.Lock()
+		m, err := s.setMetadataLocked(ctx, packagePath(pkg.PkgPath), pkg, cfg, map[packageID]struct{}{})
+		s.mu.Unlock()
 		if err != nil {
 			return err
 		}
@@ -358,10 +390,10 @@
 	return span.URIFromPath(v.(*workspaceDirData).dir), nil
 }
 
-// setMetadata extracts metadata from pkg and records it in s. It
+// setMetadataLocked extracts metadata from pkg and records it in s. It
 // recurses through pkg.Imports to ensure that metadata exists for all
 // dependencies.
-func (s *snapshot) setMetadata(ctx context.Context, pkgPath packagePath, pkg *packages.Package, cfg *packages.Config, seen map[packageID]struct{}) (*metadata, error) {
+func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath packagePath, pkg *packages.Package, cfg *packages.Config, seen map[packageID]struct{}) (*metadata, error) {
 	id := packageID(pkg.ID)
 	if _, ok := seen[id]; ok {
 		return nil, errors.Errorf("import cycle detected: %q", id)
@@ -388,16 +420,18 @@
 		m.errors = append(m.errors, err)
 	}
 
+	uris := map[span.URI]struct{}{}
 	for _, filename := range pkg.CompiledGoFiles {
 		uri := span.URIFromPath(filename)
 		m.compiledGoFiles = append(m.compiledGoFiles, uri)
-		s.addID(uri, m.id)
+		uris[uri] = struct{}{}
 	}
 	for _, filename := range pkg.GoFiles {
 		uri := span.URIFromPath(filename)
 		m.goFiles = append(m.goFiles, uri)
-		s.addID(uri, m.id)
+		uris[uri] = struct{}{}
 	}
+	s.updateIDForURIsLocked(id, uris)
 
 	// TODO(rstambler): is this still necessary?
 	copied := map[packageID]struct{}{
@@ -420,24 +454,30 @@
 			m.missingDeps[importPkgPath] = struct{}{}
 			continue
 		}
-		if s.getMetadata(importID) == nil {
-			if _, err := s.setMetadata(ctx, importPkgPath, importPkg, cfg, copied); err != nil {
+		if s.noValidMetadataForIDLocked(importID) {
+			if _, err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, copied); err != nil {
 				event.Error(ctx, "error in dependency", err)
 			}
 		}
 	}
 
 	// Add the metadata to the cache.
-	s.mu.Lock()
-	defer s.mu.Unlock()
 
-	// TODO: We should make sure not to set duplicate metadata,
-	// and instead panic here. This can be done by making sure not to
-	// reset metadata information for packages we've already seen.
-	if original, ok := s.metadata[m.id]; ok {
-		m = original
+	// If we've already set the metadata for this snapshot, reuse it.
+	if original, ok := s.metadata[m.id]; ok && original.valid {
+		// Since we've just reloaded, clear out shouldLoad.
+		original.shouldLoad = false
+		m = original.metadata
 	} else {
-		s.metadata[m.id] = m
+		s.metadata[m.id] = &knownMetadata{
+			metadata: m,
+			valid:    true,
+		}
+		// Invalidate any packages we may have associated with this metadata.
+		for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} {
+			key := packageKey{mode, m.id}
+			delete(s.packages, key)
+		}
 	}
 
 	// Set the workspace packages. If any of the package's files belong to the
@@ -463,6 +503,7 @@
 			s.workspacePackages[m.id] = m.forTest
 		default:
 			// A test variant of some intermediate package. We don't care about it.
+			m.isIntermediateTestVariant = true
 		}
 	}
 	return m, nil
diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go
index 3827cac..d455a25 100644
--- a/internal/lsp/cache/parse.go
+++ b/internal/lsp/cache/parse.go
@@ -59,7 +59,7 @@
 	}
 	parseHandle := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
 		snapshot := arg.(*snapshot)
-		return parseGo(ctx, snapshot.view.session.cache.fset, fh, mode)
+		return parseGo(ctx, snapshot.FileSet(), fh, mode)
 	}, nil)
 
 	pgh := &parseGoHandle{
@@ -120,8 +120,7 @@
 		return nil, err
 	}
 	astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} {
-		snapshot := arg.(*snapshot)
-		return buildASTCache(ctx, snapshot, pgf)
+		return buildASTCache(pgf)
 	}, nil)
 
 	d, err := astHandle.Get(ctx, s.generation, s)
@@ -160,7 +159,7 @@
 
 // buildASTCache builds caches to aid in quickly going from the typed
 // world to the syntactic world.
-func buildASTCache(ctx context.Context, snapshot *snapshot, pgf *source.ParsedGoFile) *astCacheData {
+func buildASTCache(pgf *source.ParsedGoFile) *astCacheData {
 	var (
 		// path contains all ancestors, including n.
 		path []ast.Node
@@ -1071,7 +1070,17 @@
 
 	exprBytes := make([]byte, 0, int(to-from)+3)
 	// Avoid doing tok.Offset(to) since that panics if badExpr ends at EOF.
-	exprBytes = append(exprBytes, src[tok.Offset(from):tok.Offset(to-1)+1]...)
+	// It also panics if the position is not in the range of the file, and
+	// badExprs may not necessarily have good positions, so check first.
+	if !inRange(tok, from) {
+		return false
+	}
+	if !inRange(tok, to-1) {
+		return false
+	}
+	fromOffset := tok.Offset(from)
+	toOffset := tok.Offset(to-1) + 1
+	exprBytes = append(exprBytes, src[fromOffset:toOffset]...)
 	exprBytes = bytes.TrimSpace(exprBytes)
 
 	// If our expression ends in "]" (e.g. "[]"), add a phantom selector
@@ -1103,6 +1112,12 @@
 	return replaceNode(parent, bad, at)
 }
 
+// inRange reports whether the given position is in the given token.File.
+func inRange(tok *token.File, pos token.Pos) bool {
+	size := tok.Pos(tok.Size())
+	return int(pos) >= tok.Base() && pos <= size
+}
+
 // precedingToken scans src to find the token preceding pos.
 func precedingToken(pos token.Pos, tok *token.File, src []byte) token.Token {
 	s := &scanner.Scanner{}
diff --git a/internal/lsp/cache/pkg.go b/internal/lsp/cache/pkg.go
index aa07564..5a87a14 100644
--- a/internal/lsp/cache/pkg.go
+++ b/internal/lsp/cache/pkg.go
@@ -61,6 +61,10 @@
 	return string(p.m.pkgPath)
 }
 
+func (p *pkg) ParseMode() source.ParseMode {
+	return p.mode
+}
+
 func (p *pkg) CompiledGoFiles() []*source.ParsedGoFile {
 	return p.compiledGoFiles
 }
diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go
index 657a0ee..2cd85b9 100644
--- a/internal/lsp/cache/session.go
+++ b/internal/lsp/cache/session.go
@@ -14,6 +14,7 @@
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/imports"
+	"golang.org/x/tools/internal/lsp/progress"
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/span"
 	"golang.org/x/tools/internal/xcontext"
@@ -27,7 +28,7 @@
 	optionsMu sync.Mutex
 	options   *source.Options
 
-	viewMu  sync.Mutex
+	viewMu  sync.RWMutex
 	views   []*View
 	viewMap map[span.URI]*View // map of URI->best view
 
@@ -36,6 +37,8 @@
 
 	// gocmdRunner guards go command calls from concurrency errors.
 	gocmdRunner *gocommand.Runner
+
+	progress *progress.Tracker
 }
 
 type overlay struct {
@@ -131,6 +134,11 @@
 	s.options = options
 }
 
+func (s *Session) SetProgressTracker(tracker *progress.Tracker) {
+	// The progress tracker should be set before any view is initialized.
+	s.progress = tracker
+}
+
 func (s *Session) Shutdown(ctx context.Context) {
 	s.viewMu.Lock()
 	defer s.viewMu.Unlock()
@@ -173,14 +181,14 @@
 	}
 	root := folder
 	if options.ExpandWorkspaceToModule {
-		root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(options), options.ExperimentalWorkspaceModule)
+		root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), options.ExperimentalWorkspaceModule)
 		if err != nil {
 			return nil, nil, func() {}, err
 		}
 	}
 
 	// Build the gopls workspace, collecting active modules in the view.
-	workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(options), ws.userGo111Module == off, options.ExperimentalWorkspaceModule)
+	workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), ws.userGo111Module == off, options.ExperimentalWorkspaceModule)
 	if err != nil {
 		return nil, nil, func() {}, err
 	}
@@ -221,7 +229,7 @@
 		generation:        s.cache.store.Generation(generationName(v, 0)),
 		packages:          make(map[packageKey]*packageHandle),
 		ids:               make(map[span.URI][]packageID),
-		metadata:          make(map[packageID]*metadata),
+		metadata:          make(map[packageID]*knownMetadata),
 		files:             make(map[span.URI]source.VersionedFileHandle),
 		goFiles:           make(map[parseKey]*parseGoHandle),
 		importedBy:        make(map[packageID][]packageID),
@@ -240,27 +248,21 @@
 	snapshot := v.snapshot
 	release := snapshot.generation.Acquire(initCtx)
 	go func() {
+		defer release()
 		snapshot.initialize(initCtx, true)
-		if v.tempWorkspace != "" {
-			var err error
-			var wsdir span.URI
-			wsdir, err = snapshot.getWorkspaceDir(initCtx)
-			if err == nil {
-				err = copyWorkspace(v.tempWorkspace, wsdir)
-			}
-			if err != nil {
-				event.Error(ctx, "copying workspace dir", err)
-			}
+		// Ensure that the view workspace is written at least once following
+		// initialization.
+		if err := v.updateWorkspace(initCtx); err != nil {
+			event.Error(ctx, "copying workspace dir", err)
 		}
-		release()
 	}()
 	return v, snapshot, snapshot.generation.Acquire(ctx), nil
 }
 
 // View returns the view by name.
 func (s *Session) View(name string) source.View {
-	s.viewMu.Lock()
-	defer s.viewMu.Unlock()
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 	for _, view := range s.views {
 		if view.Name() == name {
 			return view
@@ -276,9 +278,8 @@
 }
 
 func (s *Session) viewOf(uri span.URI) (*View, error) {
-	s.viewMu.Lock()
-	defer s.viewMu.Unlock()
-
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 	// Check if we already know this file.
 	if v, found := s.viewMap[uri]; found {
 		return v, nil
@@ -292,8 +293,8 @@
 }
 
 func (s *Session) viewsOf(uri span.URI) []*View {
-	s.viewMu.Lock()
-	defer s.viewMu.Unlock()
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 
 	var views []*View
 	for _, view := range s.views {
@@ -305,8 +306,8 @@
 }
 
 func (s *Session) Views() []source.View {
-	s.viewMu.Lock()
-	defer s.viewMu.Unlock()
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 	result := make([]source.View, len(s.views))
 	for i, v := range s.views {
 		result[i] = v
@@ -416,6 +417,8 @@
 }
 
 func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, []func(), error) {
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 	views := make(map[*View]map[span.URI]*fileChange)
 	affectedViews := map[span.URI][]*View{}
 
@@ -515,6 +518,8 @@
 }
 
 func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification {
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 	var snapshots []*snapshot
 	for _, v := range s.views {
 		snapshot, release := v.getSnapshot(ctx)
@@ -553,8 +558,7 @@
 		for _, dir := range dirs {
 			result[dir] = struct{}{}
 		}
-		subdirs := snapshot.allKnownSubdirs(ctx)
-		for dir := range subdirs {
+		for _, dir := range snapshot.getKnownSubdirs(dirs) {
 			result[dir] = struct{}{}
 		}
 	}
@@ -704,6 +708,8 @@
 }
 
 func (s *Session) FileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
+	s.viewMu.RLock()
+	defer s.viewMu.RUnlock()
 	patterns := map[string]struct{}{}
 	for _, view := range s.views {
 		snapshot, release := view.getSnapshot(ctx)
diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go
index a47cc21..c741885 100644
--- a/internal/lsp/cache/snapshot.go
+++ b/internal/lsp/cache/snapshot.go
@@ -73,7 +73,7 @@
 
 	// metadata maps file IDs to their associated metadata.
 	// It may invalidated on calls to go/packages.
-	metadata map[packageID]*metadata
+	metadata map[packageID]*knownMetadata
 
 	// importedBy maps package IDs to the list of packages that import them.
 	importedBy map[packageID][]packageID
@@ -111,6 +111,14 @@
 
 	workspace          *workspace
 	workspaceDirHandle *memoize.Handle
+
+	// knownSubdirs is the set of subdirectories in the workspace, used to
+	// create glob patterns for file watching.
+	knownSubdirs map[span.URI]struct{}
+	// unprocessedSubdirChanges are any changes that might affect the set of
+	// subdirectories in the workspace. They are not reflected to knownSubdirs
+	// during the snapshot cloning step as it can slow down cloning.
+	unprocessedSubdirChanges []*fileChange
 }
 
 type packageKey struct {
@@ -123,6 +131,18 @@
 	analyzer *analysis.Analyzer
 }
 
+// knownMetadata is a wrapper around metadata that tracks its validity.
+type knownMetadata struct {
+	*metadata
+
+	// valid is true if the given metadata is valid.
+	// Invalid metadata can still be used if a metadata reload fails.
+	valid bool
+
+	// shouldLoad is true if the given metadata should be reloaded.
+	shouldLoad bool
+}
+
 func (s *snapshot) ID() uint64 {
 	return s.id
 }
@@ -225,7 +245,7 @@
 			packages.NeedDeps |
 			packages.NeedTypesSizes |
 			packages.NeedModule,
-		Fset:    s.view.session.cache.fset,
+		Fset:    s.FileSet(),
 		Overlay: s.buildOverlay(),
 		ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
 			panic("go/packages must not be used to parse files")
@@ -503,13 +523,13 @@
 	if fh.Kind() != source.Go {
 		return nil, fmt.Errorf("no packages for non-Go file %s", uri)
 	}
-	ids := s.getIDsForURI(uri)
-	reload := len(ids) == 0
-	for _, id := range ids {
+	knownIDs := s.getIDsForURI(uri)
+	reload := len(knownIDs) == 0
+	for _, id := range knownIDs {
 		// Reload package metadata if any of the metadata has missing
 		// dependencies, in case something has changed since the last time we
 		// reloaded it.
-		if m := s.getMetadata(id); m == nil {
+		if s.noValidMetadataForID(id) {
 			reload = true
 			break
 		}
@@ -518,13 +538,26 @@
 		// calls to packages.Load. Determine what we should do instead.
 	}
 	if reload {
-		if err := s.load(ctx, false, fileURI(uri)); err != nil {
+		err = s.load(ctx, false, fileURI(uri))
+
+		if !s.useInvalidMetadata() && err != nil {
+			return nil, err
+		}
+		// We've tried to reload and there are still no known IDs for the URI.
+		// Return the load error, if there was one.
+		knownIDs = s.getIDsForURI(uri)
+		if len(knownIDs) == 0 {
 			return nil, err
 		}
 	}
-	// Get the list of IDs from the snapshot again, in case it has changed.
+
 	var phs []*packageHandle
-	for _, id := range s.getIDsForURI(uri) {
+	for _, id := range knownIDs {
+		// Filter out any intermediate test variants. We typically aren't
+		// interested in these packages for file= style queries.
+		if m := s.getMetadata(id); m != nil && m.isIntermediateTestVariant {
+			continue
+		}
 		var parseModes []source.ParseMode
 		switch mode {
 		case source.TypecheckAll:
@@ -547,10 +580,16 @@
 			phs = append(phs, ph)
 		}
 	}
-
 	return phs, nil
 }
 
+// Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has
+// issues with overlays that will cause confusing error messages if we reuse
+// old metadata.
+func (s *snapshot) useInvalidMetadata() bool {
+	return s.view.goversion >= 13 && s.view.Options().ExperimentalUseInvalidMetadata
+}
+
 func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]source.Package, error) {
 	if err := s.awaitLoaded(ctx); err != nil {
 		return nil, err
@@ -580,13 +619,15 @@
 	return ph.check(ctx, s)
 }
 
-// transitiveReverseDependencies populates the uris map with file URIs
+// transitiveReverseDependencies populates the ids map with package IDs
 // belonging to the provided package and its transitive reverse dependencies.
 func (s *snapshot) transitiveReverseDependencies(id packageID, ids map[packageID]struct{}) {
 	if _, ok := ids[id]; ok {
 		return
 	}
-	if s.getMetadata(id) == nil {
+	m := s.getMetadata(id)
+	// Only use invalid metadata if we support it.
+	if m == nil || !(m.valid || s.useInvalidMetadata()) {
 		return
 	}
 	ids[id] = struct{}{}
@@ -684,6 +725,13 @@
 	return ids
 }
 
+func (s *snapshot) getWorkspacePkgPath(id packageID) packagePath {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.workspacePackages[id]
+}
+
 func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
 	// Work-around microsoft/vscode#100870 by making sure that we are,
 	// at least, watching the user's entire workspace. This will still be
@@ -712,7 +760,7 @@
 	// of the directories in the workspace. We find them by adding the
 	// directories of every file in the snapshot's workspace directories.
 	var dirNames []string
-	for uri := range s.allKnownSubdirs(ctx) {
+	for _, uri := range s.getKnownSubdirs(dirs) {
 		dirNames = append(dirNames, uri.Filename())
 	}
 	sort.Strings(dirNames)
@@ -722,40 +770,89 @@
 	return patterns
 }
 
-// allKnownSubdirs returns all of the subdirectories within the snapshot's
-// workspace directories. None of the workspace directories are included.
-func (s *snapshot) allKnownSubdirs(ctx context.Context) map[span.URI]struct{} {
+// collectAllKnownSubdirs collects all of the subdirectories within the
+// snapshot's workspace directories. None of the workspace directories are
+// included.
+func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) {
 	dirs := s.workspace.dirs(ctx, s)
 
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	seen := make(map[span.URI]struct{})
+
+	s.knownSubdirs = map[span.URI]struct{}{}
 	for uri := range s.files {
-		dir := filepath.Dir(uri.Filename())
-		var matched span.URI
-		for _, wsDir := range dirs {
-			if source.InDir(wsDir.Filename(), dir) {
-				matched = wsDir
-				break
-			}
-		}
-		// Don't watch any directory outside of the workspace directories.
-		if matched == "" {
+		s.addKnownSubdirLocked(uri, dirs)
+	}
+}
+
+func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// First, process any pending changes and update the set of known
+	// subdirectories.
+	for _, c := range s.unprocessedSubdirChanges {
+		if c.isUnchanged {
 			continue
 		}
-		for {
-			if dir == "" || dir == matched.Filename() {
-				break
-			}
-			uri := span.URIFromPath(dir)
-			if _, ok := seen[uri]; ok {
-				break
-			}
-			seen[uri] = struct{}{}
-			dir = filepath.Dir(dir)
+		if !c.exists {
+			s.removeKnownSubdirLocked(c.fileHandle.URI())
+		} else {
+			s.addKnownSubdirLocked(c.fileHandle.URI(), wsDirs)
 		}
 	}
-	return seen
+	s.unprocessedSubdirChanges = nil
+
+	var result []span.URI
+	for uri := range s.knownSubdirs {
+		result = append(result, uri)
+	}
+	return result
+}
+
+func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
+	dir := filepath.Dir(uri.Filename())
+	// First check if the directory is already known, because then we can
+	// return early.
+	if _, ok := s.knownSubdirs[span.URIFromPath(dir)]; ok {
+		return
+	}
+	var matched span.URI
+	for _, wsDir := range dirs {
+		if source.InDir(wsDir.Filename(), dir) {
+			matched = wsDir
+			break
+		}
+	}
+	// Don't watch any directory outside of the workspace directories.
+	if matched == "" {
+		return
+	}
+	for {
+		if dir == "" || dir == matched.Filename() {
+			break
+		}
+		uri := span.URIFromPath(dir)
+		if _, ok := s.knownSubdirs[uri]; ok {
+			break
+		}
+		s.knownSubdirs[uri] = struct{}{}
+		dir = filepath.Dir(dir)
+	}
+}
+
+func (s *snapshot) removeKnownSubdirLocked(uri span.URI) {
+	dir := filepath.Dir(uri.Filename())
+	for dir != "" {
+		uri := span.URIFromPath(dir)
+		if _, ok := s.knownSubdirs[uri]; !ok {
+			break
+		}
+		if info, _ := os.Stat(dir); info == nil {
+			delete(s.knownSubdirs, uri)
+		}
+		dir = filepath.Dir(dir)
+	}
 }
 
 // knownFilesInDir returns the files known to the given snapshot that are in
@@ -927,46 +1024,138 @@
 	return s.ids[uri]
 }
 
-func (s *snapshot) getMetadataForURILocked(uri span.URI) (metadata []*metadata) {
-	// TODO(matloob): uri can be a file or directory. Should we update the mappings
-	// to map directories to their contained packages?
-
-	for _, id := range s.ids[uri] {
-		if m, ok := s.metadata[id]; ok {
-			metadata = append(metadata, m)
-		}
-	}
-	return metadata
-}
-
-func (s *snapshot) getMetadata(id packageID) *metadata {
+func (s *snapshot) getMetadata(id packageID) *knownMetadata {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
 	return s.metadata[id]
 }
 
-func (s *snapshot) addID(uri span.URI, id packageID) {
+func (s *snapshot) shouldLoad(scope interface{}) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	for i, existingID := range s.ids[uri] {
-		// TODO: We should make sure not to set duplicate IDs,
-		// and instead panic here. This can be done by making sure not to
-		// reset metadata information for packages we've already seen.
-		if existingID == id {
+	switch scope := scope.(type) {
+	case packagePath:
+		var meta *knownMetadata
+		for _, m := range s.metadata {
+			if m.pkgPath != scope {
+				continue
+			}
+			meta = m
+		}
+		if meta == nil || meta.shouldLoad {
+			return true
+		}
+		return false
+	case fileURI:
+		uri := span.URI(scope)
+		ids := s.ids[uri]
+		if len(ids) == 0 {
+			return true
+		}
+		for _, id := range ids {
+			m, ok := s.metadata[id]
+			if !ok || m.shouldLoad {
+				return true
+			}
+		}
+		return false
+	default:
+		return true
+	}
+}
+
+func (s *snapshot) clearShouldLoad(scope interface{}) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	switch scope := scope.(type) {
+	case packagePath:
+		var meta *knownMetadata
+		for _, m := range s.metadata {
+			if m.pkgPath == scope {
+				meta = m
+			}
+		}
+		if meta == nil {
 			return
 		}
-		// If the package previously only had a command-line-arguments ID,
-		// we should just replace it.
-		if source.IsCommandLineArguments(string(existingID)) {
-			s.ids[uri][i] = id
-			// Delete command-line-arguments if it was a workspace package.
-			delete(s.workspacePackages, existingID)
+		meta.shouldLoad = false
+	case fileURI:
+		uri := span.URI(scope)
+		ids := s.ids[uri]
+		if len(ids) == 0 {
 			return
 		}
+		for _, id := range ids {
+			if m, ok := s.metadata[id]; ok {
+				m.shouldLoad = false
+			}
+		}
 	}
-	s.ids[uri] = append(s.ids[uri], id)
+}
+
+// noValidMetadataForURILocked reports whether there is no valid metadata for
+// the given URI.
+func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool {
+	ids, ok := s.ids[uri]
+	if !ok {
+		return true
+	}
+	for _, id := range ids {
+		if m, ok := s.metadata[id]; ok && m.valid {
+			return false
+		}
+	}
+	return true
+}
+
+// noValidMetadataForID reports whether there is no valid metadata for the
+// given ID.
+func (s *snapshot) noValidMetadataForID(id packageID) bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.noValidMetadataForIDLocked(id)
+}
+
+func (s *snapshot) noValidMetadataForIDLocked(id packageID) bool {
+	m := s.metadata[id]
+	return m == nil || !m.valid
+}
+
+// updateIDForURIsLocked adds the given ID to the set of known IDs for each given URI.
+// Any existing invalid IDs are removed from the set of known IDs. IDs that are
+// not "command-line-arguments" are preferred, so if a new ID comes in for a
+// URI that previously only had "command-line-arguments", the new ID will
+// replace the "command-line-arguments" ID.
+func (s *snapshot) updateIDForURIsLocked(id packageID, uris map[span.URI]struct{}) {
+	for uri := range uris {
+		// Collect the new set of IDs, preserving any valid existing IDs.
+		newIDs := []packageID{id}
+		for _, existingID := range s.ids[uri] {
+			// Don't set duplicates of the same ID.
+			if existingID == id {
+				continue
+			}
+			// If the package previously only had a command-line-arguments ID,
+			// delete the command-line-arguments workspace package.
+			if source.IsCommandLineArguments(string(existingID)) {
+				delete(s.workspacePackages, existingID)
+				continue
+			}
+			// If the metadata for an existing ID is invalid, and we are
+			// setting metadata for a new, valid ID--don't preserve the old ID.
+			if m, ok := s.metadata[existingID]; !ok || !m.valid {
+				continue
+			}
+			newIDs = append(newIDs, existingID)
+		}
+		sort.Slice(newIDs, func(i, j int) bool {
+			return newIDs[i] < newIDs[j]
+		})
+		s.ids[uri] = newIDs
+	}
 }
 
 func (s *snapshot) isWorkspacePackage(id packageID) bool {
@@ -1046,12 +1235,20 @@
 func (s *snapshot) awaitLoaded(ctx context.Context) error {
 	loadErr := s.awaitLoadedAllErrors(ctx)
 
-	// If we still have absolutely no metadata, check if the view failed to
-	// initialize and return any errors.
-	// TODO(rstambler): Should we clear the error after we return it?
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if len(s.metadata) == 0 && loadErr != nil {
+
+	// If we still have absolutely no metadata, check if the view failed to
+	// initialize and return any errors.
+	if s.useInvalidMetadata() && len(s.metadata) > 0 {
+		return nil
+	}
+	for _, m := range s.metadata {
+		if m.valid {
+			return nil
+		}
+	}
+	if loadErr != nil {
 		return loadErr.MainError
 	}
 	return nil
@@ -1148,6 +1345,13 @@
 	return nil
 }
 
+func (s *snapshot) getInitializationError(ctx context.Context) *source.CriticalError {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.initializedErr
+}
+
 func (s *snapshot) AwaitInitialized(ctx context.Context) {
 	select {
 	case <-ctx.Done():
@@ -1166,7 +1370,7 @@
 	missingMetadata := len(s.workspacePackages) == 0 || len(s.metadata) == 0
 	pkgPathSet := map[packagePath]struct{}{}
 	for id, pkgPath := range s.workspacePackages {
-		if s.metadata[id] != nil {
+		if m, ok := s.metadata[id]; ok && m.valid {
 			continue
 		}
 		missingMetadata = true
@@ -1238,7 +1442,7 @@
 		s.mu.Lock()
 		for _, scope := range scopes {
 			uri := span.URI(scope.(fileURI))
-			if s.getMetadataForURILocked(uri) == nil {
+			if s.noValidMetadataForURILocked(uri) {
 				s.unloadableFiles[uri] = struct{}{}
 			}
 		}
@@ -1271,7 +1475,7 @@
 		if _, ok := s.unloadableFiles[uri]; ok {
 			continue
 		}
-		if s.getMetadataForURILocked(uri) == nil {
+		if s.noValidMetadataForURILocked(uri) {
 			files = append(files, fh)
 		}
 	}
@@ -1327,9 +1531,25 @@
 	}
 }
 
+// unappliedChanges is a file source that handles an uncloned snapshot.
+type unappliedChanges struct {
+	originalSnapshot *snapshot
+	changes          map[span.URI]*fileChange
+}
+
+func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
+	if c, ok := ac.changes[uri]; ok {
+		return c.fileHandle, nil
+	}
+	return ac.originalSnapshot.GetFile(ctx, uri)
+}
+
 func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, bool) {
 	var vendorChanged bool
-	newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes)
+	newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes, &unappliedChanges{
+		originalSnapshot: s,
+		changes:          changes,
+	})
 
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -1349,7 +1569,7 @@
 		initializedErr:    s.initializedErr,
 		ids:               make(map[span.URI][]packageID, len(s.ids)),
 		importedBy:        make(map[packageID][]packageID, len(s.importedBy)),
-		metadata:          make(map[packageID]*metadata, len(s.metadata)),
+		metadata:          make(map[packageID]*knownMetadata, len(s.metadata)),
 		packages:          make(map[packageKey]*packageHandle, len(s.packages)),
 		actions:           make(map[actionKey]*actionHandle, len(s.actions)),
 		files:             make(map[span.URI]source.VersionedFileHandle, len(s.files)),
@@ -1359,6 +1579,7 @@
 		parseModHandles:   make(map[span.URI]*parseModHandle, len(s.parseModHandles)),
 		modTidyHandles:    make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)),
 		modWhyHandles:     make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)),
+		knownSubdirs:      make(map[span.URI]struct{}, len(s.knownSubdirs)),
 		workspace:         newWorkspace,
 	}
 
@@ -1404,9 +1625,20 @@
 		result.modWhyHandles[k] = v
 	}
 
+	// Add all of the known subdirectories, but don't update them for the
+	// changed files. We need to rebuild the workspace module to know the
+	// true set of known subdirectories, but we don't want to do that in clone.
+	for k, v := range s.knownSubdirs {
+		result.knownSubdirs[k] = v
+	}
+	for _, c := range changes {
+		result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c)
+	}
+
 	// directIDs keeps track of package IDs that have directly changed.
 	// It maps id->invalidateMetadata.
 	directIDs := map[packageID]bool{}
+
 	// Invalidate all package metadata if the workspace module has changed.
 	if workspaceReload {
 		for k := range s.metadata {
@@ -1415,6 +1647,7 @@
 	}
 
 	changedPkgNames := map[packageID]struct{}{}
+	anyImportDeleted := false
 	for uri, change := range changes {
 		// Maybe reinitialize the view if we see a change in the vendor
 		// directory.
@@ -1427,8 +1660,12 @@
 
 		// Check if the file's package name or imports have changed,
 		// and if so, invalidate this file's packages' metadata.
-		shouldInvalidateMetadata, pkgNameChanged := s.shouldInvalidateMetadata(ctx, result, originalFH, change.fileHandle)
+		var shouldInvalidateMetadata, pkgNameChanged, importDeleted bool
+		if !isGoMod(uri) {
+			shouldInvalidateMetadata, pkgNameChanged, importDeleted = s.shouldInvalidateMetadata(ctx, result, originalFH, change.fileHandle)
+		}
 		invalidateMetadata := forceReloadMetadata || workspaceReload || shouldInvalidateMetadata
+		anyImportDeleted = anyImportDeleted || importDeleted
 
 		// Mark all of the package IDs containing the given file.
 		// TODO: if the file has moved into a new package, we should invalidate that too.
@@ -1468,15 +1705,36 @@
 		delete(result.unloadableFiles, uri)
 	}
 
+	// Deleting an import can cause list errors due to import cycles to be
+	// resolved. The best we can do without parsing the list error message is to
+	// hope that list errors may have been resolved by a deleted import.
+	//
+	// We could do better by parsing the list error message. We already do this
+	// to assign a better range to the list error, but for such critical
+	// functionality as metadata, it's better to be conservative until it proves
+	// impractical.
+	//
+	// We could also do better by looking at which imports were deleted and
+	// trying to find cycles they are involved in. This fails when the file goes
+	// from an unparseable state to a parseable state, as we don't have a
+	// starting point to compare with.
+	if anyImportDeleted {
+		for id, metadata := range s.metadata {
+			if len(metadata.errors) > 0 {
+				directIDs[id] = true
+			}
+		}
+	}
+
 	// Invalidate reverse dependencies too.
 	// TODO(heschi): figure out the locking model and use transitiveReverseDeps?
-	// transitiveIDs keeps track of transitive reverse dependencies.
+	// idsToInvalidate keeps track of transitive reverse dependencies.
 	// If an ID is present in the map, invalidate its types.
 	// If an ID's value is true, invalidate its metadata too.
-	transitiveIDs := make(map[packageID]bool)
+	idsToInvalidate := map[packageID]bool{}
 	var addRevDeps func(packageID, bool)
 	addRevDeps = func(id packageID, invalidateMetadata bool) {
-		current, seen := transitiveIDs[id]
+		current, seen := idsToInvalidate[id]
 		newInvalidateMetadata := current || invalidateMetadata
 
 		// If we've already seen this ID, and the value of invalidate
@@ -1484,7 +1742,7 @@
 		if seen && current == newInvalidateMetadata {
 			return
 		}
-		transitiveIDs[id] = newInvalidateMetadata
+		idsToInvalidate[id] = newInvalidateMetadata
 		for _, rid := range s.getImportedByLocked(id) {
 			addRevDeps(rid, invalidateMetadata)
 		}
@@ -1495,7 +1753,7 @@
 
 	// Copy the package type information.
 	for k, v := range s.packages {
-		if _, ok := transitiveIDs[k.id]; ok {
+		if _, ok := idsToInvalidate[k.id]; ok {
 			continue
 		}
 		newGen.Inherit(v.handle)
@@ -1503,26 +1761,94 @@
 	}
 	// Copy the package analysis information.
 	for k, v := range s.actions {
-		if _, ok := transitiveIDs[k.pkg.id]; ok {
+		if _, ok := idsToInvalidate[k.pkg.id]; ok {
 			continue
 		}
 		newGen.Inherit(v.handle)
 		result.actions[k] = v
 	}
+
+	// If the workspace mode has changed, we must delete all metadata, as it
+	// is unusable and may produce confusing or incorrect diagnostics.
+	// If a file has been deleted, we must delete metadata all packages
+	// containing that file.
+	workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
+	skipID := map[packageID]bool{}
+	for _, c := range changes {
+		if c.exists {
+			continue
+		}
+		// The file has been deleted.
+		if ids, ok := s.ids[c.fileHandle.URI()]; ok {
+			for _, id := range ids {
+				skipID[id] = true
+			}
+		}
+	}
+
+	// Collect all of the IDs that are reachable from the workspace packages.
+	// Any unreachable IDs will have their metadata deleted outright.
+	reachableID := map[packageID]bool{}
+	var addForwardDeps func(packageID)
+	addForwardDeps = func(id packageID) {
+		if reachableID[id] {
+			return
+		}
+		reachableID[id] = true
+		m, ok := s.metadata[id]
+		if !ok {
+			return
+		}
+		for _, depID := range m.deps {
+			addForwardDeps(depID)
+		}
+	}
+	for id := range s.workspacePackages {
+		addForwardDeps(id)
+	}
+
+	// Copy the URI to package ID mappings, skipping only those URIs whose
+	// metadata will be reloaded in future calls to load.
+	deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
+	idsInSnapshot := map[packageID]bool{} // track all known IDs
+	for uri, ids := range s.ids {
+		for _, id := range ids {
+			invalidateMetadata := idsToInvalidate[id]
+			if skipID[id] || (invalidateMetadata && deleteInvalidMetadata) {
+				continue
+			}
+			// The ID is not reachable from any workspace package, so it should
+			// be deleted.
+			if !reachableID[id] {
+				continue
+			}
+			idsInSnapshot[id] = true
+			result.ids[uri] = append(result.ids[uri], id)
+		}
+	}
+
 	// Copy the package metadata. We only need to invalidate packages directly
 	// containing the affected file, and only if it changed in a relevant way.
 	for k, v := range s.metadata {
-		if invalidateMetadata, ok := transitiveIDs[k]; invalidateMetadata && ok {
+		if !idsInSnapshot[k] {
+			// Delete metadata for IDs that are no longer reachable from files
+			// in the snapshot.
 			continue
 		}
-		result.metadata[k] = v
+		invalidateMetadata := idsToInvalidate[k]
+		// Mark invalidated metadata rather than deleting it outright.
+		result.metadata[k] = &knownMetadata{
+			metadata:   v.metadata,
+			valid:      v.valid && !invalidateMetadata,
+			shouldLoad: v.shouldLoad || invalidateMetadata,
+		}
 	}
 	// Copy the URI to package ID mappings, skipping only those URIs whose
 	// metadata will be reloaded in future calls to load.
 	for k, ids := range s.ids {
 		var newIDs []packageID
 		for _, id := range ids {
-			if invalidateMetadata, ok := transitiveIDs[id]; invalidateMetadata && ok {
+			if invalidateMetadata, ok := idsToInvalidate[id]; invalidateMetadata && ok {
 				continue
 			}
 			newIDs = append(newIDs, id)
@@ -1531,6 +1857,7 @@
 			result.ids[k] = newIDs
 		}
 	}
+
 	// Copy the set of initially loaded packages.
 	for id, pkgPath := range s.workspacePackages {
 		// Packages with the id "command-line-arguments" are generated by the
@@ -1538,7 +1865,7 @@
 		// module. Do not cache them as workspace packages for longer than
 		// necessary.
 		if source.IsCommandLineArguments(string(id)) {
-			if invalidateMetadata, ok := transitiveIDs[id]; invalidateMetadata && ok {
+			if invalidateMetadata, ok := idsToInvalidate[id]; invalidateMetadata && ok {
 				continue
 			}
 		}
@@ -1590,7 +1917,7 @@
 
 	// If the snapshot's workspace mode has changed, the packages loaded using
 	// the previous mode are no longer relevant, so clear them out.
-	if s.workspaceMode() != result.workspaceMode() {
+	if workspaceModeChanged {
 		result.workspacePackages = map[packageID]packagePath{}
 	}
 
@@ -1672,13 +1999,13 @@
 
 // shouldInvalidateMetadata reparses a file's package and import declarations to
 // determine if the file requires a metadata reload.
-func (s *snapshot) shouldInvalidateMetadata(ctx context.Context, newSnapshot *snapshot, originalFH, currentFH source.FileHandle) (invalidate, pkgNameChanged bool) {
+func (s *snapshot) shouldInvalidateMetadata(ctx context.Context, newSnapshot *snapshot, originalFH, currentFH source.FileHandle) (invalidate, pkgNameChanged, importDeleted bool) {
 	if originalFH == nil {
-		return true, false
+		return true, false, false
 	}
 	// If the file hasn't changed, there's no need to reload.
 	if originalFH.FileIdentity() == currentFH.FileIdentity() {
-		return false, false
+		return false, false, false
 	}
 	// Get the original and current parsed files in order to check package name
 	// and imports. Use the new snapshot to parse to avoid modifying the
@@ -1686,53 +2013,77 @@
 	original, originalErr := newSnapshot.ParseGo(ctx, originalFH, source.ParseHeader)
 	current, currentErr := newSnapshot.ParseGo(ctx, currentFH, source.ParseHeader)
 	if originalErr != nil || currentErr != nil {
-		return (originalErr == nil) != (currentErr == nil), false
+		return (originalErr == nil) != (currentErr == nil), false, (currentErr != nil) // if the current file is unparseable, conservatively assume an import may have been deleted
 	}
 	// Check if the package's metadata has changed. The cases handled are:
 	//    1. A package's name has changed
 	//    2. A file's imports have changed
 	if original.File.Name.Name != current.File.Name.Name {
-		return true, true
+		invalidate = true
+		pkgNameChanged = true
 	}
-	importSet := make(map[string]struct{})
+	origImportSet := make(map[string]struct{})
 	for _, importSpec := range original.File.Imports {
-		importSet[importSpec.Path.Value] = struct{}{}
+		origImportSet[importSpec.Path.Value] = struct{}{}
+	}
+	curImportSet := make(map[string]struct{})
+	for _, importSpec := range current.File.Imports {
+		curImportSet[importSpec.Path.Value] = struct{}{}
 	}
 	// If any of the current imports were not in the original imports.
-	for _, importSpec := range current.File.Imports {
-		if _, ok := importSet[importSpec.Path.Value]; ok {
+	for path := range curImportSet {
+		if _, ok := origImportSet[path]; ok {
+			delete(origImportSet, path)
 			continue
 		}
 		// If the import path is obviously not valid, we can skip reloading
 		// metadata. For now, valid means properly quoted and without a
 		// terminal slash.
-		path, err := strconv.Unquote(importSpec.Path.Value)
-		if err != nil {
+		if isBadImportPath(path) {
 			continue
 		}
-		if path == "" {
-			continue
-		}
-		if path[len(path)-1] == '/' {
-			continue
-		}
-		return true, false
+		invalidate = true
 	}
 
-	// Re-evaluate build constraints and embed patterns. It would be preferable
-	// to only do this on save, but we don't have the prior versions accessible.
-	oldComments := extractMagicComments(original.File)
-	newComments := extractMagicComments(current.File)
+	for path := range origImportSet {
+		if !isBadImportPath(path) {
+			invalidate = true
+			importDeleted = true
+		}
+	}
+
+	if !invalidate {
+		invalidate = magicCommentsChanged(original.File, current.File)
+	}
+	return invalidate, pkgNameChanged, importDeleted
+}
+
+func magicCommentsChanged(original *ast.File, current *ast.File) bool {
+	oldComments := extractMagicComments(original)
+	newComments := extractMagicComments(current)
 	if len(oldComments) != len(newComments) {
-		return true, false
+		return true
 	}
 	for i := range oldComments {
 		if oldComments[i] != newComments[i] {
-			return true, false
+			return true
 		}
 	}
+	return false
+}
 
-	return false, false
+func isBadImportPath(path string) bool {
+	path, err := strconv.Unquote(path)
+	if err != nil {
+		return true
+	}
+	if path == "" {
+		return true
+	}
+	if path[len(path)-1] == '/' {
+		return true
+	}
+	return false
 }
 
 var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`)
@@ -1785,8 +2136,8 @@
 
 // BuildGoplsMod generates a go.mod file for all modules in the workspace. It
 // bypasses any existing gopls.mod.
-func BuildGoplsMod(ctx context.Context, root span.URI, s source.Snapshot) (*modfile.File, error) {
-	allModules, err := findModules(root, pathExcludedByFilterFunc(s.View().Options()), 0)
+func (s *snapshot) BuildGoplsMod(ctx context.Context) (*modfile.File, error) {
+	allModules, err := findModules(s.view.folder, pathExcludedByFilterFunc(s.view.rootURI.Filename(), s.view.gomodcache, s.View().Options()), 0)
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go
index ad53852..3f39882 100644
--- a/internal/lsp/cache/view.go
+++ b/internal/lsp/cache/view.go
@@ -343,7 +343,7 @@
 		if err != nil {
 			return err
 		}
-		if strings.HasSuffix(filepath.Ext(path), "tmpl") && !pathExcludedByFilter(path, s.view.options) &&
+		if strings.HasSuffix(filepath.Ext(path), "tmpl") && !pathExcludedByFilter(path, dir, s.view.gomodcache, s.view.options) &&
 			!fi.IsDir() {
 			k := span.URIFromPath(path)
 			fh, err := s.GetVersionedFile(ctx, k)
@@ -371,7 +371,7 @@
 	}
 	// Filters are applied relative to the workspace folder.
 	if inFolder {
-		return !pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.Options())
+		return !pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options())
 	}
 	return true
 }
@@ -393,12 +393,16 @@
 	if v.knownFile(c.URI) {
 		return true
 	}
-	// The gopls.mod may not be "known" because we first access it through the
-	// session. As a result, treat changes to the view's gopls.mod file as
-	// always relevant, even if they are only on-disk changes.
-	// TODO(rstambler): Make sure the gopls.mod is always known to the view.
-	if c.URI == goplsModURI(v.rootURI) {
-		return true
+	// The go.work/gopls.mod may not be "known" because we first access it
+	// through the session. As a result, treat changes to the view's go.work or
+	// gopls.mod file as always relevant, even if they are only on-disk
+	// changes.
+	// TODO(rstambler): Make sure the go.work/gopls.mod files are always known
+	// to the view.
+	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
+		if c.URI == uriForSource(v.rootURI, src) {
+			return true
+		}
 	}
 	// If the file is not known to the view, and the change is only on-disk,
 	// we should not invalidate the snapshot. This is necessary because Emacs
@@ -557,6 +561,7 @@
 	}
 	s.initializeOnce.Do(func() {
 		s.loadWorkspace(ctx, firstAttempt)
+		s.collectAllKnownSubdirs(ctx)
 	})
 }
 
@@ -602,28 +607,40 @@
 	} else {
 		scopes = append(scopes, viewLoadScope("LOAD_VIEW"))
 	}
-	var err error
+
+	// If we're loading anything, ensure we also load builtin.
+	// TODO(rstambler): explain the rationale for this.
 	if len(scopes) > 0 {
-		err = s.load(ctx, firstAttempt, append(scopes, packagePath("builtin"))...)
+		scopes = append(scopes, packagePath("builtin"))
 	}
-	if ctx.Err() != nil {
+	err := s.load(ctx, firstAttempt, scopes...)
+
+	// If the context is canceled on the first attempt, loading has failed
+	// because the go command has timed out--that should be a critical error.
+	if err != nil && !firstAttempt && ctx.Err() != nil {
 		return
 	}
 
 	var criticalErr *source.CriticalError
-	if err != nil {
+	switch {
+	case err != nil && ctx.Err() != nil:
+		event.Error(ctx, fmt.Sprintf("initial workspace load: %v", err), err)
+		criticalErr = &source.CriticalError{
+			MainError: err,
+		}
+	case err != nil:
 		event.Error(ctx, "initial workspace load failed", err)
 		extractedDiags, _ := s.extractGoCommandErrors(ctx, err.Error())
 		criticalErr = &source.CriticalError{
 			MainError: err,
 			DiagList:  append(modDiagnostics, extractedDiags...),
 		}
-	} else if len(modDiagnostics) == 1 {
+	case len(modDiagnostics) == 1:
 		criticalErr = &source.CriticalError{
 			MainError: fmt.Errorf(modDiagnostics[0].Message),
 			DiagList:  modDiagnostics,
 		}
-	} else if len(modDiagnostics) > 1 {
+	case len(modDiagnostics) > 1:
 		criticalErr = &source.CriticalError{
 			MainError: fmt.Errorf("error loading module names"),
 			DiagList:  modDiagnostics,
@@ -642,6 +659,10 @@
 	// Detach the context so that content invalidation cannot be canceled.
 	ctx = xcontext.Detach(ctx)
 
+	// This should be the only time we hold the view's snapshot lock for any period of time.
+	v.snapshotMu.Lock()
+	defer v.snapshotMu.Unlock()
+
 	// Cancel all still-running previous requests, since they would be
 	// operating on stale data.
 	v.snapshot.cancel()
@@ -649,34 +670,41 @@
 	// Do not clone a snapshot until its view has finished initializing.
 	v.snapshot.AwaitInitialized(ctx)
 
-	// This should be the only time we hold the view's snapshot lock for any period of time.
-	v.snapshotMu.Lock()
-	defer v.snapshotMu.Unlock()
-
 	oldSnapshot := v.snapshot
 
 	var workspaceChanged bool
 	v.snapshot, workspaceChanged = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
-	if workspaceChanged && v.tempWorkspace != "" {
-		snap := v.snapshot
-		release := snap.generation.Acquire(ctx)
-		go func() {
-			defer release()
-			wsdir, err := snap.getWorkspaceDir(ctx)
-			if err != nil {
-				event.Error(ctx, "getting workspace dir", err)
-			}
-			if err := copyWorkspace(v.tempWorkspace, wsdir); err != nil {
-				event.Error(ctx, "copying workspace dir", err)
-			}
-		}()
+	if workspaceChanged {
+		if err := v.updateWorkspaceLocked(ctx); err != nil {
+			event.Error(ctx, "copying workspace dir", err)
+		}
 	}
 	go oldSnapshot.generation.Destroy()
 
 	return v.snapshot, v.snapshot.generation.Acquire(ctx)
 }
 
-func copyWorkspace(dst span.URI, src span.URI) error {
+func (v *View) updateWorkspace(ctx context.Context) error {
+	if v.tempWorkspace == "" {
+		return nil
+	}
+	v.snapshotMu.Lock()
+	defer v.snapshotMu.Unlock()
+	return v.updateWorkspaceLocked(ctx)
+}
+
+// updateWorkspaceLocked should only be called when v.snapshotMu is held. It
+// guarantees that workspace module content will be copied to v.tempWorkspace at
+// some point in the future. We do not guarantee that the temp workspace sees
+// all changes to the workspace module, only that it is eventually consistent
+// with the workspace module of the latest snapshot.
+func (v *View) updateWorkspaceLocked(ctx context.Context) error {
+	release := v.snapshot.generation.Acquire(ctx)
+	defer release()
+	src, err := v.snapshot.getWorkspaceDir(ctx)
+	if err != nil {
+		return err
+	}
 	for _, name := range []string{"go.mod", "go.sum"} {
 		srcname := filepath.Join(src.Filename(), name)
 		srcf, err := os.Open(srcname)
@@ -684,7 +712,7 @@
 			return errors.Errorf("opening snapshot %s: %w", name, err)
 		}
 		defer srcf.Close()
-		dstname := filepath.Join(dst.Filename(), name)
+		dstname := filepath.Join(v.tempWorkspace.Filename(), name)
 		dstf, err := os.Create(dstname)
 		if err != nil {
 			return errors.Errorf("truncating view %s: %w", name, err)
@@ -774,7 +802,7 @@
 func findWorkspaceRoot(ctx context.Context, folder span.URI, fs source.FileSource, excludePath func(string) bool, experimental bool) (span.URI, error) {
 	patterns := []string{"go.mod"}
 	if experimental {
-		patterns = []string{"gopls.mod", "go.mod"}
+		patterns = []string{"go.work", "gopls.mod", "go.mod"}
 	}
 	for _, basename := range patterns {
 		dir, err := findRootPattern(ctx, folder, basename, fs)
@@ -1016,24 +1044,29 @@
 		if !strings.HasPrefix(f, folder) {
 			return false
 		}
-		if !pathExcludedByFilter(strings.TrimPrefix(f, folder), opts) {
+		if !pathExcludedByFilter(strings.TrimPrefix(f, folder), v.rootURI.Filename(), v.gomodcache, opts) {
 			return false
 		}
 	}
 	return true
 }
 
-func pathExcludedByFilterFunc(opts *source.Options) func(string) bool {
+func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) func(string) bool {
 	return func(path string) bool {
-		return pathExcludedByFilter(path, opts)
+		return pathExcludedByFilter(path, root, gomodcache, opts)
 	}
 }
 
-func pathExcludedByFilter(path string, opts *source.Options) bool {
+func pathExcludedByFilter(path, root, gomodcache string, opts *source.Options) bool {
 	path = strings.TrimPrefix(filepath.ToSlash(path), "/")
+	gomodcache = strings.TrimPrefix(filepath.ToSlash(strings.TrimPrefix(gomodcache, root)), "/")
 
 	excluded := false
-	for _, filter := range opts.DirectoryFilters {
+	filters := opts.DirectoryFilters
+	if gomodcache != "" {
+		filters = append(filters, "-"+gomodcache)
+	}
+	for _, filter := range filters {
 		op, prefix := filter[0], filter[1:]
 		// Non-empty prefixes have to be precise directory matches.
 		if prefix != "" {
diff --git a/internal/lsp/cache/view_test.go b/internal/lsp/cache/view_test.go
index 802215a..f0923d4 100644
--- a/internal/lsp/cache/view_test.go
+++ b/internal/lsp/cache/view_test.go
@@ -161,12 +161,12 @@
 		opts := &source.Options{}
 		opts.DirectoryFilters = tt.filters
 		for _, inc := range tt.included {
-			if pathExcludedByFilter(inc, opts) {
+			if pathExcludedByFilter(inc, "root", "root/gopath/pkg/mod", opts) {
 				t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc)
 			}
 		}
 		for _, exc := range tt.excluded {
-			if !pathExcludedByFilter(exc, opts) {
+			if !pathExcludedByFilter(exc, "root", "root/gopath/pkg/mod", opts) {
 				t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc)
 			}
 		}
diff --git a/internal/lsp/cache/workspace.go b/internal/lsp/cache/workspace.go
index d4b5303..4204bcc 100644
--- a/internal/lsp/cache/workspace.go
+++ b/internal/lsp/cache/workspace.go
@@ -6,6 +6,7 @@
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"path/filepath"
 	"sort"
@@ -15,6 +16,7 @@
 	"golang.org/x/mod/modfile"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/lsp/source"
+	workfile "golang.org/x/tools/internal/mod/modfile"
 	"golang.org/x/tools/internal/span"
 	"golang.org/x/tools/internal/xcontext"
 	errors "golang.org/x/xerrors"
@@ -25,6 +27,7 @@
 const (
 	legacyWorkspace = iota
 	goplsModWorkspace
+	goWorkWorkspace
 	fileSystemWorkspace
 )
 
@@ -34,6 +37,8 @@
 		return "legacy"
 	case goplsModWorkspace:
 		return "gopls.mod"
+	case goWorkWorkspace:
+		return "go.work"
 	case fileSystemWorkspace:
 		return "file system"
 	default:
@@ -86,24 +91,9 @@
 	// In experimental mode, the user may have a gopls.mod file that defines
 	// their workspace.
 	if experimental {
-		goplsModFH, err := fs.GetFile(ctx, goplsModURI(root))
-		if err != nil {
-			return nil, err
-		}
-		contents, err := goplsModFH.Read()
+		ws, err := parseExplicitWorkspaceFile(ctx, root, fs, excludePath)
 		if err == nil {
-			file, activeModFiles, err := parseGoplsMod(root, goplsModFH.URI(), contents)
-			if err != nil {
-				return nil, err
-			}
-			return &workspace{
-				root:           root,
-				excludePath:    excludePath,
-				activeModFiles: activeModFiles,
-				knownModFiles:  activeModFiles,
-				mod:            file,
-				moduleSource:   goplsModWorkspace,
-			}, nil
+			return ws, nil
 		}
 	}
 	// Otherwise, in all other modes, search for all of the go.mod files in the
@@ -145,6 +135,41 @@
 	}, nil
 }
 
+func parseExplicitWorkspaceFile(ctx context.Context, root span.URI, fs source.FileSource, excludePath func(string) bool) (*workspace, error) {
+	for _, src := range []workspaceSource{goWorkWorkspace, goplsModWorkspace} {
+		fh, err := fs.GetFile(ctx, uriForSource(root, src))
+		if err != nil {
+			return nil, err
+		}
+		contents, err := fh.Read()
+		if err != nil {
+			continue
+		}
+		var file *modfile.File
+		var activeModFiles map[span.URI]struct{}
+		switch src {
+		case goWorkWorkspace:
+			file, activeModFiles, err = parseGoWork(ctx, root, fh.URI(), contents, fs)
+		case goplsModWorkspace:
+			file, activeModFiles, err = parseGoplsMod(root, fh.URI(), contents)
+		}
+		if err != nil {
+			return nil, err
+		}
+		return &workspace{
+			root:           root,
+			excludePath:    excludePath,
+			activeModFiles: activeModFiles,
+			knownModFiles:  activeModFiles,
+			mod:            file,
+			moduleSource:   src,
+		}, nil
+	}
+	return nil, noHardcodedWorkspace
+}
+
+var noHardcodedWorkspace = errors.New("no hardcoded workspace")
+
 func (w *workspace) getKnownModFiles() map[span.URI]struct{} {
 	return w.knownModFiles
 }
@@ -246,7 +271,7 @@
 // Some workspace changes may affect workspace contents without requiring a
 // reload of metadata (for example, unsaved changes to a go.mod or go.sum
 // file).
-func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileChange) (_ *workspace, changed, reload bool) {
+func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, changed, reload bool) {
 	// Prevent races to w.modFile or w.wsDirs below, if wmhas not yet been built.
 	w.buildMu.Lock()
 	defer w.buildMu.Unlock()
@@ -269,42 +294,55 @@
 		result.activeModFiles[k] = v
 	}
 
-	// First handle changes to the gopls.mod file. This must be considered before
-	// any changes to go.mod or go.sum files, as the gopls.mod file determines
-	// which modules we care about. In legacy workspace mode we don't consider
-	// the gopls.mod file.
+	// First handle changes to the go.work or gopls.mod file. This must be
+	// considered before any changes to go.mod or go.sum files, as these files
+	// determine which modules we care about. In legacy workspace mode we don't
+	// consider the gopls.mod or go.work files.
 	if w.moduleSource != legacyWorkspace {
-		// If gopls.mod has changed we need to either re-read it if it exists or
-		// walk the filesystem if it has been deleted.
-		gmURI := goplsModURI(w.root)
-		// File opens/closes are just no-ops.
-		if change, ok := changes[gmURI]; ok && !change.isUnchanged {
+		// If go.work/gopls.mod has changed we need to either re-read it if it
+		// exists or walk the filesystem if it has been deleted.
+		// go.work should override the gopls.mod if both exist.
+		for _, src := range []workspaceSource{goplsModWorkspace, goWorkWorkspace} {
+			uri := uriForSource(w.root, src)
+			// File opens/closes are just no-ops.
+			change, ok := changes[uri]
+			if !ok || change.isUnchanged {
+				continue
+			}
 			if change.exists {
-				// Only invalidate if the gopls.mod actually parses.
-				// Otherwise, stick with the current gopls.mod.
-				parsedFile, parsedModules, err := parseGoplsMod(w.root, gmURI, change.content)
+				// Only invalidate the workspace if the file actually parses.
+				// Otherwise, stick with the current file.
+				var parsedFile *modfile.File
+				var parsedModules map[span.URI]struct{}
+				var err error
+				switch src {
+				case goWorkWorkspace:
+					parsedFile, parsedModules, err = parseGoWork(ctx, w.root, uri, change.content, fs)
+				case goplsModWorkspace:
+					parsedFile, parsedModules, err = parseGoplsMod(w.root, uri, change.content)
+				}
 				if err == nil {
 					changed = true
 					reload = change.fileHandle.Saved()
 					result.mod = parsedFile
-					result.moduleSource = goplsModWorkspace
+					result.moduleSource = src
 					result.knownModFiles = parsedModules
 					result.activeModFiles = make(map[span.URI]struct{})
 					for k, v := range parsedModules {
 						result.activeModFiles[k] = v
 					}
 				} else {
-					// An unparseable gopls.mod file should not invalidate the
-					// workspace: nothing good could come from changing the
-					// workspace in this case.
-					event.Error(ctx, "parsing gopls.mod", err)
+					// An unparseable file should not invalidate the workspace:
+					// nothing good could come from changing the workspace in
+					// this case.
+					event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
 				}
 			} else {
-				// gopls.mod is deleted. search for modules again.
+				// go.work/gopls.mod is deleted. search for modules again.
 				changed = true
 				reload = true
 				result.moduleSource = fileSystemWorkspace
-				// The parsed gopls.mod is no longer valid.
+				// The parsed file is no longer valid.
 				result.mod = nil
 				knownModFiles, err := findModules(w.root, w.excludePath, 0)
 				if err != nil {
@@ -325,7 +363,7 @@
 	// Next, handle go.mod changes that could affect our workspace. If we're
 	// reading our tracked modules from the gopls.mod, there's nothing to do
 	// here.
-	if result.moduleSource != goplsModWorkspace {
+	if result.moduleSource != goplsModWorkspace && result.moduleSource != goWorkWorkspace {
 		for uri, change := range changes {
 			if change.isUnchanged || !isGoMod(uri) || !source.InDir(result.root.Filename(), uri.Filename()) {
 				continue
@@ -370,8 +408,17 @@
 }
 
 // goplsModURI returns the URI for the gopls.mod file contained in root.
-func goplsModURI(root span.URI) span.URI {
-	return span.URIFromPath(filepath.Join(root.Filename(), "gopls.mod"))
+func uriForSource(root span.URI, src workspaceSource) span.URI {
+	var basename string
+	switch src {
+	case goplsModWorkspace:
+		basename = "gopls.mod"
+	case goWorkWorkspace:
+		basename = "go.work"
+	default:
+		return ""
+	}
+	return span.URIFromPath(filepath.Join(root.Filename(), basename))
 }
 
 // modURI returns the URI for the go.mod file contained in root.
@@ -429,6 +476,32 @@
 	return modules, nil
 }
 
+func parseGoWork(ctx context.Context, root, uri span.URI, contents []byte, fs source.FileSource) (*modfile.File, map[span.URI]struct{}, error) {
+	workFile, err := workfile.ParseWork(uri.Filename(), contents, nil)
+	if err != nil {
+		return nil, nil, errors.Errorf("parsing go.work: %w", err)
+	}
+	modFiles := make(map[span.URI]struct{})
+	for _, dir := range workFile.Directory {
+		// The resulting modfile must use absolute paths, so that it can be
+		// written to a temp directory.
+		dir.DiskPath = absolutePath(root, dir.DiskPath)
+		modURI := span.URIFromPath(filepath.Join(dir.DiskPath, "go.mod"))
+		modFiles[modURI] = struct{}{}
+	}
+	modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
+	if err != nil {
+		return nil, nil, err
+	}
+	if workFile.Go.Version != "" {
+		if err := modFile.AddGoStmt(workFile.Go.Version); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return modFile, modFiles, nil
+}
+
 func parseGoplsMod(root, uri span.URI, contents []byte) (*modfile.File, map[span.URI]struct{}, error) {
 	modFile, err := modfile.Parse(uri.Filename(), contents, nil)
 	if err != nil {
@@ -439,19 +512,23 @@
 		if replace.New.Version != "" {
 			return nil, nil, errors.Errorf("gopls.mod: replaced module %q@%q must not have version", replace.New.Path, replace.New.Version)
 		}
-		dirFP := filepath.FromSlash(replace.New.Path)
-		if !filepath.IsAbs(dirFP) {
-			dirFP = filepath.Join(root.Filename(), dirFP)
-			// The resulting modfile must use absolute paths, so that it can be
-			// written to a temp directory.
-			replace.New.Path = dirFP
-		}
-		modURI := span.URIFromPath(filepath.Join(dirFP, "go.mod"))
+		// The resulting modfile must use absolute paths, so that it can be
+		// written to a temp directory.
+		replace.New.Path = absolutePath(root, replace.New.Path)
+		modURI := span.URIFromPath(filepath.Join(replace.New.Path, "go.mod"))
 		modFiles[modURI] = struct{}{}
 	}
 	return modFile, modFiles, nil
 }
 
+func absolutePath(root span.URI, path string) string {
+	dirFP := filepath.FromSlash(path)
+	if !filepath.IsAbs(dirFP) {
+		dirFP = filepath.Join(root.Filename(), dirFP)
+	}
+	return dirFP
+}
+
 // errExhausted is returned by findModules if the file scan limit is reached.
 var errExhausted = errors.New("exhausted")
 
diff --git a/internal/lsp/cache/workspace_test.go b/internal/lsp/cache/workspace_test.go
index 8524061..a03aedc 100644
--- a/internal/lsp/cache/workspace_test.go
+++ b/internal/lsp/cache/workspace_test.go
@@ -296,7 +296,7 @@
 						t.Fatal(err)
 					}
 				}
-				got, gotChanged, gotReload := w.invalidate(ctx, changes)
+				got, gotChanged, gotReload := w.invalidate(ctx, changes, fs)
 				if gotChanged != test.wantChanged {
 					t.Errorf("w.invalidate(): got changed %t, want %t", gotChanged, test.wantChanged)
 				}
diff --git a/internal/lsp/cmd/cmd.go b/internal/lsp/cmd/cmd.go
index 1acf197..ad344f7 100644
--- a/internal/lsp/cmd/cmd.go
+++ b/internal/lsp/cmd/cmd.go
@@ -527,8 +527,13 @@
 
 	c.Client.diagnosticsDone = make(chan struct{})
 	_, err := c.Server.NonstandardRequest(ctx, "gopls/diagnoseFiles", map[string]interface{}{"files": untypedFiles})
+	if err != nil {
+		close(c.Client.diagnosticsDone)
+		return err
+	}
+
 	<-c.Client.diagnosticsDone
-	return err
+	return nil
 }
 
 func (c *connection) terminate(ctx context.Context) {
diff --git a/internal/lsp/cmd/info.go b/internal/lsp/cmd/info.go
index fd53d8a..87ba428 100644
--- a/internal/lsp/cmd/info.go
+++ b/internal/lsp/cmd/info.go
@@ -178,6 +178,6 @@
 	} else {
 		txt += opts.LicensesText
 	}
-	fmt.Fprintf(os.Stdout, txt)
+	fmt.Fprint(os.Stdout, txt)
 	return nil
 }
diff --git a/internal/lsp/cmd/serve.go b/internal/lsp/cmd/serve.go
index 6d0787e..4164b58 100644
--- a/internal/lsp/cmd/serve.go
+++ b/internal/lsp/cmd/serve.go
@@ -56,6 +56,22 @@
 	f.PrintDefaults()
 }
 
+func (s *Serve) remoteArgs(network, address string) []string {
+	args := []string{"serve",
+		"-listen", fmt.Sprintf(`%s;%s`, network, address),
+	}
+	if s.RemoteDebug != "" {
+		args = append(args, "-debug", s.RemoteDebug)
+	}
+	if s.RemoteListenTimeout != 0 {
+		args = append(args, "-listen.timeout", s.RemoteListenTimeout.String())
+	}
+	if s.RemoteLogfile != "" {
+		args = append(args, "-logfile", s.RemoteLogfile)
+	}
+	return args
+}
+
 // Run configures a server based on the flags, and then runs it.
 // It blocks until the server shuts down.
 func (s *Serve) Run(ctx context.Context, args ...string) error {
@@ -77,12 +93,11 @@
 	}
 	var ss jsonrpc2.StreamServer
 	if s.app.Remote != "" {
-		network, addr := lsprpc.ParseAddr(s.app.Remote)
-		ss = lsprpc.NewForwarder(network, addr,
-			lsprpc.RemoteDebugAddress(s.RemoteDebug),
-			lsprpc.RemoteListenTimeout(s.RemoteListenTimeout),
-			lsprpc.RemoteLogfile(s.RemoteLogfile),
-		)
+		var err error
+		ss, err = lsprpc.NewForwarder(s.app.Remote, s.remoteArgs)
+		if err != nil {
+			return errors.Errorf("creating forwarder: %w", err)
+		}
 	} else {
 		ss = lsprpc.NewStreamServer(cache.New(s.app.options), isDaemon)
 	}
diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go
index b63a92a..2e92726 100644
--- a/internal/lsp/cmd/test/cmdtest.go
+++ b/internal/lsp/cmd/test/cmdtest.go
@@ -100,6 +100,10 @@
 	//TODO: function extraction not supported on command line
 }
 
+func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {
+	//TODO: method extraction not supported on command line
+}
+
 func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {
 	//TODO: import addition not supported on command line
 }
diff --git a/internal/lsp/code_action.go b/internal/lsp/code_action.go
index ac22bc0..b58e954 100644
--- a/internal/lsp/code_action.go
+++ b/internal/lsp/code_action.go
@@ -289,8 +289,8 @@
 	}
 	puri := protocol.URIFromSpanURI(uri)
 	var commands []protocol.Command
-	if _, ok, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok {
-		cmd, err := command.NewApplyFixCommand("Extract to function", command.ApplyFixArgs{
+	if _, ok, methodOk, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok {
+		cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{
 			URI:   puri,
 			Fix:   source.ExtractFunction,
 			Range: rng,
@@ -299,6 +299,17 @@
 			return nil, err
 		}
 		commands = append(commands, cmd)
+		if methodOk {
+			cmd, err := command.NewApplyFixCommand("Extract method", command.ApplyFixArgs{
+				URI:   puri,
+				Fix:   source.ExtractMethod,
+				Range: rng,
+			})
+			if err != nil {
+				return nil, err
+			}
+			commands = append(commands, cmd)
+		}
 	}
 	if _, _, ok, _ := source.CanExtractVariable(srng, pgf.File); ok {
 		cmd, err := command.NewApplyFixCommand("Extract variable", command.ApplyFixArgs{
@@ -312,11 +323,11 @@
 		commands = append(commands, cmd)
 	}
 	var actions []protocol.CodeAction
-	for _, cmd := range commands {
+	for i := range commands {
 		actions = append(actions, protocol.CodeAction{
-			Title:   cmd.Title,
+			Title:   commands[i].Title,
 			Kind:    protocol.RefactorExtract,
-			Command: &cmd,
+			Command: &commands[i],
 		})
 	}
 	return actions, nil
diff --git a/internal/lsp/command.go b/internal/lsp/command.go
index e877ac1..d810735 100644
--- a/internal/lsp/command.go
+++ b/internal/lsp/command.go
@@ -18,9 +18,9 @@
 	"golang.org/x/mod/modfile"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
-	"golang.org/x/tools/internal/lsp/cache"
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/debug"
+	"golang.org/x/tools/internal/lsp/progress"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/span"
@@ -66,7 +66,7 @@
 type commandDeps struct {
 	snapshot source.Snapshot            // present if cfg.forURI was set
 	fh       source.VersionedFileHandle // present if cfg.forURI was set
-	work     *workDone                  // present cfg.progress was set
+	work     *progress.WorkDone         // present if cfg.progress was set
 }
 
 type commandFunc func(context.Context, commandDeps) error
@@ -91,19 +91,21 @@
 	}
 	ctx, cancel := context.WithCancel(xcontext.Detach(ctx))
 	if cfg.progress != "" {
-		deps.work = c.s.progress.start(ctx, cfg.progress, "Running...", c.params.WorkDoneToken, cancel)
+		deps.work = c.s.progress.Start(ctx, cfg.progress, "Running...", c.params.WorkDoneToken, cancel)
 	}
 	runcmd := func() error {
 		defer cancel()
 		err := run(ctx, deps)
-		switch {
-		case errors.Is(err, context.Canceled):
-			deps.work.end("canceled")
-		case err != nil:
-			event.Error(ctx, "command error", err)
-			deps.work.end("failed")
-		default:
-			deps.work.end("completed")
+		if deps.work != nil {
+			switch {
+			case errors.Is(err, context.Canceled):
+				deps.work.End("canceled")
+			case err != nil:
+				event.Error(ctx, "command error", err)
+				deps.work.End("failed")
+			default:
+				deps.work.End("completed")
+			}
 		}
 		return err
 	}
@@ -350,7 +352,7 @@
 	})
 }
 
-func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *workDone, uri protocol.DocumentURI, tests, benchmarks []string) error {
+func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, work *progress.WorkDone, uri protocol.DocumentURI, tests, benchmarks []string) error {
 	// TODO: fix the error reporting when this runs async.
 	pkgs, err := snapshot.PackagesForFile(ctx, uri.SpanURI(), source.TypecheckWorkspace)
 	if err != nil {
@@ -363,8 +365,8 @@
 
 	// create output
 	buf := &bytes.Buffer{}
-	ew := &eventWriter{ctx: ctx, operation: "test"}
-	out := io.MultiWriter(ew, workDoneWriter{work}, buf)
+	ew := progress.NewEventWriter(ctx, "test")
+	out := io.MultiWriter(ew, progress.NewWorkDoneWriter(work), buf)
 
 	// Run `go test -run Func` on each test.
 	var failedTests int
@@ -436,7 +438,7 @@
 		progress:    title,
 		forURI:      args.Dir,
 	}, func(ctx context.Context, deps commandDeps) error {
-		er := &eventWriter{ctx: ctx, operation: "generate"}
+		er := progress.NewEventWriter(ctx, "generate")
 
 		pattern := "."
 		if args.Recursive {
@@ -447,7 +449,7 @@
 			Args:       []string{"-x", pattern},
 			WorkingDir: args.Dir.SpanURI().Filename(),
 		}
-		stderr := io.MultiWriter(er, workDoneWriter{deps.work})
+		stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(deps.work))
 		if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil {
 			return err
 		}
@@ -647,7 +649,7 @@
 		v := views[0]
 		snapshot, release := v.Snapshot(ctx)
 		defer release()
-		modFile, err := cache.BuildGoplsMod(ctx, snapshot.View().Folder(), snapshot)
+		modFile, err := snapshot.BuildGoplsMod(ctx)
 		if err != nil {
 			return errors.Errorf("getting workspace mod file: %w", err)
 		}
diff --git a/internal/lsp/command/commandmeta/meta.go b/internal/lsp/command/commandmeta/meta.go
index c036d7a..1a6a2c7 100644
--- a/internal/lsp/command/commandmeta/meta.go
+++ b/internal/lsp/command/commandmeta/meta.go
@@ -28,7 +28,7 @@
 	Title  string
 	Doc    string
 	Args   []*Field
-	Result types.Type
+	Result *Field
 }
 
 func (c *Command) ID() string {
@@ -36,10 +36,11 @@
 }
 
 type Field struct {
-	Name    string
-	Doc     string
-	JSONTag string
-	Type    types.Type
+	Name     string
+	Doc      string
+	JSONTag  string
+	Type     types.Type
+	FieldMod string
 	// In some circumstances, we may want to recursively load additional field
 	// descriptors for fields of struct types, documenting their internals.
 	Fields []*Field
@@ -110,15 +111,15 @@
 		return nil, fmt.Errorf("final return must be error")
 	}
 	if rlen == 2 {
-		c.Result = sig.Results().At(0).Type()
+		obj := sig.Results().At(0)
+		c.Result, err = l.loadField(pkg, obj, "", "")
+		if err != nil {
+			return nil, err
+		}
 	}
-	ftype := node.Type.(*ast.FuncType)
-	if sig.Params().Len() != ftype.Params.NumFields() {
-		panic("bug: mismatching method params")
-	}
-	for i, p := range ftype.Params.List {
-		pt := sig.Params().At(i)
-		fld, err := l.loadField(pkg, p, pt, "")
+	for i := 0; i < sig.Params().Len(); i++ {
+		obj := sig.Params().At(i)
+		fld, err := l.loadField(pkg, obj, "", "")
 		if err != nil {
 			return nil, err
 		}
@@ -136,20 +137,29 @@
 	return c, nil
 }
 
-func (l *fieldLoader) loadField(pkg *packages.Package, node *ast.Field, obj *types.Var, tag string) (*Field, error) {
+func (l *fieldLoader) loadField(pkg *packages.Package, obj *types.Var, doc, tag string) (*Field, error) {
 	if existing, ok := l.loaded[obj]; ok {
 		return existing, nil
 	}
 	fld := &Field{
 		Name:    obj.Name(),
-		Doc:     strings.TrimSpace(node.Doc.Text()),
+		Doc:     strings.TrimSpace(doc),
 		Type:    obj.Type(),
 		JSONTag: reflect.StructTag(tag).Get("json"),
 	}
 	under := fld.Type.Underlying()
-	if p, ok := under.(*types.Pointer); ok {
-		under = p.Elem()
+	// Quick-and-dirty handling for various underlying types.
+	switch p := under.(type) {
+	case *types.Pointer:
+		under = p.Elem().Underlying()
+	case *types.Array:
+		under = p.Elem().Underlying()
+		fld.FieldMod = fmt.Sprintf("[%d]", p.Len())
+	case *types.Slice:
+		under = p.Elem().Underlying()
+		fld.FieldMod = "[]"
 	}
+
 	if s, ok := under.(*types.Struct); ok {
 		for i := 0; i < s.NumFields(); i++ {
 			obj2 := s.Field(i)
@@ -160,12 +170,12 @@
 					return nil, fmt.Errorf("missing import for %q: %q", pkg.ID, obj2.Pkg().Path())
 				}
 			}
-			node2, err := findField(pkg2, obj2.Pos())
+			node, err := findField(pkg2, obj2.Pos())
 			if err != nil {
 				return nil, err
 			}
 			tag := s.Tag(i)
-			structField, err := l.loadField(pkg2, node2, obj2, tag)
+			structField, err := l.loadField(pkg2, obj2, node.Doc.Text(), tag)
 			if err != nil {
 				return nil, err
 			}
diff --git a/internal/lsp/command/gen/gen.go b/internal/lsp/command/gen/gen.go
index 3934f1a..8f7a2d5 100644
--- a/internal/lsp/command/gen/gen.go
+++ b/internal/lsp/command/gen/gen.go
@@ -120,9 +120,11 @@
 				d.Imports[pth] = true
 			}
 		}
-		pth := pkgPath(c.Result)
-		if pth != "" && pth != thispkg {
-			d.Imports[pth] = true
+		if c.Result != nil {
+			pth := pkgPath(c.Result.Type)
+			if pth != "" && pth != thispkg {
+				d.Imports[pth] = true
+			}
 		}
 	}
 
diff --git a/internal/lsp/command/interface.go b/internal/lsp/command/interface.go
index 2347950..360dfc3 100644
--- a/internal/lsp/command/interface.go
+++ b/internal/lsp/command/interface.go
@@ -78,22 +78,22 @@
 	// Checks for module upgrades.
 	CheckUpgrades(context.Context, CheckUpgradesArgs) error
 
-	// AddDependency: Add dependency
+	// AddDependency: Add a dependency
 	//
 	// Adds a dependency to the go.mod file for a module.
 	AddDependency(context.Context, DependencyArgs) error
 
-	// UpgradeDependency: Upgrade dependency
+	// UpgradeDependency: Upgrade a dependency
 	//
 	// Upgrades a dependency in the go.mod file for a module.
 	UpgradeDependency(context.Context, DependencyArgs) error
 
-	// RemoveDependency: Remove dependency
+	// RemoveDependency: Remove a dependency
 	//
 	// Removes a dependency from the go.mod file of a module.
 	RemoveDependency(context.Context, RemoveDependencyArgs) error
 
-	// GoGetPackage: go get package
+	// GoGetPackage: go get a package
 	//
 	// Runs `go get` to fetch a package.
 	GoGetPackage(context.Context, GoGetPackageArgs) error
@@ -115,17 +115,27 @@
 	// (Re)generate the gopls.mod file for a workspace.
 	GenerateGoplsMod(context.Context, URIArg) error
 
-	// ListKnownPackages: retrieves a list of packages
-	// that are importable from the given URI.
+	// ListKnownPackages: List known packages
+	//
+	// Retrieve a list of packages that are importable from the given URI.
 	ListKnownPackages(context.Context, URIArg) (ListKnownPackagesResult, error)
 
-	// AddImport: asks the server to add an import path to a given Go file.
-	// The method will call applyEdit on the client so that clients don't have
-	// to apply the edit themselves.
+	// AddImport: Add an import
+	//
+	// Ask the server to add an import path to a given Go file.  The method will
+	// call applyEdit on the client so that clients don't have to apply the edit
+	// themselves.
 	AddImport(context.Context, AddImportArgs) error
 
+	// WorkspaceMetadata: Query workspace metadata
+	//
+	// Query the server for information about active workspaces.
 	WorkspaceMetadata(context.Context) (WorkspaceMetadataResult, error)
 
+	// StartDebugging: Start the gopls debug server
+	//
+	// Start the gopls debug server if it isn't running, and return the debug
+	// address.
 	StartDebugging(context.Context, DebuggingArgs) (DebuggingResult, error)
 }
 
@@ -223,11 +233,14 @@
 }
 
 type WorkspaceMetadataResult struct {
+	// All workspaces for this session.
 	Workspaces []Workspace
 }
 
 type Workspace struct {
-	Name      string
+	// The workspace name.
+	Name string
+	// The workspace module directory.
 	ModuleDir string
 }
 
diff --git a/internal/lsp/debounce.go b/internal/lsp/debounce.go
index 80cf78b..06f4114 100644
--- a/internal/lsp/debounce.go
+++ b/internal/lsp/debounce.go
@@ -9,73 +9,63 @@
 	"time"
 )
 
-type debounceFunc struct {
+type debounceEvent struct {
 	order uint64
 	done  chan struct{}
 }
 
 type debouncer struct {
-	mu    sync.Mutex
-	funcs map[string]*debounceFunc
+	mu     sync.Mutex
+	events map[string]*debounceEvent
 }
 
 func newDebouncer() *debouncer {
 	return &debouncer{
-		funcs: make(map[string]*debounceFunc),
+		events: make(map[string]*debounceEvent),
 	}
 }
 
-// debounce waits timeout before running f, if no subsequent call is made with
-// the same key in the intervening time. If a later call to debounce with the
-// same key occurs while the original call is blocking, the original call will
-// return immediately without running its f.
-//
-// If order is specified, it will be used to order calls logically, so calls
-// with lesser order will not cancel calls with greater order.
-func (d *debouncer) debounce(key string, order uint64, timeout time.Duration, f func()) {
-	if timeout == 0 {
-		// Degenerate case: no debouncing.
-		f()
-		return
-	}
+// debounce returns a channel that receives a boolean reporting whether,
+// by the time the delay channel receives a value, this call is (or will be)
+// the most recent call with the highest order number for its key.
+func (d *debouncer) debounce(key string, order uint64, delay <-chan time.Time) <-chan bool {
+	okc := make(chan bool, 1)
 
-	// First, atomically acquire the current func, cancel it, and insert this
-	// call into d.funcs.
 	d.mu.Lock()
-	current, ok := d.funcs[key]
-	if ok && current.order > order {
-		// If we have a logical ordering of events (as is the case for snapshots),
-		// don't overwrite a later event with an earlier event.
-		d.mu.Unlock()
-		return
-	}
-	if ok {
-		close(current.done)
+	if prev, ok := d.events[key]; ok {
+		if prev.order > order {
+			// If we have a logical ordering of events (as is the case for snapshots),
+			// don't overwrite a later event with an earlier event.
+			d.mu.Unlock()
+			okc <- false
+			return okc
+		}
+		close(prev.done)
 	}
 	done := make(chan struct{})
-	next := &debounceFunc{
+	next := &debounceEvent{
 		order: order,
 		done:  done,
 	}
-	d.funcs[key] = next
+	d.events[key] = next
 	d.mu.Unlock()
 
-	// Next, wait to be cancelled or for our wait to expire. There is a race here
-	// that we must handle: our timer could expire while another goroutine holds
-	// d.mu.
-	select {
-	case <-done:
-	case <-time.After(timeout):
-		d.mu.Lock()
-		if d.funcs[key] != next {
-			// We lost the race: another event has arrived for the key and started
-			// waiting. We could reasonably choose to run f at this point, but doing
-			// nothing is simpler.
+	go func() {
+		ok := false
+		select {
+		case <-delay:
+			d.mu.Lock()
+			if d.events[key] == next {
+				ok = true
+				delete(d.events, key)
+			} else {
+				// The event was superseded before we acquired d.mu.
+			}
 			d.mu.Unlock()
-			return
+		case <-done:
 		}
-		delete(d.funcs, key)
-		d.mu.Unlock()
-		f()
-	}
+		okc <- ok
+	}()
+
+	return okc
 }
diff --git a/internal/lsp/debounce_test.go b/internal/lsp/debounce_test.go
index 841b780..b5597fa 100644
--- a/internal/lsp/debounce_test.go
+++ b/internal/lsp/debounce_test.go
@@ -5,17 +5,16 @@
 package lsp
 
 import (
-	"sync"
 	"testing"
 	"time"
 )
 
 func TestDebouncer(t *testing.T) {
 	t.Parallel()
+
 	type event struct {
 		key       string
 		order     uint64
-		fired     bool
 		wantFired bool
 	}
 	tests := []struct {
@@ -57,29 +56,24 @@
 	for _, test := range tests {
 		test := test
 		t.Run(test.label, func(t *testing.T) {
-			t.Parallel()
 			d := newDebouncer()
-			var wg sync.WaitGroup
+
+			delays := make([]chan time.Time, len(test.events))
+			okcs := make([]<-chan bool, len(test.events))
+
+			// Register the events in deterministic order, synchronously.
 			for i, e := range test.events {
-				wg.Add(1)
-				go func(e *event) {
-					d.debounce(e.key, e.order, 500*time.Millisecond, func() {
-						e.fired = true
-					})
-					wg.Done()
-				}(e)
-				// For a bit more fidelity, sleep to try to make things actually
-				// execute in order. This doesn't have to be perfect, but could be done
-				// properly using fake timers.
-				if i < len(test.events)-1 {
-					time.Sleep(10 * time.Millisecond)
-				}
+				delays[i] = make(chan time.Time, 1)
+				okcs[i] = d.debounce(e.key, e.order, delays[i])
 			}
-			wg.Wait()
-			for _, event := range test.events {
-				if event.fired != event.wantFired {
-					t.Errorf("(key: %q, order: %d): fired = %t, want %t",
-						event.key, event.order, event.fired, event.wantFired)
+
+			// Now see which event fired.
+			for i, okc := range okcs {
+				event := test.events[i]
+				delays[i] <- time.Now()
+				fired := <-okc
+				if fired != event.wantFired {
+					t.Errorf("[key: %q, order: %d]: fired = %t, want %t", event.key, event.order, fired, event.wantFired)
 				}
 			}
 		})
diff --git a/internal/lsp/debug/info.go b/internal/lsp/debug/info.go
index be4f926..fddfff2 100644
--- a/internal/lsp/debug/info.go
+++ b/internal/lsp/debug/info.go
@@ -26,7 +26,7 @@
 )
 
 // Version is a manually-updated mechanism for tracking versions.
-const Version = "v0.7.0"
+const Version = "v0.7.2"
 
 // ServerVersion is the format used by gopls to report its version to the
 // client. This format is structured so that the client can parse it easily.
diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go
index 8d559e1..ef23372 100644
--- a/internal/lsp/diagnostics.go
+++ b/internal/lsp/diagnostics.go
@@ -12,6 +12,7 @@
 	"path/filepath"
 	"strings"
 	"sync"
+	"time"
 
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/lsp/debug/log"
@@ -91,14 +92,26 @@
 	s.publishDiagnostics(ctx, true, snapshot)
 }
 
+func (s *Server) diagnoseSnapshots(snapshots map[source.Snapshot][]span.URI, onDisk bool) {
+	var diagnosticWG sync.WaitGroup
+	for snapshot, uris := range snapshots {
+		diagnosticWG.Add(1)
+		go func(snapshot source.Snapshot, uris []span.URI) {
+			defer diagnosticWG.Done()
+			s.diagnoseSnapshot(snapshot, uris, onDisk)
+		}(snapshot, uris)
+	}
+	diagnosticWG.Wait()
+}
+
 func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) {
 	ctx := snapshot.BackgroundContext()
 	ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", tag.Snapshot.Of(snapshot.ID()))
 	defer done()
 
-	delay := snapshot.View().Options().ExperimentalDiagnosticsDelay
+	delay := snapshot.View().Options().DiagnosticsDelay
 	if delay > 0 {
-		// Experimental 2-phase diagnostics.
+		// 2-phase diagnostics.
 		//
 		// The first phase just parses and checks packages that have been
 		// affected by file modifications (no analysis).
@@ -107,10 +120,10 @@
 		// delay.
 		s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk)
 		s.publishDiagnostics(ctx, false, snapshot)
-		s.debouncer.debounce(snapshot.View().Name(), snapshot.ID(), delay, func() {
+		if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.ID(), time.After(delay)); ok {
 			s.diagnose(ctx, snapshot, false)
 			s.publishDiagnostics(ctx, true, snapshot)
-		})
+		}
 		return
 	}
 
@@ -379,12 +392,12 @@
 		for _, d := range err.DiagList {
 			s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d})
 		}
-		errMsg = strings.Replace(err.MainError.Error(), "\n", " ", -1)
+		errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ")
 	}
 
 	if s.criticalErrorStatus == nil {
 		if errMsg != "" {
-			s.criticalErrorStatus = s.progress.start(ctx, WorkspaceLoadFailure, errMsg, nil, nil)
+			s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil)
 		}
 		return
 	}
@@ -392,10 +405,10 @@
 	// If an error is already shown to the user, update it or mark it as
 	// resolved.
 	if errMsg == "" {
-		s.criticalErrorStatus.end("Done.")
+		s.criticalErrorStatus.End("Done.")
 		s.criticalErrorStatus = nil
 	} else {
-		s.criticalErrorStatus.report(errMsg, 0)
+		s.criticalErrorStatus.Report(errMsg, 0)
 	}
 }
 
diff --git a/internal/lsp/fake/edit.go b/internal/lsp/fake/edit.go
index c3f07e2..8b04c39 100644
--- a/internal/lsp/fake/edit.go
+++ b/internal/lsp/fake/edit.go
@@ -18,6 +18,10 @@
 	Line, Column int
 }
 
+func (p Pos) String() string {
+	return fmt.Sprintf("%v:%v", p.Line, p.Column)
+}
+
 // Range corresponds to protocol.Range, but uses the editor friend Pos
 // instead of UTF-16 oriented protocol.Position
 type Range struct {
diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go
index 501d32c..61867d5 100644
--- a/internal/lsp/fake/editor.go
+++ b/internal/lsp/fake/editor.go
@@ -114,11 +114,10 @@
 	// Whether to edit files with windows line endings.
 	WindowsLineEndings bool
 
-	DirectoryFilters []string
-
-	VerboseOutput bool
-
-	ImportShortcut string
+	ImportShortcut                 string
+	DirectoryFilters               []string
+	VerboseOutput                  bool
+	ExperimentalUseInvalidMetadata bool
 }
 
 // NewEditor Creates a new Editor.
@@ -206,9 +205,11 @@
 func (e *Editor) overlayEnv() map[string]string {
 	env := make(map[string]string)
 	for k, v := range e.defaultEnv {
+		v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
 		env[k] = v
 	}
 	for k, v := range e.Config.Env {
+		v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
 		env[k] = v
 	}
 	return env
@@ -228,6 +229,9 @@
 	if e.Config.DirectoryFilters != nil {
 		config["directoryFilters"] = e.Config.DirectoryFilters
 	}
+	if e.Config.ExperimentalUseInvalidMetadata {
+		config["experimentalUseInvalidMetadata"] = true
+	}
 	if e.Config.CodeLenses != nil {
 		config["codelenses"] = e.Config.CodeLenses
 	}
@@ -252,9 +256,7 @@
 		config["importShortcut"] = e.Config.ImportShortcut
 	}
 
-	// TODO(rFindley): change to the new settings name once it is no longer
-	// designated experimental.
-	config["experimentalDiagnosticsDelay"] = "10ms"
+	config["diagnosticsDelay"] = "10ms"
 
 	// ExperimentalWorkspaceModule is only set as a mode, not a configuration.
 	return config
diff --git a/internal/lsp/fake/sandbox.go b/internal/lsp/fake/sandbox.go
index 34f1ba1..f628f2d 100644
--- a/internal/lsp/fake/sandbox.go
+++ b/internal/lsp/fake/sandbox.go
@@ -163,7 +163,7 @@
 }
 
 func validateConfig(config SandboxConfig) error {
-	if filepath.IsAbs(config.Workdir) && (config.Files != nil || config.InGoPath) {
+	if filepath.IsAbs(config.Workdir) && (len(config.Files) > 0 || config.InGoPath) {
 		return errors.New("absolute Workdir cannot be set in conjunction with Files or InGoPath")
 	}
 	if config.Workdir != "" && config.InGoPath {
diff --git a/internal/lsp/fake/workdir.go b/internal/lsp/fake/workdir.go
index aa9ef84..d836deb 100644
--- a/internal/lsp/fake/workdir.go
+++ b/internal/lsp/fake/workdir.go
@@ -190,6 +190,9 @@
 	if err := os.RemoveAll(fp); err != nil {
 		return errors.Errorf("removing %q: %w", path, err)
 	}
+	w.fileMu.Lock()
+	defer w.fileMu.Unlock()
+
 	evts := []FileEvent{{
 		Path: path,
 		ProtocolEvent: protocol.FileEvent{
@@ -198,6 +201,7 @@
 		},
 	}}
 	w.sendEvents(ctx, evts)
+	delete(w.files, path)
 	return nil
 }
 
diff --git a/internal/lsp/general.go b/internal/lsp/general.go
index 3c7bbfe..3c409d3 100644
--- a/internal/lsp/general.go
+++ b/internal/lsp/general.go
@@ -46,7 +46,7 @@
 		event.Error(ctx, "creating temp dir", err)
 		s.tempDir = ""
 	}
-	s.progress.supportsWorkDoneProgress = params.Capabilities.Window.WorkDoneProgress
+	s.progress.SetSupportsWorkDoneProgress(params.Capabilities.Window.WorkDoneProgress)
 
 	options := s.session.Options()
 	defer func() { s.session.SetOptions(options) }()
@@ -217,11 +217,11 @@
 
 	var wg sync.WaitGroup
 	if s.session.Options().VerboseWorkDoneProgress {
-		work := s.progress.start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil)
+		work := s.progress.Start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil)
 		defer func() {
 			go func() {
 				wg.Wait()
-				work.end("Done.")
+				work.End("Done.")
 			}()
 		}()
 	}
@@ -233,11 +233,11 @@
 		if !uri.IsFile() {
 			continue
 		}
-		work := s.progress.start(ctx, "Setting up workspace", "Loading packages...", nil, nil)
+		work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil)
 		snapshot, release, err := s.addView(ctx, folder.Name, uri)
 		if err != nil {
 			viewErrors[uri] = err
-			work.end(fmt.Sprintf("Error loading packages: %s", err))
+			work.End(fmt.Sprintf("Error loading packages: %s", err))
 			continue
 		}
 		var swg sync.WaitGroup
@@ -247,7 +247,7 @@
 			defer swg.Done()
 			defer allFoldersWg.Done()
 			snapshot.AwaitInitialized(ctx)
-			work.end("Finished loading packages.")
+			work.End("Finished loading packages.")
 		}()
 
 		// Print each view's environment.
diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go
index a349a50..f095489 100644
--- a/internal/lsp/lsp_test.go
+++ b/internal/lsp/lsp_test.go
@@ -92,6 +92,7 @@
 		normalizers: tests.CollectNormalizers(datum.Exported),
 		editRecv:    make(chan map[span.URI]string, 1),
 	}
+
 	r.server = NewServer(session, testClient{runner: r})
 	tests.Run(t, r, datum)
 }
@@ -582,7 +583,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+	actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
 		TextDocument: protocol.TextDocumentIdentifier{
 			URI: protocol.URIFromSpanURI(uri),
 		},
@@ -594,6 +595,12 @@
 	if err != nil {
 		t.Fatal(err)
 	}
+	var actions []protocol.CodeAction
+	for _, action := range actionsRaw {
+		if action.Command.Title == "Extract function" {
+			actions = append(actions, action)
+		}
+	}
 	// Hack: We assume that we only get one code action per range.
 	// TODO(rstambler): Support multiple code actions per test.
 	if len(actions) == 0 || len(actions) > 1 {
@@ -617,6 +624,58 @@
 	}
 }
 
+func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {
+	uri := start.URI()
+	m, err := r.data.Mapper(uri)
+	if err != nil {
+		t.Fatal(err)
+	}
+	spn := span.New(start.URI(), start.Start(), end.End())
+	rng, err := m.Range(spn)
+	if err != nil {
+		t.Fatal(err)
+	}
+	actionsRaw, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+		TextDocument: protocol.TextDocumentIdentifier{
+			URI: protocol.URIFromSpanURI(uri),
+		},
+		Range: rng,
+		Context: protocol.CodeActionContext{
+			Only: []protocol.CodeActionKind{"refactor.extract"},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	var actions []protocol.CodeAction
+	for _, action := range actionsRaw {
+		if action.Command.Title == "Extract method" {
+			actions = append(actions, action)
+		}
+	}
+	// Hack: We assume that we only get one matching code action per range.
+	// TODO(rstambler): Support multiple code actions per test.
+	if len(actions) == 0 || len(actions) > 1 {
+		t.Fatalf("unexpected number of code actions, want 1, got %v", len(actions))
+	}
+	_, err = r.server.ExecuteCommand(r.ctx, &protocol.ExecuteCommandParams{
+		Command:   actions[0].Command.Command,
+		Arguments: actions[0].Command.Arguments,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	res := <-r.editRecv
+	for u, got := range res {
+		want := string(r.data.Golden("methodextraction_"+tests.SpanName(spn), u.Filename(), func() ([]byte, error) {
+			return []byte(got), nil
+		}))
+		if want != got {
+			t.Errorf("method extraction failed for %s:\n%s", u.Filename(), tests.Diff(t, want, got))
+		}
+	}
+}
+
 func (r *runner) Definition(t *testing.T, spn span.Span, d tests.Definition) {
 	sm, err := r.data.Mapper(d.Src.URI())
 	if err != nil {
diff --git a/internal/lsp/lsppos/lsppos.go b/internal/lsp/lsppos/lsppos.go
new file mode 100644
index 0000000..f27bde5
--- /dev/null
+++ b/internal/lsp/lsppos/lsppos.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lsppos provides utilities for working with LSP positions.
+//
+// See https://microsoft.github.io/language-server-protocol/specification#textDocuments
+// for a description of LSP positions. Notably:
+//  - Positions are specified by a 0-based line count and 0-based utf-16
+//    character offset.
+//  - Positions are line-ending agnostic: there is no way to specify \r|\n or
+//    \n|. Instead the former maps to the end of the current line, and the
+//    latter to the start of the next line.
+package lsppos
+
+import (
+	"sort"
+	"unicode/utf8"
+)
+
+type Mapper struct {
+	nonASCII bool
+	src      []byte
+
+	// Start-of-line positions. If src is newline-terminated, the final entry will be empty.
+	lines []int
+}
+
+func NewMapper(src []byte) *Mapper {
+	m := &Mapper{src: src}
+	if len(src) == 0 {
+		return m
+	}
+	m.lines = []int{0}
+	for offset, b := range src {
+		if b == '\n' {
+			m.lines = append(m.lines, offset+1)
+		}
+		if b >= utf8.RuneSelf {
+			m.nonASCII = true
+		}
+	}
+	return m
+}
+
+func (m *Mapper) Position(offset int) (line, char int) {
+	if offset < 0 || offset > len(m.src) {
+		return -1, -1
+	}
+	nextLine := sort.Search(len(m.lines), func(i int) bool {
+		return offset < m.lines[i]
+	})
+	if nextLine == 0 {
+		return -1, -1
+	}
+	line = nextLine - 1
+	start := m.lines[line]
+	var charOffset int
+	if m.nonASCII {
+		charOffset = UTF16len(m.src[start:offset])
+	} else {
+		charOffset = offset - start
+	}
+
+	var eol int
+	if line == len(m.lines)-1 {
+		eol = len(m.src)
+	} else {
+		eol = m.lines[line+1] - 1
+	}
+
+	// Adjustment for line-endings: \r|\n is the same as |\r\n.
+	if offset == eol && offset > 0 && m.src[offset-1] == '\r' {
+		charOffset--
+	}
+
+	return line, charOffset
+}
+
+func UTF16len(buf []byte) int {
+	cnt := 0
+	for _, r := range string(buf) {
+		cnt++
+		if r >= 1<<16 {
+			cnt++
+		}
+	}
+	return cnt
+}
diff --git a/internal/lsp/lsprpc/autostart_default.go b/internal/lsp/lsprpc/autostart_default.go
index dc04f66..b23a1e5 100644
--- a/internal/lsp/lsprpc/autostart_default.go
+++ b/internal/lsp/lsprpc/autostart_default.go
@@ -11,13 +11,13 @@
 )
 
 var (
-	startRemote           = startRemoteDefault
+	daemonize             = func(*exec.Cmd) {}
 	autoNetworkAddress    = autoNetworkAddressDefault
 	verifyRemoteOwnership = verifyRemoteOwnershipDefault
 )
 
-func startRemoteDefault(goplsPath string, args ...string) error {
-	cmd := exec.Command(goplsPath, args...)
+func runRemote(cmd *exec.Cmd) error {
+	daemonize(cmd)
 	if err := cmd.Start(); err != nil {
 		return errors.Errorf("starting remote gopls: %w", err)
 	}
diff --git a/internal/lsp/lsprpc/autostart_posix.go b/internal/lsp/lsprpc/autostart_posix.go
index 45089b8..d5644e2 100644
--- a/internal/lsp/lsprpc/autostart_posix.go
+++ b/internal/lsp/lsprpc/autostart_posix.go
@@ -11,7 +11,6 @@
 	"crypto/sha256"
 	"errors"
 	"fmt"
-	exec "golang.org/x/sys/execabs"
 	"log"
 	"os"
 	"os/user"
@@ -19,24 +18,21 @@
 	"strconv"
 	"syscall"
 
+	exec "golang.org/x/sys/execabs"
+
 	"golang.org/x/xerrors"
 )
 
 func init() {
-	startRemote = startRemotePosix
+	daemonize = daemonizePosix
 	autoNetworkAddress = autoNetworkAddressPosix
 	verifyRemoteOwnership = verifyRemoteOwnershipPosix
 }
 
-func startRemotePosix(goplsPath string, args ...string) error {
-	cmd := exec.Command(goplsPath, args...)
+func daemonizePosix(cmd *exec.Cmd) {
 	cmd.SysProcAttr = &syscall.SysProcAttr{
 		Setsid: true,
 	}
-	if err := cmd.Start(); err != nil {
-		return xerrors.Errorf("starting remote gopls: %w", err)
-	}
-	return nil
 }
 
 // autoNetworkAddress resolves an id on the 'auto' pseduo-network to a
diff --git a/internal/lsp/lsprpc/binder.go b/internal/lsp/lsprpc/binder.go
index 3f5cb3b..f3320e1 100644
--- a/internal/lsp/lsprpc/binder.go
+++ b/internal/lsp/lsprpc/binder.go
@@ -7,14 +7,29 @@
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 
+	"golang.org/x/tools/internal/event"
 	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
 	"golang.org/x/tools/internal/lsp/protocol"
+	"golang.org/x/tools/internal/xcontext"
 	errors "golang.org/x/xerrors"
 )
 
+// The BinderFunc type adapts a bind function to implement the jsonrpc2.Binder
+// interface.
+type BinderFunc func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error)
+
+func (f BinderFunc) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
+	return f(ctx, conn)
+}
+
+// Middleware defines a transformation of jsonrpc2 Binders, that may be
+// composed to build jsonrpc2 servers.
+type Middleware func(jsonrpc2_v2.Binder) jsonrpc2_v2.Binder
+
+// A ServerFunc is used to construct an LSP server for a given client.
 type ServerFunc func(context.Context, protocol.ClientCloser) protocol.Server
-type ClientFunc func(context.Context, protocol.Server) protocol.Client
 
 // ServerBinder binds incoming connections to a new server.
 type ServerBinder struct {
@@ -22,7 +37,7 @@
 }
 
 func NewServerBinder(newServer ServerFunc) *ServerBinder {
-	return &ServerBinder{newServer}
+	return &ServerBinder{newServer: newServer}
 }
 
 func (b *ServerBinder) Bind(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
@@ -71,6 +86,7 @@
 
 type ForwardBinder struct {
 	dialer jsonrpc2_v2.Dialer
+	onBind func(*jsonrpc2_v2.Connection)
 }
 
 func NewForwardBinder(dialer jsonrpc2_v2.Dialer) *ForwardBinder {
@@ -86,12 +102,30 @@
 	if err != nil {
 		return opts, err
 	}
+	if b.onBind != nil {
+		b.onBind(serverConn)
+	}
 	server := protocol.ServerDispatcherV2(serverConn)
+	preempter := &canceler{
+		conn: conn,
+	}
+	detached := xcontext.Detach(ctx)
+	go func() {
+		conn.Wait()
+		if err := serverConn.Close(); err != nil {
+			event.Log(detached, fmt.Sprintf("closing remote connection: %v", err))
+		}
+	}()
 	return jsonrpc2_v2.ConnectionOptions{
-		Handler: protocol.ServerHandlerV2(server),
+		Handler:   protocol.ServerHandlerV2(server),
+		Preempter: preempter,
 	}, nil
 }
 
+// A ClientFunc is used to construct an LSP client for a given server.
+type ClientFunc func(context.Context, protocol.Server) protocol.Client
+
+// ClientBinder binds an LSP client to an incoming connection.
 type ClientBinder struct {
 	newClient ClientFunc
 }
diff --git a/internal/lsp/lsprpc/binder_test.go b/internal/lsp/lsprpc/binder_test.go
index d29de0f..f7dd830 100644
--- a/internal/lsp/lsprpc/binder_test.go
+++ b/internal/lsp/lsprpc/binder_test.go
@@ -2,10 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// TODO(rFindley): move this to lsprpc_test once it no longer shares with
-//                 lsprpc_test.go.
-
-package lsprpc
+package lsprpc_test
 
 import (
 	"context"
@@ -16,102 +13,142 @@
 
 	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
 	"golang.org/x/tools/internal/lsp/protocol"
+
+	. "golang.org/x/tools/internal/lsp/lsprpc"
 )
 
-type testEnv struct {
-	listener  jsonrpc2_v2.Listener
-	conn      *jsonrpc2_v2.Connection
-	rpcServer *jsonrpc2_v2.Server
+type TestEnv struct {
+	Listeners []jsonrpc2_v2.Listener
+	Conns     []*jsonrpc2_v2.Connection
+	Servers   []*jsonrpc2_v2.Server
 }
 
-func (e testEnv) Shutdown(t *testing.T) {
-	if err := e.listener.Close(); err != nil {
-		t.Error(err)
+func (e *TestEnv) Shutdown(t *testing.T) {
+	for _, l := range e.Listeners {
+		if err := l.Close(); err != nil {
+			t.Error(err)
+		}
 	}
-	if err := e.conn.Close(); err != nil {
-		t.Error(err)
+	for _, c := range e.Conns {
+		if err := c.Close(); err != nil {
+			t.Error(err)
+		}
 	}
-	if err := e.rpcServer.Wait(); err != nil {
-		t.Error(err)
+	for _, s := range e.Servers {
+		if err := s.Wait(); err != nil {
+			t.Error(err)
+		}
 	}
 }
 
-func startServing(ctx context.Context, t *testing.T, server protocol.Server, client protocol.Client) testEnv {
-	listener, err := jsonrpc2_v2.NetPipe(ctx)
+func (e *TestEnv) serve(ctx context.Context, t *testing.T, server jsonrpc2_v2.Binder) (jsonrpc2_v2.Listener, *jsonrpc2_v2.Server) {
+	l, err := jsonrpc2_v2.NetPipeListener(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
-	newServer := func(ctx context.Context, client protocol.ClientCloser) protocol.Server {
+	e.Listeners = append(e.Listeners, l)
+	s, err := jsonrpc2_v2.Serve(ctx, l, server)
+	if err != nil {
+		t.Fatal(err)
+	}
+	e.Servers = append(e.Servers, s)
+	return l, s
+}
+
+func (e *TestEnv) dial(ctx context.Context, t *testing.T, dialer jsonrpc2_v2.Dialer, client jsonrpc2_v2.Binder, forwarded bool) *jsonrpc2_v2.Connection {
+	if forwarded {
+		l, _ := e.serve(ctx, t, NewForwardBinder(dialer))
+		dialer = l.Dialer()
+	}
+	conn, err := jsonrpc2_v2.Dial(ctx, dialer, client)
+	if err != nil {
+		t.Fatal(err)
+	}
+	e.Conns = append(e.Conns, conn)
+	return conn
+}
+
+func staticClientBinder(client protocol.Client) jsonrpc2_v2.Binder {
+	f := func(context.Context, protocol.Server) protocol.Client { return client }
+	return NewClientBinder(f)
+}
+
+func staticServerBinder(server protocol.Server) jsonrpc2_v2.Binder {
+	f := func(ctx context.Context, client protocol.ClientCloser) protocol.Server {
 		return server
 	}
-	serverBinder := NewServerBinder(newServer)
-	rpcServer, err := jsonrpc2_v2.Serve(ctx, listener, serverBinder)
-	if err != nil {
-		t.Fatal(err)
-	}
-	clientBinder := NewClientBinder(func(context.Context, protocol.Server) protocol.Client { return client })
-	conn, err := jsonrpc2_v2.Dial(ctx, listener.Dialer(), clientBinder)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return testEnv{
-		listener:  listener,
-		rpcServer: rpcServer,
-		conn:      conn,
-	}
+	return NewServerBinder(f)
 }
 
 func TestClientLoggingV2(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := context.Background()
 
-	client := fakeClient{logs: make(chan string, 10)}
-	env := startServing(ctx, t, pingServer{}, client)
-	defer env.Shutdown(t)
-	if err := protocol.ServerDispatcherV2(env.conn).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{}); err != nil {
-		t.Errorf("DidOpen: %v", err)
-	}
-	select {
-	case got := <-client.logs:
-		want := "ping"
-		matched, err := regexp.MatchString(want, got)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !matched {
-			t.Errorf("got log %q, want a log containing %q", got, want)
-		}
-	case <-time.After(1 * time.Second):
-		t.Error("timeout waiting for client log")
+	for name, forwarded := range map[string]bool{
+		"forwarded":  true,
+		"standalone": false,
+	} {
+		t.Run(name, func(t *testing.T) {
+			client := FakeClient{Logs: make(chan string, 10)}
+			env := new(TestEnv)
+			defer env.Shutdown(t)
+			l, _ := env.serve(ctx, t, staticServerBinder(PingServer{}))
+			conn := env.dial(ctx, t, l.Dialer(), staticClientBinder(client), forwarded)
+
+			if err := protocol.ServerDispatcherV2(conn).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{}); err != nil {
+				t.Errorf("DidOpen: %v", err)
+			}
+			select {
+			case got := <-client.Logs:
+				want := "ping"
+				matched, err := regexp.MatchString(want, got)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !matched {
+					t.Errorf("got log %q, want a log containing %q", got, want)
+				}
+			case <-time.After(1 * time.Second):
+				t.Error("timeout waiting for client log")
+			}
+		})
 	}
 }
 
 func TestRequestCancellationV2(t *testing.T) {
 	ctx := context.Background()
 
-	server := waitableServer{
-		started:   make(chan struct{}),
-		completed: make(chan error),
-	}
-	client := fakeClient{logs: make(chan string, 10)}
-	env := startServing(ctx, t, server, client)
-	defer env.Shutdown(t)
+	for name, forwarded := range map[string]bool{
+		"forwarded":  true,
+		"standalone": false,
+	} {
+		t.Run(name, func(t *testing.T) {
+			server := WaitableServer{
+				Started:   make(chan struct{}),
+				Completed: make(chan error),
+			}
+			env := new(TestEnv)
+			defer env.Shutdown(t)
+			l, _ := env.serve(ctx, t, staticServerBinder(server))
+			client := FakeClient{Logs: make(chan string, 10)}
+			conn := env.dial(ctx, t, l.Dialer(), staticClientBinder(client), forwarded)
 
-	sd := protocol.ServerDispatcherV2(env.conn)
-	ctx, cancel := context.WithCancel(ctx)
+			sd := protocol.ServerDispatcherV2(conn)
+			ctx, cancel := context.WithCancel(ctx)
 
-	result := make(chan error)
-	go func() {
-		_, err := sd.Hover(ctx, &protocol.HoverParams{})
-		result <- err
-	}()
-	// Wait for the Hover request to start.
-	<-server.started
-	cancel()
-	if err := <-result; err == nil {
-		t.Error("nil error for cancelled Hover(), want non-nil")
-	}
-	if err := <-server.completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
-		t.Errorf("Hover(): unexpected server-side error %v", err)
+			result := make(chan error)
+			go func() {
+				_, err := sd.Hover(ctx, &protocol.HoverParams{})
+				result <- err
+			}()
+			// Wait for the Hover request to start.
+			<-server.Started
+			cancel()
+			if err := <-result; err == nil {
+				t.Error("nil error for cancelled Hover(), want non-nil")
+			}
+			if err := <-server.Completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
+				t.Errorf("Hover(): unexpected server-side error %v", err)
+			}
+		})
 	}
 }
diff --git a/internal/lsp/lsprpc/commandinterceptor.go b/internal/lsp/lsprpc/commandinterceptor.go
new file mode 100644
index 0000000..5c36af7
--- /dev/null
+++ b/internal/lsp/lsprpc/commandinterceptor.go
@@ -0,0 +1,47 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+	"context"
+	"encoding/json"
+
+	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+	"golang.org/x/tools/internal/lsp/protocol"
+)
+
+// HandlerMiddleware is a middleware that only modifies the jsonrpc2 handler.
+type HandlerMiddleware func(jsonrpc2_v2.Handler) jsonrpc2_v2.Handler
+
+// BindHandler transforms a HandlerMiddleware into a Middleware.
+func BindHandler(hmw HandlerMiddleware) Middleware {
+	return Middleware(func(binder jsonrpc2_v2.Binder) jsonrpc2_v2.Binder {
+		return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
+			opts, err := binder.Bind(ctx, conn)
+			if err != nil {
+				return opts, err
+			}
+			opts.Handler = hmw(opts.Handler)
+			return opts, nil
+		})
+	})
+}
+
+func CommandInterceptor(command string, run func(*protocol.ExecuteCommandParams) (interface{}, error)) Middleware {
+	return BindHandler(func(delegate jsonrpc2_v2.Handler) jsonrpc2_v2.Handler {
+		return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+			if req.Method == "workspace/executeCommand" {
+				var params protocol.ExecuteCommandParams
+				if err := json.Unmarshal(req.Params, &params); err == nil {
+					if params.Command == command {
+						return run(&params)
+					}
+				}
+			}
+
+			return delegate.Handle(ctx, req)
+		})
+	})
+}
diff --git a/internal/lsp/lsprpc/commandinterceptor_test.go b/internal/lsp/lsprpc/commandinterceptor_test.go
new file mode 100644
index 0000000..06550e8
--- /dev/null
+++ b/internal/lsp/lsprpc/commandinterceptor_test.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+	"context"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp/protocol"
+
+	. "golang.org/x/tools/internal/lsp/lsprpc"
+)
+
+func TestCommandInterceptor(t *testing.T) {
+	const command = "foo"
+	caught := false
+	intercept := func(_ *protocol.ExecuteCommandParams) (interface{}, error) {
+		caught = true
+		return map[string]interface{}{}, nil
+	}
+
+	ctx := context.Background()
+	env := new(TestEnv)
+	defer env.Shutdown(t)
+	mw := CommandInterceptor(command, intercept)
+	l, _ := env.serve(ctx, t, mw(noopBinder))
+	conn := env.dial(ctx, t, l.Dialer(), noopBinder, false)
+
+	params := &protocol.ExecuteCommandParams{
+		Command: command,
+	}
+	var res interface{}
+	err := conn.Call(ctx, "workspace/executeCommand", params).Await(ctx, &res)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !caught {
+		t.Errorf("workspace/executeCommand was not intercepted")
+	}
+}
diff --git a/internal/lsp/lsprpc/dialer.go b/internal/lsp/lsprpc/dialer.go
new file mode 100644
index 0000000..713307c
--- /dev/null
+++ b/internal/lsp/lsprpc/dialer.go
@@ -0,0 +1,115 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"time"
+
+	exec "golang.org/x/sys/execabs"
+	"golang.org/x/tools/internal/event"
+	errors "golang.org/x/xerrors"
+)
+
+// AutoNetwork is the pseudo network type used to signal that gopls should use
+// automatic discovery to resolve a remote address.
+const AutoNetwork = "auto"
+
+// An AutoDialer is a jsonrpc2 dialer that understands the 'auto' network.
+type AutoDialer struct {
+	network, addr string // the 'real' network and address
+	isAuto        bool   // whether the server is on the 'auto' network
+
+	executable string
+	argFunc    func(network, addr string) []string
+}
+
+func NewAutoDialer(rawAddr string, argFunc func(network, addr string) []string) (*AutoDialer, error) {
+	d := AutoDialer{
+		argFunc: argFunc,
+	}
+	d.network, d.addr = ParseAddr(rawAddr)
+	if d.network == AutoNetwork {
+		d.isAuto = true
+		bin, err := os.Executable()
+		if err != nil {
+			return nil, errors.Errorf("getting executable: %w", err)
+		}
+		d.executable = bin
+		d.network, d.addr = autoNetworkAddress(bin, d.addr)
+	}
+	return &d, nil
+}
+
+// Dial implements the jsonrpc2.Dialer interface.
+func (d *AutoDialer) Dial(ctx context.Context) (io.ReadWriteCloser, error) {
+	conn, err := d.dialNet(ctx)
+	return conn, err
+}
+
+// TODO(rFindley): remove this once we no longer need to integrate with v1 of
+// the jsonrpc2 package.
+func (d *AutoDialer) dialNet(ctx context.Context) (net.Conn, error) {
+	// Attempt to verify that we own the remote. This is imperfect, but if we can
+	// determine that the remote is owned by a different user, we should fail.
+	ok, err := verifyRemoteOwnership(d.network, d.addr)
+	if err != nil {
+		// If the ownership check itself failed, we fail open but log an error to
+		// the user.
+		event.Error(ctx, "unable to check daemon socket owner, failing open", err)
+	} else if !ok {
+		// We successfully checked that the socket is not owned by us, we fail
+		// closed.
+		return nil, fmt.Errorf("socket %q is owned by a different user", d.addr)
+	}
+	const dialTimeout = 1 * time.Second
+	// Try dialing our remote once, in case it is already running.
+	netConn, err := net.DialTimeout(d.network, d.addr, dialTimeout)
+	if err == nil {
+		return netConn, nil
+	}
+	if d.isAuto && d.argFunc != nil {
+		if d.network == "unix" {
+			// Sometimes the socketfile isn't properly cleaned up when the server
+			// shuts down. Since we have already tried and failed to dial this
+			// address, it should *usually* be safe to remove the socket before
+			// binding to the address.
+			// TODO(rfindley): there is probably a race here if multiple server
+			// instances are simultaneously starting up.
+			if _, err := os.Stat(d.addr); err == nil {
+				if err := os.Remove(d.addr); err != nil {
+					return nil, errors.Errorf("removing remote socket file: %w", err)
+				}
+			}
+		}
+		args := d.argFunc(d.network, d.addr)
+		cmd := exec.Command(d.executable, args...)
+		if err := runRemote(cmd); err != nil {
+			return nil, err
+		}
+	}
+
+	const retries = 5
+	// It can take some time for the newly started server to bind to our address,
+	// so we retry for a bit.
+	for retry := 0; retry < retries; retry++ {
+		startDial := time.Now()
+		netConn, err = net.DialTimeout(d.network, d.addr, dialTimeout)
+		if err == nil {
+			return netConn, nil
+		}
+		event.Log(ctx, fmt.Sprintf("failed attempt #%d to connect to remote: %v\n", retry+2, err))
+		// In case our failure was a fast-failure, ensure we wait at least
+		// f.dialTimeout before trying again.
+		if retry != retries-1 {
+			time.Sleep(dialTimeout - time.Since(startDial))
+		}
+	}
+	return nil, errors.Errorf("dialing remote: %w", err)
+}
diff --git a/internal/lsp/lsprpc/goenv.go b/internal/lsp/lsprpc/goenv.go
new file mode 100644
index 0000000..4b16d8d
--- /dev/null
+++ b/internal/lsp/lsprpc/goenv.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/gocommand"
+	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+	"golang.org/x/tools/internal/lsp/protocol"
+)
+
+func GoEnvMiddleware() (Middleware, error) {
+	return BindHandler(func(delegate jsonrpc2_v2.Handler) jsonrpc2_v2.Handler {
+		return jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+			if req.Method == "initialize" {
+				if err := addGoEnvToInitializeRequestV2(ctx, req); err != nil {
+					event.Error(ctx, "adding go env to initialize", err)
+				}
+			}
+			return delegate.Handle(ctx, req)
+		})
+	}), nil
+}
+
+func addGoEnvToInitializeRequestV2(ctx context.Context, req *jsonrpc2_v2.Request) error {
+	var params protocol.ParamInitialize
+	if err := json.Unmarshal(req.Params, &params); err != nil {
+		return err
+	}
+	var opts map[string]interface{}
+	switch v := params.InitializationOptions.(type) {
+	case nil:
+		opts = make(map[string]interface{})
+	case map[string]interface{}:
+		opts = v
+	default:
+		return fmt.Errorf("unexpected type for InitializationOptions: %T", v)
+	}
+	envOpt, ok := opts["env"]
+	if !ok {
+		envOpt = make(map[string]interface{})
+	}
+	env, ok := envOpt.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("env option is %T, expected a map", envOpt)
+	}
+	goenv, err := getGoEnv(ctx, env)
+	if err != nil {
+		return err
+	}
+	for govar, value := range goenv {
+		env[govar] = value
+	}
+	opts["env"] = env
+	params.InitializationOptions = opts
+	raw, err := json.Marshal(params)
+	if err != nil {
+		return fmt.Errorf("marshaling updated options: %v", err)
+	}
+	req.Params = json.RawMessage(raw)
+	return nil
+}
+
+func getGoEnv(ctx context.Context, env map[string]interface{}) (map[string]string, error) {
+	var runEnv []string
+	for k, v := range env {
+		runEnv = append(runEnv, fmt.Sprintf("%s=%s", k, v))
+	}
+	runner := gocommand.Runner{}
+	output, err := runner.Run(ctx, gocommand.Invocation{
+		Verb: "env",
+		Args: []string{"-json"},
+		Env:  runEnv,
+	})
+	if err != nil {
+		return nil, err
+	}
+	envmap := make(map[string]string)
+	if err := json.Unmarshal(output.Bytes(), &envmap); err != nil {
+		return nil, err
+	}
+	return envmap, nil
+}
diff --git a/internal/lsp/lsprpc/goenv_test.go b/internal/lsp/lsprpc/goenv_test.go
new file mode 100644
index 0000000..cdfe23c
--- /dev/null
+++ b/internal/lsp/lsprpc/goenv_test.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+	"context"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp/protocol"
+	"golang.org/x/tools/internal/testenv"
+
+	. "golang.org/x/tools/internal/lsp/lsprpc"
+)
+
+type initServer struct {
+	protocol.Server
+
+	params *protocol.ParamInitialize
+}
+
+func (s *initServer) Initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
+	s.params = params
+	return &protocol.InitializeResult{}, nil
+}
+
+func TestGoEnvMiddleware(t *testing.T) {
+	testenv.NeedsGo1Point(t, 13)
+
+	ctx := context.Background()
+
+	server := &initServer{}
+	env := new(TestEnv)
+	defer env.Shutdown(t)
+	l, _ := env.serve(ctx, t, staticServerBinder(server))
+	mw, err := GoEnvMiddleware()
+	if err != nil {
+		t.Fatal(err)
+	}
+	binder := mw(NewForwardBinder(l.Dialer()))
+	l, _ = env.serve(ctx, t, binder)
+	conn := env.dial(ctx, t, l.Dialer(), noopBinder, true)
+	dispatch := protocol.ServerDispatcherV2(conn)
+	initParams := &protocol.ParamInitialize{}
+	initParams.InitializationOptions = map[string]interface{}{
+		"env": map[string]interface{}{
+			"GONOPROXY": "example.com",
+		},
+	}
+	if _, err := dispatch.Initialize(ctx, initParams); err != nil {
+		t.Fatal(err)
+	}
+
+	if server.params == nil {
+		t.Fatalf("initialize params are unset")
+	}
+	envOpts := server.params.InitializationOptions.(map[string]interface{})["env"].(map[string]interface{})
+
+	// Check for an arbitrary Go variable. It should be set.
+	if _, ok := envOpts["GOPRIVATE"]; !ok {
+		t.Errorf("Go environment variable GOPRIVATE unset in initialization options")
+	}
+	// Check that the variable present in our user config was not overwritten.
+	if got, want := envOpts["GONOPROXY"], "example.com"; got != want {
+		t.Errorf("GONOPROXY=%q, want %q", got, want)
+	}
+}
diff --git a/internal/lsp/lsprpc/lsprpc.go b/internal/lsp/lsprpc/lsprpc.go
index 730f9f7..9177078 100644
--- a/internal/lsp/lsprpc/lsprpc.go
+++ b/internal/lsp/lsprpc/lsprpc.go
@@ -20,9 +20,7 @@
 	"time"
 
 	"golang.org/x/tools/internal/event"
-	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/jsonrpc2"
-	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
 	"golang.org/x/tools/internal/lsp"
 	"golang.org/x/tools/internal/lsp/cache"
 	"golang.org/x/tools/internal/lsp/command"
@@ -32,10 +30,6 @@
 	errors "golang.org/x/xerrors"
 )
 
-// AutoNetwork is the pseudo network type used to signal that gopls should use
-// automatic discovery to resolve a remote address.
-const AutoNetwork = "auto"
-
 // Unique identifiers for client/server.
 var serverIndex int64
 
@@ -114,13 +108,7 @@
 // be instrumented with telemetry, and want to be able to in some cases hijack
 // the jsonrpc2 connection with the daemon.
 type Forwarder struct {
-	network, addr string
-
-	// goplsPath is the path to the current executing gopls binary.
-	goplsPath string
-
-	// configuration for the auto-started gopls remote.
-	remoteConfig remoteConfig
+	dialer *AutoDialer
 
 	mu sync.Mutex
 	// Hold on to the server connection so that we can redo the handshake if any
@@ -129,68 +117,19 @@
 	serverID   string
 }
 
-type remoteConfig struct {
-	debug         string
-	listenTimeout time.Duration
-	logfile       string
-}
-
-// A RemoteOption configures the behavior of the auto-started remote.
-type RemoteOption interface {
-	set(*remoteConfig)
-}
-
-// RemoteDebugAddress configures the address used by the auto-started Gopls daemon
-// for serving debug information.
-type RemoteDebugAddress string
-
-func (d RemoteDebugAddress) set(cfg *remoteConfig) {
-	cfg.debug = string(d)
-}
-
-// RemoteListenTimeout configures the amount of time the auto-started gopls
-// daemon will wait with no client connections before shutting down.
-type RemoteListenTimeout time.Duration
-
-func (d RemoteListenTimeout) set(cfg *remoteConfig) {
-	cfg.listenTimeout = time.Duration(d)
-}
-
-// RemoteLogfile configures the logfile location for the auto-started gopls
-// daemon.
-type RemoteLogfile string
-
-func (l RemoteLogfile) set(cfg *remoteConfig) {
-	cfg.logfile = string(l)
-}
-
-func defaultRemoteConfig() remoteConfig {
-	return remoteConfig{
-		listenTimeout: 1 * time.Minute,
-	}
-}
-
 // NewForwarder creates a new Forwarder, ready to forward connections to the
-// remote server specified by network and addr.
-func NewForwarder(network, addr string, opts ...RemoteOption) *Forwarder {
-	gp, err := os.Executable()
+// remote server specified by rawAddr. If provided and rawAddr indicates an
+// 'automatic' address (starting with 'auto;'), argFunc may be used to start a
+// remote server for the auto-discovered address.
+func NewForwarder(rawAddr string, argFunc func(network, address string) []string) (*Forwarder, error) {
+	dialer, err := NewAutoDialer(rawAddr, argFunc)
 	if err != nil {
-		log.Printf("error getting gopls path for forwarder: %v", err)
-		gp = ""
+		return nil, err
 	}
-
-	rcfg := defaultRemoteConfig()
-	for _, opt := range opts {
-		opt.set(&rcfg)
-	}
-
 	fwd := &Forwarder{
-		network:      network,
-		addr:         addr,
-		goplsPath:    gp,
-		remoteConfig: rcfg,
+		dialer: dialer,
 	}
-	return fwd
+	return fwd, nil
 }
 
 // QueryServerState queries the server state of the current server.
@@ -248,7 +187,7 @@
 func (f *Forwarder) ServeStream(ctx context.Context, clientConn jsonrpc2.Conn) error {
 	client := protocol.ClientDispatcher(clientConn)
 
-	netConn, err := f.connectToRemote(ctx)
+	netConn, err := f.dialer.dialNet(ctx)
 	if err != nil {
 		return errors.Errorf("forwarder: connecting to remote: %w", err)
 	}
@@ -294,19 +233,19 @@
 	return err
 }
 
-func (f *Forwarder) Binder() *ForwardBinder {
-	network, address := realNetworkAddress(f.network, f.addr, f.goplsPath)
-	dialer := jsonrpc2_v2.NetDialer(network, address, net.Dialer{
-		Timeout: 5 * time.Second,
-	})
-	return NewForwardBinder(dialer)
-}
-
+// TODO(rfindley): remove this handshaking in favor of middleware.
 func (f *Forwarder) handshake(ctx context.Context) {
+	// This call to os.Execuable is redundant, and will be eliminated by the
+	// transition to the V2 API.
+	goplsPath, err := os.Executable()
+	if err != nil {
+		event.Error(ctx, "getting executable for handshake", err)
+		goplsPath = ""
+	}
 	var (
 		hreq = handshakeRequest{
 			ServerID:  f.serverID,
-			GoplsPath: f.goplsPath,
+			GoplsPath: goplsPath,
 		}
 		hresp handshakeResponse
 	)
@@ -319,8 +258,8 @@
 		// here.  Handshakes have become functional in nature.
 		event.Error(ctx, "forwarder: gopls handshake failed", err)
 	}
-	if hresp.GoplsPath != f.goplsPath {
-		event.Error(ctx, "", fmt.Errorf("forwarder: gopls path mismatch: forwarder is %q, remote is %q", f.goplsPath, hresp.GoplsPath))
+	if hresp.GoplsPath != goplsPath {
+		event.Error(ctx, "", fmt.Errorf("forwarder: gopls path mismatch: forwarder is %q, remote is %q", goplsPath, hresp.GoplsPath))
 	}
 	event.Log(ctx, "New server",
 		tag.NewServer.Of(f.serverID),
@@ -331,108 +270,12 @@
 	)
 }
 
-func (f *Forwarder) connectToRemote(ctx context.Context) (net.Conn, error) {
-	return connectToRemote(ctx, f.network, f.addr, f.goplsPath, f.remoteConfig)
-}
-
-func ConnectToRemote(ctx context.Context, addr string, opts ...RemoteOption) (net.Conn, error) {
-	rcfg := defaultRemoteConfig()
-	for _, opt := range opts {
-		opt.set(&rcfg)
-	}
-	// This is not strictly necessary, as it won't be used if not connecting to
-	// the 'auto' remote.
-	goplsPath, err := os.Executable()
+func ConnectToRemote(ctx context.Context, addr string) (net.Conn, error) {
+	dialer, err := NewAutoDialer(addr, nil)
 	if err != nil {
-		return nil, fmt.Errorf("unable to resolve gopls path: %v", err)
+		return nil, err
 	}
-	network, address := ParseAddr(addr)
-	return connectToRemote(ctx, network, address, goplsPath, rcfg)
-}
-
-func realNetworkAddress(inNetwork, inAddr, goplsPath string) (network, address string) {
-	if inNetwork != AutoNetwork {
-		return inNetwork, inAddr
-	}
-	// The "auto" network is a fake network used for service discovery. It
-	// resolves a known address based on gopls binary path.
-	return autoNetworkAddress(goplsPath, inAddr)
-}
-
-func connectToRemote(ctx context.Context, inNetwork, inAddr, goplsPath string, rcfg remoteConfig) (net.Conn, error) {
-	var (
-		netConn          net.Conn
-		err              error
-		network, address = realNetworkAddress(inNetwork, inAddr, goplsPath)
-	)
-	// Attempt to verify that we own the remote. This is imperfect, but if we can
-	// determine that the remote is owned by a different user, we should fail.
-	ok, err := verifyRemoteOwnership(network, address)
-	if err != nil {
-		// If the ownership check itself failed, we fail open but log an error to
-		// the user.
-		event.Error(ctx, "unable to check daemon socket owner, failing open", err)
-	} else if !ok {
-		// We successfully checked that the socket is not owned by us, we fail
-		// closed.
-		return nil, fmt.Errorf("socket %q is owned by a different user", address)
-	}
-	const dialTimeout = 1 * time.Second
-	// Try dialing our remote once, in case it is already running.
-	netConn, err = net.DialTimeout(network, address, dialTimeout)
-	if err == nil {
-		return netConn, nil
-	}
-	// If our remote is on the 'auto' network, start it if it doesn't exist.
-	if inNetwork == AutoNetwork {
-		if goplsPath == "" {
-			return nil, fmt.Errorf("cannot auto-start remote: gopls path is unknown")
-		}
-		if network == "unix" {
-			// Sometimes the socketfile isn't properly cleaned up when gopls shuts
-			// down. Since we have already tried and failed to dial this address, it
-			// should *usually* be safe to remove the socket before binding to the
-			// address.
-			// TODO(rfindley): there is probably a race here if multiple gopls
-			// instances are simultaneously starting up.
-			if _, err := os.Stat(address); err == nil {
-				if err := os.Remove(address); err != nil {
-					return nil, errors.Errorf("removing remote socket file: %w", err)
-				}
-			}
-		}
-		args := []string{"serve",
-			"-listen", fmt.Sprintf(`%s;%s`, network, address),
-			"-listen.timeout", rcfg.listenTimeout.String(),
-		}
-		if rcfg.logfile != "" {
-			args = append(args, "-logfile", rcfg.logfile)
-		}
-		if rcfg.debug != "" {
-			args = append(args, "-debug", rcfg.debug)
-		}
-		if err := startRemote(goplsPath, args...); err != nil {
-			return nil, errors.Errorf("startRemote(%q, %v): %w", goplsPath, args, err)
-		}
-	}
-
-	const retries = 5
-	// It can take some time for the newly started server to bind to our address,
-	// so we retry for a bit.
-	for retry := 0; retry < retries; retry++ {
-		startDial := time.Now()
-		netConn, err = net.DialTimeout(network, address, dialTimeout)
-		if err == nil {
-			return netConn, nil
-		}
-		event.Log(ctx, fmt.Sprintf("failed attempt #%d to connect to remote: %v\n", retry+2, err))
-		// In case our failure was a fast-failure, ensure we wait at least
-		// f.dialTimeout before trying again.
-		if retry != retries-1 {
-			time.Sleep(dialTimeout - time.Since(startDial))
-		}
-	}
-	return nil, errors.Errorf("dialing remote: %w", err)
+	return dialer.dialNet(ctx)
 }
 
 // handler intercepts messages to the daemon to enrich them with local
@@ -516,27 +359,6 @@
 	return jsonrpc2.NewCall(call.ID(), "initialize", params)
 }
 
-func getGoEnv(ctx context.Context, env map[string]interface{}) (map[string]string, error) {
-	var runEnv []string
-	for k, v := range env {
-		runEnv = append(runEnv, fmt.Sprintf("%s=%s", k, v))
-	}
-	runner := gocommand.Runner{}
-	output, err := runner.Run(ctx, gocommand.Invocation{
-		Verb: "env",
-		Args: []string{"-json"},
-		Env:  runEnv,
-	})
-	if err != nil {
-		return nil, err
-	}
-	envmap := make(map[string]string)
-	if err := json.Unmarshal(output.Bytes(), &envmap); err != nil {
-		return nil, err
-	}
-	return envmap, nil
-}
-
 func (f *Forwarder) replyWithDebugAddress(outerCtx context.Context, r jsonrpc2.Replier, args command.DebuggingArgs) jsonrpc2.Replier {
 	di := debug.GetInstance(outerCtx)
 	if di == nil {
diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/internal/lsp/lsprpc/lsprpc_test.go
index 2f2cf1a..24decbe 100644
--- a/internal/lsp/lsprpc/lsprpc_test.go
+++ b/internal/lsp/lsprpc/lsprpc_test.go
@@ -22,14 +22,14 @@
 	"golang.org/x/tools/internal/testenv"
 )
 
-type fakeClient struct {
+type FakeClient struct {
 	protocol.Client
 
-	logs chan string
+	Logs chan string
 }
 
-func (c fakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
-	c.logs <- params.Message
+func (c FakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
+	c.Logs <- params.Message
 	return nil
 }
 
@@ -43,9 +43,9 @@
 	return nil
 }
 
-type pingServer struct{ fakeServer }
+type PingServer struct{ fakeServer }
 
-func (s pingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
+func (s PingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
 	event.Log(ctx, "ping")
 	return nil
 }
@@ -54,8 +54,8 @@
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	server := pingServer{}
-	client := fakeClient{logs: make(chan string, 10)}
+	server := PingServer{}
+	client := FakeClient{Logs: make(chan string, 10)}
 
 	ctx = debug.WithInstance(ctx, "", "")
 	ss := NewStreamServer(cache.New(nil), false)
@@ -70,7 +70,7 @@
 	}
 
 	select {
-	case got := <-client.logs:
+	case got := <-client.Logs:
 		want := "ping"
 		matched, err := regexp.MatchString(want, got)
 		if err != nil {
@@ -84,20 +84,20 @@
 	}
 }
 
-// waitableServer instruments LSP request so that we can control their timing.
+// WaitableServer instruments LSP requests so that we can control their timing.
 // The requests chosen are arbitrary: we simply needed one that blocks, and
 // another that doesn't.
-type waitableServer struct {
+type WaitableServer struct {
 	fakeServer
 
-	started   chan struct{}
-	completed chan error
+	Started   chan struct{}
+	Completed chan error
 }
 
-func (s waitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (_ *protocol.Hover, err error) {
-	s.started <- struct{}{}
+func (s WaitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (_ *protocol.Hover, err error) {
+	s.Started <- struct{}{}
 	defer func() {
-		s.completed <- err
+		s.Completed <- err
 	}()
 	select {
 	case <-ctx.Done():
@@ -107,7 +107,7 @@
 	return &protocol.Hover{}, nil
 }
 
-func (s waitableServer) Resolve(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
+func (s WaitableServer) Resolve(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
 	return item, nil
 }
 
@@ -126,7 +126,10 @@
 	tsDirect := servertest.NewTCPServer(serveCtx, ss, nil)
 
 	forwarderCtx := debug.WithInstance(ctx, "", "")
-	forwarder := NewForwarder("tcp", tsDirect.Addr)
+	forwarder, err := NewForwarder("tcp;"+tsDirect.Addr, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
 	tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder, nil)
 	return tsDirect, tsForwarded, func() {
 		checkClose(t, tsDirect.Close)
@@ -136,9 +139,9 @@
 
 func TestRequestCancellation(t *testing.T) {
 	ctx := context.Background()
-	server := waitableServer{
-		started:   make(chan struct{}),
-		completed: make(chan error),
+	server := WaitableServer{
+		Started:   make(chan struct{}),
+		Completed: make(chan error),
 	}
 	tsDirect, tsForwarded, cleanup := setupForwarding(ctx, t, server)
 	defer cleanup()
@@ -167,12 +170,12 @@
 				result <- err
 			}()
 			// Wait for the Hover request to start.
-			<-server.started
+			<-server.Started
 			cancel()
 			if err := <-result; err == nil {
 				t.Error("nil error for cancelled Hover(), want non-nil")
 			}
-			if err := <-server.completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
+			if err := <-server.Completed; err == nil || !strings.Contains(err.Error(), "cancelled hover") {
 				t.Errorf("Hover(): unexpected server-side error %v", err)
 			}
 		})
@@ -218,7 +221,10 @@
 	ss := NewStreamServer(cache, false)
 	tsBackend := servertest.NewTCPServer(serverCtx, ss, nil)
 
-	forwarder := NewForwarder("tcp", tsBackend.Addr)
+	forwarder, err := NewForwarder("tcp;"+tsBackend.Addr, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
 	tsForwarder := servertest.NewPipeServer(clientCtx, forwarder, nil)
 
 	conn1 := tsForwarder.Connect(clientCtx)
diff --git a/internal/lsp/lsprpc/middleware.go b/internal/lsp/lsprpc/middleware.go
new file mode 100644
index 0000000..2ee83a2
--- /dev/null
+++ b/internal/lsp/lsprpc/middleware.go
@@ -0,0 +1,145 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc
+
+import (
+	"context"
+	"encoding/json"
+	"sync"
+
+	"golang.org/x/tools/internal/event"
+	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+	"golang.org/x/xerrors"
+)
+
+// Metadata holds arbitrary data transferred between jsonrpc2 peers.
+type Metadata map[string]interface{}
+
+// PeerInfo holds information about a peering between jsonrpc2 servers.
+type PeerInfo struct {
+	// RemoteID is the identity of the current server on its peer.
+	RemoteID int64
+
+	// LocalID is the identity of the peer on the current server.
+	LocalID int64
+
+	// IsClient reports whether the peer is a client. If false, the peer is a
+	// server.
+	IsClient bool
+
+	// Metadata holds arbitrary information provided by the peer.
+	Metadata Metadata
+}
+
+// Handshaker handles both server and client handshaking over jsonrpc2. To
+// instrument server-side handshaking, use Handshaker.Middleware. To instrument
+// client-side handshaking, call Handshaker.ClientHandshake for any new
+// client-side connections.
+type Handshaker struct {
+	// Metadata will be shared with peers via handshaking.
+	Metadata Metadata
+
+	mu     sync.Mutex
+	prevID int64
+	peers  map[int64]PeerInfo
+}
+
+// Peers returns the peer info this handshaker knows about by way of either the
+// server-side handshake middleware, or client-side handshakes.
+func (h *Handshaker) Peers() []PeerInfo {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	var c []PeerInfo
+	for _, v := range h.peers {
+		c = append(c, v)
+	}
+	return c
+}
+
+// Middleware is a jsonrpc2 middleware function to augment connection binding
+// to handle the handshake method, and record disconnections.
+func (h *Handshaker) Middleware(inner jsonrpc2_v2.Binder) jsonrpc2_v2.Binder {
+	return BinderFunc(func(ctx context.Context, conn *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
+		opts, err := inner.Bind(ctx, conn)
+		if err != nil {
+			return opts, err
+		}
+
+		localID := h.nextID()
+		info := &PeerInfo{
+			RemoteID: localID,
+			Metadata: h.Metadata,
+		}
+
+		// Wrap the delegated handler to accept the handshake.
+		delegate := opts.Handler
+		opts.Handler = jsonrpc2_v2.HandlerFunc(func(ctx context.Context, req *jsonrpc2_v2.Request) (interface{}, error) {
+			if req.Method == handshakeMethod {
+				var peerInfo PeerInfo
+				if err := json.Unmarshal(req.Params, &peerInfo); err != nil {
+					return nil, xerrors.Errorf("%w: unmarshaling client info: %v", jsonrpc2_v2.ErrInvalidParams, err)
+				}
+				peerInfo.LocalID = localID
+				peerInfo.IsClient = true
+				h.recordPeer(peerInfo)
+				return info, nil
+			}
+			return delegate.Handle(ctx, req)
+		})
+
+		// Record the dropped client.
+		go h.cleanupAtDisconnect(conn, localID)
+
+		return opts, nil
+	})
+}
+
+// ClientHandshake performs a client-side handshake with the server at the
+// other end of conn, recording the server's peer info and watching for conn's
+// disconnection.
+func (h *Handshaker) ClientHandshake(ctx context.Context, conn *jsonrpc2_v2.Connection) {
+	localID := h.nextID()
+	info := &PeerInfo{
+		RemoteID: localID,
+		Metadata: h.Metadata,
+	}
+
+	call := conn.Call(ctx, handshakeMethod, info)
+	var serverInfo PeerInfo
+	if err := call.Await(ctx, &serverInfo); err != nil {
+		event.Error(ctx, "performing handshake", err)
+		return
+	}
+	serverInfo.LocalID = localID
+	h.recordPeer(serverInfo)
+
+	go h.cleanupAtDisconnect(conn, localID)
+}
+
+func (h *Handshaker) nextID() int64 {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.prevID++
+	return h.prevID
+}
+
+func (h *Handshaker) cleanupAtDisconnect(conn *jsonrpc2_v2.Connection, peerID int64) {
+	conn.Wait()
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	delete(h.peers, peerID)
+}
+
+func (h *Handshaker) recordPeer(info PeerInfo) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if h.peers == nil {
+		h.peers = make(map[int64]PeerInfo)
+	}
+	h.peers[info.LocalID] = info
+}
diff --git a/internal/lsp/lsprpc/middleware_test.go b/internal/lsp/lsprpc/middleware_test.go
new file mode 100644
index 0000000..a385f10
--- /dev/null
+++ b/internal/lsp/lsprpc/middleware_test.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lsprpc_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+	"time"
+
+	jsonrpc2_v2 "golang.org/x/tools/internal/jsonrpc2_v2"
+	. "golang.org/x/tools/internal/lsp/lsprpc"
+)
+
+var noopBinder = BinderFunc(func(context.Context, *jsonrpc2_v2.Connection) (jsonrpc2_v2.ConnectionOptions, error) {
+	return jsonrpc2_v2.ConnectionOptions{}, nil
+})
+
+func TestHandshakeMiddleware(t *testing.T) {
+	sh := &Handshaker{
+		Metadata: Metadata{
+			"answer": 42,
+		},
+	}
+	ctx := context.Background()
+	env := new(TestEnv)
+	defer env.Shutdown(t)
+	l, _ := env.serve(ctx, t, sh.Middleware(noopBinder))
+	conn := env.dial(ctx, t, l.Dialer(), noopBinder, false)
+	ch := &Handshaker{
+		Metadata: Metadata{
+			"question": 6 * 9,
+		},
+	}
+
+	check := func(connected bool) error {
+		clients := sh.Peers()
+		servers := ch.Peers()
+		want := 0
+		if connected {
+			want = 1
+		}
+		if got := len(clients); got != want {
+			return fmt.Errorf("got %d clients on the server, want %d", got, want)
+		}
+		if got := len(servers); got != want {
+			return fmt.Errorf("got %d servers on the client, want %d", got, want)
+		}
+		if !connected {
+			return nil
+		}
+		client := clients[0]
+		server := servers[0]
+		if _, ok := client.Metadata["question"]; !ok {
+			return errors.New("no client metadata")
+		}
+		if _, ok := server.Metadata["answer"]; !ok {
+			return errors.New("no server metadata")
+		}
+		if client.LocalID != server.RemoteID {
+			return fmt.Errorf("client.LocalID == %d, server.PeerID == %d", client.LocalID, server.RemoteID)
+		}
+		if client.RemoteID != server.LocalID {
+			return fmt.Errorf("client.PeerID == %d, server.LocalID == %d", client.RemoteID, server.LocalID)
+		}
+		return nil
+	}
+
+	if err := check(false); err != nil {
+		t.Fatalf("before handshake: %v", err)
+	}
+	ch.ClientHandshake(ctx, conn)
+	if err := check(true); err != nil {
+		t.Fatalf("after handshake: %v", err)
+	}
+	conn.Close()
+	// Wait for up to ~2s for connections to get cleaned up.
+	delay := 25 * time.Millisecond
+	for retries := 3; retries >= 0; retries-- {
+		time.Sleep(delay)
+		err := check(false)
+		if err == nil {
+			return
+		}
+		if retries == 0 {
+			t.Fatalf("after closing connection: %v", err)
+		}
+		delay *= 4
+	}
+}
diff --git a/internal/lsp/mod/diagnostics.go b/internal/lsp/mod/diagnostics.go
index 6495aeb..625bc63 100644
--- a/internal/lsp/mod/diagnostics.go
+++ b/internal/lsp/mod/diagnostics.go
@@ -88,7 +88,7 @@
 	// Packages in the workspace can contribute diagnostics to go.mod files.
 	wspkgs, err := snapshot.WorkspacePackages(ctx)
 	if err != nil && !source.IsNonFatalGoModError(err) {
-		event.Error(ctx, "diagnosing go.mod", err)
+		event.Error(ctx, fmt.Sprintf("workspace packages: diagnosing %s", pm.URI), err)
 	}
 	if err == nil {
 		for _, pkg := range wspkgs {
@@ -102,7 +102,7 @@
 
 	tidied, err := snapshot.ModTidy(ctx, pm)
 	if err != nil && !source.IsNonFatalGoModError(err) {
-		event.Error(ctx, "diagnosing go.mod", err)
+		event.Error(ctx, fmt.Sprintf("tidy: diagnosing %s", pm.URI), err)
 	}
 	if err == nil {
 		for _, d := range tidied.Diagnostics {
diff --git a/internal/lsp/progress.go b/internal/lsp/progress/progress.go
similarity index 79%
rename from internal/lsp/progress.go
rename to internal/lsp/progress/progress.go
index 719e9c3..18e1bd0 100644
--- a/internal/lsp/progress.go
+++ b/internal/lsp/progress/progress.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package lsp
+package progress
 
 import (
 	"context"
@@ -18,22 +18,26 @@
 	errors "golang.org/x/xerrors"
 )
 
-type progressTracker struct {
+type Tracker struct {
 	client                   protocol.Client
 	supportsWorkDoneProgress bool
 
 	mu         sync.Mutex
-	inProgress map[protocol.ProgressToken]*workDone
+	inProgress map[protocol.ProgressToken]*WorkDone
 }
 
-func newProgressTracker(client protocol.Client) *progressTracker {
-	return &progressTracker{
+func NewTracker(client protocol.Client) *Tracker {
+	return &Tracker{
 		client:     client,
-		inProgress: make(map[protocol.ProgressToken]*workDone),
+		inProgress: make(map[protocol.ProgressToken]*WorkDone),
 	}
 }
 
-// start notifies the client of work being done on the server. It uses either
+func (tracker *Tracker) SetSupportsWorkDoneProgress(b bool) {
+	tracker.supportsWorkDoneProgress = b
+}
+
+// Start notifies the client of work being done on the server. It uses either
 // ShowMessage RPCs or $/progress messages, depending on the capabilities of
 // the client.  The returned WorkDone handle may be used to report incremental
 // progress, and to report work completion. In particular, it is an error to
@@ -59,8 +63,8 @@
 //    // Do the work...
 //  }
 //
-func (t *progressTracker) start(ctx context.Context, title, message string, token protocol.ProgressToken, cancel func()) *workDone {
-	wd := &workDone{
+func (t *Tracker) Start(ctx context.Context, title, message string, token protocol.ProgressToken, cancel func()) *WorkDone {
+	wd := &WorkDone{
 		ctx:    xcontext.Detach(ctx),
 		client: t.client,
 		token:  token,
@@ -119,7 +123,7 @@
 	return wd
 }
 
-func (t *progressTracker) cancel(ctx context.Context, token protocol.ProgressToken) error {
+func (t *Tracker) Cancel(ctx context.Context, token protocol.ProgressToken) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	wd, ok := t.inProgress[token]
@@ -133,9 +137,9 @@
 	return nil
 }
 
-// workDone represents a unit of work that is reported to the client via the
+// WorkDone represents a unit of work that is reported to the client via the
 // progress API.
-type workDone struct {
+type WorkDone struct {
 	// ctx is detached, for sending $/progress updates.
 	ctx    context.Context
 	client protocol.Client
@@ -153,7 +157,11 @@
 	cleanup func()
 }
 
-func (wd *workDone) doCancel() {
+func (wd *WorkDone) Token() protocol.ProgressToken {
+	return wd.token
+}
+
+func (wd *WorkDone) doCancel() {
 	wd.cancelMu.Lock()
 	defer wd.cancelMu.Unlock()
 	if !wd.cancelled {
@@ -162,7 +170,7 @@
 }
 
 // report reports an update on WorkDone report back to the client.
-func (wd *workDone) report(message string, percentage float64) {
+func (wd *WorkDone) Report(message string, percentage float64) {
 	if wd == nil {
 		return
 	}
@@ -196,7 +204,7 @@
 }
 
 // end reports a workdone completion back to the client.
-func (wd *workDone) end(message string) {
+func (wd *WorkDone) End(message string) {
 	if wd == nil {
 		return
 	}
@@ -227,27 +235,35 @@
 	}
 }
 
-// eventWriter writes every incoming []byte to
+// EventWriter writes every incoming []byte to
 // event.Print with the operation=generate tag
 // to distinguish its logs from others.
-type eventWriter struct {
+type EventWriter struct {
 	ctx       context.Context
 	operation string
 }
 
-func (ew *eventWriter) Write(p []byte) (n int, err error) {
+func NewEventWriter(ctx context.Context, operation string) *EventWriter {
+	return &EventWriter{ctx: ctx, operation: operation}
+}
+
+func (ew *EventWriter) Write(p []byte) (n int, err error) {
 	event.Log(ew.ctx, string(p), tag.Operation.Of(ew.operation))
 	return len(p), nil
 }
 
-// workDoneWriter wraps a workDone handle to provide a Writer interface,
+// WorkDoneWriter wraps a WorkDone handle to provide a Writer interface,
 // so that workDone reporting can more easily be hooked into commands.
-type workDoneWriter struct {
-	wd *workDone
+type WorkDoneWriter struct {
+	wd *WorkDone
 }
 
-func (wdw workDoneWriter) Write(p []byte) (n int, err error) {
-	wdw.wd.report(string(p), 0)
+func NewWorkDoneWriter(wd *WorkDone) *WorkDoneWriter {
+	return &WorkDoneWriter{wd: wd}
+}
+
+func (wdw WorkDoneWriter) Write(p []byte) (n int, err error) {
+	wdw.wd.Report(string(p), 0)
 	// Don't fail just because of a failure to report progress.
 	return len(p), nil
 }
diff --git a/internal/lsp/progress_test.go b/internal/lsp/progress/progress_test.go
similarity index 90%
rename from internal/lsp/progress_test.go
rename to internal/lsp/progress/progress_test.go
index 40ca3d2..b3c8219 100644
--- a/internal/lsp/progress_test.go
+++ b/internal/lsp/progress/progress_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package lsp
+package progress
 
 import (
 	"context"
@@ -63,10 +63,10 @@
 	return nil
 }
 
-func setup(token protocol.ProgressToken) (context.Context, *progressTracker, *fakeClient) {
+func setup(token protocol.ProgressToken) (context.Context, *Tracker, *fakeClient) {
 	c := &fakeClient{}
-	tracker := newProgressTracker(c)
-	tracker.supportsWorkDoneProgress = true
+	tracker := NewTracker(c)
+	tracker.SetSupportsWorkDoneProgress(true)
 	return context.Background(), tracker, c
 }
 
@@ -113,7 +113,7 @@
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 			tracker.supportsWorkDoneProgress = test.supported
-			work := tracker.start(ctx, "work", "message", test.token, nil)
+			work := tracker.Start(ctx, "work", "message", test.token, nil)
 			client.mu.Lock()
 			gotCreated, gotBegun := client.created, client.begun
 			client.mu.Unlock()
@@ -124,14 +124,14 @@
 				t.Errorf("got %d work begun, want %d", gotBegun, test.wantBegun)
 			}
 			// Ignore errors: this is just testing the reporting behavior.
-			work.report("report", 50)
+			work.Report("report", 50)
 			client.mu.Lock()
 			gotReported := client.reported
 			client.mu.Unlock()
 			if gotReported != test.wantReported {
 				t.Errorf("got %d progress reports, want %d", gotReported, test.wantCreated)
 			}
-			work.end("done")
+			work.End("done")
 			client.mu.Lock()
 			gotEnded, gotMessages := client.ended, client.messages
 			client.mu.Unlock()
@@ -150,8 +150,8 @@
 		ctx, tracker, _ := setup(token)
 		var canceled bool
 		cancel := func() { canceled = true }
-		work := tracker.start(ctx, "work", "message", token, cancel)
-		if err := tracker.cancel(ctx, work.token); err != nil {
+		work := tracker.Start(ctx, "work", "message", token, cancel)
+		if err := tracker.Cancel(ctx, work.Token()); err != nil {
 			t.Fatal(err)
 		}
 		if !canceled {
diff --git a/internal/lsp/protocol/tsclient.go b/internal/lsp/protocol/tsclient.go
index 24199a1..5cdfc9b 100644
--- a/internal/lsp/protocol/tsclient.go
+++ b/internal/lsp/protocol/tsclient.go
@@ -6,8 +6,8 @@
 
 // Package protocol contains data types and code for LSP jsonrpcs
 // generated automatically from vscode-languageserver-node
-// commit: 2645fb54ea1e764aff71dee0ecc8aceff3aabf56
-// last fetched Tue May 18 2021 08:24:56 GMT-0400 (Eastern Daylight Time)
+// commit: 092c2afc3ad7e4d2b03fe8ac0deb418ec4276915
+// last fetched Sat Jul 03 2021 10:17:05 GMT-0700 (Pacific Daylight Time)
 
 // Code generated (see typescript/README.md) DO NOT EDIT.
 
diff --git a/internal/lsp/protocol/tsprotocol.go b/internal/lsp/protocol/tsprotocol.go
index 1c4e83e..209da9b 100644
--- a/internal/lsp/protocol/tsprotocol.go
+++ b/internal/lsp/protocol/tsprotocol.go
@@ -4,8 +4,8 @@
 
 // Package protocol contains data types and code for LSP jsonrpcs
 // generated automatically from vscode-languageserver-node
-// commit: 2645fb54ea1e764aff71dee0ecc8aceff3aabf56
-// last fetched Tue May 18 2021 08:24:56 GMT-0400 (Eastern Daylight Time)
+// commit: 092c2afc3ad7e4d2b03fe8ac0deb418ec4276915
+// last fetched Sat Jul 03 2021 10:17:05 GMT-0700 (Pacific Daylight Time)
 package protocol
 
 // Code generated (see typescript/README.md) DO NOT EDIT.
diff --git a/internal/lsp/protocol/tsserver.go b/internal/lsp/protocol/tsserver.go
index 49aaf0c..948250c 100644
--- a/internal/lsp/protocol/tsserver.go
+++ b/internal/lsp/protocol/tsserver.go
@@ -6,8 +6,8 @@
 
 // Package protocol contains data types and code for LSP jsonrpcs
 // generated automatically from vscode-languageserver-node
-// commit: 2645fb54ea1e764aff71dee0ecc8aceff3aabf56
-// last fetched Tue May 18 2021 08:24:56 GMT-0400 (Eastern Daylight Time)
+// commit: 092c2afc3ad7e4d2b03fe8ac0deb418ec4276915
+// last fetched Sat Jul 03 2021 10:17:05 GMT-0700 (Pacific Daylight Time)
 
 // Code generated (see typescript/README.md) DO NOT EDIT.
 
diff --git a/internal/lsp/protocol/typescript/util.ts b/internal/lsp/protocol/typescript/util.ts
index f4a12b8..08b9204 100644
--- a/internal/lsp/protocol/typescript/util.ts
+++ b/internal/lsp/protocol/typescript/util.ts
@@ -15,7 +15,7 @@
   `${dir}/${srcDir}/protocol/src/browser/main.ts`, `${dir}${srcDir}/types/src/main.ts`,
   `${dir}${srcDir}/jsonrpc/src/node/main.ts`
 ];
-export const gitHash = '2645fb54ea1e764aff71dee0ecc8aceff3aabf56';
+export const gitHash = '092c2afc3ad7e4d2b03fe8ac0deb418ec4276915';
 let outFname = 'tsprotocol.go';
 let fda: number, fdb: number, fde: number;  // file descriptors
 
diff --git a/internal/lsp/regtest/expectation.go b/internal/lsp/regtest/expectation.go
index 748e698..8fb6afb 100644
--- a/internal/lsp/regtest/expectation.go
+++ b/internal/lsp/regtest/expectation.go
@@ -29,7 +29,7 @@
 var (
 	// InitialWorkspaceLoad is an expectation that the workspace initial load has
 	// completed. It is verified via workdone reporting.
-	InitialWorkspaceLoad = CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromInitialWorkspaceLoad), 1)
+	InitialWorkspaceLoad = CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromInitialWorkspaceLoad), 1, false)
 )
 
 // A Verdict is the result of checking an expectation against the current
@@ -79,24 +79,30 @@
 
 // OnceMet returns an Expectation that, once the precondition is met, asserts
 // that mustMeet is met.
-func OnceMet(precondition Expectation, mustMeet Expectation) *SimpleExpectation {
+func OnceMet(precondition Expectation, mustMeets ...Expectation) *SimpleExpectation {
 	check := func(s State) Verdict {
 		switch pre := precondition.Check(s); pre {
 		case Unmeetable:
 			return Unmeetable
 		case Met:
-			verdict := mustMeet.Check(s)
-			if verdict != Met {
-				return Unmeetable
+			for _, mustMeet := range mustMeets {
+				verdict := mustMeet.Check(s)
+				if verdict != Met {
+					return Unmeetable
+				}
 			}
 			return Met
 		default:
 			return Unmet
 		}
 	}
+	var descriptions []string
+	for _, mustMeet := range mustMeets {
+		descriptions = append(descriptions, mustMeet.Description())
+	}
 	return &SimpleExpectation{
 		check:       check,
-		description: fmt.Sprintf("once %q is met, must have %q", precondition.Description(), mustMeet.Description()),
+		description: fmt.Sprintf("once %q is met, must have %q", precondition.Description(), strings.Join(descriptions, "\n")),
 	}
 }
 
@@ -190,7 +196,7 @@
 // to be completely processed.
 func (e *Env) DoneWithOpen() Expectation {
 	opens := e.Editor.Stats().DidOpen
-	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidOpen), opens)
+	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidOpen), opens, true)
 }
 
 // StartedChange expects there to have been i work items started for
@@ -203,28 +209,28 @@
 // editor to be completely processed.
 func (e *Env) DoneWithChange() Expectation {
 	changes := e.Editor.Stats().DidChange
-	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes)
+	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), changes, true)
 }
 
 // DoneWithSave expects all didSave notifications currently sent by the editor
 // to be completely processed.
 func (e *Env) DoneWithSave() Expectation {
 	saves := e.Editor.Stats().DidSave
-	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), saves)
+	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidSave), saves, true)
 }
 
 // DoneWithChangeWatchedFiles expects all didChangeWatchedFiles notifications
 // currently sent by the editor to be completely processed.
 func (e *Env) DoneWithChangeWatchedFiles() Expectation {
 	changes := e.Editor.Stats().DidChangeWatchedFiles
-	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes)
+	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChangeWatchedFiles), changes, true)
 }
 
 // DoneWithClose expects all didClose notifications currently sent by the
 // editor to be completely processed.
 func (e *Env) DoneWithClose() Expectation {
 	changes := e.Editor.Stats().DidClose
-	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidClose), changes)
+	return CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidClose), changes, true)
 }
 
 // StartedWork expect a work item to have been started >= atLeast times.
@@ -247,16 +253,20 @@
 //
 // Since the Progress API doesn't include any hidden metadata, we must use the
 // progress notification title to identify the work we expect to be completed.
-func CompletedWork(title string, atLeast uint64) SimpleExpectation {
+func CompletedWork(title string, count uint64, atLeast bool) SimpleExpectation {
 	check := func(s State) Verdict {
-		if s.completedWork[title] >= atLeast {
+		if s.completedWork[title] == count || atLeast && s.completedWork[title] > count {
 			return Met
 		}
 		return Unmet
 	}
+	desc := fmt.Sprintf("completed work %q %v times", title, count)
+	if atLeast {
+		desc = fmt.Sprintf("completed work %q at least %d time(s)", title, count)
+	}
 	return SimpleExpectation{
 		check:       check,
-		description: fmt.Sprintf("completed work %q at least %d time(s)", title, atLeast),
+		description: desc,
 	}
 }
 
@@ -303,7 +313,7 @@
 
 // LogMatching asserts that the client has received a log message
 // of type typ matching the regexp re.
-func LogMatching(typ protocol.MessageType, re string, count int) LogExpectation {
+func LogMatching(typ protocol.MessageType, re string, count int, atLeast bool) LogExpectation {
 	rec, err := regexp.Compile(re)
 	if err != nil {
 		panic(err)
@@ -315,14 +325,19 @@
 				found++
 			}
 		}
-		if found == count {
+		// Check for an exact or "at least" match.
+		if found == count || (found >= count && atLeast) {
 			return Met
 		}
 		return Unmet
 	}
+	desc := fmt.Sprintf("log message matching %q expected %v times", re, count)
+	if atLeast {
+		desc = fmt.Sprintf("log message matching %q expected at least %v times", re, count)
+	}
 	return LogExpectation{
 		check:       check,
-		description: fmt.Sprintf("log message matching %q", re),
+		description: desc,
 	}
 }
 
@@ -514,7 +529,24 @@
 	}
 	return SimpleExpectation{
 		check:       check,
-		description: "empty diagnostics",
+		description: fmt.Sprintf("empty diagnostics for %q", name),
+	}
+}
+
+// EmptyOrNoDiagnostics asserts that either no diagnostics are sent for the
+// workspace-relative path name, or empty diagnostics are sent.
+// TODO(rFindley): this subtlety shouldn't be necessary. Gopls should always
+// send at least one diagnostic set for open files.
+func EmptyOrNoDiagnostics(name string) Expectation {
+	check := func(s State) Verdict {
+		if diags := s.diagnostics[name]; diags == nil || len(diags.Diagnostics) == 0 {
+			return Met
+		}
+		return Unmet
+	}
+	return SimpleExpectation{
+		check:       check,
+		description: fmt.Sprintf("empty or no diagnostics for %q", name),
 	}
 }
 
diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go
index 5eeacd8..05867c4 100644
--- a/internal/lsp/regtest/runner.go
+++ b/internal/lsp/regtest/runner.go
@@ -29,6 +29,7 @@
 	"golang.org/x/tools/internal/lsp/lsprpc"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
+	"golang.org/x/tools/internal/xcontext"
 )
 
 // Mode is a bitmask that defines for which execution modes a test should run.
@@ -44,7 +45,7 @@
 	// SeparateProcess forwards connection to a shared separate gopls process.
 	SeparateProcess
 	// Experimental enables all of the experimental configurations that are
-	// being developed. Currently, it enables the workspace module.
+	// being developed.
 	Experimental
 )
 
@@ -177,10 +178,6 @@
 	})
 }
 
-var WindowsLineEndings = optionSetter(func(opts *runConfig) {
-	opts.editor.WindowsLineEndings = true
-})
-
 // SkipLogs skips the buffering of logs during test execution. It is intended
 // for long-running stress tests.
 func SkipLogs() RunOption {
@@ -240,7 +237,7 @@
 		{"singleton", Singleton, singletonServer},
 		{"forwarded", Forwarded, r.forwardedServer},
 		{"separate_process", SeparateProcess, r.separateProcessServer},
-		{"experimental_workspace_module", Experimental, experimentalWorkspaceModule},
+		{"experimental", Experimental, experimentalServer},
 	}
 
 	for _, tc := range tests {
@@ -307,7 +304,13 @@
 				if t.Failed() || testing.Verbose() {
 					ls.printBuffers(t.Name(), os.Stderr)
 				}
-				env.CloseEditor()
+				// For tests that failed due to a timeout, don't fail to shutdown
+				// because ctx is done.
+				closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second)
+				defer cancel()
+				if err := env.Editor.Close(closeCtx); err != nil {
+					t.Errorf("closing editor: %v", err)
+				}
 			}()
 			// Always await the initial workspace load.
 			env.Await(InitialWorkspaceLoad)
@@ -395,9 +398,12 @@
 	return lsprpc.NewStreamServer(cache.New(optsHook), false)
 }
 
-func experimentalWorkspaceModule(_ context.Context, _ *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
+func experimentalServer(_ context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
 	options := func(o *source.Options) {
 		optsHook(o)
+		o.EnableAllExperiments()
+		// ExperimentalWorkspaceModule is not (as of writing) enabled by
+		// source.Options.EnableAllExperiments, but we want to test it.
 		o.ExperimentalWorkspaceModule = true
 	}
 	return lsprpc.NewStreamServer(cache.New(options), false)
@@ -405,7 +411,7 @@
 
 func (r *Runner) forwardedServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
 	ts := r.getTestServer(optsHook)
-	return lsprpc.NewForwarder("tcp", ts.Addr)
+	return newForwarder("tcp", ts.Addr)
 }
 
 // getTestServer gets the shared test server instance to connect to, or creates
@@ -426,7 +432,16 @@
 	// TODO(rfindley): can we use the autostart behavior here, instead of
 	// pre-starting the remote?
 	socket := r.getRemoteSocket(t)
-	return lsprpc.NewForwarder("unix", socket)
+	return newForwarder("unix", socket)
+}
+
+func newForwarder(network, address string) *lsprpc.Forwarder {
+	server, err := lsprpc.NewForwarder(network+";"+address, nil)
+	if err != nil {
+		// This should never happen, as we are passing an explicit address.
+		panic(fmt.Sprintf("internal error: unable to create forwarder: %v", err))
+	}
+	return server
 }
 
 // runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
diff --git a/internal/lsp/regtest/wrappers.go b/internal/lsp/regtest/wrappers.go
index 68e1b74..5677ab0 100644
--- a/internal/lsp/regtest/wrappers.go
+++ b/internal/lsp/regtest/wrappers.go
@@ -6,14 +6,12 @@
 
 import (
 	"encoding/json"
-	"io"
 	"path"
 	"testing"
 
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/fake"
 	"golang.org/x/tools/internal/lsp/protocol"
-	errors "golang.org/x/xerrors"
 )
 
 func (e *Env) ChangeFilesOnDisk(events []fake.FileEvent) {
@@ -247,19 +245,6 @@
 	return highlights
 }
 
-func checkIsFatal(t testing.TB, err error) {
-	t.Helper()
-	if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrClosedPipe) {
-		t.Fatal(err)
-	}
-}
-
-// CloseEditor shuts down the editor, calling t.Fatal on any error.
-func (e *Env) CloseEditor() {
-	e.T.Helper()
-	checkIsFatal(e.T, e.Editor.Close(e.Ctx))
-}
-
 // RunGenerate runs go:generate on the given dir, calling t.Fatal on any error.
 // It waits for the generate command to complete and checks for file changes
 // before returning.
diff --git a/internal/lsp/semantic.go b/internal/lsp/semantic.go
index c0ed972..073336b 100644
--- a/internal/lsp/semantic.go
+++ b/internal/lsp/semantic.go
@@ -11,7 +11,7 @@
 	"go/ast"
 	"go/token"
 	"go/types"
-	"log"
+	"path/filepath"
 	"sort"
 	"strings"
 	"time"
@@ -93,7 +93,7 @@
 	if err != nil {
 		return nil, err
 	}
-	// don't return errors on pgf.ParseErr. Do what we can.
+	// ignore pgf.ParseErr. Do what we can.
 	if rng == nil && len(pgf.Src) > maxFullFileSize {
 		err := fmt.Errorf("semantic tokens: file %s too large for full (%d>%d)",
 			fh.URI().Filename(), len(pgf.Src), maxFullFileSize)
@@ -122,6 +122,7 @@
 
 func (e *encoded) semantics() {
 	f := e.pgf.File
+	// may not be in range, but harmless
 	e.token(f.Package, len("package"), tokKeyword, nil)
 	e.token(f.Name.NamePos, len(f.Name.Name), tokNamespace, nil)
 	inspect := func(n ast.Node) bool {
@@ -166,8 +167,11 @@
 )
 
 func (e *encoded) token(start token.Pos, leng int, typ tokenType, mods []string) {
-	if start == 0 {
-		e.unexpected("token at token.NoPos")
+
+	if !start.IsValid() {
+		// This is not worth reporting
+		//e.unexpected("token at token.NoPos")
+		return
 	}
 	if start >= e.end || start+token.Pos(leng) <= e.start {
 		return
@@ -186,10 +190,7 @@
 		return
 	}
 	if lspRange.End.Line != lspRange.Start.Line {
-		// abrupt end of file, without \n. TODO(pjw): fix?
-		pos := e.fset.PositionFor(start, false)
-		msg := fmt.Sprintf("token at %s:%d.%d overflows", pos.Filename, pos.Line, pos.Column)
-		event.Log(e.ctx, msg)
+		// this happens if users are typing at the end of the file, but report nothing
 		return
 	}
 	// token is all on one line
@@ -236,12 +237,26 @@
 	if len(e.stack) > 0 {
 		loc := e.stack[len(e.stack)-1].Pos()
 		add := e.pgf.Tok.PositionFor(loc, false)
-		msg = append(msg, fmt.Sprintf("(line:%d,col:%d)", add.Line, add.Column))
+		nm := filepath.Base(add.Filename)
+		msg = append(msg, fmt.Sprintf("(%s:%d,col:%d)", nm, add.Line, add.Column))
 	}
 	msg = append(msg, "]")
 	return strings.Join(msg, " ")
 }
 
+// find the line in the source
+func (e *encoded) srcLine(x ast.Node) string {
+	file := e.pgf.Tok
+	line := file.Line(x.Pos())
+	start := file.Offset(file.LineStart(line))
+	end := start
+	for ; end < len(e.pgf.Src) && e.pgf.Src[end] != '\n'; end++ {
+
+	}
+	ans := e.pgf.Src[start:end]
+	return string(ans)
+}
+
 func (e *encoded) inspector(n ast.Node) bool {
 	pop := func() {
 		e.stack = e.stack[:len(e.stack)-1]
@@ -381,12 +396,12 @@
 	case *ast.UnaryExpr:
 		e.token(x.OpPos, len(x.Op.String()), tokOperator, nil)
 	case *ast.ValueSpec:
-	// things we only see with parsing or type errors, so we ignore them
+	// things only seen with parsing or type errors, so ignore them
 	case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
 		return true
 	// not going to see these
 	case *ast.File, *ast.Package:
-		log.Printf("implement %T %s", x, e.pgf.Tok.PositionFor(x.Pos(), false))
+		e.unexpected(fmt.Sprintf("implement %T %s", x, e.pgf.Tok.PositionFor(x.Pos(), false)))
 	// other things we knowingly ignore
 	case *ast.Comment, *ast.CommentGroup:
 		pop()
@@ -409,7 +424,8 @@
 	use := e.ti.Uses[x]
 	switch y := use.(type) {
 	case nil:
-		e.token(x.NamePos, len(x.Name), tokVariable, []string{"definition"})
+		e.unkIdent(x)
+		return
 	case *types.Builtin:
 		e.token(x.NamePos, len(x.Name), tokFunction, []string{"defaultLibrary"})
 	case *types.Const:
@@ -462,6 +478,134 @@
 	}
 }
 
+// both e.ti.Defs and e.ti.Uses are nil. use the parse stack
+// a lot of these only happen when the package doesn't compile
+func (e *encoded) unkIdent(x *ast.Ident) {
+	tok := func(tok tokenType, mod []string) {
+		e.token(x.Pos(), len(x.Name), tok, mod)
+	}
+	def := []string{"definition"}
+	n := len(e.stack) - 2 // parent of Ident
+	if n < 0 {
+		e.unexpected("no stack?")
+		return
+	}
+	switch nd := e.stack[n].(type) {
+	case *ast.BinaryExpr, *ast.UnaryExpr, *ast.ParenExpr, *ast.StarExpr,
+		*ast.IncDecStmt, *ast.SliceExpr, *ast.ExprStmt, *ast.IndexExpr,
+		*ast.ReturnStmt,
+		*ast.IfStmt,       /* condition */
+		*ast.KeyValueExpr: // either key or value
+		tok(tokVariable, nil)
+	case *ast.Ellipsis:
+		tok(tokType, nil)
+	case *ast.CaseClause:
+		if n-2 >= 0 {
+			if _, ok := e.stack[n-2].(*ast.TypeSwitchStmt); ok {
+				tok(tokType, nil)
+				return
+			}
+		}
+		tok(tokVariable, nil)
+	case *ast.ArrayType:
+		if x == nd.Len {
+			tok(tokVariable, nil)
+		} else {
+			tok(tokType, nil)
+		}
+	case *ast.MapType:
+		tok(tokType, nil)
+	case *ast.CallExpr:
+		if x == nd.Fun {
+			tok(tokFunction, nil)
+			return
+		}
+		tok(tokVariable, nil)
+	case *ast.TypeAssertExpr:
+		if x == nd.X {
+			tok(tokVariable, nil)
+		} else if x == nd.Type {
+			tok(tokType, nil)
+		}
+	case *ast.ValueSpec:
+		for _, p := range nd.Names {
+			if p == x {
+				tok(tokVariable, def)
+				return
+			}
+		}
+		for _, p := range nd.Values {
+			if p == x {
+				tok(tokVariable, nil)
+				return
+			}
+		}
+		tok(tokType, nil)
+	case *ast.SelectorExpr: // e.ti.Selections[nd] is nil, so no help
+		if n-1 >= 0 {
+			if ce, ok := e.stack[n-1].(*ast.CallExpr); ok {
+				// ... CallExpr SelectorExpr Ident (_.x())
+				if ce.Fun == nd && nd.Sel == x {
+					tok(tokFunction, nil)
+					return
+				}
+			}
+		}
+		tok(tokVariable, nil)
+	case *ast.AssignStmt:
+		for _, p := range nd.Lhs {
+			// x := ..., or x = ...
+			if p == x {
+				if nd.Tok != token.DEFINE {
+					def = nil
+				}
+				tok(tokVariable, def)
+				return
+			}
+		}
+		// RHS, = x
+		tok(tokVariable, nil)
+	case *ast.TypeSpec: // it's a type if it is either the Name or the Type
+		if x == nd.Type {
+			def = nil
+		}
+		tok(tokType, def)
+	case *ast.Field:
+		// ident could be type in a field, or a method in an interface type, or a variable
+		if x == nd.Type {
+			tok(tokType, nil)
+			return
+		}
+		if n-2 >= 0 {
+			_, okit := e.stack[n-2].(*ast.InterfaceType)
+			_, okfl := e.stack[n-1].(*ast.FieldList)
+			if okit && okfl {
+				tok(tokMember, def)
+				return
+			}
+		}
+		tok(tokVariable, nil)
+	case *ast.LabeledStmt, *ast.BranchStmt:
+		// nothing to report
+	case *ast.CompositeLit:
+		if nd.Type == x {
+			tok(tokType, nil)
+			return
+		}
+		tok(tokVariable, nil)
+	case *ast.RangeStmt:
+		if nd.Tok != token.DEFINE {
+			def = nil
+		}
+		tok(tokVariable, def)
+	case *ast.FuncDecl:
+		tok(tokFunction, def)
+	default:
+		msg := fmt.Sprintf("%T unexpected: %s %s%q", nd, x.Name, e.strStack(), e.srcLine(x))
+		e.unexpected(msg)
+	}
+}
+
 func isDeprecated(n *ast.CommentGroup) bool {
 	if n == nil {
 		return false
@@ -642,16 +786,17 @@
 		}
 		// and fall through for _
 	}
-	if d.Path.Value == "" {
+	val := d.Path.Value
+	if len(val) < 2 || val[0] != '"' || val[len(val)-1] != '"' {
+		// avoid panics on imports without a properly quoted string
 		return
 	}
-	nm := d.Path.Value[1 : len(d.Path.Value)-1] // trailing "
-	v := strings.LastIndex(nm, "/")
-	if v != -1 {
-		nm = nm[v+1:]
-	}
+	nm := val[1 : len(val)-1] // remove surrounding "s
+	nm = filepath.Base(nm)
+	// in import "lib/math", 'math' is the package name
 	start := d.Path.End() - token.Pos(1+len(nm))
 	e.token(start, len(nm), tokNamespace, nil)
+	// There may be more cases, as import strings are implementation defined.
 }
 
 // log unexpected state
diff --git a/internal/lsp/server.go b/internal/lsp/server.go
index 99786fe..becfc71 100644
--- a/internal/lsp/server.go
+++ b/internal/lsp/server.go
@@ -11,6 +11,7 @@
 	"sync"
 
 	"golang.org/x/tools/internal/jsonrpc2"
+	"golang.org/x/tools/internal/lsp/progress"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/span"
@@ -22,6 +23,8 @@
 // NewServer creates an LSP server and binds it to handle incoming client
 // messages on on the supplied stream.
 func NewServer(session source.Session, client protocol.ClientCloser) *Server {
+	tracker := progress.NewTracker(client)
+	session.SetProgressTracker(tracker)
 	return &Server{
 		diagnostics:           map[span.URI]*fileReports{},
 		gcOptimizationDetails: make(map[string]struct{}),
@@ -30,8 +33,9 @@
 		session:               session,
 		client:                client,
 		diagnosticsSema:       make(chan struct{}, concurrentAnalyses),
-		progress:              newProgressTracker(client),
-		debouncer:             newDebouncer(),
+		progress:              tracker,
+		diagDebouncer:         newDebouncer(),
+		watchedFileDebouncer:  newDebouncer(),
 	}
 }
 
@@ -99,19 +103,29 @@
 	// expensive.
 	diagnosticsSema chan struct{}
 
-	progress *progressTracker
+	progress *progress.Tracker
 
-	// debouncer is used for debouncing diagnostics.
-	debouncer *debouncer
+	// diagDebouncer is used for debouncing diagnostics.
+	diagDebouncer *debouncer
+
+	// watchedFileDebouncer is used for batching didChangeWatchedFiles notifications.
+	watchedFileDebouncer *debouncer
+	fileChangeMu         sync.Mutex
+	pendingOnDiskChanges []*pendingModificationSet
 
 	// When the workspace fails to load, we show its status through a progress
 	// report with an error message.
 	criticalErrorStatusMu sync.Mutex
-	criticalErrorStatus   *workDone
+	criticalErrorStatus   *progress.WorkDone
+}
+
+type pendingModificationSet struct {
+	diagnoseDone chan struct{}
+	changes      []source.FileModification
 }
 
 func (s *Server) workDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error {
-	return s.progress.cancel(ctx, params.Token)
+	return s.progress.Cancel(ctx, params.Token)
 }
 
 func (s *Server) nonstandardRequest(ctx context.Context, method string, params interface{}) (interface{}, error) {
diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go
index 2d7db9a..9accafa 100755
--- a/internal/lsp/source/api_json.go
+++ b/internal/lsp/source/api_json.go
@@ -145,6 +145,19 @@
 				Hierarchy:  "build",
 			},
 			{
+				Name: "experimentalUseInvalidMetadata",
+				Type: "bool",
+				Doc:  "experimentalUseInvalidMetadata enables gopls to fall back on outdated\npackage metadata to provide editor features if the go command fails to\nload packages for some reason (like an invalid go.mod file). This will\neventually be the default behavior, and this setting will be removed.\n",
+				EnumKeys: EnumKeys{
+					ValueType: "",
+					Keys:      nil,
+				},
+				EnumValues: nil,
+				Default:    "false",
+				Status:     "experimental",
+				Hierarchy:  "build",
+			},
+			{
 				Name: "hoverKind",
 				Type: "enum",
 				Doc:  "hoverKind controls the information that appears in the hover text.\nSingleLine and Structured are intended for use only by authors of editor plugins.\n",
@@ -614,15 +627,28 @@
 				Hierarchy:  "ui.diagnostic",
 			},
 			{
-				Name: "experimentalDiagnosticsDelay",
+				Name: "diagnosticsDelay",
 				Type: "time.Duration",
-				Doc:  "experimentalDiagnosticsDelay controls the amount of time that gopls waits\nafter the most recent file modification before computing deep diagnostics.\nSimple diagnostics (parsing and type-checking) are always run immediately\non recently modified packages.\n\nThis option must be set to a valid duration string, for example `\"250ms\"`.\n",
+				Doc:  "diagnosticsDelay controls the amount of time that gopls waits\nafter the most recent file modification before computing deep diagnostics.\nSimple diagnostics (parsing and type-checking) are always run immediately\non recently modified packages.\n\nThis option must be set to a valid duration string, for example `\"250ms\"`.\n",
 				EnumKeys: EnumKeys{
 					ValueType: "",
 					Keys:      nil,
 				},
 				EnumValues: nil,
 				Default:    "\"250ms\"",
+				Status:     "advanced",
+				Hierarchy:  "ui.diagnostic",
+			},
+			{
+				Name: "experimentalWatchedFileDelay",
+				Type: "time.Duration",
+				Doc:  "experimentalWatchedFileDelay controls the amount of time that gopls waits\nfor additional workspace/didChangeWatchedFiles notifications to arrive,\nbefore processing all such notifications in a single batch. This is\nintended for use by LSP clients that don't support their own batching of\nfile system notifications.\n\nThis option must be set to a valid duration string, for example `\"100ms\"`.\n",
+				EnumKeys: EnumKeys{
+					ValueType: "",
+					Keys:      nil,
+				},
+				EnumValues: nil,
+				Default:    "\"0s\"",
 				Status:     "experimental",
 				Hierarchy:  "ui.diagnostic",
 			},
@@ -731,124 +757,144 @@
 	},
 	Commands: []*CommandJSON{
 		{
-			Command: "gopls.add_dependency",
-			Title:   "Add dependency",
-			Doc:     "Adds a dependency to the go.mod file for a module.",
-			ArgDoc:  "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
+			Command:   "gopls.add_dependency",
+			Title:     "Add a dependency",
+			Doc:       "Adds a dependency to the go.mod file for a module.",
+			ArgDoc:    "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.add_import",
-			Title:   "asks the server to add an import path to a given Go file.",
-			Doc:     "The method will call applyEdit on the client so that clients don't have\nto apply the edit themselves.",
-			ArgDoc:  "{\n\t// ImportPath is the target import path that should\n\t// be added to the URI file\n\t\"ImportPath\": string,\n\t// URI is the file that the ImportPath should be\n\t// added to\n\t\"URI\": string,\n}",
+			Command:   "gopls.add_import",
+			Title:     "Add an import",
+			Doc:       "Ask the server to add an import path to a given Go file.  The method will\ncall applyEdit on the client so that clients don't have to apply the edit\nthemselves.",
+			ArgDoc:    "{\n\t// ImportPath is the target import path that should\n\t// be added to the URI file\n\t\"ImportPath\": string,\n\t// URI is the file that the ImportPath should be\n\t// added to\n\t\"URI\": string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.apply_fix",
-			Title:   "Apply a fix",
-			Doc:     "Applies a fix to a region of source code.",
-			ArgDoc:  "{\n\t// The fix to apply.\n\t\"Fix\": string,\n\t// The file URI for the document to fix.\n\t\"URI\": string,\n\t// The document range to scan for fixes.\n\t\"Range\": {\n\t\t\"start\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t\t\"end\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t},\n}",
+			Command:   "gopls.apply_fix",
+			Title:     "Apply a fix",
+			Doc:       "Applies a fix to a region of source code.",
+			ArgDoc:    "{\n\t// The fix to apply.\n\t\"Fix\": string,\n\t// The file URI for the document to fix.\n\t\"URI\": string,\n\t// The document range to scan for fixes.\n\t\"Range\": {\n\t\t\"start\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t\t\"end\": {\n\t\t\t\"line\": uint32,\n\t\t\t\"character\": uint32,\n\t\t},\n\t},\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.check_upgrades",
-			Title:   "Check for upgrades",
-			Doc:     "Checks for module upgrades.",
-			ArgDoc:  "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The modules to check.\n\t\"Modules\": []string,\n}",
+			Command:   "gopls.check_upgrades",
+			Title:     "Check for upgrades",
+			Doc:       "Checks for module upgrades.",
+			ArgDoc:    "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The modules to check.\n\t\"Modules\": []string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.gc_details",
-			Title:   "Toggle gc_details",
-			Doc:     "Toggle the calculation of gc annotations.",
-			ArgDoc:  "string",
+			Command:   "gopls.gc_details",
+			Title:     "Toggle gc_details",
+			Doc:       "Toggle the calculation of gc annotations.",
+			ArgDoc:    "string",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.generate",
-			Title:   "Run go generate",
-			Doc:     "Runs `go generate` for a given directory.",
-			ArgDoc:  "{\n\t// URI for the directory to generate.\n\t\"Dir\": string,\n\t// Whether to generate recursively (go generate ./...)\n\t\"Recursive\": bool,\n}",
+			Command:   "gopls.generate",
+			Title:     "Run go generate",
+			Doc:       "Runs `go generate` for a given directory.",
+			ArgDoc:    "{\n\t// URI for the directory to generate.\n\t\"Dir\": string,\n\t// Whether to generate recursively (go generate ./...)\n\t\"Recursive\": bool,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.generate_gopls_mod",
-			Title:   "Generate gopls.mod",
-			Doc:     "(Re)generate the gopls.mod file for a workspace.",
-			ArgDoc:  "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			Command:   "gopls.generate_gopls_mod",
+			Title:     "Generate gopls.mod",
+			Doc:       "(Re)generate the gopls.mod file for a workspace.",
+			ArgDoc:    "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.go_get_package",
-			Title:   "go get package",
-			Doc:     "Runs `go get` to fetch a package.",
-			ArgDoc:  "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The package to go get.\n\t\"Pkg\": string,\n\t\"AddRequire\": bool,\n}",
+			Command:   "gopls.go_get_package",
+			Title:     "go get a package",
+			Doc:       "Runs `go get` to fetch a package.",
+			ArgDoc:    "{\n\t// Any document URI within the relevant module.\n\t\"URI\": string,\n\t// The package to go get.\n\t\"Pkg\": string,\n\t\"AddRequire\": bool,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.list_known_packages",
-			Title:   "retrieves a list of packages",
-			Doc:     "that are importable from the given URI.",
-			ArgDoc:  "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			Command:   "gopls.list_known_packages",
+			Title:     "List known packages",
+			Doc:       "Retrieve a list of packages that are importable from the given URI.",
+			ArgDoc:    "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			ResultDoc: "{\n\t// Packages is a list of packages relative\n\t// to the URIArg passed by the command request.\n\t// In other words, it omits paths that are already\n\t// imported or cannot be imported due to compiler\n\t// restrictions.\n\t\"Packages\": []string,\n}",
 		},
 		{
-			Command: "gopls.regenerate_cgo",
-			Title:   "Regenerate cgo",
-			Doc:     "Regenerates cgo definitions.",
-			ArgDoc:  "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			Command:   "gopls.regenerate_cgo",
+			Title:     "Regenerate cgo",
+			Doc:       "Regenerates cgo definitions.",
+			ArgDoc:    "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.remove_dependency",
-			Title:   "Remove dependency",
-			Doc:     "Removes a dependency from the go.mod file of a module.",
-			ArgDoc:  "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t\"OnlyDiagnostic\": bool,\n}",
+			Command:   "gopls.remove_dependency",
+			Title:     "Remove a dependency",
+			Doc:       "Removes a dependency from the go.mod file of a module.",
+			ArgDoc:    "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// The module path to remove.\n\t\"ModulePath\": string,\n\t\"OnlyDiagnostic\": bool,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.run_tests",
-			Title:   "Run test(s)",
-			Doc:     "Runs `go test` for a specific set of test or benchmark functions.",
-			ArgDoc:  "{\n\t// The test file containing the tests to run.\n\t\"URI\": string,\n\t// Specific test names to run, e.g. TestFoo.\n\t\"Tests\": []string,\n\t// Specific benchmarks to run, e.g. BenchmarkFoo.\n\t\"Benchmarks\": []string,\n}",
+			Command:   "gopls.run_tests",
+			Title:     "Run test(s)",
+			Doc:       "Runs `go test` for a specific set of test or benchmark functions.",
+			ArgDoc:    "{\n\t// The test file containing the tests to run.\n\t\"URI\": string,\n\t// Specific test names to run, e.g. TestFoo.\n\t\"Tests\": []string,\n\t// Specific benchmarks to run, e.g. BenchmarkFoo.\n\t\"Benchmarks\": []string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.start_debugging",
-			Title:   "",
-			Doc:     "",
-			ArgDoc:  "{\n\t// Optional: the address (including port) for the debug server to listen on.\n\t// If not provided, the debug server will bind to \"localhost:0\", and the\n\t// full debug URL will be contained in the result.\n\t// \n\t// If there is more than one gopls instance along the serving path (i.e. you\n\t// are using a daemon), each gopls instance will attempt to start debugging.\n\t// If Addr specifies a port, only the daemon will be able to bind to that\n\t// port, and each intermediate gopls instance will fail to start debugging.\n\t// For this reason it is recommended not to specify a port (or equivalently,\n\t// to specify \":0\").\n\t// \n\t// If the server was already debugging this field has no effect, and the\n\t// result will contain the previously configured debug URL(s).\n\t\"Addr\": string,\n}",
+			Command:   "gopls.start_debugging",
+			Title:     "Start the gopls debug server",
+			Doc:       "Start the gopls debug server if it isn't running, and return the debug\naddress.",
+			ArgDoc:    "{\n\t// Optional: the address (including port) for the debug server to listen on.\n\t// If not provided, the debug server will bind to \"localhost:0\", and the\n\t// full debug URL will be contained in the result.\n\t// \n\t// If there is more than one gopls instance along the serving path (i.e. you\n\t// are using a daemon), each gopls instance will attempt to start debugging.\n\t// If Addr specifies a port, only the daemon will be able to bind to that\n\t// port, and each intermediate gopls instance will fail to start debugging.\n\t// For this reason it is recommended not to specify a port (or equivalently,\n\t// to specify \":0\").\n\t// \n\t// If the server was already debugging this field has no effect, and the\n\t// result will contain the previously configured debug URL(s).\n\t\"Addr\": string,\n}",
+			ResultDoc: "{\n\t// The URLs to use to access the debug servers, for all gopls instances in\n\t// the serving path. For the common case of a single gopls instance (i.e. no\n\t// daemon), this will be exactly one address.\n\t// \n\t// In the case of one or more gopls instances forwarding the LSP to a daemon,\n\t// URLs will contain debug addresses for each server in the serving path, in\n\t// serving order. The daemon debug address will be the last entry in the\n\t// slice. If any intermediate gopls instance fails to start debugging, no\n\t// error will be returned but the debug URL for that server in the URLs slice\n\t// will be empty.\n\t\"URLs\": []string,\n}",
 		},
 		{
-			Command: "gopls.test",
-			Title:   "Run test(s) (legacy)",
-			Doc:     "Runs `go test` for a specific set of test or benchmark functions.",
-			ArgDoc:  "string,\n[]string,\n[]string",
+			Command:   "gopls.test",
+			Title:     "Run test(s) (legacy)",
+			Doc:       "Runs `go test` for a specific set of test or benchmark functions.",
+			ArgDoc:    "string,\n[]string,\n[]string",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.tidy",
-			Title:   "Run go mod tidy",
-			Doc:     "Runs `go mod tidy` for a module.",
-			ArgDoc:  "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
+			Command:   "gopls.tidy",
+			Title:     "Run go mod tidy",
+			Doc:       "Runs `go mod tidy` for a module.",
+			ArgDoc:    "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.toggle_gc_details",
-			Title:   "Toggle gc_details",
-			Doc:     "Toggle the calculation of gc annotations.",
-			ArgDoc:  "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			Command:   "gopls.toggle_gc_details",
+			Title:     "Toggle gc_details",
+			Doc:       "Toggle the calculation of gc annotations.",
+			ArgDoc:    "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.update_go_sum",
-			Title:   "Update go.sum",
-			Doc:     "Updates the go.sum file for a module.",
-			ArgDoc:  "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
+			Command:   "gopls.update_go_sum",
+			Title:     "Update go.sum",
+			Doc:       "Updates the go.sum file for a module.",
+			ArgDoc:    "{\n\t// The file URIs.\n\t\"URIs\": []string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.upgrade_dependency",
-			Title:   "Upgrade dependency",
-			Doc:     "Upgrades a dependency in the go.mod file for a module.",
-			ArgDoc:  "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
+			Command:   "gopls.upgrade_dependency",
+			Title:     "Upgrade a dependency",
+			Doc:       "Upgrades a dependency in the go.mod file for a module.",
+			ArgDoc:    "{\n\t// The go.mod file URI.\n\t\"URI\": string,\n\t// Additional args to pass to the go command.\n\t\"GoCmdArgs\": []string,\n\t// Whether to add a require directive.\n\t\"AddRequire\": bool,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.vendor",
-			Title:   "Run go mod vendor",
-			Doc:     "Runs `go mod vendor` for a module.",
-			ArgDoc:  "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			Command:   "gopls.vendor",
+			Title:     "Run go mod vendor",
+			Doc:       "Runs `go mod vendor` for a module.",
+			ArgDoc:    "{\n\t// The file URI.\n\t\"URI\": string,\n}",
+			ResultDoc: "",
 		},
 		{
-			Command: "gopls.workspace_metadata",
-			Title:   "",
-			Doc:     "",
-			ArgDoc:  "",
+			Command:   "gopls.workspace_metadata",
+			Title:     "Query workspace metadata",
+			Doc:       "Query the server for information about active workspaces.",
+			ArgDoc:    "",
+			ResultDoc: "{\n\t// All workspaces for this session.\n\t\"Workspaces\": []{\n\t\t\"Name\": string,\n\t\t\"ModuleDir\": string,\n\t},\n}",
 		},
 	},
 	Lenses: []*LensJSON{
@@ -879,7 +925,7 @@
 		},
 		{
 			Lens:  "upgrade_dependency",
-			Title: "Upgrade dependency",
+			Title: "Upgrade a dependency",
 			Doc:   "Upgrades a dependency in the go.mod file for a module.",
 		},
 		{
diff --git a/internal/lsp/source/completion/builtin.go b/internal/lsp/source/completion/builtin.go
index 086e3f7..39732d8 100644
--- a/internal/lsp/source/completion/builtin.go
+++ b/internal/lsp/source/completion/builtin.go
@@ -73,6 +73,9 @@
 			// Infer first append() arg type as apparent return type of
 			// append().
 			inf.objType = parentInf.objType
+			if parentInf.variadic {
+				inf.objType = types.NewSlice(inf.objType)
+			}
 			break
 		}
 
diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go
index 6d4bef9..741e6b3 100644
--- a/internal/lsp/source/completion/completion.go
+++ b/internal/lsp/source/completion/completion.go
@@ -370,31 +370,14 @@
 	// expanded calls for function invocations.
 	names []string
 
-	// expandFuncCall is true if obj should be invoked in the completion.
-	// For example, expandFuncCall=true yields "foo()", expandFuncCall=false yields "foo".
-	expandFuncCall bool
-
-	// takeAddress is true if the completion should take a pointer to obj.
-	// For example, takeAddress=true yields "&foo", takeAddress=false yields "foo".
-	takeAddress bool
+	// mods contains modifications that should be applied to the
+	// candidate when inserted. For example, "foo" may be inserted as
+	// "*foo" or "foo()".
+	mods []typeModKind
 
 	// addressable is true if a pointer can be taken to the candidate.
 	addressable bool
 
-	// makePointer is true if the candidate type name T should be made into *T.
-	makePointer bool
-
-	// dereference is a count of how many times to dereference the candidate obj.
-	// For example, dereference=2 turns "foo" into "**foo" when formatting.
-	dereference int
-
-	// takeSlice is true if obj is an array that should be converted to a slice.
-	takeSlice bool
-
-	// variadic is true if this candidate fills a variadic param and
-	// needs "..." appended.
-	variadic bool
-
 	// convertTo is a type that this candidate should be cast to. For
 	// example, if convertTo is float64, "foo" should be formatted as
 	// "float64(foo)".
@@ -405,6 +388,15 @@
 	imp *importInfo
 }
 
+func (c candidate) hasMod(mod typeModKind) bool {
+	for _, m := range c.mods {
+		if m == mod {
+			return true
+		}
+	}
+	return false
+}
+
 // ErrIsDefinition is an error that informs the user they got no
 // completions because they tried to complete the name of a new object
 // being defined.
@@ -1768,20 +1760,24 @@
 	return nil
 }
 
-// typeModifier represents an operator that changes the expected type.
-type typeModifier struct {
-	mod      typeMod
+// typeMod represents an operator that changes the expected type.
+type typeMod struct {
+	mod      typeModKind
 	arrayLen int64
 }
 
-type typeMod int
+type typeModKind int
 
 const (
-	dereference typeMod = iota // pointer indirection: "*"
-	reference                  // adds level of pointer: "&" for values, "*" for type names
-	chanRead                   // channel read operator ("<-")
-	slice                      // make a slice type ("[]" in "[]int")
-	array                      // make an array type ("[2]" in "[2]int")
+	dereference   typeModKind = iota // pointer indirection: "*"
+	reference                        // adds level of pointer: "&" for values, "*" for type names
+	chanRead                         // channel read operator: "<-"
+	sliceType                        // make a slice type: "[]" in "[]int"
+	arrayType                        // make an array type: "[2]" in "[2]int"
+	invoke                           // make a function call: "()" in "foo()"
+	takeSlice                        // take slice of array: "[:]" in "foo[:]"
+	takeDotDotDot                    // turn slice into variadic args: "..." in "foo..."
+	index                            // index into slice/array: "[0]" in "foo[0]"
 )
 
 type objKind int
@@ -1832,7 +1828,7 @@
 
 	// modifiers are prefixes such as "*", "&" or "<-" that influence how
 	// a candidate type relates to the expected type.
-	modifiers []typeModifier
+	modifiers []typeMod
 
 	// convertibleTo is a type our candidate type must be convertible to.
 	convertibleTo types.Type
@@ -1882,7 +1878,7 @@
 
 	// modifiers are prefixes such as "*", "&" or "<-" that influence how
 	// a candidate type relates to the expected type.
-	modifiers []typeModifier
+	modifiers []typeMod
 
 	// assertableFrom is a type that must be assertable to our candidate type.
 	assertableFrom types.Type
@@ -2108,13 +2104,13 @@
 			}
 			return inf
 		case *ast.StarExpr:
-			inf.modifiers = append(inf.modifiers, typeModifier{mod: dereference})
+			inf.modifiers = append(inf.modifiers, typeMod{mod: dereference})
 		case *ast.UnaryExpr:
 			switch node.Op {
 			case token.AND:
-				inf.modifiers = append(inf.modifiers, typeModifier{mod: reference})
+				inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
 			case token.ARROW:
-				inf.modifiers = append(inf.modifiers, typeModifier{mod: chanRead})
+				inf.modifiers = append(inf.modifiers, typeMod{mod: chanRead})
 			}
 		case *ast.DeferStmt, *ast.GoStmt:
 			inf.objKind |= kindFunc
@@ -2209,9 +2205,9 @@
 		switch mod.mod {
 		case reference:
 			typ = types.NewPointer(typ)
-		case array:
+		case arrayType:
 			typ = types.NewArray(typ, mod.arrayLen)
-		case slice:
+		case sliceType:
 			typ = types.NewSlice(typ)
 		}
 	}
@@ -2325,7 +2321,7 @@
 			}
 			return typeNameInference{}
 		case *ast.StarExpr:
-			inf.modifiers = append(inf.modifiers, typeModifier{mod: reference})
+			inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
 		case *ast.CompositeLit:
 			// We want a type name if position is in the "Type" part of a
 			// composite literal (e.g. "Foo<>{}").
@@ -2338,7 +2334,7 @@
 					// the composite literal and not the type name, but if
 					// affects our type completion nonetheless.
 					if u, ok := c.path[i+1].(*ast.UnaryExpr); ok && u.Op == token.AND {
-						inf.modifiers = append(inf.modifiers, typeModifier{mod: reference})
+						inf.modifiers = append(inf.modifiers, typeMod{mod: reference})
 					}
 				}
 			}
@@ -2349,13 +2345,13 @@
 				inf.wantTypeName = true
 				if n.Len == nil {
 					// No "Len" expression means a slice type.
-					inf.modifiers = append(inf.modifiers, typeModifier{mod: slice})
+					inf.modifiers = append(inf.modifiers, typeMod{mod: sliceType})
 				} else {
 					// Try to get the array type using the constant value of "Len".
 					tv, ok := c.pkg.GetTypesInfo().Types[n.Len]
 					if ok && tv.Value != nil && tv.Value.Kind() == constant.Int {
 						if arrayLen, ok := constant.Int64Val(tv.Value); ok {
-							inf.modifiers = append(inf.modifiers, typeModifier{mod: array, arrayLen: arrayLen})
+							inf.modifiers = append(inf.modifiers, typeMod{mod: arrayType, arrayLen: arrayLen})
 						}
 					}
 				}
@@ -2399,84 +2395,82 @@
 	return types.NewVar(token.NoPos, c.pkg.GetTypes(), "", T)
 }
 
+// derivableTypes iterates types you can derive from t. For example,
+// from "foo" we might derive "&foo", and "foo()".
+func derivableTypes(t types.Type, addressable bool, f func(t types.Type, addressable bool, mod typeModKind) bool) bool {
+	switch t := t.Underlying().(type) {
+	case *types.Signature:
+		// If t is a func type with a single result, offer the result type.
+		if t.Results().Len() == 1 && f(t.Results().At(0).Type(), false, invoke) {
+			return true
+		}
+	case *types.Array:
+		if f(t.Elem(), true, index) {
+			return true
+		}
+		// Try converting array to slice.
+		if f(types.NewSlice(t.Elem()), false, takeSlice) {
+			return true
+		}
+	case *types.Pointer:
+		if f(t.Elem(), false, dereference) {
+			return true
+		}
+	case *types.Slice:
+		if f(t.Elem(), true, index) {
+			return true
+		}
+	case *types.Map:
+		if f(t.Elem(), false, index) {
+			return true
+		}
+	case *types.Chan:
+		if f(t.Elem(), false, chanRead) {
+			return true
+		}
+	}
+
+	// Check if c is addressable and a pointer to c matches our type inference.
+	if addressable && f(types.NewPointer(t), false, reference) {
+		return true
+	}
+
+	return false
+}
+
 // anyCandType reports whether f returns true for any candidate type
-// derivable from c. For example, from "foo" we might derive "&foo",
-// and "foo()".
+// derivable from c. It searches up to three levels of type
+// modification. For example, given "foo" we could discover "***foo"
+// or "*foo()".
 func (c *candidate) anyCandType(f func(t types.Type, addressable bool) bool) bool {
 	if c.obj == nil || c.obj.Type() == nil {
 		return false
 	}
 
-	objType := c.obj.Type()
+	const maxDepth = 3
 
-	if f(objType, c.addressable) {
-		return true
-	}
-
-	// If c is a func type with a single result, offer the result type.
-	if sig, ok := objType.Underlying().(*types.Signature); ok {
-		if sig.Results().Len() == 1 && f(sig.Results().At(0).Type(), false) {
-			// Mark the candidate so we know to append "()" when formatting.
-			c.expandFuncCall = true
-			return true
-		}
-	}
-
-	var (
-		seenPtrTypes map[types.Type]bool
-		ptrType      = objType
-		ptrDepth     int
-	)
-
-	// Check if dereferencing c would match our type inference. We loop
-	// since c could have arbitrary levels of pointerness.
-	for {
-		ptr, ok := ptrType.Underlying().(*types.Pointer)
-		if !ok {
-			break
-		}
-
-		ptrDepth++
-
-		// Avoid pointer type cycles.
-		if seenPtrTypes[ptrType] {
-			break
-		}
-
-		if _, named := ptrType.(*types.Named); named {
-			// Lazily allocate "seen" since it isn't used normally.
-			if seenPtrTypes == nil {
-				seenPtrTypes = make(map[types.Type]bool)
+	var searchTypes func(t types.Type, addressable bool, mods []typeModKind) bool
+	searchTypes = func(t types.Type, addressable bool, mods []typeModKind) bool {
+		if f(t, addressable) {
+			if len(mods) > 0 {
+				newMods := make([]typeModKind, len(mods)+len(c.mods))
+				copy(newMods, mods)
+				copy(newMods[len(mods):], c.mods)
+				c.mods = newMods
 			}
-
-			// Track named pointer types we have seen to detect cycles.
-			seenPtrTypes[ptrType] = true
-		}
-
-		if f(ptr.Elem(), false) {
-			// Mark the candidate so we know to prepend "*" when formatting.
-			c.dereference = ptrDepth
 			return true
 		}
 
-		ptrType = ptr.Elem()
-	}
-
-	// Check if c is addressable and a pointer to c matches our type inference.
-	if c.addressable && f(types.NewPointer(objType), false) {
-		// Mark the candidate so we know to prepend "&" when formatting.
-		c.takeAddress = true
-		return true
-	}
-
-	if array, ok := objType.Underlying().(*types.Array); ok {
-		if f(types.NewSlice(array.Elem()), false) {
-			c.takeSlice = true
-			return true
+		if len(mods) == maxDepth {
+			return false
 		}
+
+		return derivableTypes(t, addressable, func(t types.Type, addressable bool, mod typeModKind) bool {
+			return searchTypes(t, addressable, append(mods, mod))
+		})
 	}
 
-	return false
+	return searchTypes(c.obj.Type(), c.addressable, make([]typeModKind, 0, maxDepth))
 }
 
 // matchingCandidate reports whether cand matches our type inferences.
@@ -2510,7 +2504,7 @@
 	if sig, ok := candType.Underlying().(*types.Signature); ok {
 		if c.inference.assigneesMatch(cand, sig) {
 			// Invoke the candidate if its results are multi-assignable.
-			cand.expandFuncCall = true
+			cand.mods = append(cand.mods, invoke)
 			return true
 		}
 	}
@@ -2518,7 +2512,9 @@
 	// Default to invoking *types.Func candidates. This is so function
 	// completions in an empty statement (or other cases with no expected type)
 	// are invoked by default.
-	cand.expandFuncCall = isFunc(cand.obj)
+	if isFunc(cand.obj) {
+		cand.mods = append(cand.mods, invoke)
+	}
 
 	return false
 }
@@ -2572,7 +2568,7 @@
 			}
 
 			if expType == variadicType {
-				cand.variadic = true
+				cand.mods = append(cand.mods, takeDotDotDot)
 			}
 
 			// Lower candidate score for untyped conversions. This avoids
@@ -2611,7 +2607,7 @@
 			// matches.
 			if ci.kindMatches(candType) {
 				if ci.objKind == kindFunc {
-					cand.expandFuncCall = true
+					cand.mods = append(cand.mods, invoke)
 				}
 				return true
 			}
@@ -2814,11 +2810,11 @@
 		if c.inference.typeName.compLitType {
 			// If we are completing a composite literal type as in
 			// "foo<>{}", to make a pointer we must prepend "&".
-			cand.takeAddress = true
+			cand.mods = append(cand.mods, reference)
 		} else {
 			// If we are completing a normal type name such as "foo<>", to
 			// make a pointer we must prepend "*".
-			cand.makePointer = true
+			cand.mods = append(cand.mods, dereference)
 		}
 		return true
 	}
diff --git a/internal/lsp/source/completion/deep_completion.go b/internal/lsp/source/completion/deep_completion.go
index 71a6726..45a02ff 100644
--- a/internal/lsp/source/completion/deep_completion.go
+++ b/internal/lsp/source/completion/deep_completion.go
@@ -249,7 +249,7 @@
 	}
 
 	// Lower score of method calls so we prefer fields and vars over calls.
-	if cand.expandFuncCall {
+	if cand.hasMod(invoke) {
 		if sig, ok := obj.Type().Underlying().(*types.Signature); ok && sig.Recv() != nil {
 			cand.score *= 0.9
 		}
@@ -260,6 +260,12 @@
 		cand.score *= 1.1
 	}
 
+	// Slight penalty for index modifier (e.g. changing "foo" to
+	// "foo[]") to curb false positives.
+	if cand.hasMod(index) {
+		cand.score *= 0.9
+	}
+
 	// Favor shallow matches by lowering score according to depth.
 	cand.score -= cand.score * c.deepState.scorePenalty(cand)
 
diff --git a/internal/lsp/source/completion/format.go b/internal/lsp/source/completion/format.go
index 985b79f..166ba55 100644
--- a/internal/lsp/source/completion/format.go
+++ b/internal/lsp/source/completion/format.go
@@ -46,20 +46,14 @@
 		detail        = types.TypeString(obj.Type(), c.qf)
 		insert        = label
 		kind          = protocol.TextCompletion
-		snip          *snippet.Builder
+		snip          snippet.Builder
 		protocolEdits []protocol.TextEdit
 	)
 	if obj.Type() == nil {
 		detail = ""
 	}
 
-	// expandFuncCall mutates the completion label, detail, and snippet
-	// to that of an invocation of sig.
-	expandFuncCall := func(sig *types.Signature) {
-		s := source.NewSignature(ctx, c.snapshot, c.pkg, sig, nil, c.qf)
-		snip = c.functionCallSnippet(label, s.Params())
-		detail = "func" + s.Format()
-	}
+	snip.WriteText(insert)
 
 	switch obj := obj.(type) {
 	case *types.TypeName:
@@ -74,17 +68,13 @@
 		}
 		if obj.IsField() {
 			kind = protocol.FieldCompletion
-			snip = c.structFieldSnippet(cand, label, detail)
+			c.structFieldSnippet(cand, detail, &snip)
 		} else {
 			kind = protocol.VariableCompletion
 		}
 		if obj.Type() == nil {
 			break
 		}
-
-		if sig, ok := obj.Type().Underlying().(*types.Signature); ok && cand.expandFuncCall {
-			expandFuncCall(sig)
-		}
 	case *types.Func:
 		sig, ok := obj.Type().Underlying().(*types.Signature)
 		if !ok {
@@ -94,10 +84,6 @@
 		if sig != nil && sig.Recv() != nil {
 			kind = protocol.MethodCompletion
 		}
-
-		if cand.expandFuncCall {
-			expandFuncCall(sig)
-		}
 	case *types.PkgName:
 		kind = protocol.ModuleCompletion
 		detail = fmt.Sprintf("%q", obj.Imported().Path())
@@ -106,10 +92,58 @@
 		detail = "label"
 	}
 
+	var prefix string
+	for _, mod := range cand.mods {
+		switch mod {
+		case reference:
+			prefix = "&" + prefix
+		case dereference:
+			prefix = "*" + prefix
+		case chanRead:
+			prefix = "<-" + prefix
+		}
+	}
+
+	var (
+		suffix   string
+		funcType = obj.Type()
+	)
+Suffixes:
+	for _, mod := range cand.mods {
+		switch mod {
+		case invoke:
+			if sig, ok := funcType.Underlying().(*types.Signature); ok {
+				s := source.NewSignature(ctx, c.snapshot, c.pkg, sig, nil, c.qf)
+				c.functionCallSnippet("", s.Params(), &snip)
+				if sig.Results().Len() == 1 {
+					funcType = sig.Results().At(0).Type()
+				}
+				detail = "func" + s.Format()
+			}
+
+			if !c.opts.snippets {
+				// Without snippets the candidate will not include "()". Don't
+				// add further suffixes since they will be invalid. For
+				// example, with snippets "foo()..." would become "foo..."
+				// without snippets if we added the dotDotDot.
+				break Suffixes
+			}
+		case takeSlice:
+			suffix += "[:]"
+		case takeDotDotDot:
+			suffix += "..."
+		case index:
+			snip.WriteText("[")
+			snip.WritePlaceholder(nil)
+			snip.WriteText("]")
+		}
+	}
+
 	// If this candidate needs an additional import statement,
 	// add the additional text edits needed.
 	if cand.imp != nil {
 		addlEdits, err := c.importEdits(cand.imp)
+
 		if err != nil {
 			return CompletionItem{}, err
 		}
@@ -123,20 +157,6 @@
 		}
 	}
 
-	var prefix, suffix string
-
-	// Prepend "&" or "*" operator as appropriate.
-	if cand.takeAddress {
-		prefix = "&"
-	} else if cand.makePointer {
-		prefix = "*"
-	} else if cand.dereference > 0 {
-		prefix = strings.Repeat("*", cand.dereference)
-	}
-
-	// Include "*" and "&" prefixes in the label.
-	label = prefix + label
-
 	if cand.convertTo != nil {
 		typeName := types.TypeString(cand.convertTo, c.qf)
 
@@ -151,15 +171,6 @@
 		suffix = ")"
 	}
 
-	if cand.takeSlice {
-		suffix += "[:]"
-	}
-
-	// Add variadic "..." only if snippets if enabled or cand is not a function
-	if cand.variadic && (c.opts.snippets || !cand.expandFuncCall) {
-		suffix += "..."
-	}
-
 	if prefix != "" {
 		// If we are in a selector, add an edit to place prefix before selector.
 		if sel := enclosingSelector(c.path, c.pos); sel != nil {
@@ -171,17 +182,13 @@
 		} else {
 			// If there is no selector, just stick the prefix at the start.
 			insert = prefix + insert
-			if snip != nil {
-				snip.PrependText(prefix)
-			}
+			snip.PrependText(prefix)
 		}
 	}
 
 	if suffix != "" {
 		insert += suffix
-		if snip != nil {
-			snip.WriteText(suffix)
-		}
+		snip.WriteText(suffix)
 	}
 
 	detail = strings.TrimPrefix(detail, "untyped ")
@@ -197,7 +204,7 @@
 		Kind:                kind,
 		Score:               cand.score,
 		Depth:               len(cand.path),
-		snippet:             snip,
+		snippet:             &snip,
 		obj:                 obj,
 	}
 	// If the user doesn't want documentation for completion items.
@@ -282,7 +289,8 @@
 			return CompletionItem{}, err
 		}
 		item.Detail = "func" + sig.Format()
-		item.snippet = c.functionCallSnippet(obj.Name(), sig.Params())
+		item.snippet = &snippet.Builder{}
+		c.functionCallSnippet(obj.Name(), sig.Params(), item.snippet)
 	case *types.TypeName:
 		if types.IsInterface(obj.Type()) {
 			item.Kind = protocol.InterfaceCompletion
diff --git a/internal/lsp/source/completion/literal.go b/internal/lsp/source/completion/literal.go
index 7594ed4..0fc7a81 100644
--- a/internal/lsp/source/completion/literal.go
+++ b/internal/lsp/source/completion/literal.go
@@ -103,7 +103,7 @@
 
 	// If prefix matches the type name, client may want a composite literal.
 	if score := c.matcher.Score(matchName); score > 0 {
-		if cand.takeAddress {
+		if cand.hasMod(reference) {
 			if sel != nil {
 				// If we are in a selector we must place the "&" before the selector.
 				// For example, "foo.B<>" must complete to "&foo.Bar{}", not
@@ -144,7 +144,7 @@
 	// If prefix matches "make", client may want a "make()"
 	// invocation. We also include the type name to allow for more
 	// flexible fuzzy matching.
-	if score := c.matcher.Score("make." + matchName); !cand.takeAddress && score > 0 {
+	if score := c.matcher.Score("make." + matchName); !cand.hasMod(reference) && score > 0 {
 		switch literalType.Underlying().(type) {
 		case *types.Slice:
 			// The second argument to "make()" for slices is required, so default to "0".
@@ -157,7 +157,7 @@
 	}
 
 	// If prefix matches "func", client may want a function literal.
-	if score := c.matcher.Score("func"); !cand.takeAddress && score > 0 && !source.IsInterface(expType) {
+	if score := c.matcher.Score("func"); !cand.hasMod(reference) && score > 0 && !source.IsInterface(expType) {
 		switch t := literalType.Underlying().(type) {
 		case *types.Signature:
 			c.functionLiteral(ctx, t, float64(score))
@@ -369,6 +369,11 @@
 // basicLiteral adds a literal completion item for the given basic
 // type name typeName.
 func (c *completer) basicLiteral(T types.Type, typeName string, matchScore float64, edits []protocol.TextEdit) {
+	// Never give type conversions like "untyped int()".
+	if isUntyped(T) {
+		return
+	}
+
 	snip := &snippet.Builder{}
 	snip.WriteText(typeName + "(")
 	snip.WriteFinalTabstop()
diff --git a/internal/lsp/source/completion/snippet.go b/internal/lsp/source/completion/snippet.go
index 4a4288e..3649314 100644
--- a/internal/lsp/source/completion/snippet.go
+++ b/internal/lsp/source/completion/snippet.go
@@ -11,29 +11,27 @@
 )
 
 // structFieldSnippets calculates the snippet for struct literal field names.
-func (c *completer) structFieldSnippet(cand candidate, label, detail string) *snippet.Builder {
+func (c *completer) structFieldSnippet(cand candidate, detail string, snip *snippet.Builder) {
 	if !c.wantStructFieldCompletions() {
-		return nil
+		return
 	}
 
 	// If we are in a deep completion then we can't be completing a field
 	// name (e.g. "Foo{f<>}" completing to "Foo{f.Bar}" should not generate
 	// a snippet).
 	if len(cand.path) > 0 {
-		return nil
+		return
 	}
 
 	clInfo := c.enclosingCompositeLiteral
 
 	// If we are already in a key-value expression, we don't want a snippet.
 	if clInfo.kv != nil {
-		return nil
+		return
 	}
 
-	snip := &snippet.Builder{}
-
 	// A plain snippet turns "Foo{Ba<>" into "Foo{Bar: <>".
-	snip.WriteText(label + ": ")
+	snip.WriteText(": ")
 	snip.WritePlaceholder(func(b *snippet.Builder) {
 		// A placeholder snippet turns "Foo{Ba<>" into "Foo{Bar: <*int*>".
 		if c.opts.placeholders {
@@ -48,12 +46,10 @@
 	if fset.Position(c.pos).Line != fset.Position(clInfo.cl.Lbrace).Line {
 		snip.WriteText(",")
 	}
-
-	return snip
 }
 
 // functionCallSnippets calculates the snippet for function calls.
-func (c *completer) functionCallSnippet(name string, params []string) *snippet.Builder {
+func (c *completer) functionCallSnippet(name string, params []string, snip *snippet.Builder) {
 	// If there is no suffix then we need to reuse existing call parens
 	// "()" if present. If there is an identifier suffix then we always
 	// need to include "()" since we don't overwrite the suffix.
@@ -66,17 +62,17 @@
 			// inserted when fixing the AST. In this case, we do still need
 			// to insert the calling "()" parens.
 			if n.Fun == c.path[0] && n.Lparen != n.Rparen {
-				return nil
+				return
 			}
 		case *ast.SelectorExpr:
 			if len(c.path) > 2 {
 				if call, ok := c.path[2].(*ast.CallExpr); ok && call.Fun == c.path[1] && call.Lparen != call.Rparen {
-					return nil
+					return
 				}
 			}
 		}
 	}
-	snip := &snippet.Builder{}
+
 	snip.WriteText(name + "(")
 
 	if c.opts.placeholders {
@@ -97,6 +93,4 @@
 	}
 
 	snip.WriteText(")")
-
-	return snip
 }
diff --git a/internal/lsp/source/extract.go b/internal/lsp/source/extract.go
index e366095..4f0de59 100644
--- a/internal/lsp/source/extract.go
+++ b/internal/lsp/source/extract.go
@@ -33,20 +33,23 @@
 	// TODO: stricter rules for selectorExpr.
 	case *ast.BasicLit, *ast.CompositeLit, *ast.IndexExpr, *ast.SliceExpr,
 		*ast.UnaryExpr, *ast.BinaryExpr, *ast.SelectorExpr:
-		lhsNames = append(lhsNames, generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0))
+		lhsName, _ := generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0)
+		lhsNames = append(lhsNames, lhsName)
 	case *ast.CallExpr:
 		tup, ok := info.TypeOf(expr).(*types.Tuple)
 		if !ok {
 			// If the call expression only has one return value, we can treat it the
 			// same as our standard extract variable case.
-			lhsNames = append(lhsNames,
-				generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0))
+			lhsName, _ := generateAvailableIdentifier(expr.Pos(), file, path, info, "x", 0)
+			lhsNames = append(lhsNames, lhsName)
 			break
 		}
+		idx := 0
 		for i := 0; i < tup.Len(); i++ {
 			// Generate a unique variable for each return value.
-			lhsNames = append(lhsNames,
-				generateAvailableIdentifier(expr.Pos(), file, path, info, "x", i))
+			var lhsName string
+			lhsName, idx = generateAvailableIdentifier(expr.Pos(), file, path, info, "x", idx)
+			lhsNames = append(lhsNames, lhsName)
 		}
 	default:
 		return nil, fmt.Errorf("cannot extract %T", expr)
@@ -133,15 +136,24 @@
 }
 
 // generateAvailableIdentifier adjusts the new function name until there are no collisons in scope.
-// Possible collisions include other function and variable names.
-func generateAvailableIdentifier(pos token.Pos, file *ast.File, path []ast.Node, info *types.Info, prefix string, idx int) string {
+// Possible collisions include other function and variable names. Returns the next index to check for prefix.
+func generateAvailableIdentifier(pos token.Pos, file *ast.File, path []ast.Node, info *types.Info, prefix string, idx int) (string, int) {
 	scopes := CollectScopes(info, path, pos)
-	name := prefix + fmt.Sprintf("%d", idx)
-	for file.Scope.Lookup(name) != nil || !isValidName(name, scopes) {
+	return generateIdentifier(idx, prefix, func(name string) bool {
+		return file.Scope.Lookup(name) != nil || !isValidName(name, scopes)
+	})
+}
+
+func generateIdentifier(idx int, prefix string, hasCollision func(string) bool) (string, int) {
+	name := prefix
+	if idx != 0 {
+		name += fmt.Sprintf("%d", idx)
+	}
+	for hasCollision(name) {
 		idx++
 		name = fmt.Sprintf("%v%d", prefix, idx)
 	}
-	return name
+	return name, idx + 1
 }
 
 // isValidName checks for variable collision in scope.
@@ -171,28 +183,42 @@
 	zeroVal ast.Expr
 }
 
+// extractMethod refactors the selected block of code into a new method.
+func extractMethod(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+	return extractFunctionMethod(fset, rng, src, file, pkg, info, true)
+}
+
 // extractFunction refactors the selected block of code into a new function.
+func extractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+	return extractFunctionMethod(fset, rng, src, file, pkg, info, false)
+}
+
+// extractFunctionMethod refactors the selected block of code into a new function/method.
 // It also replaces the selected block of code with a call to the extracted
 // function. First, we manually adjust the selection range. We remove trailing
 // and leading whitespace characters to ensure the range is precisely bounded
 // by AST nodes. Next, we determine the variables that will be the parameters
-// and return values of the extracted function. Lastly, we construct the call
-// of the function and insert this call as well as the extracted function into
+// and return values of the extracted function/method. Lastly, we construct the call
+// of the function/method and insert this call as well as the extracted function/method into
 // their proper locations.
-func extractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
-	p, ok, err := CanExtractFunction(fset, rng, src, file)
-	if !ok {
-		return nil, fmt.Errorf("extractFunction: cannot extract %s: %v",
+func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, pkg *types.Package, info *types.Info, isMethod bool) (*analysis.SuggestedFix, error) {
+	errorPrefix := "extractFunction"
+	if isMethod {
+		errorPrefix = "extractMethod"
+	}
+	p, ok, methodOk, err := CanExtractFunction(fset, rng, src, file)
+	if (!ok && !isMethod) || (!methodOk && isMethod) {
+		return nil, fmt.Errorf("%s: cannot extract %s: %v", errorPrefix,
 			fset.Position(rng.Start), err)
 	}
 	tok, path, rng, outer, start := p.tok, p.path, p.rng, p.outer, p.start
 	fileScope := info.Scopes[file]
 	if fileScope == nil {
-		return nil, fmt.Errorf("extractFunction: file scope is empty")
+		return nil, fmt.Errorf("%s: file scope is empty", errorPrefix)
 	}
 	pkgScope := fileScope.Parent()
 	if pkgScope == nil {
-		return nil, fmt.Errorf("extractFunction: package scope is empty")
+		return nil, fmt.Errorf("%s: package scope is empty", errorPrefix)
 	}
 
 	// A return statement is non-nested if its parent node is equal to the parent node
@@ -230,6 +256,25 @@
 	}
 
 	var (
+		receiverUsed bool
+		receiver     *ast.Field
+		receiverName string
+		receiverObj  types.Object
+	)
+	if isMethod {
+		if outer == nil || outer.Recv == nil || len(outer.Recv.List) == 0 {
+		return nil, fmt.Errorf("%s: cannot extract method: the enclosing function has no receiver", errorPrefix)
+		}
+		receiver = outer.Recv.List[0]
+		if len(receiver.Names) == 0 || receiver.Names[0] == nil {
+		return nil, fmt.Errorf("%s: cannot extract method: the receiver has no name", errorPrefix)
+		}
+		recvName := receiver.Names[0]
+		receiverName = recvName.Name
+		receiverObj = info.ObjectOf(recvName)
+	}
+
+	var (
 		params, returns         []ast.Expr     // used when calling the extracted function
 		paramTypes, returnTypes []*ast.Field   // used in the signature of the extracted function
 		uninitialized           []types.Object // vars we will need to initialize before the call
@@ -302,6 +347,11 @@
 		// extracted function. (1) it must be free (isFree), and (2) its first
 		// use within the selection cannot be its own definition (isDefined).
 		if v.free && !v.defined {
+			// Skip the selector for a method.
+			if isMethod && v.obj == receiverObj {
+				receiverUsed = true
+				continue
+			}
 			params = append(params, identifier)
 			paramTypes = append(paramTypes, &ast.Field{
 				Names: []*ast.Ident{identifier},
@@ -465,9 +515,17 @@
 	if canDefine {
 		sym = token.DEFINE
 	}
-	funName := generateAvailableIdentifier(rng.Start, file, path, info, "fn", 0)
+	var name, funName string
+	if isMethod {
+		name = "newMethod"
+		// TODO(suzmue): generate a name that does not conflict for "newMethod".
+		funName = name
+	} else {
+		name = "newFunction"
+		funName, _ = generateAvailableIdentifier(rng.Start, file, path, info, name, 0)
+	}
 	extractedFunCall := generateFuncCall(hasNonNestedReturn, hasReturnValues, params,
-		append(returns, getNames(retVars)...), funName, sym)
+		append(returns, getNames(retVars)...), funName, sym, receiverName)
 
 	// Build the extracted function.
 	newFunc := &ast.FuncDecl{
@@ -478,6 +536,18 @@
 		},
 		Body: extractedBlock,
 	}
+	if isMethod {
+		var names []*ast.Ident
+		if receiverUsed {
+			names = append(names, ast.NewIdent(receiverName))
+		}
+		newFunc.Recv = &ast.FieldList{
+			List: []*ast.Field{{
+				Names: names,
+				Type:  receiver.Type,
+			}},
+		}
+	}
 
 	// Create variable declarations for any identifiers that need to be initialized prior to
 	// calling the extracted function. We do not manually initialize variables if every return
@@ -838,24 +908,24 @@
 
 // CanExtractFunction reports whether the code in the given range can be
 // extracted to a function.
-func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, error) {
+func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) {
 	if rng.Start == rng.End {
-		return nil, false, fmt.Errorf("start and end are equal")
+		return nil, false, false, fmt.Errorf("start and end are equal")
 	}
 	tok := fset.File(file.Pos())
 	if tok == nil {
-		return nil, false, fmt.Errorf("no file for pos %v", fset.Position(file.Pos()))
+		return nil, false, false, fmt.Errorf("no file for pos %v", fset.Position(file.Pos()))
 	}
 	rng = adjustRangeForWhitespace(rng, tok, src)
 	path, _ := astutil.PathEnclosingInterval(file, rng.Start, rng.End)
 	if len(path) == 0 {
-		return nil, false, fmt.Errorf("no path enclosing interval")
+		return nil, false, false, fmt.Errorf("no path enclosing interval")
 	}
 	// Node that encloses the selection must be a statement.
 	// TODO: Support function extraction for an expression.
 	_, ok := path[0].(ast.Stmt)
 	if !ok {
-		return nil, false, fmt.Errorf("node is not a statement")
+		return nil, false, false, fmt.Errorf("node is not a statement")
 	}
 
 	// Find the function declaration that encloses the selection.
@@ -867,7 +937,7 @@
 		}
 	}
 	if outer == nil {
-		return nil, false, fmt.Errorf("no enclosing function")
+		return nil, false, false, fmt.Errorf("no enclosing function")
 	}
 
 	// Find the nodes at the start and end of the selection.
@@ -887,7 +957,7 @@
 		return n.Pos() <= rng.End
 	})
 	if start == nil || end == nil {
-		return nil, false, fmt.Errorf("range does not map to AST nodes")
+		return nil, false, false, fmt.Errorf("range does not map to AST nodes")
 	}
 	return &fnExtractParams{
 		tok:   tok,
@@ -895,7 +965,7 @@
 		rng:   rng,
 		outer: outer,
 		start: start,
-	}, true, nil
+	}, true, outer.Recv != nil, nil
 }
 
 // objUsed checks if the object is used within the range. It returns the first
@@ -996,7 +1066,8 @@
 	var cond *ast.Ident
 	if !hasNonNestedReturns {
 		// Generate information for the added bool value.
-		cond = &ast.Ident{Name: generateAvailableIdentifier(pos, file, path, info, "cond", 0)}
+		name, _ := generateAvailableIdentifier(pos, file, path, info, "shouldReturn", 0)
+		cond = &ast.Ident{Name: name}
 		retVars = append(retVars, &returnVariable{
 			name:    cond,
 			decl:    &ast.Field{Type: ast.NewIdent("bool")},
@@ -1005,7 +1076,8 @@
 	}
 	// Generate information for the values in the return signature of the enclosing function.
 	if enclosing.Results != nil {
-		for i, field := range enclosing.Results.List {
+		idx := 0
+		for _, field := range enclosing.Results.List {
 			typ := info.TypeOf(field.Type)
 			if typ == nil {
 				return nil, nil, fmt.Errorf(
@@ -1015,9 +1087,11 @@
 			if expr == nil {
 				return nil, nil, fmt.Errorf("nil AST expression")
 			}
+			var name string
+			name, idx = generateAvailableIdentifier(pos, file,
+				path, info, "returnValue", idx)
 			retVars = append(retVars, &returnVariable{
-				name: ast.NewIdent(generateAvailableIdentifier(pos, file,
-					path, info, "ret", i)),
+				name: ast.NewIdent(name),
 				decl: &ast.Field{Type: expr},
 				zeroVal: analysisinternal.ZeroValue(
 					fset, file, pkg, typ),
@@ -1079,13 +1153,22 @@
 
 // generateFuncCall constructs a call expression for the extracted function, described by the
 // given parameters and return variables.
-func generateFuncCall(hasNonNestedReturn, hasReturnVals bool, params, returns []ast.Expr, name string, token token.Token) ast.Node {
+func generateFuncCall(hasNonNestedReturn, hasReturnVals bool, params, returns []ast.Expr, name string, token token.Token, selector string) ast.Node {
 	var replace ast.Node
-	if hasReturnVals {
-		callExpr := &ast.CallExpr{
-			Fun:  ast.NewIdent(name),
+	callExpr := &ast.CallExpr{
+		Fun:  ast.NewIdent(name),
+		Args: params,
+	}
+	if selector != "" {
+		callExpr = &ast.CallExpr{
+			Fun: &ast.SelectorExpr{
+				X:   ast.NewIdent(selector),
+				Sel: ast.NewIdent(name),
+			},
 			Args: params,
 		}
+	}
+	if hasReturnVals {
 		if hasNonNestedReturn {
 			// Create a return statement that returns the result of the function call.
 			replace = &ast.ReturnStmt{
@@ -1101,10 +1184,7 @@
 			}
 		}
 	} else {
-		replace = &ast.CallExpr{
-			Fun:  ast.NewIdent(name),
-			Args: params,
-		}
+		replace = callExpr
 	}
 	return replace
 }
diff --git a/internal/lsp/source/fix.go b/internal/lsp/source/fix.go
index 6a01239..3308aee 100644
--- a/internal/lsp/source/fix.go
+++ b/internal/lsp/source/fix.go
@@ -32,6 +32,7 @@
 	UndeclaredName  = "undeclared_name"
 	ExtractVariable = "extract_variable"
 	ExtractFunction = "extract_function"
+	ExtractMethod   = "extract_method"
 )
 
 // suggestedFixes maps a suggested fix command id to its handler.
@@ -40,6 +41,7 @@
 	UndeclaredName:  undeclaredname.SuggestedFix,
 	ExtractVariable: extractVariable,
 	ExtractFunction: extractFunction,
+	ExtractMethod:   extractMethod,
 }
 
 func SuggestedFixFromCommand(cmd protocol.Command, kind protocol.CodeActionKind) SuggestedFix {
diff --git a/internal/lsp/source/folding_range.go b/internal/lsp/source/folding_range.go
index 00e6ba0..be3f4b0 100644
--- a/internal/lsp/source/folding_range.go
+++ b/internal/lsp/source/folding_range.go
@@ -9,6 +9,7 @@
 	"go/ast"
 	"go/token"
 	"sort"
+	"strings"
 
 	"golang.org/x/tools/internal/lsp/protocol"
 )
@@ -151,17 +152,27 @@
 }
 
 // commentsFoldingRange returns the folding ranges for all comment blocks in file.
-// The folding range starts at the end of the first comment, and ends at the end of the
+// The folding range starts at the end of the first line of the comment block, and ends at the end of the
 // comment block and has kind protocol.Comment.
 func commentsFoldingRange(fset *token.FileSet, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) {
 	for _, commentGrp := range file.Comments {
-		// Don't fold single comments.
-		if len(commentGrp.List) <= 1 {
+		startGrp, endGrp := fset.Position(commentGrp.Pos()), fset.Position(commentGrp.End())
+		if startGrp.Line == endGrp.Line {
+			// Don't fold single line comments.
 			continue
 		}
+
+		firstComment := commentGrp.List[0]
+		startPos, endLinePos := firstComment.Pos(), firstComment.End()
+		startCmmnt, endCmmnt := fset.Position(startPos), fset.Position(endLinePos)
+		if startCmmnt.Line != endCmmnt.Line {
+			// If the first comment spans multiple lines, then we want to have the
+			// folding range start at the end of the first line.
+			endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0]))
+		}
 		comments = append(comments, &FoldingRangeInfo{
 			// Fold from the end of the first line comment to the end of the comment block.
-			MappedRange: NewMappedRange(fset, m, commentGrp.List[0].End(), commentGrp.End()),
+			MappedRange: NewMappedRange(fset, m, endLinePos, commentGrp.End()),
 			Kind:        protocol.Comment,
 		})
 	}
diff --git a/internal/lsp/source/format.go b/internal/lsp/source/format.go
index 087c210..0d61172 100644
--- a/internal/lsp/source/format.go
+++ b/internal/lsp/source/format.go
@@ -19,7 +19,9 @@
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/imports"
 	"golang.org/x/tools/internal/lsp/diff"
+	"golang.org/x/tools/internal/lsp/lsppos"
 	"golang.org/x/tools/internal/lsp/protocol"
+	"golang.org/x/tools/internal/span"
 )
 
 // Format formats a file with a given range.
@@ -177,7 +179,7 @@
 	if err != nil {
 		return nil, err
 	}
-	return ToProtocolEdits(pgf.Mapper, edits)
+	return ProtocolEditsFromSource([]byte(left), edits, pgf.Mapper.Converter)
 }
 
 // importPrefix returns the prefix of the given file content through the final
@@ -241,7 +243,7 @@
 				// comment by scanning the content of the file.
 				startOffset := tok.Offset(c.Pos())
 				if startLine != endLine && bytes.Contains(src[startOffset:], []byte("\r")) {
-					if commentEnd := scanForCommentEnd(tok, src[startOffset:]); commentEnd > 0 {
+					if commentEnd := scanForCommentEnd(src[startOffset:]); commentEnd > 0 {
 						end = startOffset + commentEnd
 					}
 				}
@@ -257,7 +259,7 @@
 
 // scanForCommentEnd returns the offset of the end of the multi-line comment
 // at the start of the given byte slice.
-func scanForCommentEnd(tok *token.File, src []byte) int {
+func scanForCommentEnd(src []byte) int {
 	var s scanner.Scanner
 	s.Init(bytes.NewReader(src))
 	s.Mode ^= scanner.SkipComments
@@ -280,6 +282,37 @@
 	return ToProtocolEdits(pgf.Mapper, edits)
 }
 
+// ProtocolEditsFromSource converts text edits to LSP edits using the original
+// source.
+func ProtocolEditsFromSource(src []byte, edits []diff.TextEdit, converter span.Converter) ([]protocol.TextEdit, error) {
+	m := lsppos.NewMapper(src)
+	var result []protocol.TextEdit
+	for _, edit := range edits {
+		spn, err := edit.Span.WithOffset(converter)
+		if err != nil {
+			return nil, fmt.Errorf("computing offsets: %v", err)
+		}
+		startLine, startChar := m.Position(spn.Start().Offset())
+		endLine, endChar := m.Position(spn.End().Offset())
+		if startLine < 0 || endLine < 0 {
+			return nil, fmt.Errorf("out of bound span: %v", spn)
+		}
+
+		pstart := protocol.Position{Line: uint32(startLine), Character: uint32(startChar)}
+		pend := protocol.Position{Line: uint32(endLine), Character: uint32(endChar)}
+		if pstart == pend && edit.NewText == "" {
+			// Degenerate case, which may result from a diff tool wanting to delete
+			// '\r' in line endings. Filter it out.
+			continue
+		}
+		result = append(result, protocol.TextEdit{
+			Range:   protocol.Range{Start: pstart, End: pend},
+			NewText: edit.NewText,
+		})
+	}
+	return result, nil
+}
+
 func ToProtocolEdits(m *protocol.ColumnMapper, edits []diff.TextEdit) ([]protocol.TextEdit, error) {
 	if edits == nil {
 		return nil, nil
diff --git a/internal/lsp/source/hover.go b/internal/lsp/source/hover.go
index ee38dd7..d3be098 100644
--- a/internal/lsp/source/hover.go
+++ b/internal/lsp/source/hover.go
@@ -19,6 +19,7 @@
 
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/lsp/protocol"
+	"golang.org/x/tools/internal/typeparams"
 	errors "golang.org/x/xerrors"
 )
 
@@ -116,6 +117,16 @@
 			}
 			h.Signature = prefix + h.Signature
 		}
+
+		// Check if the variable is an integer whose value we can present in a more
+		// user-friendly way, i.e. `var hex = 0xe34e` becomes `var hex = 58190`
+		if spec, ok := x.(*ast.ValueSpec); ok && len(spec.Values) > 0 {
+			if lit, ok := spec.Values[0].(*ast.BasicLit); ok && len(spec.Names) > 0 {
+				val := constant.MakeFromLiteral(types.ExprString(lit), lit.Kind, 0)
+				h.Signature = fmt.Sprintf("var %s = %s", spec.Names[0], val)
+			}
+		}
+
 	case types.Object:
 		// If the variable is implicitly declared in a type switch, we need to
 		// manually generate its object string.
@@ -125,10 +136,10 @@
 				break
 			}
 		}
-		h.Signature = objectString(x, i.qf)
+		h.Signature = objectString(x, i.qf, i.Inferred)
 	}
 	if obj := i.Declaration.obj; obj != nil {
-		h.SingleLine = objectString(obj, i.qf)
+		h.SingleLine = objectString(obj, i.qf, nil)
 	}
 	obj := i.Declaration.obj
 	if obj == nil {
@@ -237,7 +248,21 @@
 
 // objectString is a wrapper around the types.ObjectString function.
 // It handles adding more information to the object string.
-func objectString(obj types.Object, qf types.Qualifier) string {
+func objectString(obj types.Object, qf types.Qualifier, inferred *types.Signature) string {
+	// If the signature type was inferred, prefer the inferred signature with a
+	// comment showing the generic signature.
+	if sig, _ := obj.Type().(*types.Signature); sig != nil && len(typeparams.ForSignature(sig)) > 0 && inferred != nil {
+		obj2 := types.NewFunc(obj.Pos(), obj.Pkg(), obj.Name(), inferred)
+		str := types.ObjectString(obj2, qf)
+		// Try to avoid overly long lines.
+		if len(str) > 60 {
+			str += "\n"
+		} else {
+			str += " "
+		}
+		str += "// " + types.TypeString(sig, qf)
+		return str
+	}
 	str := types.ObjectString(obj, qf)
 	switch obj := obj.(type) {
 	case *types.Const:
@@ -439,6 +464,15 @@
 		if comment == nil {
 			comment = spec.Comment
 		}
+
+		// We need the AST nodes for variable declarations of basic literals with
+		// associated values so that we can augment their hover with more information.
+		if _, ok := obj.(*types.Var); ok && spec.Type == nil && len(spec.Values) > 0 {
+			if _, ok := spec.Values[0].(*ast.BasicLit); ok {
+				return &HoverInformation{source: spec, comment: comment}
+			}
+		}
+
 		return &HoverInformation{source: obj, comment: comment}
 	}
 
diff --git a/internal/lsp/source/identifier.go b/internal/lsp/source/identifier.go
index 9fb3daa..ee8684b 100644
--- a/internal/lsp/source/identifier.go
+++ b/internal/lsp/source/identifier.go
@@ -18,6 +18,7 @@
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/span"
+	"golang.org/x/tools/internal/typeparams"
 	errors "golang.org/x/xerrors"
 )
 
@@ -32,6 +33,8 @@
 		Object types.Object
 	}
 
+	Inferred *types.Signature
+
 	Declaration Declaration
 
 	ident *ast.Ident
@@ -82,6 +85,10 @@
 		return nil, fmt.Errorf("no packages for file %v", fh.URI())
 	}
 	sort.Slice(pkgs, func(i, j int) bool {
+		// Prefer packages with a more complete parse mode.
+		if pkgs[i].ParseMode() != pkgs[j].ParseMode() {
+			return pkgs[i].ParseMode() > pkgs[j].ParseMode()
+		}
 		return len(pkgs[i].CompiledGoFiles()) < len(pkgs[j].CompiledGoFiles())
 	})
 	var findErr error
@@ -292,6 +299,8 @@
 		return result, nil
 	}
 
+	result.Inferred = inferredSignature(pkg.GetTypesInfo(), path)
+
 	result.Type.Object = typeToObject(typ)
 	if result.Type.Object != nil {
 		// Identifiers with the type "error" are a special case with no position.
@@ -337,6 +346,52 @@
 	return nil, nil
 }
 
+// inferredSignature determines the resolved non-generic signature for an
+// identifier with a generic signature that is the operand of an IndexExpr or
+// CallExpr.
+//
+// If no such signature exists, it returns nil.
+func inferredSignature(info *types.Info, path []ast.Node) *types.Signature {
+	if len(path) < 2 {
+		return nil
+	}
+	// There are four ways in which a signature may be resolved:
+	//  1. It has no explicit type arguments, but the CallExpr can be fully
+	//     inferred from function arguments.
+	//  2. It has full type arguments, and the IndexExpr has a non-generic type.
+	//  3. For a partially instantiated IndexExpr representing a function-valued
+	//     expression (i.e. not part of a CallExpr), type arguments may be
+	//     inferred using constraint type inference.
+	//  4. For a partially instantiated IndexExpr that is part of a CallExpr,
+	//     type arguments may be inferred using both constraint type inference
+	//     and function argument inference.
+	//
+	// These branches are handled below.
+	switch n := path[1].(type) {
+	case *ast.CallExpr:
+		_, sig := typeparams.GetInferred(info, n)
+		return sig
+	case *ast.IndexExpr:
+		// If the IndexExpr is fully instantiated, we consider that 'inference' for
+		// gopls' purposes.
+		sig, _ := info.TypeOf(n).(*types.Signature)
+		if sig != nil && len(typeparams.ForSignature(sig)) == 0 {
+			return sig
+		}
+		_, sig = typeparams.GetInferred(info, n)
+		if sig != nil {
+			return sig
+		}
+		if len(path) >= 3 {
+			if call, _ := path[2].(*ast.CallExpr); call != nil {
+				_, sig := typeparams.GetInferred(info, call)
+				return sig
+			}
+		}
+	}
+	return nil
+}
+
 func searchForEnclosing(info *types.Info, path []ast.Node) types.Type {
 	for _, n := range path {
 		switch n := n.(type) {
diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go
index 3dc3f17..c78bab0 100644
--- a/internal/lsp/source/options.go
+++ b/internal/lsp/source/options.go
@@ -112,7 +112,7 @@
 				},
 				UIOptions: UIOptions{
 					DiagnosticOptions: DiagnosticOptions{
-						ExperimentalDiagnosticsDelay: 250 * time.Millisecond,
+						DiagnosticsDelay: 250 * time.Millisecond,
 						Annotations: map[Annotation]bool{
 							Bounds: true,
 							Escape: true,
@@ -264,6 +264,12 @@
 	// downloads rather than requiring user action. This option will eventually
 	// be removed.
 	AllowImplicitNetworkAccess bool `status:"experimental"`
+
+	// ExperimentalUseInvalidMetadata enables gopls to fall back on outdated
+	// package metadata to provide editor features if the go command fails to
+	// load packages for some reason (like an invalid go.mod file). This will
+	// eventually be the default behavior, and this setting will be removed.
+	ExperimentalUseInvalidMetadata bool `status:"experimental"`
 }
 
 type UIOptions struct {
@@ -371,13 +377,22 @@
 	// that should be reported by the gc_details command.
 	Annotations map[Annotation]bool `status:"experimental"`
 
-	// ExperimentalDiagnosticsDelay controls the amount of time that gopls waits
+	// DiagnosticsDelay controls the amount of time that gopls waits
 	// after the most recent file modification before computing deep diagnostics.
 	// Simple diagnostics (parsing and type-checking) are always run immediately
 	// on recently modified packages.
 	//
 	// This option must be set to a valid duration string, for example `"250ms"`.
-	ExperimentalDiagnosticsDelay time.Duration `status:"experimental"`
+	DiagnosticsDelay time.Duration `status:"advanced"`
+
+	// ExperimentalWatchedFileDelay controls the amount of time that gopls waits
+	// for additional workspace/didChangeWatchedFiles notifications to arrive,
+	// before processing all such notifications in a single batch. This is
+	// intended for use by LSP clients that don't support their own batching of
+	// file system notifications.
+	//
+	// This option must be set to a valid duration string, for example `"100ms"`.
+	ExperimentalWatchedFileDelay time.Duration `status:"experimental"`
 }
 
 type NavigationOptions struct {
@@ -606,7 +621,7 @@
 		for name, value := range opts {
 			if b, ok := value.(bool); name == "allExperiments" && ok && b {
 				enableExperiments = true
-				options.enableAllExperiments()
+				options.EnableAllExperiments()
 			}
 		}
 		seen := map[string]struct{}{}
@@ -718,13 +733,15 @@
 	}
 }
 
-// enableAllExperiments turns on all of the experimental "off-by-default"
+// EnableAllExperiments turns on all of the experimental "off-by-default"
 // features offered by gopls. Any experimental features specified in maps
 // should be enabled in enableAllExperimentMaps.
-func (o *Options) enableAllExperiments() {
+func (o *Options) EnableAllExperiments() {
 	o.SemanticTokens = true
 	o.ExperimentalPostfixCompletions = true
 	o.ExperimentalTemplateSupport = true
+	o.ExperimentalUseInvalidMetadata = true
+	o.ExperimentalWatchedFileDelay = 50 * time.Millisecond
 }
 
 func (o *Options) enableAllExperimentMaps() {
@@ -912,8 +929,15 @@
 	case "experimentalTemplateSupport":
 		result.setBool(&o.ExperimentalTemplateSupport)
 
-	case "experimentalDiagnosticsDelay":
-		result.setDuration(&o.ExperimentalDiagnosticsDelay)
+	case "experimentalDiagnosticsDelay", "diagnosticsDelay":
+		if name == "experimentalDiagnosticsDelay" {
+			result.State = OptionDeprecated
+			result.Replacement = "diagnosticsDelay"
+		}
+		result.setDuration(&o.DiagnosticsDelay)
+
+	case "experimentalWatchedFileDelay":
+		result.setDuration(&o.ExperimentalWatchedFileDelay)
 
 	case "experimentalPackageCacheKey":
 		result.setBool(&o.ExperimentalPackageCacheKey)
@@ -924,6 +948,9 @@
 	case "allowImplicitNetworkAccess":
 		result.setBool(&o.AllowImplicitNetworkAccess)
 
+	case "experimentalUseInvalidMetadata":
+		result.setBool(&o.ExperimentalUseInvalidMetadata)
+
 	case "allExperiments":
 		// This setting should be handled before all of the other options are
 		// processed, so do nothing here.
@@ -1267,10 +1294,11 @@
 }
 
 type CommandJSON struct {
-	Command string
-	Title   string
-	Doc     string
-	ArgDoc  string
+	Command   string
+	Title     string
+	Doc       string
+	ArgDoc    string
+	ResultDoc string
 }
 
 type LensJSON struct {
diff --git a/internal/lsp/source/rename.go b/internal/lsp/source/rename.go
index da7faf8..70dfcfb 100644
--- a/internal/lsp/source/rename.go
+++ b/internal/lsp/source/rename.go
@@ -289,6 +289,38 @@
 				return decl.Doc
 			}
 		case *ast.Ident:
+		case *ast.AssignStmt:
+			// *ast.AssignStmt doesn't have an associated comment group.
+			// So, we try to find a comment just before the identifier.
+
+			// Try to find a comment group only for short variable declarations (:=).
+			if decl.Tok != token.DEFINE {
+				return nil
+			}
+
+			var file *ast.File
+			for _, f := range pkg.GetSyntax() {
+				if f.Pos() <= id.Pos() && id.Pos() <= f.End() {
+					file = f
+					break
+				}
+			}
+			if file == nil {
+				return nil
+			}
+
+			identLine := r.fset.Position(id.Pos()).Line
+			for _, comment := range file.Comments {
+				if comment.Pos() > id.Pos() {
+					// Comment is after the identifier.
+					continue
+				}
+
+				lastCommentLine := r.fset.Position(comment.End()).Line
+				if lastCommentLine+1 == identLine {
+					return comment
+				}
+			}
 		default:
 			return nil
 		}
diff --git a/internal/lsp/source/signature_help.go b/internal/lsp/source/signature_help.go
index 620a8cf..9c52f99 100644
--- a/internal/lsp/source/signature_help.go
+++ b/internal/lsp/source/signature_help.go
@@ -51,7 +51,12 @@
 			// which may be the parameter to the *ast.CallExpr.
 			// Don't show signature help in this case.
 			return nil, 0, errors.Errorf("no signature help within a function declaration")
+		case *ast.BasicLit:
+			if node.Kind == token.STRING {
+				return nil, 0, errors.Errorf("no signature help within a string literal")
+			}
 		}
+
 	}
 	if callExpr == nil || callExpr.Fun == nil {
 		return nil, 0, errors.Errorf("cannot find an enclosing function")
diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go
index c09b2fe..f1ab3ff 100644
--- a/internal/lsp/source/source_test.go
+++ b/internal/lsp/source/source_test.go
@@ -935,6 +935,7 @@
 func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
 }
 func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {}
+func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span)   {}
 func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens)   {}
 func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string)     {}
 
diff --git a/internal/lsp/source/types_format.go b/internal/lsp/source/types_format.go
index fdc76f6..c3f17b0 100644
--- a/internal/lsp/source/types_format.go
+++ b/internal/lsp/source/types_format.go
@@ -252,11 +252,7 @@
 	if field == nil {
 		return nil, fmt.Errorf("no declaration for object %s", obj.Name())
 	}
-	typ, ok := field.Type.(ast.Expr)
-	if !ok {
-		return nil, fmt.Errorf("unexpected type for node (%T)", field.Type)
-	}
-	return typ, nil
+	return field.Type, nil
 }
 
 // qualifyExpr applies the "pkgName." prefix to any *ast.Ident in the expr.
diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go
index a917a54..a30cc75 100644
--- a/internal/lsp/source/util.go
+++ b/internal/lsp/source/util.go
@@ -274,19 +274,35 @@
 	return 1
 }
 
-// FindPackageFromPos finds the parsed file for a position in a given search
-// package.
+// FindPackageFromPos finds the first package containing pos in its
+// type-checked AST.
 func FindPackageFromPos(ctx context.Context, snapshot Snapshot, pos token.Pos) (Package, error) {
 	tok := snapshot.FileSet().File(pos)
 	if tok == nil {
 		return nil, errors.Errorf("no file for pos %v", pos)
 	}
 	uri := span.URIFromPath(tok.Name())
-	pkgs, err := snapshot.PackagesForFile(ctx, uri, TypecheckWorkspace)
+	// Search all packages: some callers may be working with packages not
+	// type-checked in workspace mode.
+	pkgs, err := snapshot.PackagesForFile(ctx, uri, TypecheckAll)
 	if err != nil {
 		return nil, err
 	}
-	return pkgs[0], nil
+	// Only return the package if it actually type-checked the given position.
+	for _, pkg := range pkgs {
+		parsed, err := pkg.File(uri)
+		if err != nil {
+			return nil, err
+		}
+		if parsed == nil {
+			continue
+		}
+		if parsed.Tok.Base() != tok.Base() {
+			continue
+		}
+		return pkg, nil
+	}
+	return nil, errors.Errorf("no package for given file position")
 }
 
 // findFileInDeps finds uri in pkg or its dependencies.
diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go
index a139c50..74b77ca 100644
--- a/internal/lsp/source/view.go
+++ b/internal/lsp/source/view.go
@@ -20,6 +20,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/imports"
+	"golang.org/x/tools/internal/lsp/progress"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/span"
 	errors "golang.org/x/xerrors"
@@ -160,6 +161,10 @@
 
 	// GetCriticalError returns any critical errors in the workspace.
 	GetCriticalError(ctx context.Context) *CriticalError
+
+	// BuildGoplsMod generates a go.mod file for all modules in the workspace.
+	// It bypasses any existing gopls.mod.
+	BuildGoplsMod(ctx context.Context) (*modfile.File, error)
 }
 
 // PackageFilter sets how a package is filtered out from a set of packages
@@ -345,6 +350,9 @@
 	// known by the view. For views within a module, this is the module root,
 	// any directory in the module root, and any replace targets.
 	FileWatchingGlobPatterns(ctx context.Context) map[string]struct{}
+
+	// SetProgressTracker sets the progress tracker for the session.
+	SetProgressTracker(tracker *progress.Tracker)
 }
 
 // Overlay is the type for a file held in memory on a session.
@@ -573,6 +581,7 @@
 	Version() *module.Version
 	HasListOrParseErrors() bool
 	HasTypeErrors() bool
+	ParseMode() ParseMode
 }
 
 type CriticalError struct {
diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go
index c0aabf2..18583ae 100644
--- a/internal/lsp/source/workspace_symbol.go
+++ b/internal/lsp/source/workspace_symbol.go
@@ -57,79 +57,77 @@
 // See the comment for symbolCollector for more information.
 type matcherFunc func(name string) float64
 
-// A symbolizer returns the best symbol match for name with pkg, according to
-// some heuristic.
+// A symbolizer returns the best symbol match for a name with pkg, according to
+// some heuristic. The symbol name is passed as the slice nameParts of logical
+// name pieces. For example, for myType.field the caller can pass either
+// []string{"myType.field"} or []string{"myType.", "field"}.
 //
 // See the comment for symbolCollector for more information.
-type symbolizer func(name string, pkg Package, m matcherFunc) (string, float64)
+type symbolizer func(nameParts []string, pkg Package, m matcherFunc) (string, float64)
 
-func fullyQualifiedSymbolMatch(name string, pkg Package, matcher matcherFunc) (string, float64) {
-	_, score := dynamicSymbolMatch(name, pkg, matcher)
+func fullyQualifiedSymbolMatch(nameParts []string, pkg Package, matcher matcherFunc) (string, float64) {
+	_, score := dynamicSymbolMatch(nameParts, pkg, matcher)
+	path := append([]string{pkg.PkgPath() + "."}, nameParts...)
 	if score > 0 {
-		return pkg.PkgPath() + "." + name, score
+		return strings.Join(path, ""), score
 	}
 	return "", 0
 }
 
-func dynamicSymbolMatch(name string, pkg Package, matcher matcherFunc) (string, float64) {
-	// Prefer any package-qualified match.
-	pkgQualified := pkg.Name() + "." + name
-	if match, score := bestMatch(pkgQualified, matcher); match != "" {
-		return match, score
+func dynamicSymbolMatch(nameParts []string, pkg Package, matcher matcherFunc) (string, float64) {
+	var best string
+	fullName := strings.Join(nameParts, "")
+	var score float64
+	var name string
+
+	// Compute the match score by finding the highest scoring suffix. In these
+	// cases the matched symbol is still the full name: it is confusing to match
+	// an unqualified nested field or method.
+	if match := bestMatch("", nameParts, matcher); match > score {
+		best = fullName
+		score = match
 	}
-	fullyQualified := pkg.PkgPath() + "." + name
-	if match, score := bestMatch(fullyQualified, matcher); match != "" {
-		return match, score
+
+	// Next: try to match a package-qualified name.
+	name = pkg.Name() + "." + fullName
+	if match := matcher(name); match > score {
+		best = name
+		score = match
 	}
-	return "", 0
+
+	// Finally: consider a fully qualified name.
+	prefix := pkg.PkgPath() + "."
+	fullyQualified := prefix + fullName
+	// As with field/method selectors, consider suffixes from right to left, but
+	// always return a fully-qualified symbol.
+	pathParts := strings.SplitAfter(prefix, "/")
+	if match := bestMatch(fullName, pathParts, matcher); match > score {
+		best = fullyQualified
+		score = match
+	}
+	return best, score
 }
 
-func packageSymbolMatch(name string, pkg Package, matcher matcherFunc) (string, float64) {
-	qualified := pkg.Name() + "." + name
+func bestMatch(name string, prefixParts []string, matcher matcherFunc) float64 {
+	var score float64
+	for i := len(prefixParts) - 1; i >= 0; i-- {
+		name = prefixParts[i] + name
+		if match := matcher(name); match > score {
+			score = match
+		}
+	}
+	return score
+}
+
+func packageSymbolMatch(components []string, pkg Package, matcher matcherFunc) (string, float64) {
+	path := append([]string{pkg.Name() + "."}, components...)
+	qualified := strings.Join(path, "")
 	if matcher(qualified) > 0 {
 		return qualified, 1
 	}
 	return "", 0
 }
 
-// bestMatch returns the highest scoring symbol suffix of fullPath, starting
-// from the right and splitting on selectors and path components.
-//
-// e.g. given a symbol path of the form 'host.com/dir/pkg.type.field', we
-// check the match quality of the following:
-//  - field
-//  - type.field
-//  - pkg.type.field
-//  - dir/pkg.type.field
-//  - host.com/dir/pkg.type.field
-//
-// and return the best match, along with its score.
-//
-// This is used to implement the 'dynamic' symbol style.
-func bestMatch(fullPath string, matcher matcherFunc) (string, float64) {
-	pathParts := strings.Split(fullPath, "/")
-	dottedParts := strings.Split(pathParts[len(pathParts)-1], ".")
-
-	var best string
-	var score float64
-
-	for i := 0; i < len(dottedParts); i++ {
-		path := strings.Join(dottedParts[len(dottedParts)-1-i:], ".")
-		if match := matcher(path); match > score {
-			best = path
-			score = match
-		}
-	}
-	for i := 0; i < len(pathParts); i++ {
-		path := strings.Join(pathParts[len(pathParts)-1-i:], "/")
-		if match := matcher(path); match > score {
-			best = path
-			score = match
-		}
-	}
-	return best, score
-}
-
 // symbolCollector holds context as we walk Packages, gathering symbols that
 // match a given query.
 //
@@ -420,7 +418,13 @@
 // or named. path is the path of nested identifiers containing the field.
 func (sc *symbolCollector) walkField(field *ast.Field, unnamedKind, namedKind protocol.SymbolKind, path ...*ast.Ident) {
 	if len(field.Names) == 0 {
-		sc.match(types.ExprString(field.Type), unnamedKind, field, path...)
+		switch typ := field.Type.(type) {
+		case *ast.SelectorExpr:
+			// embedded qualified type
+			sc.match(typ.Sel.Name, unnamedKind, field, path...)
+		default:
+			sc.match(types.ExprString(field.Type), unnamedKind, field, path...)
+		}
 	}
 	for _, name := range field.Names {
 		sc.match(name.Name, namedKind, name, path...)
@@ -466,18 +470,14 @@
 	}
 
 	isExported := isExported(name)
-	if len(path) > 0 {
-		var nameBuilder strings.Builder
-		for _, ident := range path {
-			nameBuilder.WriteString(ident.Name)
-			nameBuilder.WriteString(".")
-			if !ident.IsExported() {
-				isExported = false
-			}
+	var names []string
+	for _, ident := range path {
+		names = append(names, ident.Name+".")
+		if !ident.IsExported() {
+			isExported = false
 		}
-		nameBuilder.WriteString(name)
-		name = nameBuilder.String()
 	}
+	names = append(names, name)
 
 	// Factors to apply to the match score for the purpose of downranking
 	// results.
@@ -501,7 +501,7 @@
 		// can be noisy.
 		fieldFactor = 0.5
 	)
-	symbol, score := sc.symbolizer(name, sc.current.pkg, sc.matcher)
+	symbol, score := sc.symbolizer(names, sc.current.pkg, sc.matcher)
 
 	// Downrank symbols outside of the workspace.
 	if !sc.current.isWorkspace {
diff --git a/internal/lsp/source/workspace_symbol_test.go b/internal/lsp/source/workspace_symbol_test.go
index f3d9dbb..def73ce 100644
--- a/internal/lsp/source/workspace_symbol_test.go
+++ b/internal/lsp/source/workspace_symbol_test.go
@@ -5,7 +5,6 @@
 package source
 
 import (
-	"strings"
 	"testing"
 )
 
@@ -45,53 +44,3 @@
 		}
 	}
 }
-
-func TestBestMatch(t *testing.T) {
-	tests := []struct {
-		desc      string
-		symbol    string
-		matcher   matcherFunc
-		wantMatch string
-		wantScore float64
-	}{
-		{
-			desc:      "shortest match",
-			symbol:    "foo/bar/baz.quux",
-			matcher:   func(string) float64 { return 1.0 },
-			wantMatch: "quux",
-			wantScore: 1.0,
-		},
-		{
-			desc:   "partial match",
-			symbol: "foo/bar/baz.quux",
-			matcher: func(s string) float64 {
-				if strings.HasPrefix(s, "bar") {
-					return 1.0
-				}
-				return 0.0
-			},
-			wantMatch: "bar/baz.quux",
-			wantScore: 1.0,
-		},
-		{
-			desc:   "longest match",
-			symbol: "foo/bar/baz.quux",
-			matcher: func(s string) float64 {
-				parts := strings.Split(s, "/")
-				return float64(len(parts))
-			},
-			wantMatch: "foo/bar/baz.quux",
-			wantScore: 3.0,
-		},
-	}
-
-	for _, test := range tests {
-		test := test
-		t.Run(test.desc, func(t *testing.T) {
-			gotMatch, gotScore := bestMatch(test.symbol, test.matcher)
-			if gotMatch != test.wantMatch || gotScore != test.wantScore {
-				t.Errorf("bestMatch(%q, matcher) = (%q, %.2g), want (%q, %.2g)", test.symbol, gotMatch, gotScore, test.wantMatch, test.wantScore)
-			}
-		})
-	}
-}
diff --git a/internal/lsp/testdata/address/address.go b/internal/lsp/testdata/address/address.go
index 59d5d4c..3f1c2fa 100644
--- a/internal/lsp/testdata/address/address.go
+++ b/internal/lsp/testdata/address/address.go
@@ -13,37 +13,32 @@
 		b int    //@item(addrB, "b", "int", "var")
 	)
 
-	&b //@item(addrBRef, "&b", "int", "var")
-
-	wantsPtr()   //@rank(")", addrBRef, addrA),snippet(")", addrBRef, "&b", "&b")
+	wantsPtr()   //@rank(")", addrB, addrA),snippet(")", addrB, "&b", "&b")
 	wantsPtr(&b) //@snippet(")", addrB, "b", "b")
 
-	wantsVariadicPtr() //@rank(")", addrBRef, addrA),snippet(")", addrBRef, "&b", "&b")
+	wantsVariadicPtr() //@rank(")", addrB, addrA),snippet(")", addrB, "&b", "&b")
 
 	var s foo
 	s.c          //@item(addrDeepC, "s.c", "int", "field")
-	&s.c         //@item(addrDeepCRef, "&s.c", "int", "field")
-	wantsPtr()   //@snippet(")", addrDeepCRef, "&s.c", "&s.c")
-	wantsPtr(s)  //@snippet(")", addrDeepCRef, "&s.c", "&s.c")
+	wantsPtr()   //@snippet(")", addrDeepC, "&s.c", "&s.c")
+	wantsPtr(s)  //@snippet(")", addrDeepC, "&s.c", "&s.c")
 	wantsPtr(&s) //@snippet(")", addrDeepC, "s.c", "s.c")
 
 	// don't add "&" in item (it gets added as an additional edit)
 	wantsPtr(&s.c) //@snippet(")", addrFieldC, "c", "c")
 
 	// check dereferencing as well
-	var c *int
-	*c            //@item(addrCPtr, "*c", "*int", "var")
+	var c *int    //@item(addrCPtr, "c", "*int", "var")
 	var _ int = _ //@rank("_ //", addrCPtr, addrA),snippet("_ //", addrCPtr, "*c", "*c")
 
 	wantsVariadic() //@rank(")", addrCPtr, addrA),snippet(")", addrCPtr, "*c", "*c")
 
-	var d **int
-	**d           //@item(addrDPtr, "**d", "**int", "var")
+	var d **int   //@item(addrDPtr, "d", "**int", "var")
 	var _ int = _ //@rank("_ //", addrDPtr, addrA),snippet("_ //", addrDPtr, "**d", "**d")
 
 	type namedPtr *int
-	var np namedPtr
-	*np           //@item(addrNamedPtr, "*np", "namedPtr", "var")
+	var np namedPtr //@item(addrNamedPtr, "np", "namedPtr", "var")
+
 	var _ int = _ //@rank("_ //", addrNamedPtr, addrA)
 
 	// don't get tripped up by recursive pointer type
@@ -62,10 +57,9 @@
 	getFoo().c //@item(addrGetFooC, "getFoo().c", "int", "field")
 
 	// addressable
-	getFoo().ptr().c  //@item(addrGetFooPtrC, "getFoo().ptr().c", "int", "field")
-	&getFoo().ptr().c //@item(addrGetFooPtrCRef, "&getFoo().ptr().c", "int", "field")
+	getFoo().ptr().c //@item(addrGetFooPtrC, "getFoo().ptr().c", "int", "field")
 
-	wantsPtr()   //@rank(addrGetFooPtrCRef, addrGetFooC),snippet(")", addrGetFooPtrCRef, "&getFoo().ptr().c", "&getFoo().ptr().c")
+	wantsPtr()   //@rank(addrGetFooPtrC, addrGetFooC),snippet(")", addrGetFooPtrC, "&getFoo().ptr().c", "&getFoo().ptr().c")
 	wantsPtr(&g) //@rank(addrGetFooPtrC, addrGetFooC),snippet(")", addrGetFooPtrC, "getFoo().ptr().c", "getFoo().ptr().c")
 }
 
@@ -76,8 +70,8 @@
 func _() {
 	getNested := func() nested { return nested{} }
 
-	getNested().f.c        //@item(addrNestedC, "getNested().f.c", "int", "field")
-	&getNested().f.ptr().c //@item(addrNestedPtrC, "&getNested().f.ptr().c", "int", "field")
+	getNested().f.c       //@item(addrNestedC, "getNested().f.c", "int", "field")
+	getNested().f.ptr().c //@item(addrNestedPtrC, "getNested().f.ptr().c", "int", "field")
 
 	// addrNestedC is not addressable, so rank lower
 	wantsPtr(getNestedfc) //@fuzzy(")", addrNestedPtrC, addrNestedC)
diff --git a/internal/lsp/testdata/append/append.go b/internal/lsp/testdata/append/append.go
index 1998f6d..2880e59 100644
--- a/internal/lsp/testdata/append/append.go
+++ b/internal/lsp/testdata/append/append.go
@@ -32,8 +32,7 @@
 	b.b = append(b.b, b) //@rank(")", appendBazzy, appendBazLiteral, appendNestedBaz)
 
 	var aStringsPtr *[]string  //@item(appendStringsPtr, "aStringsPtr", "*[]string", "var")
-	"*aStringsPtr"             //@item(appendStringsDeref, "*aStringsPtr", "*[]string", "var")
-	foo(append([]string{}, a)) //@snippet("))", appendStringsDeref, "*aStringsPtr...", "*aStringsPtr...")
+	foo(append([]string{}, a)) //@snippet("))", appendStringsPtr, "*aStringsPtr...", "*aStringsPtr...")
 
 	foo(append([]string{}, *a)) //@snippet("))", appendStringsPtr, "aStringsPtr...", "aStringsPtr...")
 }
diff --git a/internal/lsp/testdata/complit/complit.go.in b/internal/lsp/testdata/complit/complit.go.in
index c888c01..e819810 100644
--- a/internal/lsp/testdata/complit/complit.go.in
+++ b/internal/lsp/testdata/complit/complit.go.in
@@ -73,14 +73,12 @@
 func _() {
 	type foo struct{} //@item(complitFoo, "foo", "struct{...}", "struct")
 
-	"&foo" //@item(complitAndFoo, "&foo", "struct{...}", "struct")
-
-	var _ *foo = &fo{} //@rank("{", complitFoo)
-	var _ *foo = fo{} //@rank("{", complitAndFoo)
+	var _ *foo = &fo{} //@snippet("{", complitFoo, "foo", "foo")
+	var _ *foo = fo{} //@snippet("{", complitFoo, "&foo", "&foo")
 
 	struct { a, b *foo }{
 		a: &fo{}, //@rank("{", complitFoo)
-		b: fo{}, //@rank("{", complitAndFoo)
+		b: fo{}, //@snippet("{", complitFoo, "&foo", "&foo")
 	}
 }
 
diff --git a/internal/lsp/testdata/deep/deep.go b/internal/lsp/testdata/deep/deep.go
index b713c60..6ed5ff8 100644
--- a/internal/lsp/testdata/deep/deep.go
+++ b/internal/lsp/testdata/deep/deep.go
@@ -34,8 +34,8 @@
 		*deepCircle
 	}
 	var circle deepCircle   //@item(deepCircle, "circle", "deepCircle", "var")
-	*circle.deepCircle      //@item(deepCircleField, "*circle.deepCircle", "*deepCircle", "field")
-	var _ deepCircle = circ //@deep(" //", deepCircle, deepCircleField)
+	circle.deepCircle       //@item(deepCircleField, "circle.deepCircle", "*deepCircle", "field")
+	var _ deepCircle = circ //@deep(" //", deepCircle, deepCircleField),snippet(" //", deepCircleField, "*circle.deepCircle", "*circle.deepCircle")
 }
 
 func _() {
diff --git a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden b/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden
index 8d361aa..b15345e 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_args_returns.go.golden
@@ -4,14 +4,14 @@
 func _() {
 	a := 1
 	//@mark(exSt0, "a")
-	a = fn0(a) //@mark(exEn0, "2")
+	a = newFunction(a) //@mark(exEn0, "2")
 	//@extractfunc(exSt0, exEn0)
 	b := a * 2 //@mark(exB, "	b")
 	_ = 3 + 4  //@mark(exEnd, "4")
 	//@extractfunc(exB, exEnd)
 }
 
-func fn0(a int) int {
+func newFunction(a int) int {
 	a = 5
 	a = a + 2
 	return a
@@ -26,11 +26,11 @@
 	a = a + 2 //@mark(exEn0, "2")
 	//@extractfunc(exSt0, exEn0)
 	//@mark(exB, "	b")
-	fn0(a)  //@mark(exEnd, "4")
+	newFunction(a)  //@mark(exEnd, "4")
 	//@extractfunc(exB, exEnd)
 }
 
-func fn0(a int) {
+func newFunction(a int) {
 	b := a * 2
 	_ = 3 + 4
 }
diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden b/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden
index b65c8bc..ba40ff2 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_basic.go.golden
@@ -3,11 +3,11 @@
 
 func _() {
 	//@mark(exSt1, "a")
-	fn0() //@mark(exEn1, "4")
+	newFunction() //@mark(exEn1, "4")
 	//@extractfunc(exSt1, exEn1)
 }
 
-func fn0() {
+func newFunction() {
 	a := 1
 	_ = 3 + 4
 }
diff --git a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden b/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
index 5d679ed..a43822a 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_basic_comment.go.golden
@@ -5,11 +5,11 @@
 	/* comment in the middle of a line */
 	//@mark(exSt18, "a")
 	// Comment on its own line
-	fn0() //@mark(exEn18, "4")
+	newFunction() //@mark(exEn18, "4")
 	//@extractfunc(exSt18, exEn18)
 }
 
-func fn0() {
+func newFunction() {
 	a := 1
 
 	_ = 3 + 4
diff --git a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden b/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden
index 3d392aa..3198c9f 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_issue_44813.go.golden
@@ -5,12 +5,12 @@
 
 func main() {
 	//@mark(exSt9, "x")
-	x := fn0() //@mark(exEn9, "}")
+	x := newFunction() //@mark(exEn9, "}")
 	//@extractfunc(exSt9, exEn9)
 	fmt.Printf("%x\n", x)
 }
 
-func fn0() []rune {
+func newFunction() []rune {
 	x := []rune{}
 	s := "HELLO"
 	for _, c := range s {
diff --git a/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden b/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden
index e739e66..e2ee217 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_redefine.go.golden
@@ -5,13 +5,13 @@
 
 func _() {
 	i, err := strconv.Atoi("1")
-	u, err := fn0() //@extractfunc("u", ")")
+	u, err := newFunction() //@extractfunc("u", ")")
 	if i == u || err == nil {
 		return
 	}
 }
 
-func fn0() (int, error) {
+func newFunction() (int, error) {
 	u, err := strconv.Atoi("2")
 	return u, err
 }
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden
index 030ca57..6103d1e 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_basic.go.golden
@@ -4,15 +4,15 @@
 func _() bool {
 	x := 1
 	//@mark(exSt2, "if")
-	cond0, ret0 := fn0(x)
-	if cond0 {
-		return ret0
+	shouldReturn, returnValue := newFunction(x)
+	if shouldReturn {
+		return returnValue
 	} //@mark(exEn2, "}")
 	return false
 	//@extractfunc(exSt2, exEn2)
 }
 
-func fn0(x int) (bool, bool) {
+func newFunction(x int) (bool, bool) {
 	if x == 0 {
 		return true, true
 	}
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden
index 41f8921..19e48da 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_basic_nonnested.go.golden
@@ -3,11 +3,11 @@
 
 func _() bool {
 	//@mark(exSt13, "x")
-	return fn0() //@mark(exEn13, "false")
+	return newFunction() //@mark(exEn13, "false")
 	//@extractfunc(exSt13, exEn13)
 }
 
-func fn0() bool {
+func newFunction() bool {
 	x := 1
 	if x == 0 {
 		return true
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden
index 4c711c4..4d20122 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_complex.go.golden
@@ -7,15 +7,15 @@
 	x := 1
 	y := "hello"
 	//@mark(exSt3, "z")
-	z, cond0, ret0, ret1, ret2 := fn0(y, x)
-	if cond0 {
-		return ret0, ret1, ret2
+	z, shouldReturn, returnValue, returnValue1, returnValue2 := newFunction(y, x)
+	if shouldReturn {
+		return returnValue, returnValue1, returnValue2
 	} //@mark(exEn3, "}")
 	return x, z, nil
 	//@extractfunc(exSt3, exEn3)
 }
 
-func fn0(y string, x int) (string, bool, int, string, error) {
+func newFunction(y string, x int) (string, bool, int, string, error) {
 	z := "bye"
 	if y == z {
 		return "", true, x, y, fmt.Errorf("same")
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden
index 7a43113..de54b15 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_complex_nonnested.go.golden
@@ -7,11 +7,11 @@
 	x := 1
 	y := "hello"
 	//@mark(exSt10, "z")
-	return fn0(y, x) //@mark(exEn10, "nil")
+	return newFunction(y, x) //@mark(exEn10, "nil")
 	//@extractfunc(exSt10, exEn10)
 }
 
-func fn0(y string, x int) (int, string, error) {
+func newFunction(y string, x int) (int, string, error) {
 	z := "bye"
 	if y == z {
 		return x, y, fmt.Errorf("same")
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden
index 937b3e5..3af747c 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_func_lit.go.golden
@@ -6,16 +6,16 @@
 func _() {
 	ast.Inspect(ast.NewIdent("a"), func(n ast.Node) bool {
 		//@mark(exSt4, "if")
-		cond0, ret0 := fn0(n)
-		if cond0 {
-			return ret0
+		shouldReturn, returnValue := newFunction(n)
+		if shouldReturn {
+			return returnValue
 		} //@mark(exEn4, "}")
 		return false
 	})
 	//@extractfunc(exSt4, exEn4)
 }
 
-func fn0(n ast.Node) (bool, bool) {
+func newFunction(n ast.Node) (bool, bool) {
 	if n == nil {
 		return true, true
 	}
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden
index c94a934..efa22ba 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_func_lit_nonnested.go.golden
@@ -6,12 +6,12 @@
 func _() {
 	ast.Inspect(ast.NewIdent("a"), func(n ast.Node) bool {
 		//@mark(exSt11, "if")
-		return fn0(n) //@mark(exEn11, "false")
+		return newFunction(n) //@mark(exEn11, "false")
 	})
 	//@extractfunc(exSt11, exEn11)
 }
 
-func fn0(n ast.Node) bool {
+func newFunction(n ast.Node) bool {
 	if n == nil {
 		return true
 	}
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden
index 1a5b4d4..31d1b2d 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_init.go.golden
@@ -4,16 +4,16 @@
 func _() string {
 	x := 1
 	//@mark(exSt5, "if")
-	cond0, ret0 := fn0(x)
-	if cond0 {
-		return ret0
+	shouldReturn, returnValue := newFunction(x)
+	if shouldReturn {
+		return returnValue
 	} //@mark(exEn5, "}")
 	x = 2
 	return "b"
 	//@extractfunc(exSt5, exEn5)
 }
 
-func fn0(x int) (bool, string) {
+func newFunction(x int) (bool, string) {
 	if x == 0 {
 		x = 3
 		return true, "a"
diff --git a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden b/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden
index 5a16d0d..58bb573 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_return_init_nonnested.go.golden
@@ -4,11 +4,11 @@
 func _() string {
 	x := 1
 	//@mark(exSt12, "if")
-	return fn0(x) //@mark(exEn12, "\"b\"")
+	return newFunction(x) //@mark(exEn12, "\"b\"")
 	//@extractfunc(exSt12, exEn12)
 }
 
-func fn0(x int) string {
+func newFunction(x int) string {
 	if x == 0 {
 		x = 3
 		return "a"
diff --git a/internal/lsp/testdata/extract/extract_function/extract_scope.go b/internal/lsp/testdata/extract/extract_function/extract_scope.go
index 73d7419..6cc141f 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_scope.go
+++ b/internal/lsp/testdata/extract/extract_function/extract_scope.go
@@ -1,10 +1,10 @@
 package extract
 
 func _() {
-	fn0 := 1
-	a := fn0 //@extractfunc("a", "fn0")
+	newFunction := 1
+	a := newFunction //@extractfunc("a", "newFunction")
 }
 
-func fn1() int {
+func newFunction1() int {
 	return 1
 }
diff --git a/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden b/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden
index ecdfc06..a4803b4 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_scope.go.golden
@@ -2,15 +2,15 @@
 package extract
 
 func _() {
-	fn0 := 1
-	fn2(fn0) //@extractfunc("a", "fn0")
+	newFunction := 1
+	newFunction2(newFunction) //@extractfunc("a", "newFunction")
 }
 
-func fn2(fn0 int) {
-	a := fn0
+func newFunction2(newFunction int) {
+	a := newFunction
 }
 
-func fn1() int {
+func newFunction1() int {
 	return 1
 }
 
diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden b/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden
index 04d7251..8be5040 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_smart_initialization.go.golden
@@ -4,12 +4,12 @@
 func _() {
 	var a []int
 	//@mark(exSt6, "a")
-	a, b := fn0(a)           //@mark(exEn6, "4")
+	a, b := newFunction(a)           //@mark(exEn6, "4")
 	//@extractfunc(exSt6, exEn6)
 	a = append(a, b)
 }
 
-func fn0(a []int) ([]int, int) {
+func newFunction(a []int) ([]int, int) {
 	a = append(a, 2)
 	b := 4
 	return a, b
diff --git a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden b/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden
index 5d7765a..fdf55ae 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_smart_return.go.golden
@@ -5,12 +5,12 @@
 	var b []int
 	var a int
 	//@mark(exSt7, "a")
-	b = fn0(a, b) //@mark(exEn7, ")")
+	b = newFunction(a, b) //@mark(exEn7, ")")
 	b[0] = 1
 	//@extractfunc(exSt7, exEn7)
 }
 
-func fn0(a int, b []int) []int {
+func newFunction(a int, b []int) []int {
 	a = 2
 	b = []int{}
 	b = append(b, a)
diff --git a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden b/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden
index 8afa749..4374f37 100644
--- a/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden
+++ b/internal/lsp/testdata/extract/extract_function/extract_unnecessary_param.go.golden
@@ -5,7 +5,7 @@
 	var b []int
 	var a int
 	//@mark(exSt8, "a")
-	a, b = fn0(b) //@mark(exEn8, ")")
+	a, b = newFunction(b) //@mark(exEn8, ")")
 	b[0] = 1
 	if a == 2 {
 		return
@@ -13,7 +13,7 @@
 	//@extractfunc(exSt8, exEn8)
 }
 
-func fn0(b []int) (int, []int) {
+func newFunction(b []int) (int, []int) {
 	a := 2
 	b = []int{}
 	b = append(b, a)
diff --git a/internal/lsp/testdata/extract/extract_method/extract_basic.go b/internal/lsp/testdata/extract/extract_method/extract_basic.go
new file mode 100644
index 0000000..c9a8d9d
--- /dev/null
+++ b/internal/lsp/testdata/extract/extract_method/extract_basic.go
@@ -0,0 +1,24 @@
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
diff --git a/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden b/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden
new file mode 100644
index 0000000..eab22a6
--- /dev/null
+++ b/internal/lsp/testdata/extract/extract_method/extract_basic.go.golden
@@ -0,0 +1,728 @@
+-- functionextraction_extract_basic_13_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(a *A) int {
+	sum := a.x + a.y
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_basic_14_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return newFunction(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(sum int) int {
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_basic_18_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func newFunction(a A) bool {
+	return a.x < a.y
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_basic_22_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(a A) int {
+	sum := a.x + a.y
+	return sum
+}
+
+-- functionextraction_extract_basic_23_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return newFunction(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(sum int) int {
+	return sum
+}
+
+-- functionextraction_extract_basic_9_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func newFunction(a *A) bool {
+	return a.x < a.y
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_method_13_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(a *A) int {
+	sum := a.x + a.y
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_method_14_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return newFunction(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(sum int) int {
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_method_18_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func newFunction(a A) bool {
+	return a.x < a.y
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- functionextraction_extract_method_22_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := newFunction(a) //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(a A) int {
+	sum := a.x + a.y
+	return sum
+}
+
+-- functionextraction_extract_method_23_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return newFunction(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func newFunction(sum int) int {
+	return sum
+}
+
+-- functionextraction_extract_method_9_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return newFunction(a) //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func newFunction(a *A) bool {
+	return a.x < a.y
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_13_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a *A) newMethod() int {
+	sum := a.x + a.y
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_14_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return a.newMethod(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (*A) newMethod(sum int) int {
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_18_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) newMethod() bool {
+	return a.x < a.y
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_basic_22_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) newMethod() int {
+	sum := a.x + a.y
+	return sum
+}
+
+-- methodextraction_extract_basic_23_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return a.newMethod(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (A) newMethod(sum int) int {
+	return sum
+}
+
+-- methodextraction_extract_basic_9_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) newMethod() bool {
+	return a.x < a.y
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_method_13_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a *A) newMethod() int {
+	sum := a.x + a.y
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_method_14_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return a.newMethod(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (*A) newMethod(sum int) int {
+	return sum
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_method_18_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) newMethod() bool {
+	return a.x < a.y
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+-- methodextraction_extract_method_22_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.newMethod() //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) newMethod() int {
+	sum := a.x + a.y
+	return sum
+}
+
+-- methodextraction_extract_method_23_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return a.newMethod(sum)       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (A) newMethod(sum int) int {
+	return sum
+}
+
+-- methodextraction_extract_method_9_2 --
+package extract
+
+type A struct {
+	x int
+	y int
+}
+
+func (a *A) XLessThanYP() bool {
+	return a.newMethod() //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a *A) newMethod() bool {
+	return a.x < a.y
+}
+
+func (a *A) AddP() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
+func (a A) XLessThanY() bool {
+	return a.x < a.y //@extractmethod("return", "a.y"),extractfunc("return", "a.y")
+}
+
+func (a A) Add() int {
+	sum := a.x + a.y //@extractmethod("sum", "a.y"),extractfunc("sum", "a.y")
+	return sum       //@extractmethod("return", "sum"),extractfunc("return", "sum")
+}
+
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
index 202d378..00ee7b4 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
+++ b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
@@ -2,8 +2,8 @@
 package extract
 
 func _() {
-	x0 := 1
-	var _ = x0 + 2 //@suggestedfix("1", "refactor.extract")
+	x := 1
+	var _ = x + 2 //@suggestedfix("1", "refactor.extract")
 	var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract")
 }
 
@@ -12,7 +12,7 @@
 
 func _() {
 	var _ = 1 + 2 //@suggestedfix("1", "refactor.extract")
-	x0 := 3 + 4
-	var _ = x0 //@suggestedfix("3 + 4", "refactor.extract")
+	x := 3 + 4
+	var _ = x //@suggestedfix("3 + 4", "refactor.extract")
 }
 
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
index c98bcea..badc010 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
+++ b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
@@ -3,7 +3,7 @@
 import "strconv"
 
 func _() {
-	a := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
 	str := "1"
 	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
 }
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
index 22c67f6..74df67e 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
+++ b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
@@ -10,15 +10,27 @@
 	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
 }
 
+-- suggestedfix_extract_func_call_6_8 --
+package extract
+
+import "strconv"
+
+func _() {
+	x := append([]int{}, 1)
+	x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	str := "1"
+	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
+}
+
 -- suggestedfix_extract_func_call_8_12 --
 package extract
 
 import "strconv"
 
 func _() {
-	a := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
 	str := "1"
-	x0, x1 := strconv.Atoi(str)
-	b, err := x0, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
+	x, x1 := strconv.Atoi(str)
+	b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
 }
 
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
index 4ded99a..e0e6464 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
+++ b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
@@ -9,8 +9,8 @@
 		y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
 	}
 	if true {
-		x2 := !false
-		x1 := x2 //@suggestedfix("!false", "refactor.extract")
+		x := !false
+		x1 := x //@suggestedfix("!false", "refactor.extract")
 	}
 }
 
@@ -22,8 +22,8 @@
 func _() {
 	x0 := 0
 	if true {
-		x1 := ast.CompositeLit{}
-		y := x1 //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
+		x := ast.CompositeLit{}
+		y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
 	}
 	if true {
 		x1 := !false //@suggestedfix("!false", "refactor.extract")
diff --git a/internal/lsp/testdata/folding/a.go b/internal/lsp/testdata/folding/a.go
index 76b26c1..e07d7e0 100644
--- a/internal/lsp/testdata/folding/a.go
+++ b/internal/lsp/testdata/folding/a.go
@@ -10,6 +10,7 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {
@@ -22,6 +23,14 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
diff --git a/internal/lsp/testdata/folding/a.go.golden b/internal/lsp/testdata/folding/a.go.golden
index d8341f7..59c97ad 100644
--- a/internal/lsp/testdata/folding/a.go.golden
+++ b/internal/lsp/testdata/folding/a.go.golden
@@ -21,7 +21,11 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {<>}
+	/* This is a multiline<>
+
+	/* This is a multiline<>
 	_ = []int{<>}
 	_ = [2]string{<>}
 	_ = map[string]int{<>}
@@ -48,11 +52,20 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:<>
 	case false:<>
 	default:<>
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
@@ -102,6 +115,7 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {<>} else {<>}
@@ -110,6 +124,14 @@
 	default:
 		fmt.Println(<>)
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
@@ -162,6 +184,7 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {
@@ -174,6 +197,14 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
@@ -221,35 +252,37 @@
 3:9-6:0
 10:22-11:32
 12:10-12:9
-12:20-66:0
-13:10-24:1
-14:12-19:3
-15:12-17:2
-16:16-16:21
-17:11-19:2
-18:16-18:22
-20:13-21:22
-21:15-21:21
-22:10-23:24
-23:15-23:23
-25:12-29:1
-30:16-32:1
-33:21-37:1
-38:17-42:1
-43:8-47:1
-48:15-48:23
-48:32-48:40
-49:10-60:1
-50:18-55:3
-51:11-53:2
-52:16-52:28
-53:11-55:2
-54:16-54:29
-56:11-57:18
-57:15-57:17
-58:10-59:24
-59:15-59:23
-61:32-62:30
+12:20-75:0
+14:10-25:1
+15:12-20:3
+16:12-18:2
+17:16-17:21
+18:11-20:2
+19:16-19:22
+21:13-22:22
+22:15-22:21
+23:10-24:24
+24:15-24:23
+26:24-28:11
+30:24-33:32
+34:12-38:1
+39:16-41:1
+42:21-46:1
+47:17-51:1
+52:8-56:1
+57:15-57:23
+57:32-57:40
+58:10-69:1
+59:18-64:3
+60:11-62:2
+61:16-61:28
+62:11-64:2
+63:16-63:29
+65:11-66:18
+66:15-66:17
+67:10-68:24
+68:15-68:23
+70:32-71:30
 
 -- foldingRange-comment-0 --
 package folding //@fold("package")
@@ -263,6 +296,7 @@
 
 // bar is a function.<>
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {
@@ -275,6 +309,9 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline<>
+
+	/* This is a multiline<>
 	_ = []int{
 		1,
 		2,
@@ -327,6 +364,7 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {
@@ -339,6 +377,14 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
@@ -407,8 +453,12 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {<>
 	}
+	/* This is a multiline<>
+
+	/* This is a multiline<>
 	_ = []int{<>,
 	}
 	_ = [2]string{"d",
@@ -442,11 +492,20 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:<>
 	case false:<>
 	default:<>
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
@@ -496,6 +555,7 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {<>
@@ -506,6 +566,14 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
@@ -559,6 +627,7 @@
 
 // bar is a function.<>
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {
@@ -571,6 +640,9 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline<>
+
+	/* This is a multiline<>
 	_ = []int{
 		1,
 		2,
@@ -624,6 +696,7 @@
 // bar is a function.
 // With a multiline doc comment.
 func bar() string {
+	/* This is a single line comment */
 	switch {
 	case true:
 		if true {
@@ -636,6 +709,14 @@
 	default:
 		fmt.Println("default")
 	}
+	/* This is a multiline
+	block
+	comment */
+
+	/* This is a multiline
+	block
+	comment */
+	// Followed by another comment.
 	_ = []int{
 		1,
 		2,
diff --git a/internal/lsp/testdata/godef/a/a.go.golden b/internal/lsp/testdata/godef/a/a.go.golden
index 2f7d8de..c268293 100644
--- a/internal/lsp/testdata/godef/a/a.go.golden
+++ b/internal/lsp/testdata/godef/a/a.go.golden
@@ -76,6 +76,42 @@
 [`a.Random2` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#Random2)
 -- aPackage-hover --
 Package a is a package for testing go to definition\.
+-- declBlockA-hover --
+```go
+type a struct {
+	x string
+}
+```
+
+1st type declaration block
+-- declBlockB-hover --
+```go
+type b struct{}
+```
+
+b has a comment
+-- declBlockC-hover --
+```go
+type c struct {
+	f string
+}
+```
+
+c is a struct
+-- declBlockD-hover --
+```go
+type d string
+```
+
+3rd type declaration block
+-- declBlockE-hover --
+```go
+type e struct {
+	f float64
+}
+```
+
+e has a comment
 -- err-definition --
 godef/a/a.go:33:6-9: defined here as ```go
 var err error
@@ -148,39 +184,3 @@
 ```
 
 z is a variable too\.
--- declBlockA-hover --
-```go
-type a struct {
-	x string
-}
-```
-
-1st type declaration block
--- declBlockB-hover --
-```go
-type b struct{}
-```
-
-b has a comment
--- declBlockC-hover --
-```go
-type c struct {
-	f string
-}
-```
-
-c is a struct
--- declBlockD-hover --
-```go
-type d string
-```
-
-3rd type declaration block
--- declBlockE-hover --
-```go
-type e struct {
-	f float64
-}
-```
-
-e has a comment
diff --git a/internal/lsp/testdata/godef/a/h.go.golden b/internal/lsp/testdata/godef/a/h.go.golden
index 71f78e1..3525d4c 100644
--- a/internal/lsp/testdata/godef/a/h.go.golden
+++ b/internal/lsp/testdata/godef/a/h.go.golden
@@ -1,39 +1,3 @@
--- nestedNumber-hover --
-```go
-field number int64
-```
-
-nested number
--- nestedString-hover --
-```go
-field str string
-```
-
-nested string
--- nestedMap-hover --
-```go
-field m map[string]float64
-```
-
-nested map
--- structA-hover --
-```go
-field a int
-```
-
-a field
--- structB-hover --
-```go
-field b struct{c int}
-```
-
-b nested struct
--- structC-hover --
-```go
-field c int
-```
-
-c field of nested struct
 -- arrD-hover --
 ```go
 field d int
@@ -86,12 +50,60 @@
 ```
 
 X value field
+-- nestedMap-hover --
+```go
+field m map[string]float64
+```
+
+nested map
+-- nestedNumber-hover --
+```go
+field number int64
+```
+
+nested number
+-- nestedString-hover --
+```go
+field str string
+```
+
+nested string
 -- openMethod-hover --
 ```go
 func (interface).open() error
 ```
 
 open method comment
+-- returnX-hover --
+```go
+field x int
+```
+
+X coord
+-- returnY-hover --
+```go
+field y int
+```
+
+Y coord
+-- structA-hover --
+```go
+field a int
+```
+
+a field
+-- structB-hover --
+```go
+field b struct{c int}
+```
+
+b nested struct
+-- structC-hover --
+```go
+field c int
+```
+
+c field of nested struct
 -- testDescription-hover --
 ```go
 field desc string
@@ -122,15 +134,3 @@
 ```
 
 expected test value
--- returnX-hover --
-```go
-field x int
-```
-
-X coord
--- returnY-hover --
-```go
-field y int
-```
-
-Y coord
\ No newline at end of file
diff --git a/internal/lsp/testdata/godef/b/h.go.golden b/internal/lsp/testdata/godef/b/h.go.golden
index 85f0404..b854dd4 100644
--- a/internal/lsp/testdata/godef/b/h.go.golden
+++ b/internal/lsp/testdata/godef/b/h.go.golden
@@ -1,12 +1,12 @@
+-- AStuff-hover --
+```go
+func AStuff()
+```
+
+[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)
 -- AVariable-hover --
 ```go
 var _ A
 ```
 
 variable of type a\.A
--- AStuff-hover --
-```go
-func AStuff()
-```
-
-[`a.AStuff` on pkg.go.dev](https://pkg.go.dev/golang.org/x/tools/internal/lsp/godef/a?utm_source=gopls#AStuff)
\ No newline at end of file
diff --git a/internal/lsp/testdata/godef/infer_generics/inferred.go b/internal/lsp/testdata/godef/infer_generics/inferred.go
new file mode 100644
index 0000000..78abf27
--- /dev/null
+++ b/internal/lsp/testdata/godef/infer_generics/inferred.go
@@ -0,0 +1,12 @@
+package inferred
+
+func app[S interface{ ~[]E }, E any](s S, e E) S {
+	return append(s, e)
+}
+
+func _() {
+	_ = app[[]int]             //@mark(constrInfer, "app"),hover("app", constrInfer)
+	_ = app[[]int, int]        //@mark(instance, "app"),hover("app", instance)
+	_ = app[[]int]([]int{}, 0) //@mark(partialInfer, "app"),hover("app", partialInfer)
+	_ = app([]int{}, 0)        //@mark(argInfer, "app"),hover("app", argInfer)
+}
diff --git a/internal/lsp/testdata/godef/infer_generics/inferred.go.golden b/internal/lsp/testdata/godef/infer_generics/inferred.go.golden
new file mode 100644
index 0000000..2dd97d9
--- /dev/null
+++ b/internal/lsp/testdata/godef/infer_generics/inferred.go.golden
@@ -0,0 +1,20 @@
+-- argInfer-hover --
+```go
+func app(s []int, e int) []int // func[S₁ interface{~[]E₂}, E₂ interface{}](s S₁, e E₂) S₁
+```
+-- constrInf-hover --
+```go
+func app(s []int, e int) []int // func[S₁ interface{~[]E₂}, E₂ interface{}](s S₁, e E₂) S₁
+```
+-- constrInfer-hover --
+```go
+func app(s []int, e int) []int // func[S₁ interface{~[]E₂}, E₂ interface{}](s S₁, e E₂) S₁
+```
+-- instance-hover --
+```go
+func app(s []int, e int) []int // func[S₁ interface{~[]E₂}, E₂ interface{}](s S₁, e E₂) S₁
+```
+-- partialInfer-hover --
+```go
+func app(s []int, e int) []int // func[S₁ interface{~[]E₂}, E₂ interface{}](s S₁, e E₂) S₁
+```
diff --git a/internal/lsp/testdata/maps/maps.go.in b/internal/lsp/testdata/maps/maps.go.in
index b4a4cdd..eeb5576 100644
--- a/internal/lsp/testdata/maps/maps.go.in
+++ b/internal/lsp/testdata/maps/maps.go.in
@@ -11,8 +11,8 @@
 	// comparable
 	type aStruct struct{} //@item(mapStructType, "aStruct", "struct{...}", "struct")
 
-	map[]a{} //@complete("]", mapSliceTypePtr, mapStructType)
+	map[]a{} //@complete("]", mapSliceType, mapStructType),snippet("]", mapSliceType, "*aSlice", "*aSlice")
 
-	map[a]a{} //@complete("]", mapSliceTypePtr, mapStructType)
+	map[a]a{} //@complete("]", mapSliceType, mapStructType)
 	map[a]a{} //@complete("{", mapSliceType, mapStructType)
 }
diff --git a/internal/lsp/testdata/rank/convert_rank.go.in b/internal/lsp/testdata/rank/convert_rank.go.in
index 372d9c3..c430048 100644
--- a/internal/lsp/testdata/rank/convert_rank.go.in
+++ b/internal/lsp/testdata/rank/convert_rank.go.in
@@ -46,8 +46,7 @@
 
 	var _ time.Duration = conv //@rank(" //", convertD, convertE),snippet(" //", convertE, "time.Duration(convE)", "time.Duration(convE)")
 
-	var convP myInt
-	&convP            //@item(convertP, "&convP", "myInt", "var")
+	var convP myInt   //@item(convertP, "convP", "myInt", "var")
 	var _ *int = conv //@snippet(" //", convertP, "(*int)(&convP)", "(*int)(&convP)")
 
 	var ff float64 //@item(convertFloat, "ff", "float64", "var")
diff --git a/internal/lsp/testdata/rename/issue42134/1.go b/internal/lsp/testdata/rename/issue42134/1.go
new file mode 100644
index 0000000..056f847
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/1.go
@@ -0,0 +1,8 @@
+package issue42134
+
+func _() {
+	// foo computes things.
+	foo := func() {}
+
+	foo() //@rename("foo", "bar")
+}
diff --git a/internal/lsp/testdata/rename/issue42134/1.go.golden b/internal/lsp/testdata/rename/issue42134/1.go.golden
new file mode 100644
index 0000000..266aeef
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/1.go.golden
@@ -0,0 +1,10 @@
+-- bar-rename --
+package issue42134
+
+func _() {
+	// bar computes things.
+	bar := func() {}
+
+	bar() //@rename("foo", "bar")
+}
+
diff --git a/internal/lsp/testdata/rename/issue42134/2.go b/internal/lsp/testdata/rename/issue42134/2.go
new file mode 100644
index 0000000..e9f6395
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/2.go
@@ -0,0 +1,12 @@
+package issue42134
+
+import "fmt"
+
+func _() {
+	// minNumber is a min number.
+	// Second line.
+	minNumber := min(1, 2)
+	fmt.Println(minNumber) //@rename("minNumber", "res")
+}
+
+func min(a, b int) int { return a }
diff --git a/internal/lsp/testdata/rename/issue42134/2.go.golden b/internal/lsp/testdata/rename/issue42134/2.go.golden
new file mode 100644
index 0000000..406a383
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/2.go.golden
@@ -0,0 +1,14 @@
+-- res-rename --
+package issue42134
+
+import "fmt"
+
+func _() {
+	// res is a min number.
+	// Second line.
+	res := min(1, 2)
+	fmt.Println(res) //@rename("minNumber", "res")
+}
+
+func min(a, b int) int { return a }
+
diff --git a/internal/lsp/testdata/rename/issue42134/3.go b/internal/lsp/testdata/rename/issue42134/3.go
new file mode 100644
index 0000000..7666f57
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/3.go
@@ -0,0 +1,11 @@
+package issue42134
+
+func _() {
+	/*
+	tests contains test cases
+	*/
+	tests := []struct { //@rename("tests", "testCases")
+		in, out string
+	}{}
+	_ = tests
+}
diff --git a/internal/lsp/testdata/rename/issue42134/3.go.golden b/internal/lsp/testdata/rename/issue42134/3.go.golden
new file mode 100644
index 0000000..cdcae18
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/3.go.golden
@@ -0,0 +1,13 @@
+-- testCases-rename --
+package issue42134
+
+func _() {
+	/*
+	testCases contains test cases
+	*/
+	testCases := []struct { //@rename("tests", "testCases")
+		in, out string
+	}{}
+	_ = testCases
+}
+
diff --git a/internal/lsp/testdata/rename/issue42134/4.go b/internal/lsp/testdata/rename/issue42134/4.go
new file mode 100644
index 0000000..c953520
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/4.go
@@ -0,0 +1,8 @@
+package issue42134
+
+func _() {
+	// a is equal to 5. Comment must stay the same
+
+	a := 5
+	_ = a //@rename("a", "b")
+}
diff --git a/internal/lsp/testdata/rename/issue42134/4.go.golden b/internal/lsp/testdata/rename/issue42134/4.go.golden
new file mode 100644
index 0000000..2086cf7
--- /dev/null
+++ b/internal/lsp/testdata/rename/issue42134/4.go.golden
@@ -0,0 +1,10 @@
+-- b-rename --
+package issue42134
+
+func _() {
+	// a is equal to 5. Comment must stay the same
+
+	b := 5
+	_ = b //@rename("a", "b")
+}
+
diff --git a/internal/lsp/testdata/semantic/a.go b/internal/lsp/testdata/semantic/a.go
index 756c56e..54d6c8a 100644
--- a/internal/lsp/testdata/semantic/a.go
+++ b/internal/lsp/testdata/semantic/a.go
@@ -55,6 +55,8 @@
 	w := b[4:]
 	j := len(x)
 	j--
+	q := []interface{}{j, 23i, &y}
+	g(q...)
 	return true
 }
 
@@ -74,5 +76,6 @@
 	if !ok {
 		switch x := vv[0].(type) {
 		}
+		goto Never
 	}
 }
diff --git a/internal/lsp/testdata/semantic/a.go.golden b/internal/lsp/testdata/semantic/a.go.golden
index 512a83e..4bf70e5 100644
--- a/internal/lsp/testdata/semantic/a.go.golden
+++ b/internal/lsp/testdata/semantic/a.go.golden
@@ -39,7 +39,7 @@
 /*⇒4,keyword,[]*/func (/*⇒1,variable,[]*/a /*⇒1,operator,[]*/*/*⇒1,type,[]*/A) /*⇒1,member,[definition]*/f() /*⇒4,type,[defaultLibrary]*/bool {
 	/*⇒3,keyword,[]*/var /*⇒1,variable,[definition]*/z /*⇒6,type,[defaultLibrary]*/string
 	/*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"foo"
-	/*⇒1,variable,[]*/a(/*⇒1,variable,[definition]*/x)
+	/*⇒1,variable,[]*/a(/*⇒1,variable,[]*/x)
 	/*⇒1,variable,[definition]*/y /*⇒2,operator,[]*/:= /*⇒5,string,[]*/"bar" /*⇒1,operator,[]*/+ /*⇒1,variable,[]*/x
 	/*⇒6,keyword,[]*/switch /*⇒1,variable,[]*/z {
 	/*⇒4,keyword,[]*/case /*⇒4,string,[]*/"xx":
@@ -52,18 +52,20 @@
 	/*⇒3,keyword,[]*/for /*⇒1,variable,[definition]*/k, /*⇒1,variable,[definition]*/v := /*⇒5,keyword,[]*/range /*⇒1,variable,[]*/m {
 		/*⇒6,keyword,[]*/return (/*⇒1,operator,[]*/!/*⇒1,variable,[]*/k) /*⇒2,operator,[]*/&& /*⇒1,variable,[]*/v[/*⇒1,number,[]*/0] /*⇒2,operator,[]*/== /*⇒3,variable,[readonly defaultLibrary]*/nil
 	}
-	/*⇒2,variable,[]*/c2 /*⇒2,operator,[]*/<- /*⇒1,type,[]*/A./*⇒1,variable,[definition]*/X
+	/*⇒2,variable,[]*/c2 /*⇒2,operator,[]*/<- /*⇒1,type,[]*/A./*⇒1,variable,[]*/X
 	/*⇒1,variable,[definition]*/w /*⇒2,operator,[]*/:= /*⇒1,variable,[]*/b[/*⇒1,number,[]*/4:]
 	/*⇒1,variable,[definition]*/j /*⇒2,operator,[]*/:= /*⇒3,function,[defaultLibrary]*/len(/*⇒1,variable,[]*/x)
 	/*⇒1,variable,[]*/j/*⇒2,operator,[]*/--
+	/*⇒1,variable,[definition]*/q /*⇒2,operator,[]*/:= []/*⇒9,keyword,[]*/interface{}{/*⇒1,variable,[]*/j, /*⇒3,number,[]*/23i, /*⇒1,operator,[]*/&/*⇒1,variable,[]*/y}
+	/*⇒1,function,[]*/g(/*⇒1,variable,[]*/q/*⇒3,operator,[]*/...)
 	/*⇒6,keyword,[]*/return /*⇒4,variable,[readonly]*/true
 }
 
 /*⇒4,keyword,[]*/func /*⇒1,function,[definition]*/g(/*⇒2,parameter,[definition]*/vv /*⇒3,operator,[]*/.../*⇒9,keyword,[]*/interface{}) {
 	/*⇒2,variable,[definition]*/ff /*⇒2,operator,[]*/:= /*⇒4,keyword,[]*/func() {}
 	/*⇒5,keyword,[]*/defer /*⇒2,variable,[]*/ff()
-	/*⇒2,keyword,[]*/go /*⇒3,namespace,[]*/utf./*⇒9,variable,[definition]*/RuneCount(/*⇒2,string,[]*/"")
-	/*⇒2,keyword,[]*/go /*⇒4,namespace,[]*/utf8./*⇒9,function,[]*/RuneCount(/*⇒2,variable,[]*/vv.(/*⇒6,variable,[definition]*/string))
+	/*⇒2,keyword,[]*/go /*⇒3,namespace,[]*/utf./*⇒9,function,[]*/RuneCount(/*⇒2,string,[]*/"")
+	/*⇒2,keyword,[]*/go /*⇒4,namespace,[]*/utf8./*⇒9,function,[]*/RuneCount(/*⇒2,variable,[]*/vv.(/*⇒6,type,[]*/string))
 	/*⇒2,keyword,[]*/if /*⇒4,variable,[readonly]*/true {
 	} /*⇒4,keyword,[]*/else {
 	}
@@ -75,6 +77,7 @@
 	/*⇒2,keyword,[]*/if /*⇒1,operator,[]*/!/*⇒2,variable,[]*/ok {
 		/*⇒6,keyword,[]*/switch /*⇒1,variable,[definition]*/x /*⇒2,operator,[]*/:= /*⇒2,variable,[]*/vv[/*⇒1,number,[]*/0].(/*⇒4,keyword,[]*/type) {
 		}
+		/*⇒4,keyword,[]*/goto Never
 	}
 }
 
diff --git a/internal/lsp/testdata/signature/signature.go b/internal/lsp/testdata/signature/signature.go
index 05f8da2..4e2b12b 100644
--- a/internal/lsp/testdata/signature/signature.go
+++ b/internal/lsp/testdata/signature/signature.go
@@ -47,11 +47,12 @@
 		return func(int) rune { return 0 }
 	}
 
-	fn("hi", "there")    //@signature("hi", "fn(hi string, there string) func(i int) rune", 0)
+	fn("hi", "there")    //@signature("hi", "", 0)
+	fn("hi", "there")    //@signature(",", "fn(hi string, there string) func(i int) rune", 0)
 	fn("hi", "there")(1) //@signature("1", "func(i int) rune", 0)
 
 	fnPtr := &fn
-	(*fnPtr)("hi", "there") //@signature("hi", "func(hi string, there string) func(i int) rune", 0)
+	(*fnPtr)("hi", "there") //@signature(",", "func(hi string, there string) func(i int) rune", 0)
 
 	var fnIntf interface{} = Foo
 	fnIntf.(func(string, int) bool)("hi", 123) //@signature("123", "func(string, int) bool", 1)
@@ -69,8 +70,8 @@
 	Foo(myFunc(123), 456) //@signature("myFunc", "Foo(a string, b int) (c bool)", 0)
 	Foo(myFunc(123), 456) //@signature("123", "myFunc(foo int) string", 0)
 
-	panic("oops!")            //@signature("oops", "panic(v interface{})", 0)
-	println("hello", "world") //@signature("world", "println(args ...Type)", 0)
+	panic("oops!")            //@signature(")", "panic(v interface{})", 0)
+	println("hello", "world") //@signature(",", "println(args ...Type)", 0)
 
 	Hello(func() {
 		//@signature("//", "", 0)
diff --git a/internal/lsp/testdata/snippets/literal_snippets.go.in b/internal/lsp/testdata/snippets/literal_snippets.go.in
index 3f8a02f..e1585dd 100644
--- a/internal/lsp/testdata/snippets/literal_snippets.go.in
+++ b/internal/lsp/testdata/snippets/literal_snippets.go.in
@@ -219,3 +219,9 @@
 	f := func(...[]int) {}
 	f() //@snippet(")", litIntSlice, "[]int{$0\\}", "[]int{$0\\}")
 }
+
+
+func _() {
+	// don't complete to "untyped int()"
+	[]int{}[untyped] //@complete("] //")
+}
diff --git a/internal/lsp/testdata/summary.txt.golden b/internal/lsp/testdata/summary.txt.golden
index d783d79..682caefe 100644
--- a/internal/lsp/testdata/summary.txt.golden
+++ b/internal/lsp/testdata/summary.txt.golden
@@ -1,12 +1,12 @@
 -- summary --
 CallHierarchyCount = 2
 CodeLensCount = 5
-CompletionsCount = 264
-CompletionSnippetCount = 95
+CompletionsCount = 265
+CompletionSnippetCount = 103
 UnimportedCompletionsCount = 5
 DeepCompletionsCount = 5
 FuzzyCompletionsCount = 8
-RankedCompletionsCount = 166
+RankedCompletionsCount = 163
 CaseSensitiveCompletionsCount = 4
 DiagnosticsCount = 37
 FoldingRangesCount = 2
@@ -14,16 +14,17 @@
 ImportCount = 8
 SemanticTokenCount = 3
 SuggestedFixCount = 40
-FunctionExtractionCount = 18
+FunctionExtractionCount = 24
+MethodExtractionCount = 6
 DefinitionsCount = 95
 TypeDefinitionsCount = 18
 HighlightsCount = 69
 ReferencesCount = 25
-RenamesCount = 33
+RenamesCount = 37
 PrepareRenamesCount = 7
 SymbolsCount = 5
 WorkspaceSymbolsCount = 20
-SignaturesCount = 32
+SignaturesCount = 33
 LinksCount = 7
 ImplementationsCount = 14
 
diff --git a/internal/lsp/testdata/summary_generics.txt.golden b/internal/lsp/testdata/summary_generics.txt.golden
new file mode 100644
index 0000000..152f38d
--- /dev/null
+++ b/internal/lsp/testdata/summary_generics.txt.golden
@@ -0,0 +1,29 @@
+-- summary --
+CallHierarchyCount = 2
+CodeLensCount = 5
+CompletionsCount = 265
+CompletionSnippetCount = 103
+UnimportedCompletionsCount = 5
+DeepCompletionsCount = 5
+FuzzyCompletionsCount = 8
+RankedCompletionsCount = 163
+CaseSensitiveCompletionsCount = 4
+DiagnosticsCount = 37
+FoldingRangesCount = 2
+FormatCount = 6
+ImportCount = 8
+SemanticTokenCount = 3
+SuggestedFixCount = 40
+FunctionExtractionCount = 18
+DefinitionsCount = 99
+TypeDefinitionsCount = 18
+HighlightsCount = 69
+ReferencesCount = 25
+RenamesCount = 33
+PrepareRenamesCount = 7
+SymbolsCount = 5
+WorkspaceSymbolsCount = 20
+SignaturesCount = 32
+LinksCount = 7
+ImplementationsCount = 14
+
diff --git a/internal/lsp/testdata/typeassert/type_assert.go b/internal/lsp/testdata/typeassert/type_assert.go
index 0dfd3a1..e24b68a 100644
--- a/internal/lsp/testdata/typeassert/type_assert.go
+++ b/internal/lsp/testdata/typeassert/type_assert.go
@@ -13,14 +13,12 @@
 type abcNotImpl struct{} //@item(abcNotImpl, "abcNotImpl", "struct{...}", "struct")
 
 func _() {
-	*abcPtrImpl //@item(abcPtrImplPtr, "*abcPtrImpl", "struct{...}", "struct")
-
 	var a abc
 	switch a.(type) {
-	case ab: //@complete(":", abcPtrImplPtr, abcImpl, abcIntf, abcNotImpl)
+	case ab: //@complete(":", abcImpl, abcPtrImpl, abcIntf, abcNotImpl)
 	case *ab: //@complete(":", abcImpl, abcPtrImpl, abcIntf, abcNotImpl)
 	}
 
-	a.(ab)  //@complete(")", abcPtrImplPtr, abcImpl, abcIntf, abcNotImpl)
+	a.(ab)  //@complete(")", abcImpl, abcPtrImpl, abcIntf, abcNotImpl)
 	a.(*ab) //@complete(")", abcImpl, abcPtrImpl, abcIntf, abcNotImpl)
 }
diff --git a/internal/lsp/testdata/typemods/type_mods.go b/internal/lsp/testdata/typemods/type_mods.go
new file mode 100644
index 0000000..f5f0f80
--- /dev/null
+++ b/internal/lsp/testdata/typemods/type_mods.go
@@ -0,0 +1,21 @@
+package typemods
+
+func fooFunc() func() int { //@item(modFooFunc, "fooFunc", "func() func() int", "func")
+	return func() int {
+		return 0
+	}
+}
+
+func fooPtr() *int { //@item(modFooPtr, "fooPtr", "func() *int", "func")
+	return nil
+}
+
+func _() {
+	var _ int = foo //@snippet(" //", modFooFunc, "fooFunc()()", "fooFunc()()"),snippet(" //", modFooPtr, "*fooPtr()", "*fooPtr()")
+}
+
+func _() {
+	var m map[int][]chan int //@item(modMapChanPtr, "m", "map[int]chan *int", "var")
+
+	var _ int = m //@snippet(" //", modMapChanPtr, "<-m[${1:}][${2:}]", "<-m[${1:}][${2:}]")
+}
diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go
index 53861e0..d5db454 100644
--- a/internal/lsp/tests/tests.go
+++ b/internal/lsp/tests/tests.go
@@ -32,6 +32,7 @@
 	"golang.org/x/tools/internal/lsp/source/completion"
 	"golang.org/x/tools/internal/span"
 	"golang.org/x/tools/internal/testenv"
+	"golang.org/x/tools/internal/typeparams"
 	"golang.org/x/tools/txtar"
 )
 
@@ -39,10 +40,17 @@
 	overlayFileSuffix = ".overlay"
 	goldenFileSuffix  = ".golden"
 	inFileSuffix      = ".in"
-	summaryFile       = "summary.txt"
 	testModule        = "golang.org/x/tools/internal/lsp"
 )
 
+var summaryFile = "summary.txt"
+
+func init() {
+	if typeparams.Enabled {
+		summaryFile = "summary_generics.txt"
+	}
+}
+
 var UpdateGolden = flag.Bool("golden", false, "Update golden files")
 
 type CallHierarchy map[span.Span]*CallHierarchyResult
@@ -62,6 +70,7 @@
 type SemanticTokens []span.Span
 type SuggestedFixes map[span.Span][]string
 type FunctionExtractions map[span.Span]span.Span
+type MethodExtractions map[span.Span]span.Span
 type Definitions map[span.Span]Definition
 type Implementations map[span.Span][]span.Span
 type Highlights map[span.Span][]span.Span
@@ -96,6 +105,7 @@
 	SemanticTokens           SemanticTokens
 	SuggestedFixes           SuggestedFixes
 	FunctionExtractions      FunctionExtractions
+	MethodExtractions        MethodExtractions
 	Definitions              Definitions
 	Implementations          Implementations
 	Highlights               Highlights
@@ -139,6 +149,7 @@
 	SemanticTokens(*testing.T, span.Span)
 	SuggestedFix(*testing.T, span.Span, []string, int)
 	FunctionExtraction(*testing.T, span.Span, span.Span)
+	MethodExtraction(*testing.T, span.Span, span.Span)
 	Definition(*testing.T, span.Span, Definition)
 	Implementation(*testing.T, span.Span, []span.Span)
 	Highlight(*testing.T, span.Span, []span.Span)
@@ -290,6 +301,7 @@
 		PrepareRenames:           make(PrepareRenames),
 		SuggestedFixes:           make(SuggestedFixes),
 		FunctionExtractions:      make(FunctionExtractions),
+		MethodExtractions:        make(MethodExtractions),
 		Symbols:                  make(Symbols),
 		symbolsChildren:          make(SymbolsChildren),
 		symbolInformation:        make(SymbolInformation),
@@ -322,6 +334,14 @@
 	}
 
 	files := packagestest.MustCopyFileTree(dir)
+	// Prune test cases that exercise generics.
+	if !typeparams.Enabled {
+		for name := range files {
+			if strings.Contains(name, "_generics") {
+				delete(files, name)
+			}
+		}
+	}
 	overlays := map[string][]byte{}
 	for fragment, operation := range files {
 		if trimmed := strings.TrimSuffix(fragment, goldenFileSuffix); trimmed != fragment {
@@ -449,6 +469,7 @@
 		"link":            datum.collectLinks,
 		"suggestedfix":    datum.collectSuggestedFixes,
 		"extractfunc":     datum.collectFunctionExtractions,
+		"extractmethod":   datum.collectMethodExtractions,
 		"incomingcalls":   datum.collectIncomingCalls,
 		"outgoingcalls":   datum.collectOutgoingCalls,
 		"addimport":       datum.collectAddImports,
@@ -659,6 +680,20 @@
 		}
 	})
 
+	t.Run("MethodExtraction", func(t *testing.T) {
+		t.Helper()
+		for start, end := range data.MethodExtractions {
+			// Check if we should skip this spn if the -modfile flag is not available.
+			if shouldSkip(data, start.URI()) {
+				continue
+			}
+			t.Run(SpanName(start), func(t *testing.T) {
+				t.Helper()
+				tests.MethodExtraction(t, start, end)
+			})
+		}
+	})
+
 	t.Run("Definition", func(t *testing.T) {
 		t.Helper()
 		for spn, d := range data.Definitions {
@@ -879,6 +914,7 @@
 	fmt.Fprintf(buf, "SemanticTokenCount = %v\n", len(data.SemanticTokens))
 	fmt.Fprintf(buf, "SuggestedFixCount = %v\n", len(data.SuggestedFixes))
 	fmt.Fprintf(buf, "FunctionExtractionCount = %v\n", len(data.FunctionExtractions))
+	fmt.Fprintf(buf, "MethodExtractionCount = %v\n", len(data.MethodExtractions))
 	fmt.Fprintf(buf, "DefinitionsCount = %v\n", definitionCount)
 	fmt.Fprintf(buf, "TypeDefinitionsCount = %v\n", typeDefinitionCount)
 	fmt.Fprintf(buf, "HighlightsCount = %v\n", len(data.Highlights))
@@ -1112,6 +1148,12 @@
 	}
 }
 
+func (data *Data) collectMethodExtractions(start span.Span, end span.Span) {
+	if _, ok := data.MethodExtractions[start]; !ok {
+		data.MethodExtractions[start] = end
+	}
+}
+
 func (data *Data) collectDefinitions(src, target span.Span) {
 	data.Definitions[src] = Definition{
 		Src: src,
diff --git a/internal/lsp/text_synchronization.go b/internal/lsp/text_synchronization.go
index 27b53b8..d9a6961 100644
--- a/internal/lsp/text_synchronization.go
+++ b/internal/lsp/text_synchronization.go
@@ -9,12 +9,14 @@
 	"context"
 	"fmt"
 	"path/filepath"
-	"sync"
+	"time"
 
+	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/jsonrpc2"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/span"
+	"golang.org/x/tools/internal/xcontext"
 	errors "golang.org/x/xerrors"
 )
 
@@ -203,41 +205,103 @@
 }
 
 func (s *Server) didModifyFiles(ctx context.Context, modifications []source.FileModification, cause ModificationSource) error {
-	// diagnosticWG tracks outstanding diagnostic work as a result of this file
-	// modification.
-	var diagnosticWG sync.WaitGroup
+	diagnoseDone := make(chan struct{})
 	if s.session.Options().VerboseWorkDoneProgress {
-		work := s.progress.start(ctx, DiagnosticWorkTitle(cause), "Calculating file diagnostics...", nil, nil)
+		work := s.progress.Start(ctx, DiagnosticWorkTitle(cause), "Calculating file diagnostics...", nil, nil)
 		defer func() {
 			go func() {
-				diagnosticWG.Wait()
-				work.end("Done.")
+				<-diagnoseDone
+				work.End("Done.")
 			}()
 		}()
 	}
 
+	onDisk := cause == FromDidChangeWatchedFiles
+	delay := s.session.Options().ExperimentalWatchedFileDelay
+	s.fileChangeMu.Lock()
+	defer s.fileChangeMu.Unlock()
+	if !onDisk || delay == 0 {
+		// No delay: process the modifications immediately.
+		return s.processModifications(ctx, modifications, onDisk, diagnoseDone)
+	}
+	// Debounce and batch up pending modifications from watched files.
+	pending := &pendingModificationSet{
+		diagnoseDone: diagnoseDone,
+		changes:      modifications,
+	}
+	// Invariant: changes appended to s.pendingOnDiskChanges are eventually
+	// handled in the order they arrive. This guarantee is only partially
+	// enforced here. Specifically:
+	//  1. s.fileChangeMu ensures that the append below happens in the order
+	//     notifications were received, so that the changes within each batch are
+	//     ordered properly.
+	//  2. The debounced func below holds s.fileChangeMu while processing all
+	//     changes in s.pendingOnDiskChanges, ensuring that no batches are
+	//     processed out of order.
+	//  3. Session.ExpandModificationsToDirectories and Session.DidModifyFiles
+	//     process changes in order.
+	s.pendingOnDiskChanges = append(s.pendingOnDiskChanges, pending)
+	ctx = xcontext.Detach(ctx)
+	okc := s.watchedFileDebouncer.debounce("", 0, time.After(delay))
+	go func() {
+		if ok := <-okc; !ok {
+			return
+		}
+		s.fileChangeMu.Lock()
+		var allChanges []source.FileModification
+		// For accurate progress notifications, we must notify all goroutines
+		// waiting for the diagnose pass following a didChangeWatchedFiles
+		// notification. This is necessary for regtest assertions.
+		var dones []chan struct{}
+		for _, pending := range s.pendingOnDiskChanges {
+			allChanges = append(allChanges, pending.changes...)
+			dones = append(dones, pending.diagnoseDone)
+		}
+
+		allDone := make(chan struct{})
+		if err := s.processModifications(ctx, allChanges, onDisk, allDone); err != nil {
+			event.Error(ctx, "processing delayed file changes", err)
+		}
+		s.pendingOnDiskChanges = nil
+		s.fileChangeMu.Unlock()
+		<-allDone
+		for _, done := range dones {
+			close(done)
+		}
+	}()
+	return nil
+}
+
+// processModifications updates server state to reflect file changes, and
+// triggers diagnostics to run asynchronously. The diagnoseDone channel will be
+// closed once diagnostics complete.
+func (s *Server) processModifications(ctx context.Context, modifications []source.FileModification, onDisk bool, diagnoseDone chan struct{}) error {
+	s.stateMu.Lock()
+	if s.state >= serverShutDown {
+		// This state check does not prevent races below, and exists only to
+		// produce a better error message. The actual race to the cache should be
+		// guarded by Session.viewMu.
+		s.stateMu.Unlock()
+		close(diagnoseDone)
+		return errors.New("server is shut down")
+	}
+	s.stateMu.Unlock()
 	// If the set of changes included directories, expand those directories
 	// to their files.
 	modifications = s.session.ExpandModificationsToDirectories(ctx, modifications)
 
 	snapshots, releases, err := s.session.DidModifyFiles(ctx, modifications)
 	if err != nil {
+		close(diagnoseDone)
 		return err
 	}
 
-	for snapshot, uris := range snapshots {
-		diagnosticWG.Add(1)
-		go func(snapshot source.Snapshot, uris []span.URI) {
-			defer diagnosticWG.Done()
-			s.diagnoseSnapshot(snapshot, uris, cause == FromDidChangeWatchedFiles)
-		}(snapshot, uris)
-	}
-
 	go func() {
-		diagnosticWG.Wait()
+		s.diagnoseSnapshots(snapshots, onDisk)
 		for _, release := range releases {
 			release()
 		}
+		close(diagnoseDone)
 	}()
 
 	// After any file modifications, we need to update our watched files,
diff --git a/internal/mod/lazyregexp/lazyre.go b/internal/mod/lazyregexp/lazyre.go
new file mode 100644
index 0000000..7f524d2
--- /dev/null
+++ b/internal/mod/lazyregexp/lazyre.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
+// regexp variables without forcing them to be compiled at init.
+package lazyregexp
+
+import (
+	"os"
+	"regexp"
+	"strings"
+	"sync"
+)
+
+// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be
+// compiled the first time it is needed.
+type Regexp struct {
+	str  string
+	once sync.Once
+	rx   *regexp.Regexp
+}
+
+func (r *Regexp) re() *regexp.Regexp {
+	r.once.Do(r.build)
+	return r.rx
+}
+
+func (r *Regexp) build() {
+	r.rx = regexp.MustCompile(r.str)
+	r.str = ""
+}
+
+func (r *Regexp) MatchString(s string) bool {
+	return r.re().MatchString(s)
+}
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+// New creates a new lazy regexp, delaying the compiling work until it is first
+// needed. If the code is being run as part of tests, the regexp compiling will
+// happen immediately.
+func New(str string) *Regexp {
+	lr := &Regexp{str: str}
+	if inTest {
+		// In tests, always compile the regexps early.
+		lr.re()
+	}
+	return lr
+}
diff --git a/internal/mod/modfile/read.go b/internal/mod/modfile/read.go
new file mode 100644
index 0000000..f49d553
--- /dev/null
+++ b/internal/mod/modfile/read.go
@@ -0,0 +1,655 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/mod/modfile"
+)
+
+// An input represents a single input file being parsed.
+type input struct {
+	// Lexing state.
+	filename   string            // name of input file, for errors
+	complete   []byte            // entire input
+	remaining  []byte            // remaining input
+	tokenStart []byte            // token being scanned to end of input
+	token      token             // next token to be returned by lex, peek
+	pos        modfile.Position  // current input position
+	comments   []modfile.Comment // accumulated comments
+
+	// Parser state.
+	file        *modfile.FileSyntax // returned top-level syntax tree
+	parseErrors modfile.ErrorList   // errors encountered during parsing
+
+	// Comment assignment state.
+	pre  []modfile.Expr // all expressions, in preorder traversal
+	post []modfile.Expr // all expressions, in postorder traversal
+}
+
+func newInput(filename string, data []byte) *input {
+	return &input{
+		filename:  filename,
+		complete:  data,
+		remaining: data,
+		pos:       modfile.Position{Line: 1, LineRune: 1, Byte: 0},
+	}
+}
+
+// parse parses the input file.
+func parse(file string, data []byte) (f *modfile.FileSyntax, err error) {
+	// The parser panics for both routine errors like syntax errors
+	// and for programmer bugs like array index errors.
+	// Turn both into error returns. Catching bug panics is
+	// especially important when processing many files.
+	in := newInput(file, data)
+	defer func() {
+		if e := recover(); e != nil && e != &in.parseErrors {
+			in.parseErrors = append(in.parseErrors, modfile.Error{
+				Filename: in.filename,
+				Pos:      in.pos,
+				Err:      fmt.Errorf("internal error: %v", e),
+			})
+		}
+		if err == nil && len(in.parseErrors) > 0 {
+			err = in.parseErrors
+		}
+	}()
+
+	// Prime the lexer by reading in the first token. It will be available
+	// in the next peek() or lex() call.
+	in.readToken()
+
+	// Invoke the parser.
+	in.parseFile()
+	if len(in.parseErrors) > 0 {
+		return nil, in.parseErrors
+	}
+	in.file.Name = in.filename
+
+	// Assign comments to nearby syntax.
+	in.assignComments()
+
+	return in.file, nil
+}
+
+// Error is called to report an error.
+// Error does not return: it panics.
+func (in *input) Error(s string) {
+	in.parseErrors = append(in.parseErrors, modfile.Error{
+		Filename: in.filename,
+		Pos:      in.pos,
+		Err:      errors.New(s),
+	})
+	panic(&in.parseErrors)
+}
+
+// eof reports whether the input has reached end of file.
+func (in *input) eof() bool {
+	return len(in.remaining) == 0
+}
+
+// peekRune returns the next rune in the input without consuming it.
+func (in *input) peekRune() int {
+	if len(in.remaining) == 0 {
+		return 0
+	}
+	r, _ := utf8.DecodeRune(in.remaining)
+	return int(r)
+}
+
+// peekPrefix reports whether the remaining input begins with the given prefix.
+func (in *input) peekPrefix(prefix string) bool {
+	// This is like bytes.HasPrefix(in.remaining, []byte(prefix))
+	// but without the allocation of the []byte copy of prefix.
+	for i := 0; i < len(prefix); i++ {
+		if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// readRune consumes and returns the next rune in the input.
+func (in *input) readRune() int {
+	if len(in.remaining) == 0 {
+		in.Error("internal lexer error: readRune at EOF")
+	}
+	r, size := utf8.DecodeRune(in.remaining)
+	in.remaining = in.remaining[size:]
+	if r == '\n' {
+		in.pos.Line++
+		in.pos.LineRune = 1
+	} else {
+		in.pos.LineRune++
+	}
+	in.pos.Byte += size
+	return int(r)
+}
+
+type token struct {
+	kind   tokenKind
+	pos    modfile.Position
+	endPos modfile.Position
+	text   string
+}
+
+type tokenKind int
+
+const (
+	_EOF tokenKind = -(iota + 1)
+	_EOLCOMMENT
+	_IDENT
+	_STRING
+	_COMMENT
+
+	// newlines and punctuation tokens are allowed as ASCII codes.
+)
+
+func (k tokenKind) isComment() bool {
+	return k == _COMMENT || k == _EOLCOMMENT
+}
+
+// isEOL returns whether a token terminates a line.
+func (k tokenKind) isEOL() bool {
+	return k == _EOF || k == _EOLCOMMENT || k == '\n'
+}
+
+// startToken marks the beginning of the next input token.
+// It must be followed by a call to endToken, once the token's text has
+// been consumed using readRune.
+func (in *input) startToken() {
+	in.tokenStart = in.remaining
+	in.token.text = ""
+	in.token.pos = in.pos
+}
+
+// endToken marks the end of an input token.
+// It records the actual token string in tok.text.
+// A single trailing newline (LF or CRLF) will be removed from comment tokens.
+func (in *input) endToken(kind tokenKind) {
+	in.token.kind = kind
+	text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)])
+	if kind.isComment() {
+		if strings.HasSuffix(text, "\r\n") {
+			text = text[:len(text)-2]
+		} else {
+			text = strings.TrimSuffix(text, "\n")
+		}
+	}
+	in.token.text = text
+	in.token.endPos = in.pos
+}
+
+// peek returns the kind of the next token returned by lex.
+func (in *input) peek() tokenKind {
+	return in.token.kind
+}
+
+// lex is called from the parser to obtain the next input token.
+func (in *input) lex() token {
+	tok := in.token
+	in.readToken()
+	return tok
+}
+
+// readToken lexes the next token from the text and stores it in in.token.
+func (in *input) readToken() {
+	// Skip past spaces, stopping at non-space or EOF.
+	for !in.eof() {
+		c := in.peekRune()
+		if c == ' ' || c == '\t' || c == '\r' {
+			in.readRune()
+			continue
+		}
+
+		// Comment runs to end of line.
+		if in.peekPrefix("//") {
+			in.startToken()
+
+			// Is this comment the only thing on its line?
+			// Find the last \n before this // and see if it's all
+			// spaces from there to here.
+			i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
+			suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
+			in.readRune()
+			in.readRune()
+
+			// Consume comment.
+			for len(in.remaining) > 0 && in.readRune() != '\n' {
+			}
+
+			// If we are at top level (not in a statement), hand the comment to
+			// the parser as a _COMMENT token. The grammar is written
+			// to handle top-level comments itself.
+			if !suffix {
+				in.endToken(_COMMENT)
+				return
+			}
+
+			// Otherwise, save comment for later attachment to syntax tree.
+			in.endToken(_EOLCOMMENT)
+			in.comments = append(in.comments, modfile.Comment{in.token.pos, in.token.text, suffix})
+			return
+		}
+
+		if in.peekPrefix("/*") {
+			in.Error("mod files must use // comments (not /* */ comments)")
+		}
+
+		// Found non-space non-comment.
+		break
+	}
+
+	// Found the beginning of the next token.
+	in.startToken()
+
+	// End of file.
+	if in.eof() {
+		in.endToken(_EOF)
+		return
+	}
+
+	// Punctuation tokens.
+	switch c := in.peekRune(); c {
+	case '\n', '(', ')', '[', ']', '{', '}', ',':
+		in.readRune()
+		in.endToken(tokenKind(c))
+		return
+
+	case '"', '`': // quoted string
+		quote := c
+		in.readRune()
+		for {
+			if in.eof() {
+				in.pos = in.token.pos
+				in.Error("unexpected EOF in string")
+			}
+			if in.peekRune() == '\n' {
+				in.Error("unexpected newline in string")
+			}
+			c := in.readRune()
+			if c == quote {
+				break
+			}
+			if c == '\\' && quote != '`' {
+				if in.eof() {
+					in.pos = in.token.pos
+					in.Error("unexpected EOF in string")
+				}
+				in.readRune()
+			}
+		}
+		in.endToken(_STRING)
+		return
+	}
+
+	// Checked all punctuation. Must be identifier token.
+	if c := in.peekRune(); !isIdent(c) {
+		in.Error(fmt.Sprintf("unexpected input character %#q", c))
+	}
+
+	// Scan over identifier.
+	for isIdent(in.peekRune()) {
+		if in.peekPrefix("//") {
+			break
+		}
+		if in.peekPrefix("/*") {
+			in.Error("mod files must use // comments (not /* */ comments)")
+		}
+		in.readRune()
+	}
+	in.endToken(_IDENT)
+}
+
+// isIdent reports whether c is an identifier rune.
+// We treat most printable runes as identifier runes, except for a handful of
+// ASCII punctuation characters.
+func isIdent(c int) bool {
+	switch r := rune(c); r {
+	case ' ', '(', ')', '[', ']', '{', '}', ',':
+		return false
+	default:
+		return !unicode.IsSpace(r) && unicode.IsPrint(r)
+	}
+}
+
+// Comment assignment.
+// We build two lists of all subexpressions, preorder and postorder.
+// The preorder list is ordered by start location, with outer expressions first.
+// The postorder list is ordered by end location, with outer expressions last.
+// We use the preorder list to assign each whole-line comment to the syntax
+// immediately following it, and we use the postorder list to assign each
+// end-of-line comment to the syntax immediately preceding it.
+
+// order walks the expression adding it and its subexpressions to the
+// preorder and postorder lists.
+func (in *input) order(x modfile.Expr) {
+	if x != nil {
+		in.pre = append(in.pre, x)
+	}
+	switch x := x.(type) {
+	default:
+		panic(fmt.Errorf("order: unexpected type %T", x))
+	case nil:
+		// nothing
+	case *modfile.LParen, *modfile.RParen:
+		// nothing
+	case *modfile.CommentBlock:
+		// nothing
+	case *modfile.Line:
+		// nothing
+	case *modfile.FileSyntax:
+		for _, stmt := range x.Stmt {
+			in.order(stmt)
+		}
+	case *modfile.LineBlock:
+		in.order(&x.LParen)
+		for _, l := range x.Line {
+			in.order(l)
+		}
+		in.order(&x.RParen)
+	}
+	if x != nil {
+		in.post = append(in.post, x)
+	}
+}
+
+// assignComments attaches comments to nearby syntax.
+func (in *input) assignComments() {
+	const debug = false
+
+	// Generate preorder and postorder lists.
+	in.order(in.file)
+
+	// Split into whole-line comments and suffix comments.
+	var line, suffix []modfile.Comment
+	for _, com := range in.comments {
+		if com.Suffix {
+			suffix = append(suffix, com)
+		} else {
+			line = append(line, com)
+		}
+	}
+
+	if debug {
+		for _, c := range line {
+			fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+		}
+	}
+
+	// Assign line comments to syntax immediately following.
+	for _, x := range in.pre {
+		start, _ := x.Span()
+		if debug {
+			fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
+		}
+		xcom := x.Comment()
+		for len(line) > 0 && start.Byte >= line[0].Start.Byte {
+			if debug {
+				fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
+			}
+			xcom.Before = append(xcom.Before, line[0])
+			line = line[1:]
+		}
+	}
+
+	// Remaining line comments go at end of file.
+	in.file.After = append(in.file.After, line...)
+
+	if debug {
+		for _, c := range suffix {
+			fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+		}
+	}
+
+	// Assign suffix comments to syntax immediately before.
+	for i := len(in.post) - 1; i >= 0; i-- {
+		x := in.post[i]
+
+		start, end := x.Span()
+		if debug {
+			fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
+		}
+
+		// Do not assign suffix comments to end of line block or whole file.
+		// Instead assign them to the last element inside.
+		switch x.(type) {
+		case *modfile.FileSyntax:
+			continue
+		}
+
+		// Do not assign suffix comments to something that starts
+		// on an earlier line, so that in
+		//
+		//	x ( y
+		//		z ) // comment
+		//
+		// we assign the comment to z and not to x ( ... ).
+		if start.Line != end.Line {
+			continue
+		}
+		xcom := x.Comment()
+		for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
+			if debug {
+				fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
+			}
+			xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
+			suffix = suffix[:len(suffix)-1]
+		}
+	}
+
+	// We assigned suffix comments in reverse.
+	// If multiple suffix comments were appended to the same
+	// expression node, they are now in reverse. Fix that.
+	for _, x := range in.post {
+		reverseComments(x.Comment().Suffix)
+	}
+
+	// Remaining suffix comments go at beginning of file.
+	in.file.Before = append(in.file.Before, suffix...)
+}
+
+// reverseComments reverses the []Comment list.
+func reverseComments(list []modfile.Comment) {
+	for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+		list[i], list[j] = list[j], list[i]
+	}
+}
+
+func (in *input) parseFile() {
+	in.file = new(modfile.FileSyntax)
+	var cb *modfile.CommentBlock
+	for {
+		switch in.peek() {
+		case '\n':
+			in.lex()
+			if cb != nil {
+				in.file.Stmt = append(in.file.Stmt, cb)
+				cb = nil
+			}
+		case _COMMENT:
+			tok := in.lex()
+			if cb == nil {
+				cb = &modfile.CommentBlock{Start: tok.pos}
+			}
+			com := cb.Comment()
+			com.Before = append(com.Before, modfile.Comment{Start: tok.pos, Token: tok.text})
+		case _EOF:
+			if cb != nil {
+				in.file.Stmt = append(in.file.Stmt, cb)
+			}
+			return
+		default:
+			in.parseStmt()
+			if cb != nil {
+				in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
+				cb = nil
+			}
+		}
+	}
+}
+
+func (in *input) parseStmt() {
+	tok := in.lex()
+	start := tok.pos
+	end := tok.endPos
+	tokens := []string{tok.text}
+	for {
+		tok := in.lex()
+		switch {
+		case tok.kind.isEOL():
+			in.file.Stmt = append(in.file.Stmt, &modfile.Line{
+				Start: start,
+				Token: tokens,
+				End:   end,
+			})
+			return
+
+		case tok.kind == '(':
+			if next := in.peek(); next.isEOL() {
+				// Start of block: no more tokens on this line.
+				in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok))
+				return
+			} else if next == ')' {
+				rparen := in.lex()
+				if in.peek().isEOL() {
+					// Empty block.
+					in.lex()
+					in.file.Stmt = append(in.file.Stmt, &modfile.LineBlock{
+						Start:  start,
+						Token:  tokens,
+						LParen: modfile.LParen{Pos: tok.pos},
+						RParen: modfile.RParen{Pos: rparen.pos},
+					})
+					return
+				}
+				// '( )' in the middle of the line, not a block.
+				tokens = append(tokens, tok.text, rparen.text)
+			} else {
+				// '(' in the middle of the line, not a block.
+				tokens = append(tokens, tok.text)
+			}
+
+		default:
+			tokens = append(tokens, tok.text)
+			end = tok.endPos
+		}
+	}
+}
+
+func (in *input) parseLineBlock(start modfile.Position, token []string, lparen token) *modfile.LineBlock {
+	x := &modfile.LineBlock{
+		Start:  start,
+		Token:  token,
+		LParen: modfile.LParen{Pos: lparen.pos},
+	}
+	var comments []modfile.Comment
+	for {
+		switch in.peek() {
+		case _EOLCOMMENT:
+			// Suffix comment, will be attached later by assignComments.
+			in.lex()
+		case '\n':
+			// Blank line. Add an empty comment to preserve it.
+			in.lex()
+			if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
+				comments = append(comments, modfile.Comment{})
+			}
+		case _COMMENT:
+			tok := in.lex()
+			comments = append(comments, modfile.Comment{Start: tok.pos, Token: tok.text})
+		case _EOF:
+			in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
+		case ')':
+			rparen := in.lex()
+			x.RParen.Before = comments
+			x.RParen.Pos = rparen.pos
+			if !in.peek().isEOL() {
+				in.Error("syntax error (expected newline after closing paren)")
+			}
+			in.lex()
+			return x
+		default:
+			l := in.parseLine()
+			x.Line = append(x.Line, l)
+			l.Comment().Before = comments
+			comments = nil
+		}
+	}
+}
+
+func (in *input) parseLine() *modfile.Line {
+	tok := in.lex()
+	if tok.kind.isEOL() {
+		in.Error("internal parse error: parseLine at end of line")
+	}
+	start := tok.pos
+	end := tok.endPos
+	tokens := []string{tok.text}
+	for {
+		tok := in.lex()
+		if tok.kind.isEOL() {
+			return &modfile.Line{
+				Start:   start,
+				Token:   tokens,
+				End:     end,
+				InBlock: true,
+			}
+		}
+		tokens = append(tokens, tok.text)
+		end = tok.endPos
+	}
+}
+
+var (
+	slashSlash = []byte("//")
+	moduleStr  = []byte("module")
+)
+
+// ModulePath returns the module path from the gomod file text.
+// If it cannot find a module path, it returns an empty string.
+// It is tolerant of unrelated problems in the go.mod file.
+func ModulePath(mod []byte) string {
+	for len(mod) > 0 {
+		line := mod
+		mod = nil
+		if i := bytes.IndexByte(line, '\n'); i >= 0 {
+			line, mod = line[:i], line[i+1:]
+		}
+		if i := bytes.Index(line, slashSlash); i >= 0 {
+			line = line[:i]
+		}
+		line = bytes.TrimSpace(line)
+		if !bytes.HasPrefix(line, moduleStr) {
+			continue
+		}
+		line = line[len(moduleStr):]
+		n := len(line)
+		line = bytes.TrimSpace(line)
+		if len(line) == n || len(line) == 0 {
+			continue
+		}
+
+		if line[0] == '"' || line[0] == '`' {
+			p, err := strconv.Unquote(string(line))
+			if err != nil {
+				return "" // malformed quoted string or multiline module path
+			}
+			return p
+		}
+
+		return string(line)
+	}
+	return "" // missing module path
+}
diff --git a/internal/mod/modfile/rule.go b/internal/mod/modfile/rule.go
new file mode 100644
index 0000000..163a2db
--- /dev/null
+++ b/internal/mod/modfile/rule.go
@@ -0,0 +1,361 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modfile implements a parser and formatter for go.mod files.
+//
+// The go.mod syntax is described in
+// https://golang.org/cmd/go/#hdr-The_go_mod_file.
+//
+// The Parse and ParseLax functions both parse a go.mod file and return an
+// abstract syntax tree. ParseLax ignores unknown statements and may be used to
+// parse go.mod files that may have been developed with newer versions of Go.
+//
+// The File struct returned by Parse and ParseLax represents an abstract
+// go.mod file. File has several methods like AddNewRequire and DropReplace
+// that can be used to programmatically edit a file.
+//
+// The Format function formats a File back to a byte slice which can be
+// written to a file.
+package modfile
+
+import (
+	"errors"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"golang.org/x/mod/modfile"
+	"golang.org/x/mod/module"
+	"golang.org/x/tools/internal/mod/lazyregexp"
+)
+
+// A WorkFile is the parsed, interpreted form of a go.work file.
+type WorkFile struct {
+	Go        *modfile.Go
+	Directory []*Directory
+	Replace   []*modfile.Replace
+
+	Syntax *modfile.FileSyntax
+}
+
+// A Directory is a single directory statement.
+type Directory struct {
+	DiskPath   string // TODO(matloob): Replace uses module.Version for new. Do that here?
+	ModulePath string // Module path in the comment.
+	Syntax     *modfile.Line
+}
+
+// ParseWork parses and returns a go.work file.
+//
+// file is the name of the file, used in positions and errors.
+//
+// data is the content of the file.
+//
+// fix is an optional function that canonicalizes module versions.
+// If fix is nil, all module versions must be canonical (module.CanonicalVersion
+// must return the same string).
+func ParseWork(file string, data []byte, fix modfile.VersionFixer) (*WorkFile, error) {
+	return parseToWorkFile(file, data, fix, true)
+}
+
+var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
+
+func parseToWorkFile(file string, data []byte, fix modfile.VersionFixer, strict bool) (parsed *WorkFile, err error) {
+	fs, err := parse(file, data)
+	if err != nil {
+		return nil, err
+	}
+	f := &WorkFile{
+		Syntax: fs,
+	}
+	var errs modfile.ErrorList
+
+	for _, x := range fs.Stmt {
+		switch x := x.(type) {
+		case *modfile.Line:
+			f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict)
+
+		case *modfile.LineBlock:
+			if len(x.Token) > 1 {
+				if strict {
+					errs = append(errs, modfile.Error{
+						Filename: file,
+						Pos:      x.Start,
+						Err:      fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+					})
+				}
+				continue
+			}
+			switch x.Token[0] {
+			default:
+				if strict {
+					errs = append(errs, modfile.Error{
+						Filename: file,
+						Pos:      x.Start,
+						Err:      fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+					})
+				}
+				continue
+			case "module", "directory", "replace":
+				for _, l := range x.Line {
+					f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
+				}
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		return nil, errs
+	}
+	return f, nil
+}
+
+func (f *WorkFile) add(errs *modfile.ErrorList, block *modfile.LineBlock, line *modfile.Line, verb string, args []string, fix modfile.VersionFixer, strict bool) {
+	// If strict is false, this module is a dependency.
+	// We ignore all unknown directives as well as main-module-only
+	// directives like replace and exclude. It will work better for
+	// forward compatibility if we can depend on modules that have unknown
+	// statements (presumed relevant only when acting as the main module)
+	// and simply ignore those statements.
+	if !strict {
+		switch verb {
+		case "go", "module", "retract", "require":
+			// want these even for dependency go.mods
+		default:
+			return
+		}
+	}
+
+	wrapModPathError := func(modPath string, err error) {
+		*errs = append(*errs, modfile.Error{
+			Filename: f.Syntax.Name,
+			Pos:      line.Start,
+			ModPath:  modPath,
+			Verb:     verb,
+			Err:      err,
+		})
+	}
+	wrapError := func(err error) {
+		*errs = append(*errs, modfile.Error{
+			Filename: f.Syntax.Name,
+			Pos:      line.Start,
+			Err:      err,
+		})
+	}
+	errorf := func(format string, args ...interface{}) {
+		wrapError(fmt.Errorf(format, args...))
+	}
+
+	switch verb {
+	default:
+		errorf("unknown directive: %s", verb)
+
+	case "go":
+		if f.Go != nil {
+			errorf("repeated go statement")
+			return
+		}
+		if len(args) != 1 {
+			errorf("go directive expects exactly one argument")
+			return
+		} else if !GoVersionRE.MatchString(args[0]) {
+			errorf("invalid go version '%s': must match format 1.23", args[0])
+			return
+		}
+
+		f.Go = &modfile.Go{Syntax: line}
+		f.Go.Version = args[0]
+
+	case "directory":
+		if len(args) != 1 {
+			errorf("usage: %s ../local/directory", verb) // TODO(matloob) better example; most directories will be subdirectories of go.work dir
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			errorf("invalid quoted string: %v", err)
+			return
+		}
+		f.Directory = append(f.Directory, &Directory{
+			DiskPath: s,
+			Syntax:   line,
+		})
+
+	case "replace":
+		arrow := 2
+		if len(args) >= 2 && args[1] == "=>" {
+			arrow = 1
+		}
+		if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
+			errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb)
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			errorf("invalid quoted string: %v", err)
+			return
+		}
+		pathMajor, err := modulePathMajor(s)
+		if err != nil {
+			wrapModPathError(s, err)
+			return
+		}
+		var v string
+		if arrow == 2 {
+			v, err = parseVersion(verb, s, &args[1], fix)
+			if err != nil {
+				wrapError(err)
+				return
+			}
+			if err := module.CheckPathMajor(v, pathMajor); err != nil {
+				wrapModPathError(s, err)
+				return
+			}
+		}
+		ns, err := parseString(&args[arrow+1])
+		if err != nil {
+			errorf("invalid quoted string: %v", err)
+			return
+		}
+		nv := ""
+		if len(args) == arrow+2 {
+			if !IsDirectoryPath(ns) {
+				errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)")
+				return
+			}
+			if filepath.Separator == '/' && strings.Contains(ns, `\`) {
+				errorf("replacement directory appears to be Windows path (on a non-windows system)")
+				return
+			}
+		}
+		if len(args) == arrow+3 {
+			nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
+			if err != nil {
+				wrapError(err)
+				return
+			}
+			if IsDirectoryPath(ns) {
+				errorf("replacement module directory path %q cannot have version", ns)
+				return
+			}
+		}
+		f.Replace = append(f.Replace, &modfile.Replace{
+			Old:    module.Version{Path: s, Version: v},
+			New:    module.Version{Path: ns, Version: nv},
+			Syntax: line,
+		})
+	}
+}
+
+// IsDirectoryPath reports whether the given path should be interpreted
+// as a directory path. Just like on the go command line, relative paths
+// and rooted paths are directory paths; the rest are module paths.
+func IsDirectoryPath(ns string) bool {
+	// Because go.mod files can move from one system to another,
+	// we check all known path syntaxes, both Unix and Windows.
+	return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
+		strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
+		len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
+}
+
+// MustQuote reports whether s must be quoted in order to appear as
+// a single token in a go.mod line.
+func MustQuote(s string) bool {
+	for _, r := range s {
+		switch r {
+		case ' ', '"', '\'', '`':
+			return true
+
+		case '(', ')', '[', ']', '{', '}', ',':
+			if len(s) > 1 {
+				return true
+			}
+
+		default:
+			if !unicode.IsPrint(r) {
+				return true
+			}
+		}
+	}
+	return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*")
+}
+
+// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
+// the quotation of s.
+func AutoQuote(s string) string {
+	if MustQuote(s) {
+		return strconv.Quote(s)
+	}
+	return s
+}
+
+func parseString(s *string) (string, error) {
+	t := *s
+	if strings.HasPrefix(t, `"`) {
+		var err error
+		if t, err = strconv.Unquote(t); err != nil {
+			return "", err
+		}
+	} else if strings.ContainsAny(t, "\"'`") {
+		// Other quotes are reserved both for possible future expansion
+		// and to avoid confusion. For example if someone types 'x'
+		// we want that to be a syntax error and not a literal x in literal quotation marks.
+		return "", fmt.Errorf("unquoted string cannot contain quote")
+	}
+	*s = AutoQuote(t)
+	return t, nil
+}
+
+func parseVersion(verb string, path string, s *string, fix modfile.VersionFixer) (string, error) {
+	t, err := parseString(s)
+	if err != nil {
+		return "", &modfile.Error{
+			Verb:    verb,
+			ModPath: path,
+			Err: &module.InvalidVersionError{
+				Version: *s,
+				Err:     err,
+			},
+		}
+	}
+	if fix != nil {
+		fixed, err := fix(path, t)
+		if err != nil {
+			if err, ok := err.(*module.ModuleError); ok {
+				return "", &modfile.Error{
+					Verb:    verb,
+					ModPath: path,
+					Err:     err.Err,
+				}
+			}
+			return "", err
+		}
+		t = fixed
+	} else {
+		cv := module.CanonicalVersion(t)
+		if cv == "" {
+			return "", &modfile.Error{
+				Verb:    verb,
+				ModPath: path,
+				Err: &module.InvalidVersionError{
+					Version: t,
+					Err:     errors.New("must be of the form v1.2.3"),
+				},
+			}
+		}
+		t = cv
+	}
+	*s = t
+	return *s, nil
+}
+
+func modulePathMajor(path string) (string, error) {
+	_, major, ok := module.SplitPathVersion(path)
+	if !ok {
+		return "", fmt.Errorf("invalid module path")
+	}
+	return major, nil
+}
diff --git a/internal/testenv/testenv.go b/internal/testenv/testenv.go
index 49616da..61735dc 100644
--- a/internal/testenv/testenv.go
+++ b/internal/testenv/testenv.go
@@ -251,16 +251,17 @@
 	case "linux-arm-scaleway":
 		// "linux-arm" was renamed to "linux-arm-scaleway" in CL 303230.
 		fmt.Fprintln(os.Stderr, "skipping test: linux-arm-scaleway builder lacks sufficient memory (https://golang.org/issue/32834)")
-		os.Exit(0)
 	case "plan9-arm":
 		fmt.Fprintln(os.Stderr, "skipping test: plan9-arm builder lacks sufficient memory (https://golang.org/issue/38772)")
-		os.Exit(0)
 	case "netbsd-arm-bsiegert", "netbsd-arm64-bsiegert":
 		// As of 2021-06-02, these builders are running with GO_TEST_TIMEOUT_SCALE=10,
 		// and there is only one of each. We shouldn't waste those scarce resources
 		// running very slow tests.
 		fmt.Fprintf(os.Stderr, "skipping test: %s builder is very slow\n", b)
+	default:
+		return
 	}
+	os.Exit(0)
 }
 
 // Go1Point returns the x in Go 1.x.
diff --git a/internal/tool/tool.go b/internal/tool/tool.go
index ecf68d7..41ecd4e 100644
--- a/internal/tool/tool.go
+++ b/internal/tool/tool.go
@@ -88,6 +88,7 @@
 		fmt.Fprintf(s.Output(), "\n\nUsage: %v [flags] %v\n", app.Name(), app.Usage())
 		app.DetailedHelp(s)
 	}
+	addFlags(s, reflect.StructField{}, reflect.ValueOf(app))
 	if err := Run(ctx, app, args); err != nil {
 		fmt.Fprintf(s.Output(), "%s: %v\n", app.Name(), err)
 		if _, printHelp := err.(commandLineError); printHelp {
diff --git a/internal/typeparams/typeparams.go b/internal/typeparams/typeparams.go
index 624ce37..6b7958a 100644
--- a/internal/typeparams/typeparams.go
+++ b/internal/typeparams/typeparams.go
@@ -101,5 +101,5 @@
 		return nil, nil
 	}
 	inf := info.Inferred[e]
-	return inf.Targs, inf.Sig
+	return inf.TArgs, inf.Sig
 }