all: merge master (81c7dc4) into gopls-release-branch.0.9

Also add a replace directive to gopls/go.mod.

For golang/go#54294

Conflicts:

- gopls/go.mod
- gopls/go.sum

Merge List:

+ 2022-08-04 81c7dc4e4 internal/lsp: polish vulncheck progress messages
+ 2022-08-04 af2a0a816 internal/lsp: use exec.CommandContext when running vulncheck
+ 2022-08-04 3519aa25b internal/lsp/cmd: remove unused Env from pkgLoadConfig
+ 2022-08-04 6c27717f2 internal/lsp/mod/code_lens: add "run govulncheck" codelens
+ 2022-08-04 763f65c3d gopls/internal/regtest/misc: simplify shared edit tests
+ 2022-08-04 fc3b24a45 go/internal/gcimporter: rewrite interface receiver parameters
+ 2022-08-04 b5fd08821 internal/lsp/command: replace VulncheckArgs Dir with URI
+ 2022-08-04 99fd76f9c internal/lsp/cache: delete KnownMetadata.PkgFilesChanged
+ 2022-08-04 01c9ff053 internal/lsp/cache: invalid packages should not be workspace packages
+ 2022-08-04 bd68922a8 internal/lsp: new options to disable certain kinds of semantic tokens
+ 2022-08-04 bceee4b05 internal/lsp/command: let RunVulncheckExp call gopls vulncheck
+ 2022-08-04 3e0a5031e internal/lsp: use directoryFilters in import scanning
+ 2022-08-04 87f47bbfb gopls/internal/regtest/bench: refactor and improve benchmarks
+ 2022-08-03 8b9a1fbdf go/callgraph/vta: do not assume that recovers cannot be deferred
+ 2022-08-03 371fc67d3 go/tools: add check for time formats with 2006-02-01
+ 2022-08-03 d08f5dc9f gopls/internal/regtest: unskip all of TestModFileModification
+ 2022-08-03 ddb90ecd3 internal/lsp/cache: fix data races to view.options
+ 2022-08-02 0d04f65da internal/lsp: re-send diagnostics on file events
+ 2022-08-02 d025cced8 internal/lsp/source: don't crash requesting gc_details for an empty file
+ 2022-08-02 10cb4353f internal/lsp/regtest: improvements for shared execution modes
+ 2022-08-02 4d0b38345 internal/lsp/regtest: minor cleanup for magic regtest envvar
+ 2022-08-02 310ea71b7 gopls/internal/regtest: add a test that ignoring a file resolves errors
+ 2022-08-01 21861e6be gopls/internal/regtest/bench: put feature benchmarks in their own file
+ 2022-08-01 c7f11917c go/internal/gcimporter: set underlying types in proper order; flatten imports
+ 2022-08-01 bd3f52477 internal/lsp: rename all the package names in the renamed package
+ 2022-08-01 9f6568509 internal/lsp/source: enable the new diff with allExperiments
+ 2022-07-29 9580c84d5 internal/lsp: Check if user's editor support rename operation
+ 2022-07-29 f560bc877 internal/lsp/cache: don't set context cancellation as a critical err
+ 2022-07-28 8ea568798 internal/lsp/regtest: remove arbitrary timeout for closing the editor
+ 2022-07-28 d01bb2ff9 internal/lsp/source: document the handling of GOPRIVATE for linkTarget
+ 2022-07-28 98bfcd1be internal/memoize: fix race in Store.Promise
+ 2022-07-27 e02e98a03 internal/lsp/cache: allow network whenever reloading the workspace
+ 2022-07-27 b52794acc internal/lsp/cache: simplify snapshot.Clone reinitialization logic
+ 2022-07-27 f1bb5ca08 internal/lsp/cache: report a critical error when go.work is invalid
+ 2022-07-27 b3b5c13b2 internal/lsp/cache: invalidate packages with missing deps when files are added
+ 2022-07-26 39a4e3647 internal/lsp/regtest: only run /default tests with -short
+ 2022-07-26 f157068c1 internal/lsp/regtest: allow sharing memoized results across regtests
+ 2022-07-26 8ccb25c9a internal/lsp: treat struct tags as string type
+ 2022-07-26 6c8a6c409 internal/lsp: suppress parameter hint when argument matches parameter
+ 2022-07-26 c83f42da7 internal/lsp: update inlay hints documentation to include go snippets
+ 2022-07-26 8b47d4e18 all: update dependencies
+ 2022-07-26 76004542d gopls: update dependencies
+ 2022-07-25 2a6393fe5 internal/lsp: Refactor to share logic with rename
+ 2022-07-25 4375b29f4 cmd/auth/cookieauth: delete unreachable os.Exit
+ 2022-07-25 005c07ac5 gopls/internal/vulncheck: adjust logging
+ 2022-07-25 04bd08781 internal/lsp: enable fillstruct for generics
+ 2022-07-25 6ec939a61 internal/span: fix incorrect bounds check in ToOffset
+ 2022-07-25 178fdf98d gopls/internal/regtest: unskip Test_Issue38211
+ 2022-07-25 1cfe623eb gopls/internal/regtest: unskip TestQuickFixEmptyFiles
+ 2022-07-24 3d474c890 internal/lsp/diff: new diff implementation to replace go-diff
+ 2022-07-22 a2a24778b gopls/internal/regtest: externalize shouldLoad tracking
+ 2022-07-22 7b605f471 gopls/internal/vulncheck: pass go version to vulncheck config
+ 2022-07-22 126ef8f86 gopls/internal/govulncheck: sync x/vuln@b9a3ad9
+ 2022-07-22 a732e45cc gopls: update golang.org/x/vuln
+ 2022-07-22 980cbfeac A+C: delete AUTHORS and CONTRIBUTORS
+ 2022-07-20 ec1f92440 internal/lsp: add check for nil results to fillreturns
+ 2022-07-19 79f3242e4 godoc: support go1.19 doc comment syntax
+ 2022-07-18 2957e9da5 go/callgraph/vta: use types.IsInterface instead of our own isInterface
+ 2022-07-18 2eaea8659 go/callgraph/vta: do not include interface types during propagation
+ 2022-07-18 dc45e742f internal/lsp: Update FilterDisallow to support matching directories at arbitrary depth.
+ 2022-07-15 ce6ce7662 internal/lsp/regtest: increase the time allowed for shutdown
+ 2022-07-15 32129bf2c go/internal/gcimporter: adjust importer to match compiler importer
+ 2022-07-15 22d149443 internal/gcimporter: add support for reading unified IR export data
+ 2022-07-15 c3af7c2fa internal/lsp/cache: delete workspacePackageHandles (dead code)
+ 2022-07-15 1a4e02fee internal/lsp/analysis/unusedvariable: add analyzer
+ 2022-07-14 db8f89b39 internal/memoize: rename Handle to Promise
+ 2022-07-13 a7c53b59a internal/analysisinternal: move FindBestMatch to internal/lsp/fuzzy
+ 2022-07-13 9b6c01892 internal/lsp/cache: don't trim unexported struct fields
+ 2022-07-13 85173cc4b internal/lsp/cache: follow usual structure for packages, analysis maps
+ 2022-07-13 b2eae7626 internal/lsp/cache: simplify modwhy cache
+ 2022-07-13 dcb576d3b internal/lsp/cache: simplify modtidy
+ 2022-07-13 b230791f2 internal/lsp/cache: move PosTo{Decl,Field} out of cache
+ 2022-07-12 8730184ef internal/lsp/fake: retry spurious file lock errors on windows
+ 2022-07-12 459e2b88f internal/lsp/progress: actually close over Context in WorkDoneWriter
+ 2022-07-12 7c06b01db go/callgraph/vta: remove interprocedural flows for receiver objects
+ 2022-07-12 6e6f3131e internal/lsp/regtest: simplify, consolidate, and document settings
+ 2022-07-12 3db2cdc06 internal/lsp: wait for ongoing work to complete during server shutdown
+ 2022-07-12 a5adb0f2c internal/lsp/cache: use mod=readonly for process env funcs
+ 2022-07-12 a79ee0f0f Revert "Revert "internal/lsp/cache: don't pin a snapshot to view.importsState""
+ 2022-07-12 bc957ec62 internal/lsp/source: use token.File-agnostic positions to dedupe refs
+ 2022-07-11 b6e495100 Revert "internal/lsp/cache: don't pin a snapshot to view.importsState"
+ 2022-07-11 71dc5e295 internal/lsp/cache: make snapshot reference counting uniform
+ 2022-07-11 42457a544 internal/lsp/cache: don't pin a snapshot to view.importsState
+ 2022-07-08 d6c099e3c internal/memoize: document stateIdle, RefCounted
+ 2022-07-08 53ead67a9 internal/memoize: delete Generation and Bind
+ 2022-07-08 874617721 internal/lsp/cache: simplify ParseGo
+ 2022-07-08 9c2a5567e internal/lsp/cache: fail addPackageHandle if metadata is stale
+ 2022-07-07 1dfab61a4 internal/lsp/cache: use GetHandle not Bind for 5 URI-keyed maps
+ 2022-07-07 2aef121b8 internal/lsp: consolidate .go/go.mod link logic
+ 2022-07-07 8184d1ff7 internal/lsp/cache: use GetHandle not Bind in astCacheData
+ 2022-07-06 36430f4b3 internal/lsp/cache: use GetHandle not Bind for actions
+ 2022-07-06 b929f3bf4 internal/span: make NewRange accept File, not FileSet
+ 2022-07-06 d69bac6d8 internal/lsp/cache: cache isActiveLocked calculation across snapshots
+ 2022-07-05 afa4a9562 internal/lsp/cache: persist known subdirs
+ 2022-07-01 698251aaa internal/lsp/cache: sort Metadata.Deps, for determinism
+ 2022-07-01 f79f3aac1 internal/lsp/cache: clarify buildPackageHandle
+ 2022-07-01 e92a18fd1 internal/lsp/lsppos: reduce allocations in NewMapper
+ 2022-07-01 f487f3623 internal/lsp/source: reduce allocation in workspace-symbols
+ 2022-07-01 7b04e8b59 internal/persistent: no-op deletion from map does not allocate
+ 2022-07-01 f042799df internal/memoize: delete Bind(cleanup) hook
+ 2022-07-01 bec0cf16b internal/lsp/cache: avoid Handle mechanism for workspace dir
+ 2022-07-01 ffc70b9ac lsp/completion: fix ranking of *types.PkgName candidates
+ 2022-07-01 93bf1fcc7 gopls: add range over channel postfix completion
+ 2022-07-01 79fefdf61 internal/memoize: do not iterate all handles on generation destruction
+ 2022-07-01 fa4babcd9 internal/lsp/cache: use persistent map for storing packages in the snapshot

Change-Id: If2b0e078a3d689617090269ad9ba26edeafaa62c
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index 15167cd..0000000
--- a/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
deleted file mode 100644
index 1c4577e..0000000
--- a/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/cmd/auth/cookieauth/cookieauth.go b/cmd/auth/cookieauth/cookieauth.go
index feefaff..8b0ff17 100644
--- a/cmd/auth/cookieauth/cookieauth.go
+++ b/cmd/auth/cookieauth/cookieauth.go
@@ -40,7 +40,6 @@
 	f, err := os.Open(os.Args[1])
 	if err != nil {
 		log.Fatalf("failed to read cookie file: %v\n", os.Args[1])
-		os.Exit(1)
 	}
 	defer f.Close()
 
diff --git a/go.mod b/go.mod
index 985b9cc..272a6d2 100644
--- a/go.mod
+++ b/go.mod
@@ -1,12 +1,12 @@
 module golang.org/x/tools
 
-go 1.17
+go 1.18
 
 require (
-	github.com/yuin/goldmark v1.4.1
+	github.com/yuin/goldmark v1.4.13
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
-	golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
+	golang.org/x/net v0.0.0-20220722155237-a158d28d115b
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
+	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
 	golang.org/x/text v0.3.7
 )
diff --git a/go.sum b/go.sum
index 85cf00c..f603000 100644
--- a/go.sum
+++ b/go.sum
@@ -1,28 +1,26 @@
-github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/go/analysis/analysistest/analysistest.go b/go/analysis/analysistest/analysistest.go
index 6ef2e79..ea67807 100644
--- a/go/analysis/analysistest/analysistest.go
+++ b/go/analysis/analysistest/analysistest.go
@@ -142,7 +142,7 @@
 						}
 						fileContents[file] = contents
 					}
-					spn, err := span.NewRange(act.Pass.Fset, edit.Pos, edit.End).Span()
+					spn, err := span.NewRange(file, edit.Pos, edit.End).Span()
 					if err != nil {
 						t.Errorf("error converting edit to span %s: %v", file.Name(), err)
 					}
diff --git a/go/analysis/passes/tests/tests.go b/go/analysis/passes/tests/tests.go
index 56b20eb..cab2fa2 100644
--- a/go/analysis/passes/tests/tests.go
+++ b/go/analysis/passes/tests/tests.go
@@ -475,10 +475,12 @@
 	if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 {
 		// Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters.
 		// We have currently decided to also warn before compilation/package loading. This can help users in IDEs.
+		// TODO(adonovan): use ReportRangef(tparams).
 		pass.Reportf(fn.Pos(), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix)
 	}
 
 	if !isTestSuffix(fn.Name.Name[len(prefix):]) {
+		// TODO(adonovan): use ReportRangef(fn.Name).
 		pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix)
 	}
 }
diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go b/go/analysis/passes/timeformat/testdata/src/a/a.go
new file mode 100644
index 0000000..9848144
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/a/a.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the timeformat checker.
+
+package a
+
+import (
+	"time"
+
+	"b"
+)
+
+func hasError() {
+	a, _ := time.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02`
+	a.Format(`2006-02-01`)                                           // want `2006-02-01 should be 2006-01-02`
+	a.Format("2006-02-01 15:04:05")                                  // want `2006-02-01 should be 2006-01-02`
+
+	const c = "2006-02-01"
+	a.Format(c) // want `2006-02-01 should be 2006-01-02`
+}
+
+func notHasError() {
+	a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00")
+	a.Format("2006-01-02")
+
+	const c = "2006-01-02"
+	a.Format(c)
+
+	v := "2006-02-01"
+	a.Format(v) // Allowed though variables.
+
+	m := map[string]string{
+		"y": "2006-02-01",
+	}
+	a.Format(m["y"])
+
+	s := []string{"2006-02-01"}
+	a.Format(s[0])
+
+	a.Format(badFormat())
+
+	o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00")
+	o.Format("2006-02-01")
+}
+
+func badFormat() string {
+	return "2006-02-01"
+}
diff --git a/go/analysis/passes/timeformat/testdata/src/a/a.go.golden b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden
new file mode 100644
index 0000000..9eccded
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/a/a.go.golden
@@ -0,0 +1,50 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the timeformat checker.
+
+package a
+
+import (
+	"time"
+
+	"b"
+)
+
+func hasError() {
+	a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00") // want `2006-02-01 should be 2006-01-02`
+	a.Format(`2006-01-02`)                                           // want `2006-02-01 should be 2006-01-02`
+	a.Format("2006-01-02 15:04:05")                                  // want `2006-02-01 should be 2006-01-02`
+
+	const c = "2006-02-01"
+	a.Format(c) // want `2006-02-01 should be 2006-01-02`
+}
+
+func notHasError() {
+	a, _ := time.Parse("2006-01-02 15:04:05", "2021-01-01 00:00:00")
+	a.Format("2006-01-02")
+
+	const c = "2006-01-02"
+	a.Format(c)
+
+	v := "2006-02-01"
+	a.Format(v) // Allowed though variables.
+
+	m := map[string]string{
+		"y": "2006-02-01",
+	}
+	a.Format(m["y"])
+
+	s := []string{"2006-02-01"}
+	a.Format(s[0])
+
+	a.Format(badFormat())
+
+	o := b.Parse("2006-02-01 15:04:05", "2021-01-01 00:00:00")
+	o.Format("2006-02-01")
+}
+
+func badFormat() string {
+	return "2006-02-01"
+}
diff --git a/go/analysis/passes/timeformat/testdata/src/b/b.go b/go/analysis/passes/timeformat/testdata/src/b/b.go
new file mode 100644
index 0000000..de56908
--- /dev/null
+++ b/go/analysis/passes/timeformat/testdata/src/b/b.go
@@ -0,0 +1,11 @@
+package b
+
+type B struct {
+}
+
+func Parse(string, string) B {
+	return B{}
+}
+
+func (b B) Format(string) {
+}
diff --git a/go/analysis/passes/timeformat/timeformat.go b/go/analysis/passes/timeformat/timeformat.go
new file mode 100644
index 0000000..9147826
--- /dev/null
+++ b/go/analysis/passes/timeformat/timeformat.go
@@ -0,0 +1,131 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeformat defines an Analyzer that checks for the use
+// of time.Format or time.Parse calls with a bad format.
+package timeformat
+
+import (
+	"fmt"
+	"go/ast"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+const badFormat = "2006-02-01"
+const goodFormat = "2006-01-02"
+
+const Doc = `check for calls of (time.Time).Format or time.Parse with 2006-02-01
+
+The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)
+format. Internationally, "yyyy-dd-mm" does not occur in common calendar date
+standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.
+`
+
+var Analyzer = &analysis.Analyzer{
+	Name:     "timeformat",
+	Doc:      Doc,
+	Requires: []*analysis.Analyzer{inspect.Analyzer},
+	Run:      run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+	nodeFilter := []ast.Node{
+		(*ast.CallExpr)(nil),
+	}
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		call := n.(*ast.CallExpr)
+		fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func)
+		if !ok {
+			return
+		}
+		if !isTimeDotFormat(fn) && !isTimeDotParse(fn) {
+			return
+		}
+		if len(call.Args) > 0 {
+			arg := call.Args[0]
+			badAt := badFormatAt(pass.TypesInfo, arg)
+
+			if badAt > -1 {
+				// Check if it's a literal string, otherwise we can't suggest a fix.
+				if _, ok := arg.(*ast.BasicLit); ok {
+					fmt.Printf("%#v\n", arg)
+					pos := int(arg.Pos()) + badAt + 1 // +1 to skip the " or `
+					end := pos + len(badFormat)
+
+					pass.Report(analysis.Diagnostic{
+						Pos:     token.Pos(pos),
+						End:     token.Pos(end),
+						Message: badFormat + " should be " + goodFormat,
+						SuggestedFixes: []analysis.SuggestedFix{{
+							Message: "Replace " + badFormat + " with " + goodFormat,
+							TextEdits: []analysis.TextEdit{{
+								Pos:     token.Pos(pos),
+								End:     token.Pos(end),
+								NewText: []byte(goodFormat),
+							}},
+						}},
+					})
+				} else {
+					pass.Reportf(arg.Pos(), badFormat+" should be "+goodFormat)
+				}
+			}
+		}
+	})
+	return nil, nil
+}
+
+func isTimeDotFormat(f *types.Func) bool {
+	if f.Name() != "Format" || f.Pkg().Path() != "time" {
+		return false
+	}
+	sig, ok := f.Type().(*types.Signature)
+	if !ok {
+		return false
+	}
+	// Verify that the receiver is time.Time.
+	recv := sig.Recv()
+	if recv == nil {
+		return false
+	}
+	named, ok := recv.Type().(*types.Named)
+	return ok && named.Obj().Name() == "Time"
+}
+
+func isTimeDotParse(f *types.Func) bool {
+	if f.Name() != "Parse" || f.Pkg().Path() != "time" {
+		return false
+	}
+	// Verify that there is no receiver.
+	sig, ok := f.Type().(*types.Signature)
+	return ok && sig.Recv() == nil
+}
+
+// badFormatAt return the start of a bad format in e or -1 if no bad format is found.
+func badFormatAt(info *types.Info, e ast.Expr) int {
+	tv, ok := info.Types[e]
+	if !ok { // no type info, assume good
+		return -1
+	}
+
+	t, ok := tv.Type.(*types.Basic)
+	if !ok || t.Info()&types.IsString == 0 {
+		return -1
+	}
+
+	if tv.Value == nil {
+		return -1
+	}
+
+	return strings.Index(constant.StringVal(tv.Value), badFormat)
+}
diff --git a/go/analysis/passes/timeformat/timeformat_test.go b/go/analysis/passes/timeformat/timeformat_test.go
new file mode 100644
index 0000000..86bbe1b
--- /dev/null
+++ b/go/analysis/passes/timeformat/timeformat_test.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package timeformat_test
+
+import (
+	"testing"
+
+	"golang.org/x/tools/go/analysis/analysistest"
+	"golang.org/x/tools/go/analysis/passes/timeformat"
+)
+
+func Test(t *testing.T) {
+	testdata := analysistest.TestData()
+	analysistest.RunWithSuggestedFixes(t, testdata, timeformat.Analyzer, "a")
+}
diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go
index 365d7a5..2ad0f89 100644
--- a/go/callgraph/vta/graph.go
+++ b/go/callgraph/vta/graph.go
@@ -568,7 +568,9 @@
 func (b *builder) call(c ssa.CallInstruction) {
 	// When c is r := recover() call register instruction, we add Recover -> r.
 	if bf, ok := c.Common().Value.(*ssa.Builtin); ok && bf.Name() == "recover" {
-		b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(c.(*ssa.Call)))
+		if v, ok := c.(ssa.Value); ok {
+			b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(v))
+		}
 		return
 	}
 
@@ -586,14 +588,14 @@
 		return
 	}
 	cc := c.Common()
-	// When c is an unresolved method call (cc.Method != nil), cc.Value contains
-	// the receiver object rather than cc.Args[0].
-	if cc.Method != nil {
-		b.addInFlowAliasEdges(b.nodeFromVal(f.Params[0]), b.nodeFromVal(cc.Value))
-	}
 
 	offset := 0
 	if cc.Method != nil {
+		// We don't add interprocedural flows for receiver objects.
+		// At a call site, the receiver object is interface while the
+		// callee object is concrete. The flow from interface to
+		// concrete type does not make sense. The flow other way around
+		// would bake in information from the initial call graph.
 		offset = 1
 	}
 	for i, v := range cc.Args {
@@ -654,7 +656,7 @@
 
 // Creates const, pointer, global, func, and local nodes based on register instructions.
 func (b *builder) nodeFromVal(val ssa.Value) node {
-	if p, ok := val.Type().(*types.Pointer); ok && !isInterface(p.Elem()) && !isFunction(p.Elem()) {
+	if p, ok := val.Type().(*types.Pointer); ok && !types.IsInterface(p.Elem()) && !isFunction(p.Elem()) {
 		// Nested pointer to interfaces are modeled as a special
 		// nestedPtrInterface node.
 		if i := interfaceUnderPtr(p.Elem()); i != nil {
@@ -687,7 +689,9 @@
 // semantically equivalent types can have different implementations,
 // this method guarantees the same implementation is always used.
 func (b *builder) representative(n node) node {
-	if !hasInitialTypes(n) {
+	if n.Type() == nil {
+		// panicArg and recoverReturn do not have
+		// types and are unique by definition.
 		return n
 	}
 	t := canonicalize(n.Type(), &b.canon)
diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go
index 5934ebc..6127780 100644
--- a/go/callgraph/vta/propagation.go
+++ b/go/callgraph/vta/propagation.go
@@ -175,6 +175,18 @@
 	return &typeSet
 }
 
+// hasInitialTypes check if a node can have initial types.
+// Returns true iff `n` is not a panic, recover, nestedPtr*
+// node, nor a node whose type is an interface.
+func hasInitialTypes(n node) bool {
+	switch n.(type) {
+	case panicArg, recoverReturn, nestedPtrFunction, nestedPtrInterface:
+		return false
+	default:
+		return !types.IsInterface(n.Type())
+	}
+}
+
 // getPropType creates a propType for `node` based on its type.
 // propType.typ is always node.Type(). If node is function, then
 // propType.val is the underlying function; nil otherwise.
diff --git a/go/callgraph/vta/propagation_test.go b/go/callgraph/vta/propagation_test.go
index 00b2127..f4a754f 100644
--- a/go/callgraph/vta/propagation_test.go
+++ b/go/callgraph/vta/propagation_test.go
@@ -58,7 +58,7 @@
 
 // newNamedType creates a bogus type named `name`.
 func newNamedType(name string) *types.Named {
-	return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), nil, nil)
+	return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), types.Universe.Lookup("int").Type(), nil)
 }
 
 // sccString is a utility for stringifying `nodeToScc`. Every
diff --git a/go/callgraph/vta/testdata/src/panic.go b/go/callgraph/vta/testdata/src/panic.go
index 2d39c70..5ef3548 100644
--- a/go/callgraph/vta/testdata/src/panic.go
+++ b/go/callgraph/vta/testdata/src/panic.go
@@ -27,12 +27,12 @@
 
 func Baz(a A) {
 	defer recover1()
+	defer recover()
 	panic(a)
 }
 
 // Relevant SSA:
 // func recover1():
-// 	0:
 //   t0 = print("only this recover...":string)
 //   t1 = recover()
 //   t2 = typeassert,ok t1.(I)
@@ -53,6 +53,7 @@
 //   t0 = local A (a)
 //   *t0 = a
 //   defer recover1()
+//   defer recover()
 //   t1 = *t0
 //   t2 = make interface{} <- A (t1)
 //   panic t2
diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go
index 0531a22..c0b5775 100644
--- a/go/callgraph/vta/utils.go
+++ b/go/callgraph/vta/utils.go
@@ -56,24 +56,7 @@
 		return true
 	}
 
-	return isInterface(t) || isFunction(t)
-}
-
-// hasInitialTypes check if a node can have initial types.
-// Returns true iff `n` is not a panic or recover node as
-// those are artificial.
-func hasInitialTypes(n node) bool {
-	switch n.(type) {
-	case panicArg, recoverReturn:
-		return false
-	default:
-		return true
-	}
-}
-
-func isInterface(t types.Type) bool {
-	_, ok := t.Underlying().(*types.Interface)
-	return ok
+	return types.IsInterface(t) || isFunction(t)
 }
 
 func isFunction(t types.Type) bool {
@@ -98,7 +81,7 @@
 			return nil
 		}
 
-		if isInterface(p.Elem()) {
+		if types.IsInterface(p.Elem()) {
 			return p.Elem()
 		}
 
diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go
index ddc276c..2ed25a7 100644
--- a/go/gcexportdata/gcexportdata.go
+++ b/go/gcexportdata/gcexportdata.go
@@ -116,13 +116,29 @@
 	// The indexed export format starts with an 'i'; the older
 	// binary export format starts with a 'c', 'd', or 'v'
 	// (from "version"). Select appropriate importer.
-	if len(data) > 0 && data[0] == 'i' {
-		_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
-		return pkg, err
-	}
+	if len(data) > 0 {
+		switch data[0] {
+		case 'i':
+			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+			return pkg, err
 
-	_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
-	return pkg, err
+		case 'v', 'c', 'd':
+			_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+			return pkg, err
+
+		case 'u':
+			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
+			return pkg, err
+
+		default:
+			l := len(data)
+			if l > 10 {
+				l = 10
+			}
+			return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
+		}
+	}
+	return nil, fmt.Errorf("empty export data for %s", path)
 }
 
 // Write writes encoded type information for the specified package to out.
diff --git a/go/internal/gcimporter/gcimporter.go b/go/internal/gcimporter/gcimporter.go
index 493bfa0..e96c396 100644
--- a/go/internal/gcimporter/gcimporter.go
+++ b/go/internal/gcimporter/gcimporter.go
@@ -181,8 +181,9 @@
 	defer rc.Close()
 
 	var hdr string
+	var size int64
 	buf := bufio.NewReader(rc)
-	if hdr, _, err = FindExportData(buf); err != nil {
+	if hdr, size, err = FindExportData(buf); err != nil {
 		return
 	}
 
@@ -210,10 +211,27 @@
 		// The indexed export format starts with an 'i'; the older
 		// binary export format starts with a 'c', 'd', or 'v'
 		// (from "version"). Select appropriate importer.
-		if len(data) > 0 && data[0] == 'i' {
-			_, pkg, err = IImportData(fset, packages, data[1:], id)
-		} else {
-			_, pkg, err = BImportData(fset, packages, data, id)
+		if len(data) > 0 {
+			switch data[0] {
+			case 'i':
+				_, pkg, err := IImportData(fset, packages, data[1:], id)
+				return pkg, err
+
+			case 'v', 'c', 'd':
+				_, pkg, err := BImportData(fset, packages, data, id)
+				return pkg, err
+
+			case 'u':
+				_, pkg, err := UImportData(fset, packages, data[1:size], id)
+				return pkg, err
+
+			default:
+				l := len(data)
+				if l > 10 {
+					l = 10
+				}
+				return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
+			}
 		}
 
 	default:
diff --git a/go/internal/gcimporter/gcimporter_test.go b/go/internal/gcimporter/gcimporter_test.go
index 4e992af..5e1ca4b 100644
--- a/go/internal/gcimporter/gcimporter_test.go
+++ b/go/internal/gcimporter/gcimporter_test.go
@@ -45,6 +45,10 @@
 // compile runs the compiler on filename, with dirname as the working directory,
 // and writes the output file to outdirname.
 func compile(t *testing.T, dirname, filename, outdirname string) string {
+	return compilePkg(t, dirname, filename, outdirname, "p")
+}
+
+func compilePkg(t *testing.T, dirname, filename, outdirname, pkg string) string {
 	testenv.NeedsGoBuild(t)
 
 	// filename must end with ".go"
@@ -53,12 +57,12 @@
 	}
 	basename := filepath.Base(filename)
 	outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o")
-	cmd := exec.Command("go", "tool", "compile", "-p=p", "-o", outname, filename)
+	cmd := exec.Command("go", "tool", "compile", "-p="+pkg, "-o", outname, filename)
 	cmd.Dir = dirname
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		t.Logf("%s", out)
-		t.Fatalf("go tool compile %s failed: %s", filename, err)
+		t.Fatalf("(cd %v && %v) failed: %s", cmd.Dir, cmd, err)
 	}
 	return outname
 }
@@ -140,7 +144,11 @@
 		// For now, we just test the presence of a few packages
 		// that we know are there for sure.
 		got := fmt.Sprint(pkg.Imports())
-		for _, want := range []string{"go/ast", "go/token"} {
+		wants := []string{"go/ast", "go/token"}
+		if unifiedIR {
+			wants = []string{"go/ast"}
+		}
+		for _, want := range wants {
 			if !strings.Contains(got, want) {
 				t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
 			}
@@ -451,7 +459,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	compile(t, "testdata", "a.go", testoutdir)
+	compilePkg(t, "testdata", "a.go", testoutdir, apkg(testoutdir))
 	compile(t, testoutdir, bpath, testoutdir)
 
 	// import must succeed (test for issue at hand)
@@ -611,13 +619,22 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	compile(t, dir, "a.go", testoutdir)
+	compilePkg(t, dir, "a.go", testoutdir, apkg(testoutdir))
 	compile(t, testoutdir, bpath, testoutdir)
 
 	// import must succeed (test for issue at hand)
 	_ = importPkg(t, "./testdata/aa", tmpdir)
 }
 
+// apkg returns the slash-separated package path for package "a" under testoutdir.
+func apkg(testoutdir string) string {
+	apkg := testoutdir + "/a"
+	if os.PathSeparator != '/' {
+		apkg = strings.ReplaceAll(apkg, string(os.PathSeparator), "/")
+	}
+	return apkg
+}
+
 func importPkg(t *testing.T, path, srcDir string) *types.Package {
 	pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil)
 	if err != nil {
diff --git a/go/internal/gcimporter/iimport.go b/go/internal/gcimporter/iimport.go
index 28b91b8..4caa0f5 100644
--- a/go/internal/gcimporter/iimport.go
+++ b/go/internal/gcimporter/iimport.go
@@ -17,6 +17,7 @@
 	"go/token"
 	"go/types"
 	"io"
+	"math/big"
 	"sort"
 	"strings"
 
@@ -512,7 +513,9 @@
 		val = constant.MakeString(r.string())
 
 	case types.IsInteger:
-		val = r.mpint(b)
+		var x big.Int
+		r.mpint(&x, b)
+		val = constant.Make(&x)
 
 	case types.IsFloat:
 		val = r.mpfloat(b)
@@ -561,8 +564,8 @@
 	return
 }
 
-func (r *importReader) mpint(b *types.Basic) constant.Value {
-	signed, maxBytes := intSize(b)
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+	signed, maxBytes := intSize(typ)
 
 	maxSmall := 256 - maxBytes
 	if signed {
@@ -581,7 +584,8 @@
 				v = ^v
 			}
 		}
-		return constant.MakeInt64(v)
+		x.SetInt64(v)
+		return
 	}
 
 	v := -n
@@ -591,47 +595,23 @@
 	if v < 1 || uint(v) > maxBytes {
 		errorf("weird decoding: %v, %v => %v", n, signed, v)
 	}
-
-	buf := make([]byte, v)
-	io.ReadFull(&r.declReader, buf)
-
-	// convert to little endian
-	// TODO(gri) go/constant should have a more direct conversion function
-	//           (e.g., once it supports a big.Float based implementation)
-	for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
-		buf[i], buf[j] = buf[j], buf[i]
-	}
-
-	x := constant.MakeFromBytes(buf)
+	b := make([]byte, v)
+	io.ReadFull(&r.declReader, b)
+	x.SetBytes(b)
 	if signed && n&1 != 0 {
-		x = constant.UnaryOp(token.SUB, x, 0)
+		x.Neg(x)
 	}
-	return x
 }
 
-func (r *importReader) mpfloat(b *types.Basic) constant.Value {
-	x := r.mpint(b)
-	if constant.Sign(x) == 0 {
-		return x
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+	var mant big.Int
+	r.mpint(&mant, typ)
+	var f big.Float
+	f.SetInt(&mant)
+	if f.Sign() != 0 {
+		f.SetMantExp(&f, int(r.int64()))
 	}
-
-	exp := r.int64()
-	switch {
-	case exp > 0:
-		x = constant.Shift(x, token.SHL, uint(exp))
-		// Ensure that the imported Kind is Float, else this constant may run into
-		// bitsize limits on overlarge integers. Eventually we can instead adopt
-		// the approach of CL 288632, but that CL relies on go/constant APIs that
-		// were introduced in go1.13.
-		//
-		// TODO(rFindley): sync the logic here with tip Go once we no longer
-		// support go1.12.
-		x = constant.ToFloat(x)
-	case exp < 0:
-		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
-		x = constant.BinaryOp(x, token.QUO, d)
-	}
-	return x
+	return constant.Make(&f)
 }
 
 func (r *importReader) ident() string {
diff --git a/go/internal/gcimporter/unified_no.go b/go/internal/gcimporter/unified_no.go
new file mode 100644
index 0000000..286bf44
--- /dev/null
+++ b/go/internal/gcimporter/unified_no.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(go1.18 && goexperiment.unified)
+// +build !go1.18 !goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = false
diff --git a/go/internal/gcimporter/unified_yes.go b/go/internal/gcimporter/unified_yes.go
new file mode 100644
index 0000000..b5d69ff
--- /dev/null
+++ b/go/internal/gcimporter/unified_yes.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18 && goexperiment.unified
+// +build go1.18,goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = true
diff --git a/go/internal/gcimporter/ureader_no.go b/go/internal/gcimporter/ureader_no.go
new file mode 100644
index 0000000..8eb2072
--- /dev/null
+++ b/go/internal/gcimporter/ureader_no.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package gcimporter
+
+import (
+	"fmt"
+	"go/token"
+	"go/types"
+)
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data")
+	return
+}
diff --git a/go/internal/gcimporter/ureader_yes.go b/go/internal/gcimporter/ureader_yes.go
new file mode 100644
index 0000000..1a0ce64
--- /dev/null
+++ b/go/internal/gcimporter/ureader_yes.go
@@ -0,0 +1,673 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Derived from go/internal/gcimporter/ureader.go
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter
+
+import (
+	"go/token"
+	"go/types"
+	"strings"
+
+	"golang.org/x/tools/go/internal/pkgbits"
+)
+
+// A pkgReader holds the shared state for reading a unified IR package
+// description.
+type pkgReader struct {
+	pkgbits.PkgDecoder
+
+	fake fakeFileSet
+
+	ctxt    *types.Context
+	imports map[string]*types.Package // previously imported packages, indexed by path
+
+	// lazily initialized arrays corresponding to the unified IR
+	// PosBase, Pkg, and Type sections, respectively.
+	posBases []string // position bases (i.e., file names)
+	pkgs     []*types.Package
+	typs     []types.Type
+
+	// laterFns holds functions that need to be invoked at the end of
+	// import reading.
+	laterFns []func()
+	// laterFors is used in case of 'type A B' to ensure that B is processed before A.
+	laterFors map[types.Type]int
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+	pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+	idx    pkgbits.Index
+	needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+	idx     pkgbits.Index
+	derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+	s := string(data)
+	s = s[:strings.LastIndex(s, "\n$$\n")]
+	input := pkgbits.NewPkgDecoder(path, s)
+	pkg = readUnifiedPackage(fset, nil, imports, input)
+	return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+	if pr.laterFors == nil {
+		pr.laterFors = make(map[types.Type]int)
+	}
+	pr.laterFors[t] = len(pr.laterFns)
+	pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+	pr := pkgReader{
+		PkgDecoder: input,
+
+		fake: fakeFileSet{
+			fset:  fset,
+			files: make(map[string]*fileInfo),
+		},
+
+		ctxt:    ctxt,
+		imports: imports,
+
+		posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+		pkgs:     make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+		typs:     make([]types.Type, input.NumElems(pkgbits.RelocType)),
+	}
+	defer pr.fake.setLines()
+
+	r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+	pkg := r.pkg()
+	r.Bool() // has init
+
+	for i, n := 0, r.Len(); i < n; i++ {
+		// As if r.obj(), but avoiding the Scope.Lookup call,
+		// to avoid eager loading of imports.
+		r.Sync(pkgbits.SyncObject)
+		assert(!r.Bool())
+		r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+		assert(r.Len() == 0)
+	}
+
+	r.Sync(pkgbits.SyncEOF)
+
+	for _, fn := range pr.laterFns {
+		fn()
+	}
+
+	pkg.MarkComplete()
+	return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+	pkgbits.Decoder
+
+	p *pkgReader
+
+	dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+	// bounds is a slice of typeInfos corresponding to the underlying
+	// bounds of the element's type parameters.
+	bounds []typeInfo
+
+	// tparams is a slice of the constructed TypeParams for the element.
+	tparams []*types.TypeParam
+
+	// derived is a slice of types derived from tparams, which may be
+	// instantiated while reading the current element.
+	derived      []derivedInfo
+	derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+	return &reader{
+		Decoder: pr.NewDecoder(k, idx, marker),
+		p:       pr,
+	}
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+	r.Sync(pkgbits.SyncPos)
+	if !r.Bool() {
+		return token.NoPos
+	}
+
+	// TODO(mdempsky): Delta encoding.
+	posBase := r.posBase()
+	line := r.Uint()
+	col := r.Uint()
+	return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+	return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+	if b := pr.posBases[idx]; b != "" {
+		return b
+	}
+
+	r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+	// Within types2, position bases have a lot more details (e.g.,
+	// keeping track of where //line directives appeared exactly).
+	//
+	// For go/types, we just track the file name.
+
+	filename := r.String()
+
+	if r.Bool() { // file base
+		// Was: "b = token.NewTrimmedFileBase(filename, true)"
+	} else { // line base
+		pos := r.pos()
+		line := r.Uint()
+		col := r.Uint()
+
+		// Was: "b = token.NewLineBase(pos, filename, true, line, col)"
+		_, _, _ = pos, line, col
+	}
+
+	b := filename
+	pr.posBases[idx] = b
+	return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Package {
+	r.Sync(pkgbits.SyncPkg)
+	return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
+	// TODO(mdempsky): Consider using some non-nil pointer to indicate
+	// the universe scope, so we don't need to keep re-reading it.
+	if pkg := pr.pkgs[idx]; pkg != nil {
+		return pkg
+	}
+
+	pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+	pr.pkgs[idx] = pkg
+	return pkg
+}
+
+func (r *reader) doPkg() *types.Package {
+	path := r.String()
+	switch path {
+	case "":
+		path = r.p.PkgPath()
+	case "builtin":
+		return nil // universe
+	case "unsafe":
+		return types.Unsafe
+	}
+
+	if pkg := r.p.imports[path]; pkg != nil {
+		return pkg
+	}
+
+	name := r.String()
+
+	pkg := types.NewPackage(path, name)
+	r.p.imports[path] = pkg
+
+	imports := make([]*types.Package, r.Len())
+	for i := range imports {
+		imports[i] = r.pkg()
+	}
+	pkg.SetImports(flattenImports(imports))
+
+	return pkg
+}
+
+// flattenImports returns the transitive closure of all imported
+// packages rooted from pkgs.
+func flattenImports(pkgs []*types.Package) []*types.Package {
+	var res []*types.Package
+
+	seen := make(map[*types.Package]bool)
+	var add func(pkg *types.Package)
+	add = func(pkg *types.Package) {
+		if seen[pkg] {
+			return
+		}
+		seen[pkg] = true
+		res = append(res, pkg)
+		for _, imp := range pkg.Imports() {
+			add(imp)
+		}
+	}
+
+	for _, pkg := range pkgs {
+		add(pkg)
+	}
+	return res
+}
+
+// @@@ Types
+
+func (r *reader) typ() types.Type {
+	return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+	r.Sync(pkgbits.SyncType)
+	if r.Bool() {
+		return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+	}
+	return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
+	idx := info.idx
+	var where *types.Type
+	if info.derived {
+		where = &dict.derivedTypes[idx]
+		idx = dict.derived[idx].idx
+	} else {
+		where = &pr.typs[idx]
+	}
+
+	if typ := *where; typ != nil {
+		return typ
+	}
+
+	r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+	r.dict = dict
+
+	typ := r.doTyp()
+	assert(typ != nil)
+
+	// See comment in pkgReader.typIdx explaining how this happens.
+	if prev := *where; prev != nil {
+		return prev
+	}
+
+	*where = typ
+	return typ
+}
+
+func (r *reader) doTyp() (res types.Type) {
+	switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+	default:
+		errorf("unhandled type tag: %v", tag)
+		panic("unreachable")
+
+	case pkgbits.TypeBasic:
+		return types.Typ[r.Len()]
+
+	case pkgbits.TypeNamed:
+		obj, targs := r.obj()
+		name := obj.(*types.TypeName)
+		if len(targs) != 0 {
+			t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
+			return t
+		}
+		return name.Type()
+
+	case pkgbits.TypeTypeParam:
+		return r.dict.tparams[r.Len()]
+
+	case pkgbits.TypeArray:
+		len := int64(r.Uint64())
+		return types.NewArray(r.typ(), len)
+	case pkgbits.TypeChan:
+		dir := types.ChanDir(r.Len())
+		return types.NewChan(dir, r.typ())
+	case pkgbits.TypeMap:
+		return types.NewMap(r.typ(), r.typ())
+	case pkgbits.TypePointer:
+		return types.NewPointer(r.typ())
+	case pkgbits.TypeSignature:
+		return r.signature(nil, nil, nil)
+	case pkgbits.TypeSlice:
+		return types.NewSlice(r.typ())
+	case pkgbits.TypeStruct:
+		return r.structType()
+	case pkgbits.TypeInterface:
+		return r.interfaceType()
+	case pkgbits.TypeUnion:
+		return r.unionType()
+	}
+}
+
+func (r *reader) structType() *types.Struct {
+	fields := make([]*types.Var, r.Len())
+	var tags []string
+	for i := range fields {
+		pos := r.pos()
+		pkg, name := r.selector()
+		ftyp := r.typ()
+		tag := r.String()
+		embedded := r.Bool()
+
+		fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
+		if tag != "" {
+			for len(tags) < i {
+				tags = append(tags, "")
+			}
+			tags = append(tags, tag)
+		}
+	}
+	return types.NewStruct(fields, tags)
+}
+
+func (r *reader) unionType() *types.Union {
+	terms := make([]*types.Term, r.Len())
+	for i := range terms {
+		terms[i] = types.NewTerm(r.Bool(), r.typ())
+	}
+	return types.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types.Interface {
+	methods := make([]*types.Func, r.Len())
+	embeddeds := make([]types.Type, r.Len())
+	implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+	for i := range methods {
+		pos := r.pos()
+		pkg, name := r.selector()
+		mtyp := r.signature(nil, nil, nil)
+		methods[i] = types.NewFunc(pos, pkg, name, mtyp)
+	}
+
+	for i := range embeddeds {
+		embeddeds[i] = r.typ()
+	}
+
+	iface := types.NewInterfaceType(methods, embeddeds)
+	if implicit {
+		iface.MarkImplicit()
+	}
+	return iface
+}
+
+func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
+	r.Sync(pkgbits.SyncSignature)
+
+	params := r.params()
+	results := r.params()
+	variadic := r.Bool()
+
+	return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types.Tuple {
+	r.Sync(pkgbits.SyncParams)
+
+	params := make([]*types.Var, r.Len())
+	for i := range params {
+		params[i] = r.param()
+	}
+
+	return types.NewTuple(params...)
+}
+
+func (r *reader) param() *types.Var {
+	r.Sync(pkgbits.SyncParam)
+
+	pos := r.pos()
+	pkg, name := r.localIdent()
+	typ := r.typ()
+
+	return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+	r.Sync(pkgbits.SyncObject)
+
+	assert(!r.Bool())
+
+	pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+	obj := pkgScope(pkg).Lookup(name)
+
+	targs := make([]types.Type, r.Len())
+	for i := range targs {
+		targs[i] = r.typ()
+	}
+
+	return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+	rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+	objPkg, objName := rname.qualifiedIdent()
+	assert(objName != "")
+
+	tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+
+	if tag == pkgbits.ObjStub {
+		assert(objPkg == nil || objPkg == types.Unsafe)
+		return objPkg, objName
+	}
+
+	if objPkg.Scope().Lookup(objName) == nil {
+		dict := pr.objDictIdx(idx)
+
+		r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+		r.dict = dict
+
+		declare := func(obj types.Object) {
+			objPkg.Scope().Insert(obj)
+		}
+
+		switch tag {
+		default:
+			panic("weird")
+
+		case pkgbits.ObjAlias:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewTypeName(pos, objPkg, objName, typ))
+
+		case pkgbits.ObjConst:
+			pos := r.pos()
+			typ := r.typ()
+			val := r.Value()
+			declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+		case pkgbits.ObjFunc:
+			pos := r.pos()
+			tparams := r.typeParamNames()
+			sig := r.signature(nil, nil, tparams)
+			declare(types.NewFunc(pos, objPkg, objName, sig))
+
+		case pkgbits.ObjType:
+			pos := r.pos()
+
+			obj := types.NewTypeName(pos, objPkg, objName, nil)
+			named := types.NewNamed(obj, nil, nil)
+			declare(obj)
+
+			named.SetTypeParams(r.typeParamNames())
+
+			rhs := r.typ()
+			pk := r.p
+			pk.laterFor(named, func() {
+				// First be sure that the rhs is initialized, if it needs to be initialized.
+				delete(pk.laterFors, named) // prevent cycles
+				if i, ok := pk.laterFors[rhs]; ok {
+					f := pk.laterFns[i]
+					pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+					f()                        // initialize RHS
+				}
+				underlying := rhs.Underlying()
+
+				// If the underlying type is an interface, we need to
+				// duplicate its methods so we can replace the receiver
+				// parameter's type (#49906).
+				if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+					methods := make([]*types.Func, iface.NumExplicitMethods())
+					for i := range methods {
+						fn := iface.ExplicitMethod(i)
+						sig := fn.Type().(*types.Signature)
+
+						recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+						methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+					}
+
+					embeds := make([]types.Type, iface.NumEmbeddeds())
+					for i := range embeds {
+						embeds[i] = iface.EmbeddedType(i)
+					}
+
+					underlying = types.NewInterfaceType(methods, embeds)
+				}
+
+				named.SetUnderlying(underlying)
+			})
+
+			for i, n := 0, r.Len(); i < n; i++ {
+				named.AddMethod(r.method())
+			}
+
+		case pkgbits.ObjVar:
+			pos := r.pos()
+			typ := r.typ()
+			declare(types.NewVar(pos, objPkg, objName, typ))
+		}
+	}
+
+	return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+	r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+
+	var dict readerDict
+
+	if implicits := r.Len(); implicits != 0 {
+		errorf("unexpected object with %v implicit type parameter(s)", implicits)
+	}
+
+	dict.bounds = make([]typeInfo, r.Len())
+	for i := range dict.bounds {
+		dict.bounds[i] = r.typInfo()
+	}
+
+	dict.derived = make([]derivedInfo, r.Len())
+	dict.derivedTypes = make([]types.Type, len(dict.derived))
+	for i := range dict.derived {
+		dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+	}
+
+	// function references follow, but reader doesn't need those
+
+	return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+	r.Sync(pkgbits.SyncTypeParamNames)
+
+	// Note: This code assumes it only processes objects without
+	// implicit type parameters. This is currently fine, because
+	// reader is only used to read in exported declarations, which are
+	// always package scoped.
+
+	if len(r.dict.bounds) == 0 {
+		return nil
+	}
+
+	// Careful: Type parameter lists may have cycles. To allow for this,
+	// we construct the type parameter list in two passes: first we
+	// create all the TypeNames and TypeParams, then we construct and
+	// set the bound type.
+
+	r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+	for i := range r.dict.bounds {
+		pos := r.pos()
+		pkg, name := r.localIdent()
+
+		tname := types.NewTypeName(pos, pkg, name, nil)
+		r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+	}
+
+	typs := make([]types.Type, len(r.dict.bounds))
+	for i, bound := range r.dict.bounds {
+		typs[i] = r.p.typIdx(bound, r.dict)
+	}
+
+	// TODO(mdempsky): This is subtle, elaborate further.
+	//
+	// We have to save tparams outside of the closure, because
+	// typeParamNames() can be called multiple times with the same
+	// dictionary instance.
+	//
+	// Also, this needs to happen later to make sure SetUnderlying has
+	// been called.
+	//
+	// TODO(mdempsky): Is it safe to have a single "later" slice or do
+	// we need to have multiple passes? See comments on CL 386002 and
+	// go.dev/issue/52104.
+	tparams := r.dict.tparams
+	r.p.later(func() {
+		for i, typ := range typs {
+			tparams[i].SetConstraint(typ)
+		}
+	})
+
+	return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+	r.Sync(pkgbits.SyncMethod)
+	pos := r.pos()
+	pkg, name := r.selector()
+
+	rparams := r.typeParamNames()
+	sig := r.signature(r.param(), rparams, nil)
+
+	_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+	return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string)     { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string)       { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+	r.Sync(marker)
+	return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+	if pkg != nil {
+		return pkg.Scope()
+	}
+	return types.Universe
+}
diff --git a/go/internal/pkgbits/codes.go b/go/internal/pkgbits/codes.go
new file mode 100644
index 0000000..f0cabde
--- /dev/null
+++ b/go/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+	// Marker returns the SyncMarker for the Code's dynamic type.
+	Marker() SyncMarker
+
+	// Value returns the Code's ordinal value.
+	Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int         { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+	ValBool CodeVal = iota
+	ValString
+	ValInt64
+	ValBigInt
+	ValBigRat
+	ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int         { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+	TypeBasic CodeType = iota
+	TypeNamed
+	TypePointer
+	TypeSlice
+	TypeArray
+	TypeChan
+	TypeMap
+	TypeSignature
+	TypeStruct
+	TypeInterface
+	TypeUnion
+	TypeTypeParam
+)
+
+// A CodeObj distinguishes among go/types.Object encodings.
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int         { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+	ObjAlias CodeObj = iota
+	ObjConst
+	ObjType
+	ObjFunc
+	ObjVar
+	ObjStub
+)
diff --git a/go/internal/pkgbits/decoder.go b/go/internal/pkgbits/decoder.go
new file mode 100644
index 0000000..2bc7936
--- /dev/null
+++ b/go/internal/pkgbits/decoder.go
@@ -0,0 +1,433 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"go/token"
+	"math/big"
+	"os"
+	"runtime"
+	"strings"
+)
+
+// A PkgDecoder provides methods for decoding a package's Unified IR
+// export data.
+type PkgDecoder struct {
+	// version is the file format version.
+	version uint32
+
+	// sync indicates whether the file uses sync markers.
+	sync bool
+
+	// pkgPath is the package path for the package to be decoded.
+	//
+	// TODO(mdempsky): Remove; unneeded since CL 391014.
+	pkgPath string
+
+	// elemData is the full data payload of the encoded package.
+	// Elements are densely and contiguously packed together.
+	//
+	// The last 8 bytes of elemData are the package fingerprint.
+	elemData string
+
+	// elemEnds stores the byte-offset end positions of element
+	// bitstreams within elemData.
+	//
+	// For example, element I's bitstream data starts at elemEnds[I-1]
+	// (or 0, if I==0) and ends at elemEnds[I].
+	//
+	// Note: elemEnds is indexed by absolute indices, not
+	// section-relative indices.
+	elemEnds []uint32
+
+	// elemEndsEnds stores the index-offset end positions of relocation
+	// sections within elemEnds.
+	//
+	// For example, section K's end positions start at elemEndsEnds[K-1]
+	// (or 0, if K==0) and end at elemEndsEnds[K].
+	elemEndsEnds [numRelocs]uint32
+}
+
+// PkgPath returns the package path for the package being decoded.
+//
+// TODO(mdempsky): Remove; unneeded since CL 391014.
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+// SyncMarkers reports whether pr uses sync markers.
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
+
+// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
+// IR export data from input. pkgPath is the package path for the
+// compilation unit that produced the export data.
+//
+// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+	pr := PkgDecoder{
+		pkgPath: pkgPath,
+	}
+
+	// TODO(mdempsky): Implement direct indexing of input string to
+	// avoid copying the position information.
+
+	r := strings.NewReader(input)
+
+	assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+
+	switch pr.version {
+	default:
+		panic(fmt.Errorf("unsupported version: %v", pr.version))
+	case 0:
+		// no flags
+	case 1:
+		var flags uint32
+		assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
+		pr.sync = flags&flagSyncMarkers != 0
+	}
+
+	assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+	pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+	assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+	pos, err := r.Seek(0, os.SEEK_CUR)
+	assert(err == nil)
+
+	pr.elemData = input[pos:]
+	assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+	return pr
+}
+
+// NumElems returns the number of elements in section k.
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+	count := int(pr.elemEndsEnds[k])
+	if k > 0 {
+		count -= int(pr.elemEndsEnds[k-1])
+	}
+	return count
+}
+
+// TotalElems returns the total number of elements across all sections.
+func (pr *PkgDecoder) TotalElems() int {
+	return len(pr.elemEnds)
+}
+
+// Fingerprint returns the package fingerprint.
+func (pr *PkgDecoder) Fingerprint() [8]byte {
+	var fp [8]byte
+	copy(fp[:], pr.elemData[len(pr.elemData)-8:])
+	return fp
+}
+
+// AbsIdx returns the absolute index for the given (section, index)
+// pair.
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
+	absIdx := int(idx)
+	if k > 0 {
+		absIdx += int(pr.elemEndsEnds[k-1])
+	}
+	if absIdx >= int(pr.elemEndsEnds[k]) {
+		errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+	}
+	return absIdx
+}
+
+// DataIdx returns the raw element bitstream for the given (section,
+// index) pair.
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
+	absIdx := pr.AbsIdx(k, idx)
+
+	var start uint32
+	if absIdx > 0 {
+		start = pr.elemEnds[absIdx-1]
+	}
+	end := pr.elemEnds[absIdx]
+
+	return pr.elemData[start:end]
+}
+
+// StringIdx returns the string value for the given string index.
+func (pr *PkgDecoder) StringIdx(idx Index) string {
+	return pr.DataIdx(RelocString, idx)
+}
+
+// NewDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+	r := pr.NewDecoderRaw(k, idx)
+	r.Sync(marker)
+	return r
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+	r.Sync(SyncRelocs)
+	r.Relocs = make([]RelocEnt, r.Len())
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+	common *PkgDecoder
+
+	Relocs []RelocEnt
+	Data   strings.Reader
+
+	k   RelocKind
+	Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+	if err != nil {
+		errorf("unexpected decoding error: %w", err)
+	}
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+	x, err := binary.ReadUvarint(&r.Data)
+	r.checkErr(err)
+	return x
+}
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	//     hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	//     omitting redundant or irrelevant details, so it's easier to
+	//     focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+
+	fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+	fmt.Printf("\nfound %v, written at:\n", mHave)
+	if len(writerPCs) == 0 {
+		fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+	}
+	for _, pc := range writerPCs {
+		fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+	}
+
+	fmt.Printf("\nexpected %v, reading at:\n", mWant)
+	var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+	n := runtime.Callers(2, readerPCs[:])
+	for _, pc := range fmtFrames(readerPCs[:n]...) {
+		fmt.Printf("\t%s\n", pc)
+	}
+
+	// We already printed a stack trace for the reader, so now we can
+	// simply exit. Printing a second one with panic or base.Fatalf
+	// would just be noise.
+	os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+	r.Sync(SyncBool)
+	x, err := r.Data.ReadByte()
+	r.checkErr(err)
+	assert(x < 2)
+	return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+	r.Sync(SyncInt64)
+	return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+	r.Sync(SyncUint64)
+	return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
+//
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+	r.Sync(mark)
+	return r.Len()
+}
+
+// Reloc decodes a relocation of expected section k from the element
+// bitstream and returns an index to the referenced element.
+func (r *Decoder) Reloc(k RelocKind) Index {
+	r.Sync(SyncUseReloc)
+	return r.rawReloc(k, r.Len())
+}
+
+// String decodes and returns a string value from the element
+// bitstream.
+func (r *Decoder) String() string {
+	r.Sync(SyncString)
+	return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+// Strings decodes and returns a variable-length slice of strings from
+// the element bitstream.
+func (r *Decoder) Strings() []string {
+	res := make([]string, r.Len())
+	for i := range res {
+		res[i] = r.String()
+	}
+	return res
+}
+
+// Value decodes and returns a constant.Value from the element
+// bitstream.
+func (r *Decoder) Value() constant.Value {
+	r.Sync(SyncValue)
+	isComplex := r.Bool()
+	val := r.scalar()
+	if isComplex {
+		val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+	}
+	return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+	switch tag := CodeVal(r.Code(SyncVal)); tag {
+	default:
+		panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+	case ValBool:
+		return constant.MakeBool(r.Bool())
+	case ValString:
+		return constant.MakeString(r.String())
+	case ValInt64:
+		return constant.MakeInt64(r.Int64())
+	case ValBigInt:
+		return constant.Make(r.bigInt())
+	case ValBigRat:
+		num := r.bigInt()
+		denom := r.bigInt()
+		return constant.Make(new(big.Rat).SetFrac(num, denom))
+	case ValBigFloat:
+		return constant.Make(r.bigFloat())
+	}
+}
+
+func (r *Decoder) bigInt() *big.Int {
+	v := new(big.Int).SetBytes([]byte(r.String()))
+	if r.Bool() {
+		v.Neg(v)
+	}
+	return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+	v := new(big.Float).SetPrec(512)
+	assert(v.UnmarshalText([]byte(r.String())) == nil)
+	return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+// PeekPkgPath returns the package path for the specified package
+// index.
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
+	r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef)
+	path := r.String()
+	if path == "" {
+		path = pr.pkgPath
+	}
+	return path
+}
+
+// PeekObj returns the package path, object name, and CodeObj for the
+// specified object index.
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
+	r := pr.NewDecoder(RelocName, idx, SyncObject1)
+	r.Sync(SyncSym)
+	r.Sync(SyncPkg)
+	path := pr.PeekPkgPath(r.Reloc(RelocPkg))
+	name := r.String()
+	assert(name != "")
+
+	tag := CodeObj(r.Code(SyncCodeObj))
+
+	return path, name, tag
+}
diff --git a/go/internal/pkgbits/doc.go b/go/internal/pkgbits/doc.go
new file mode 100644
index 0000000..c8a2796
--- /dev/null
+++ b/go/internal/pkgbits/doc.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgbits implements low-level coding abstractions for
+// Unified IR's export data format.
+//
+// At a low-level, a package is a collection of bitstream elements.
+// Each element has a "kind" and a dense, non-negative index.
+// Elements can be randomly accessed given their kind and index.
+//
+// Individual elements are sequences of variable-length values (e.g.,
+// integers, booleans, strings, go/constant values, cross-references
+// to other elements). Package pkgbits provides APIs for encoding and
+// decoding these low-level values, but the details of mapping
+// higher-level Go constructs into elements is left to higher-level
+// abstractions.
+//
+// Elements may cross-reference each other with "relocations." For
+// example, an element representing a pointer type has a relocation
+// referring to the element type.
+//
+// Go constructs may be composed as a constellation of multiple
+// elements. For example, a declared function may have one element to
+// describe the object (e.g., its name, type, position), and a
+// separate element to describe its function body. This allows readers
+// some flexibility in efficiently seeking or re-reading data (e.g.,
+// inlining requires re-reading the function body for each inlined
+// call, without needing to re-read the object-level details).
+//
+// This is a copy of internal/pkgbits in the Go implementation.
+package pkgbits
diff --git a/go/internal/pkgbits/encoder.go b/go/internal/pkgbits/encoder.go
new file mode 100644
index 0000000..c50c838
--- /dev/null
+++ b/go/internal/pkgbits/encoder.go
@@ -0,0 +1,379 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/binary"
+	"go/constant"
+	"io"
+	"math/big"
+	"runtime"
+)
+
+// currentVersion is the current version number.
+//
+//   - v0: initial prototype
+//
+//   - v1: adds the flags uint32 word
+const currentVersion uint32 = 1
+
+// A PkgEncoder provides methods for encoding a package's Unified IR
+// export data.
+type PkgEncoder struct {
+	// elems holds the bitstream for previously encoded elements.
+	elems [numRelocs][]string
+
+	// stringsIdx maps previously encoded strings to their index within
+	// the RelocString section, to allow deduplication. That is,
+	// elems[RelocString][stringsIdx[s]] == s (if present).
+	stringsIdx map[string]Index
+
+	// syncFrames is the number of frames to write at each sync
+	// marker. A negative value means sync markers are omitted.
+	syncFrames int
+}
+
+// SyncMarkers reports whether pw uses sync markers.
+func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
+
+// NewPkgEncoder returns an initialized PkgEncoder.
+//
+// syncFrames is the number of caller frames that should be serialized
+// at Sync points. Serializing additional frames results in larger
+// export data files, but can help diagnosing desync errors in
+// higher-level Unified IR reader/writer code. If syncFrames is
+// negative, then sync markers are omitted entirely.
+func NewPkgEncoder(syncFrames int) PkgEncoder {
+	return PkgEncoder{
+		stringsIdx: make(map[string]Index),
+		syncFrames: syncFrames,
+	}
+}
+
+// DumpTo writes the package's encoded data to out0 and returns the
+// package fingerprint.
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+	h := md5.New()
+	out := io.MultiWriter(out0, h)
+
+	writeUint32 := func(x uint32) {
+		assert(binary.Write(out, binary.LittleEndian, x) == nil)
+	}
+
+	writeUint32(currentVersion)
+
+	var flags uint32
+	if pw.SyncMarkers() {
+		flags |= flagSyncMarkers
+	}
+	writeUint32(flags)
+
+	// Write elemEndsEnds.
+	var sum uint32
+	for _, elems := range &pw.elems {
+		sum += uint32(len(elems))
+		writeUint32(sum)
+	}
+
+	// Write elemEnds.
+	sum = 0
+	for _, elems := range &pw.elems {
+		for _, elem := range elems {
+			sum += uint32(len(elem))
+			writeUint32(sum)
+		}
+	}
+
+	// Write elemData.
+	for _, elems := range &pw.elems {
+		for _, elem := range elems {
+			_, err := io.WriteString(out, elem)
+			assert(err == nil)
+		}
+	}
+
+	// Write fingerprint.
+	copy(fingerprint[:], h.Sum(nil))
+	_, err := out0.Write(fingerprint[:])
+	assert(err == nil)
+
+	return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+	if idx, ok := pw.stringsIdx[s]; ok {
+		assert(pw.elems[RelocString][idx] == s)
+		return idx
+	}
+
+	idx := Index(len(pw.elems[RelocString]))
+	pw.elems[RelocString] = append(pw.elems[RelocString], s)
+	pw.stringsIdx[s] = idx
+	return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+	e := pw.NewEncoderRaw(k)
+	e.Sync(marker)
+	return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+	idx := Index(len(pw.elems[k]))
+	pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+	return Encoder{
+		p:   pw,
+		k:   k,
+		Idx: idx,
+	}
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+	p *PkgEncoder
+
+	Relocs []RelocEnt
+	Data   bytes.Buffer // accumulated element bitstream data
+
+	encodingRelocHeader bool
+
+	k   RelocKind
+	Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+	var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+	// Backup the data so we write the relocations at the front.
+	var tmp bytes.Buffer
+	io.Copy(&tmp, &w.Data)
+
+	// TODO(mdempsky): Consider writing these out separately so they're
+	// easier to strip, along with function bodies, so that we can prune
+	// down to just the data that's relevant to go/types.
+	if w.encodingRelocHeader {
+		panic("encodingRelocHeader already true; recursive flush?")
+	}
+	w.encodingRelocHeader = true
+	w.Sync(SyncRelocs)
+	w.Len(len(w.Relocs))
+	for _, rEnt := range w.Relocs {
+		w.Sync(SyncReloc)
+		w.Len(int(rEnt.Kind))
+		w.Len(int(rEnt.Idx))
+	}
+
+	io.Copy(&sb, &w.Data)
+	io.Copy(&sb, &tmp)
+	w.p.elems[w.k][w.Idx] = sb.String()
+
+	return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+	if err != nil {
+		errorf("unexpected encoding error: %v", err)
+	}
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+	var buf [binary.MaxVarintLen64]byte
+	n := binary.PutUvarint(buf[:], x)
+	_, err := w.Data.Write(buf[:n])
+	w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+	// Zig-zag encode.
+	ux := uint64(x) << 1
+	if x < 0 {
+		ux = ^ux
+	}
+
+	w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+	// TODO(mdempsky): Use map for lookup; this takes quadratic time.
+	for i, rEnt := range w.Relocs {
+		if rEnt.Kind == r && rEnt.Idx == idx {
+			return i
+		}
+	}
+
+	i := len(w.Relocs)
+	w.Relocs = append(w.Relocs, RelocEnt{r, idx})
+	return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+	if !w.p.SyncMarkers() {
+		return
+	}
+
+	// Writing out stack frame string references requires working
+	// relocations, but writing out the relocations themselves involves
+	// sync markers. To prevent infinite recursion, we simply trim the
+	// stack frame for sync markers within the relocation header.
+	var frames []string
+	if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+		pcs := make([]uintptr, w.p.syncFrames)
+		n := runtime.Callers(2, pcs)
+		frames = fmtFrames(pcs[:n]...)
+	}
+
+	// TODO(mdempsky): Save space by writing out stack frames as a
+	// linked list so we can share common stack frames.
+	w.rawUvarint(uint64(m))
+	w.rawUvarint(uint64(len(frames)))
+	for _, frame := range frames {
+		w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+	}
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+//	if w.Bool(x != 0) {
+//		// alternative #1
+//	} else {
+//		// alternative #2
+//	}
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+	w.Sync(SyncBool)
+	var x byte
+	if b {
+		x = 1
+	}
+	err := w.Data.WriteByte(x)
+	w.checkErr(err)
+	return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+	w.Sync(SyncInt64)
+	w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+	w.Sync(SyncUint64)
+	w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+	w.Sync(SyncUseReloc)
+	w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+	w.Sync(c.Marker())
+	w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+	w.Sync(SyncString)
+	w.Reloc(RelocString, w.p.StringIdx(s))
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) {
+	w.Len(len(ss))
+	for _, s := range ss {
+		w.String(s)
+	}
+}
+
+// Value encodes and writes a constant.Value into the element
+// bitstream.
+func (w *Encoder) Value(val constant.Value) {
+	w.Sync(SyncValue)
+	if w.Bool(val.Kind() == constant.Complex) {
+		w.scalar(constant.Real(val))
+		w.scalar(constant.Imag(val))
+	} else {
+		w.scalar(val)
+	}
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+	switch v := constant.Val(val).(type) {
+	default:
+		errorf("unhandled %v (%v)", val, val.Kind())
+	case bool:
+		w.Code(ValBool)
+		w.Bool(v)
+	case string:
+		w.Code(ValString)
+		w.String(v)
+	case int64:
+		w.Code(ValInt64)
+		w.Int64(v)
+	case *big.Int:
+		w.Code(ValBigInt)
+		w.bigInt(v)
+	case *big.Rat:
+		w.Code(ValBigRat)
+		w.bigInt(v.Num())
+		w.bigInt(v.Denom())
+	case *big.Float:
+		w.Code(ValBigFloat)
+		w.bigFloat(v)
+	}
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+	b := v.Bytes()
+	w.String(string(b)) // TODO: More efficient encoding.
+	w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+	b := v.Append(nil, 'p', -1)
+	w.String(string(b)) // TODO: More efficient encoding.
+}
diff --git a/go/internal/pkgbits/flags.go b/go/internal/pkgbits/flags.go
new file mode 100644
index 0000000..6542227
--- /dev/null
+++ b/go/internal/pkgbits/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+const (
+	flagSyncMarkers = 1 << iota // file format contains sync markers
+)
diff --git a/go/internal/pkgbits/frames_go1.go b/go/internal/pkgbits/frames_go1.go
new file mode 100644
index 0000000..5294f6a
--- /dev/null
+++ b/go/internal/pkgbits/frames_go1.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package pkgbits
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+	for _, pc := range pcs {
+		fn := runtime.FuncForPC(pc)
+		file, line := fn.FileLine(pc)
+
+		visit(file, line, fn.Name(), pc-fn.Entry())
+	}
+}
diff --git a/go/internal/pkgbits/frames_go17.go b/go/internal/pkgbits/frames_go17.go
new file mode 100644
index 0000000..2324ae7
--- /dev/null
+++ b/go/internal/pkgbits/frames_go17.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package pkgbits
+
+import "runtime"
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+	if len(pcs) == 0 {
+		return
+	}
+
+	frames := runtime.CallersFrames(pcs)
+	for {
+		frame, more := frames.Next()
+		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+		if !more {
+			return
+		}
+	}
+}
diff --git a/go/internal/pkgbits/reloc.go b/go/internal/pkgbits/reloc.go
new file mode 100644
index 0000000..7a8f04a
--- /dev/null
+++ b/go/internal/pkgbits/reloc.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A RelocKind indicates a particular section within a unified IR export.
+type RelocKind int
+
+// An Index represents a bitstream element index within a particular
+// section.
+type Index int
+
+// A RelocEnt (relocation entry) is an entry in an element's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type RelocEnt struct {
+	Kind RelocKind
+	Idx  Index
+}
+
+// Reserved indices within the meta relocation section.
+const (
+	PublicRootIdx  Index = 0
+	PrivateRootIdx Index = 1
+)
+
+const (
+	RelocString RelocKind = iota
+	RelocMeta
+	RelocPosBase
+	RelocPkg
+	RelocName
+	RelocType
+	RelocObj
+	RelocObjExt
+	RelocObjDict
+	RelocBody
+
+	numRelocs = iota
+)
diff --git a/go/internal/pkgbits/support.go b/go/internal/pkgbits/support.go
new file mode 100644
index 0000000..ad26d3b
--- /dev/null
+++ b/go/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+	if !b {
+		panic("assertion failed")
+	}
+}
+
+func errorf(format string, args ...interface{}) {
+	panic(fmt.Errorf(format, args...))
+}
diff --git a/go/internal/pkgbits/sync.go b/go/internal/pkgbits/sync.go
new file mode 100644
index 0000000..5bd51ef
--- /dev/null
+++ b/go/internal/pkgbits/sync.go
@@ -0,0 +1,113 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+	"fmt"
+	"strings"
+)
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+	res := make([]string, 0, len(pcs))
+	walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+		// Trim package from function name. It's just redundant noise.
+		name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+		res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+	})
+	return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+	_ SyncMarker = iota
+
+	// Public markers (known to go/types importers).
+
+	// Low-level coding markers.
+	SyncEOF
+	SyncBool
+	SyncInt64
+	SyncUint64
+	SyncString
+	SyncValue
+	SyncVal
+	SyncRelocs
+	SyncReloc
+	SyncUseReloc
+
+	// Higher-level object and type markers.
+	SyncPublic
+	SyncPos
+	SyncPosBase
+	SyncObject
+	SyncObject1
+	SyncPkg
+	SyncPkgDef
+	SyncMethod
+	SyncType
+	SyncTypeIdx
+	SyncTypeParamNames
+	SyncSignature
+	SyncParams
+	SyncParam
+	SyncCodeObj
+	SyncSym
+	SyncLocalIdent
+	SyncSelector
+
+	// Private markers (only known to cmd/compile).
+	SyncPrivate
+
+	SyncFuncExt
+	SyncVarExt
+	SyncTypeExt
+	SyncPragma
+
+	SyncExprList
+	SyncExprs
+	SyncExpr
+	SyncExprType
+	SyncAssign
+	SyncOp
+	SyncFuncLit
+	SyncCompLit
+
+	SyncDecl
+	SyncFuncBody
+	SyncOpenScope
+	SyncCloseScope
+	SyncCloseAnotherScope
+	SyncDeclNames
+	SyncDeclName
+
+	SyncStmts
+	SyncBlockStmt
+	SyncIfStmt
+	SyncForStmt
+	SyncSwitchStmt
+	SyncRangeStmt
+	SyncCaseClause
+	SyncCommClause
+	SyncSelectStmt
+	SyncDecls
+	SyncLabeledStmt
+	SyncUseObjLocal
+	SyncAddLocal
+	SyncLinkname
+	SyncStmt1
+	SyncStmtsEnd
+	SyncLabel
+	SyncOptLabel
+)
diff --git a/go/internal/pkgbits/syncmarker_string.go b/go/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 0000000..4a5b0ca
--- /dev/null
+++ b/go/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,89 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[SyncEOF-1]
+	_ = x[SyncBool-2]
+	_ = x[SyncInt64-3]
+	_ = x[SyncUint64-4]
+	_ = x[SyncString-5]
+	_ = x[SyncValue-6]
+	_ = x[SyncVal-7]
+	_ = x[SyncRelocs-8]
+	_ = x[SyncReloc-9]
+	_ = x[SyncUseReloc-10]
+	_ = x[SyncPublic-11]
+	_ = x[SyncPos-12]
+	_ = x[SyncPosBase-13]
+	_ = x[SyncObject-14]
+	_ = x[SyncObject1-15]
+	_ = x[SyncPkg-16]
+	_ = x[SyncPkgDef-17]
+	_ = x[SyncMethod-18]
+	_ = x[SyncType-19]
+	_ = x[SyncTypeIdx-20]
+	_ = x[SyncTypeParamNames-21]
+	_ = x[SyncSignature-22]
+	_ = x[SyncParams-23]
+	_ = x[SyncParam-24]
+	_ = x[SyncCodeObj-25]
+	_ = x[SyncSym-26]
+	_ = x[SyncLocalIdent-27]
+	_ = x[SyncSelector-28]
+	_ = x[SyncPrivate-29]
+	_ = x[SyncFuncExt-30]
+	_ = x[SyncVarExt-31]
+	_ = x[SyncTypeExt-32]
+	_ = x[SyncPragma-33]
+	_ = x[SyncExprList-34]
+	_ = x[SyncExprs-35]
+	_ = x[SyncExpr-36]
+	_ = x[SyncExprType-37]
+	_ = x[SyncAssign-38]
+	_ = x[SyncOp-39]
+	_ = x[SyncFuncLit-40]
+	_ = x[SyncCompLit-41]
+	_ = x[SyncDecl-42]
+	_ = x[SyncFuncBody-43]
+	_ = x[SyncOpenScope-44]
+	_ = x[SyncCloseScope-45]
+	_ = x[SyncCloseAnotherScope-46]
+	_ = x[SyncDeclNames-47]
+	_ = x[SyncDeclName-48]
+	_ = x[SyncStmts-49]
+	_ = x[SyncBlockStmt-50]
+	_ = x[SyncIfStmt-51]
+	_ = x[SyncForStmt-52]
+	_ = x[SyncSwitchStmt-53]
+	_ = x[SyncRangeStmt-54]
+	_ = x[SyncCaseClause-55]
+	_ = x[SyncCommClause-56]
+	_ = x[SyncSelectStmt-57]
+	_ = x[SyncDecls-58]
+	_ = x[SyncLabeledStmt-59]
+	_ = x[SyncUseObjLocal-60]
+	_ = x[SyncAddLocal-61]
+	_ = x[SyncLinkname-62]
+	_ = x[SyncStmt1-63]
+	_ = x[SyncStmtsEnd-64]
+	_ = x[SyncLabel-65]
+	_ = x[SyncOptLabel-66]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+
+func (i SyncMarker) String() string {
+	i -= 1
+	if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+		return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+	}
+	return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
diff --git a/go/packages/packagestest/expect.go b/go/packages/packagestest/expect.go
index 4302586..841099c 100644
--- a/go/packages/packagestest/expect.go
+++ b/go/packages/packagestest/expect.go
@@ -409,6 +409,7 @@
 }
 
 func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Range, []interface{}, error) {
+	tokFile := e.ExpectFileSet.File(n.Pos)
 	if len(args) < 1 {
 		return span.Range{}, nil, fmt.Errorf("missing argument")
 	}
@@ -419,10 +420,9 @@
 		// handle the special identifiers
 		switch arg {
 		case eofIdentifier:
-			// end of file identifier, look up the current file
-			f := e.ExpectFileSet.File(n.Pos)
-			eof := f.Pos(f.Size())
-			return span.NewRange(e.ExpectFileSet, eof, token.NoPos), args, nil
+			// end of file identifier
+			eof := tokFile.Pos(tokFile.Size())
+			return span.NewRange(tokFile, eof, eof), args, nil
 		default:
 			// look up an marker by name
 			mark, ok := e.markers[string(arg)]
@@ -436,19 +436,19 @@
 		if err != nil {
 			return span.Range{}, nil, err
 		}
-		if start == token.NoPos {
+		if !start.IsValid() {
 			return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
 		}
-		return span.NewRange(e.ExpectFileSet, start, end), args, nil
+		return span.NewRange(tokFile, start, end), args, nil
 	case *regexp.Regexp:
 		start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg)
 		if err != nil {
 			return span.Range{}, nil, err
 		}
-		if start == token.NoPos {
+		if !start.IsValid() {
 			return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg)
 		}
-		return span.NewRange(e.ExpectFileSet, start, end), args, nil
+		return span.NewRange(tokFile, start, end), args, nil
 	default:
 		return span.Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg)
 	}
diff --git a/godoc/godoc.go b/godoc/godoc.go
index 7ff2eab..6edb8f9 100644
--- a/godoc/godoc.go
+++ b/godoc/godoc.go
@@ -345,11 +345,16 @@
 	return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
 }
 
-func comment_htmlFunc(comment string) string {
+func comment_htmlFunc(info *PageInfo, comment string) string {
 	var buf bytes.Buffer
 	// TODO(gri) Provide list of words (e.g. function parameters)
 	//           to be emphasized by ToHTML.
-	doc.ToHTML(&buf, comment, nil) // does html-escaping
+
+	// godocToHTML is:
+	// - buf.Write(info.PDoc.HTML(comment)) on go1.19
+	// - go/doc.ToHTML(&buf, comment, nil) on other versions
+	godocToHTML(&buf, info.PDoc, comment)
+
 	return buf.String()
 }
 
diff --git a/godoc/static/package.html b/godoc/static/package.html
index 86445df..a04b08b 100644
--- a/godoc/static/package.html
+++ b/godoc/static/package.html
@@ -17,7 +17,7 @@
 
 	{{if $.IsMain}}
 		{{/* command documentation */}}
-		{{comment_html .Doc}}
+		{{comment_html $ .Doc}}
 	{{else}}
 		{{/* package documentation */}}
 		<div id="short-nav">
@@ -42,7 +42,7 @@
 			</div>
 			<div class="expanded">
 				<h2 class="toggleButton" title="Click to hide Overview section">Overview ▾</h2>
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				{{example_html $ ""}}
 			</div>
 		</div>
@@ -154,14 +154,14 @@
 		{{with .Consts}}
 			<h2 id="pkg-constants">Constants</h2>
 			{{range .}}
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				<pre>{{node_html $ .Decl true}}</pre>
 			{{end}}
 		{{end}}
 		{{with .Vars}}
 			<h2 id="pkg-variables">Variables</h2>
 			{{range .}}
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				<pre>{{node_html $ .Decl true}}</pre>
 			{{end}}
 		{{end}}
@@ -174,7 +174,7 @@
 				{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
 			</h2>
 			<pre>{{node_html $ .Decl true}}</pre>
-			{{comment_html .Doc}}
+			{{comment_html $ .Doc}}
 			{{example_html $ .Name}}
 			{{callgraph_html $ "" .Name}}
 
@@ -187,16 +187,16 @@
 				{{$since := since "type" "" .Name $.PDoc.ImportPath}}
 				{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
 			</h2>
-			{{comment_html .Doc}}
+			{{comment_html $ .Doc}}
 			<pre>{{node_html $ .Decl true}}</pre>
 
 			{{range .Consts}}
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				<pre>{{node_html $ .Decl true}}</pre>
 			{{end}}
 
 			{{range .Vars}}
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				<pre>{{node_html $ .Decl true}}</pre>
 			{{end}}
 
@@ -212,7 +212,7 @@
 					{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
 				</h3>
 				<pre>{{node_html $ .Decl true}}</pre>
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				{{example_html $ .Name}}
 				{{callgraph_html $ "" .Name}}
 			{{end}}
@@ -225,7 +225,7 @@
 					{{if $since}}<span title="Added in Go {{$since}}">{{$since}}</span>{{end}}
 				</h3>
 				<pre>{{node_html $ .Decl true}}</pre>
-				{{comment_html .Doc}}
+				{{comment_html $ .Doc}}
 				{{$name := printf "%s_%s" $tname .Name}}
 				{{example_html $ $name}}
 				{{callgraph_html $ .Recv .Name}}
@@ -238,7 +238,7 @@
 			<h2 id="pkg-note-{{$marker}}">{{noteTitle $marker | html}}s</h2>
 			<ul style="list-style: none; padding: 0;">
 			{{range .}}
-			<li><a href="{{posLink_url $ .}}" style="float: left;">&#x261e;</a> {{comment_html .Body}}</li>
+			<li><a href="{{posLink_url $ .}}" style="float: left;">&#x261e;</a> {{comment_html $ .Body}}</li>
 			{{end}}
 			</ul>
 		{{end}}
diff --git a/godoc/static/searchdoc.html b/godoc/static/searchdoc.html
index 679c02c..84dcb34 100644
--- a/godoc/static/searchdoc.html
+++ b/godoc/static/searchdoc.html
@@ -15,7 +15,7 @@
 				<a href="/{{$pkg_html}}">{{html .Package}}</a>.<a href="{{$doc_html}}">{{.Name}}</a>
 			{{end}}
 			{{if .Doc}}
-				<p>{{comment_html .Doc}}</p>
+				<p>{{comment_html $ .Doc}}</p>
 			{{else}}
 				<p><em>No documentation available</em></p>
 			{{end}}
diff --git a/godoc/static/static.go b/godoc/static/static.go
index ada60fa..d6e5f2d 100644
--- a/godoc/static/static.go
+++ b/godoc/static/static.go
@@ -83,7 +83,7 @@
 
 	"methodset.html": "<div\x20class=\"toggle\"\x20style=\"display:\x20none\">\x0a\x09<div\x20class=\"collapsed\">\x0a\x09\x09<p\x20class=\"exampleHeading\x20toggleButton\">\xe2\x96\xb9\x20<span\x20class=\"text\">Method\x20set</span></p>\x0a\x09</div>\x0a\x09<div\x20class=\"expanded\">\x0a\x09\x09<p\x20class=\"exampleHeading\x20toggleButton\">\xe2\x96\xbe\x20<span\x20class=\"text\">Method\x20set</span></p>\x0a\x09\x09<div\x20style=\"margin-left:\x201in\"\x20id='methodset-{{.Index}}'>...</div>\x0a\x09</div>\x0a</div>\x0a",
 
-	"package.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a<!--\x0a\x09Note:\x20Static\x20(i.e.,\x20not\x20template-generated)\x20href\x20and\x20id\x0a\x09attributes\x20start\x20with\x20\"pkg-\"\x20to\x20make\x20it\x20impossible\x20for\x0a\x09them\x20to\x20conflict\x20with\x20generated\x20attributes\x20(some\x20of\x20which\x0a\x09correspond\x20to\x20Go\x20identifiers).\x0a-->\x0a{{with\x20.PDoc}}\x0a\x09<script>\x0a\x09document.ANALYSIS_DATA\x20=\x20{{$.AnalysisData}};\x0a\x09document.CALLGRAPH\x20=\x20{{$.CallGraph}};\x0a\x09</script>\x0a\x0a\x09{{if\x20$.IsMain}}\x0a\x09\x09{{/*\x20command\x20documentation\x20*/}}\x0a\x09\x09{{comment_html\x20.Doc}}\x0a\x09{{else}}\x0a\x09\x09{{/*\x20package\x20documentation\x20*/}}\x0a\x09\x09<div\x20id=\"short-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><code>import\x20\"{{html\x20.ImportPath}}\"</code></dd>\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-overview\"\x20class=\"overviewLink\">Overview</a></dd>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-index\"\x20class=\"indexLink\">Index</a></dd>\x0a\x09\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-examples\"\x20class=\"examplesLink\">Examples</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Dirs}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-subdirectories\">Subdirectories</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09<!--\x20The\x20package's\x20Name\x20is\x20printed\x20as\x20title\x20by\x20the\x20top-level\x20template\x20-->\x0a\x09\x09<div\x20id=\"pkg-overview\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Overview\x20section\">Overview\x20\xe2\x96\xb9</h2>\x0a\x09\
x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Overview\x20section\">Overview\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20\"\"}}\x0a\x09\x09\x09</div>\x0a\x09\x09</div>\x0a\x0a\x09\x09<div\x20id=\"pkg-index\"\x20class=\"toggleVisible\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Index\x20section\">Index\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Index\x20section\">Index\x20\xe2\x96\xbe</h2>\x0a\x0a\x09\x09<!--\x20Table\x20of\x20contents\x20for\x20API;\x20must\x20be\x20named\x20manual-nav\x20to\x20turn\x20off\x20auto\x20nav.\x20-->\x0a\x09\x09\x09<div\x20id=\"manual-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{if\x20.Consts}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-constants\">Constants</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Vars}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-variables\">Variables</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$tname_html}}\">type\x20{{$tname_html}}</a></dd>\x0a\x09\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x0
9\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$tname_html}}.{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Notes}}\x0a\x09\x09\x09\x09{{range\x20$marker,\x20$item\x20:=\x20$.Notes}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09</div><!--\x20#manual-nav\x20-->\x0a\x0a\x09\x09{{if\x20$.Examples}}\x0a\x09\x09<div\x20id=\"pkg-examples\">\x0a\x09\x09\x09<h3>Examples</h3>\x0a\x09\x09\x09<div\x20class=\"js-expandAll\x20expandAll\x20collapsed\">(Expand\x20All)</div>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{range\x20$.Examples}}\x0a\x09\x09\x09<dd><a\x20class=\"exampleLink\"\x20href=\"#example_{{.Name}}\">{{example_name\x20.Name}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Filenames}}\x0a\x09\x09\x09<h3>Package\x20files</h3>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09<span\x20style=\"font-size:90%\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09<a\x20href=\"{{.|srcLink|html}}\">{{.|filename|html}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</span>\x0a\x09\x09\x09</p>\x0a\x09\x09{{end}}\x0a\x09\x09</div><!--\x20.expanded\x20-->\x0a\x09\x09</div><!--\x20#pkg-index\x20-->\x0a\x0a\x09\x09{{if\x20ne\x20$.CallGraph\x20\"null\"}}\x0a\x09\x09<div\x20id=\"pkg-callgraph\"\x20class=\"toggle\"\x20style=\"display:\x20none\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xbe</h2>\x0a
\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20In\x20the\x20call\x20graph\x20viewer\x20below,\x20each\x20node\x0a\x09\x09\x09\x20\x20is\x20a\x20function\x20belonging\x20to\x20this\x20package\x0a\x09\x09\x09\x20\x20and\x20its\x20children\x20are\x20the\x20functions\x20it\x0a\x09\x09\x09\x20\x20calls&mdash;perhaps\x20dynamically.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20The\x20root\x20nodes\x20are\x20the\x20entry\x20points\x20of\x20the\x0a\x09\x09\x09\x20\x20package:\x20functions\x20that\x20may\x20be\x20called\x20from\x0a\x09\x09\x09\x20\x20outside\x20the\x20package.\x0a\x09\x09\x09\x20\x20There\x20may\x20be\x20non-exported\x20or\x20anonymous\x0a\x09\x09\x09\x20\x20functions\x20among\x20them\x20if\x20they\x20are\x20called\x0a\x09\x09\x09\x20\x20dynamically\x20from\x20another\x20package.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Click\x20a\x20node\x20to\x20visit\x20that\x20function's\x20source\x20code.\x0a\x09\x09\x09\x20\x20From\x20there\x20you\x20can\x20visit\x20its\x20callers\x20by\x0a\x09\x09\x09\x20\x20clicking\x20its\x20declaring\x20<code>func</code>\x0a\x09\x09\x09\x20\x20token.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Functions\x20may\x20be\x20omitted\x20if\x20they\x20were\x0a\x09\x09\x09\x20\x20determined\x20to\x20be\x20unreachable\x20in\x20the\x0a\x09\x09\x09\x20\x20particular\x20programs\x20or\x20tests\x20that\x20were\x0a\x09\x09\x09\x20\x20analyzed.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<!--\x20Zero\x20means\x20show\x20all\x20package\x20entry\x20points.\x20-->\x0a\x09\x09\x09<ul\x20style=\"margin-left:\x200.5in\"\x20id=\"callgraph-0\"\x20class=\"treeview\"></ul>\x0a\x09\x09</div>\x0a\x09\x09</div>\x20<!--\x20#pkg-callgraph\x20-->\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Consts}}\x0a\x09\x09\x09<h2\x20id=\"pkg-constants\">Constants</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\
x09{{end}}\x0a\x09\x09{{with\x20.Vars}}\x0a\x09\x09\x09<h2\x20id=\"pkg-variables\">Variables</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09{{/*\x20Name\x20is\x20a\x20string\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09{{$tname\x20:=\x20.Name}}\x0a\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$tname_html}}\">type\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$tname_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"type\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x0a\x09\x09\x09{{range\x20.Consts}}\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Vars}}\x0a\x09\x09\x09\x09{{c
omment_html\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{example_html\x20$\x20$tname}}\x0a\x09\x09\x09{{implements_html\x20$\x20$tname}}\x0a\x09\x09\x09{{methodset_html\x20$\x20$tname}}\x0a\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$tname_html}}.{{$name_html}}\">func\x20({{html\x20.Recv}})\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}.{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"method\"\x20.Recv\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20.Doc}}\x0a\x09\x09\x09\x09{{$name\x20:=\x20printf\x20\"%s_%s\"\x20$tname\x20.Name}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20$name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20.Recv\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{e
nd}}\x0a\x0a\x09{{with\x20$.Notes}}\x0a\x09\x09{{range\x20$marker,\x20$content\x20:=\x20.}}\x0a\x09\x09\x09<h2\x20id=\"pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</h2>\x0a\x09\x09\x09<ul\x20style=\"list-style:\x20none;\x20padding:\x200;\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09<li><a\x20href=\"{{posLink_url\x20$\x20.}}\"\x20style=\"float:\x20left;\">&#x261e;</a>\x20{{comment_html\x20.Body}}</li>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</ul>\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09<a\x20href=\"{{$filename|srcLink|html}}\">{{$filename|filename|html}}</a>:<pre>{{node_html\x20$\x20$ast\x20false}}</pre>\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09<h2\x20id=\"pkg-subdirectories\">Subdirectories</h2>\x0a\x09{{end}}\x0a\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09<table>\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09{{if\x20not\x20(or\x20(eq\x20$.Dirname\x20\"/src/cmd\")\x20$.DirFlat)}}\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<td\x20colspan=\"2\"><a\x20href=\"..\">..</a></td>\x0a\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x
20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x09\x09</table>\x0a\x09</div>\x0a{{end}}\x0a",
+	"package.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a<!--\x0a\x09Note:\x20Static\x20(i.e.,\x20not\x20template-generated)\x20href\x20and\x20id\x0a\x09attributes\x20start\x20with\x20\"pkg-\"\x20to\x20make\x20it\x20impossible\x20for\x0a\x09them\x20to\x20conflict\x20with\x20generated\x20attributes\x20(some\x20of\x20which\x0a\x09correspond\x20to\x20Go\x20identifiers).\x0a-->\x0a{{with\x20.PDoc}}\x0a\x09<script>\x0a\x09document.ANALYSIS_DATA\x20=\x20{{$.AnalysisData}};\x0a\x09document.CALLGRAPH\x20=\x20{{$.CallGraph}};\x0a\x09</script>\x0a\x0a\x09{{if\x20$.IsMain}}\x0a\x09\x09{{/*\x20command\x20documentation\x20*/}}\x0a\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09{{else}}\x0a\x09\x09{{/*\x20package\x20documentation\x20*/}}\x0a\x09\x09<div\x20id=\"short-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><code>import\x20\"{{html\x20.ImportPath}}\"</code></dd>\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-overview\"\x20class=\"overviewLink\">Overview</a></dd>\x0a\x09\x09\x09<dd><a\x20href=\"#pkg-index\"\x20class=\"indexLink\">Index</a></dd>\x0a\x09\x09\x09{{if\x20$.Examples}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-examples\"\x20class=\"examplesLink\">Examples</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Dirs}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-subdirectories\">Subdirectories</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09<!--\x20The\x20package's\x20Name\x20is\x20printed\x20as\x20title\x20by\x20the\x20top-level\x20template\x20-->\x0a\x09\x09<div\x20id=\"pkg-overview\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Overview\x20section\">Overview\x20\xe2\x96\xb9</h2>\x0a
\x09\x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Overview\x20section\">Overview\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20\"\"}}\x0a\x09\x09\x09</div>\x0a\x09\x09</div>\x0a\x0a\x09\x09<div\x20id=\"pkg-index\"\x20class=\"toggleVisible\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Index\x20section\">Index\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Index\x20section\">Index\x20\xe2\x96\xbe</h2>\x0a\x0a\x09\x09<!--\x20Table\x20of\x20contents\x20for\x20API;\x20must\x20be\x20named\x20manual-nav\x20to\x20turn\x20off\x20auto\x20nav.\x20-->\x0a\x09\x09\x09<div\x20id=\"manual-nav\">\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{if\x20.Consts}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-constants\">Constants</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Vars}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-variables\">Variables</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#{{$tname_html}}\">type\x20{{$tname_html}}</a></dd>\x0a\x09\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\
x09\x09\x09\x09<dd>&nbsp;\x20&nbsp;\x20<a\x20href=\"#{{$tname_html}}.{{$name_html}}\">{{node_html\x20$\x20.Decl\x20false\x20|\x20sanitize}}</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20$.Notes}}\x0a\x09\x09\x09\x09{{range\x20$marker,\x20$item\x20:=\x20$.Notes}}\x0a\x09\x09\x09\x09<dd><a\x20href=\"#pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</a></dd>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09\x09</div><!--\x20#manual-nav\x20-->\x0a\x0a\x09\x09{{if\x20$.Examples}}\x0a\x09\x09<div\x20id=\"pkg-examples\">\x0a\x09\x09\x09<h3>Examples</h3>\x0a\x09\x09\x09<div\x20class=\"js-expandAll\x20expandAll\x20collapsed\">(Expand\x20All)</div>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09{{range\x20$.Examples}}\x0a\x09\x09\x09<dd><a\x20class=\"exampleLink\"\x20href=\"#example_{{.Name}}\">{{example_name\x20.Name}}</a></dd>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Filenames}}\x0a\x09\x09\x09<h3>Package\x20files</h3>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09<span\x20style=\"font-size:90%\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09<a\x20href=\"{{.|srcLink|html}}\">{{.|filename|html}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</span>\x0a\x09\x09\x09</p>\x0a\x09\x09{{end}}\x0a\x09\x09</div><!--\x20.expanded\x20-->\x0a\x09\x09</div><!--\x20#pkg-index\x20-->\x0a\x0a\x09\x09{{if\x20ne\x20$.CallGraph\x20\"null\"}}\x0a\x09\x09<div\x20id=\"pkg-callgraph\"\x20class=\"toggle\"\x20style=\"display:\x20none\">\x0a\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xb9</h2>\x0a\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Internal\x20Call\x20Graph\x20section\">Internal\x20call\x20graph\x20\xe2\x96\xb
e</h2>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20In\x20the\x20call\x20graph\x20viewer\x20below,\x20each\x20node\x0a\x09\x09\x09\x20\x20is\x20a\x20function\x20belonging\x20to\x20this\x20package\x0a\x09\x09\x09\x20\x20and\x20its\x20children\x20are\x20the\x20functions\x20it\x0a\x09\x09\x09\x20\x20calls&mdash;perhaps\x20dynamically.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20The\x20root\x20nodes\x20are\x20the\x20entry\x20points\x20of\x20the\x0a\x09\x09\x09\x20\x20package:\x20functions\x20that\x20may\x20be\x20called\x20from\x0a\x09\x09\x09\x20\x20outside\x20the\x20package.\x0a\x09\x09\x09\x20\x20There\x20may\x20be\x20non-exported\x20or\x20anonymous\x0a\x09\x09\x09\x20\x20functions\x20among\x20them\x20if\x20they\x20are\x20called\x0a\x09\x09\x09\x20\x20dynamically\x20from\x20another\x20package.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Click\x20a\x20node\x20to\x20visit\x20that\x20function's\x20source\x20code.\x0a\x09\x09\x09\x20\x20From\x20there\x20you\x20can\x20visit\x20its\x20callers\x20by\x0a\x09\x09\x09\x20\x20clicking\x20its\x20declaring\x20<code>func</code>\x0a\x09\x09\x09\x20\x20token.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<p>\x0a\x09\x09\x09\x20\x20Functions\x20may\x20be\x20omitted\x20if\x20they\x20were\x0a\x09\x09\x09\x20\x20determined\x20to\x20be\x20unreachable\x20in\x20the\x0a\x09\x09\x09\x20\x20particular\x20programs\x20or\x20tests\x20that\x20were\x0a\x09\x09\x09\x20\x20analyzed.\x0a\x09\x09\x09</p>\x0a\x09\x09\x09<!--\x20Zero\x20means\x20show\x20all\x20package\x20entry\x20points.\x20-->\x0a\x09\x09\x09<ul\x20style=\"margin-left:\x200.5in\"\x20id=\"callgraph-0\"\x20class=\"treeview\"></ul>\x0a\x09\x09</div>\x0a\x09\x09</div>\x20<!--\x20#pkg-callgraph\x20-->\x0a\x09\x09{{end}}\x0a\x0a\x09\x09{{with\x20.Consts}}\x0a\x09\x09\x09<h2\x20id=\"pkg-constants\">Constants</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{
{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{with\x20.Vars}}\x0a\x09\x09\x09<h2\x20id=\"pkg-variables\">Variables</h2>\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09{{/*\x20Name\x20is\x20a\x20string\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x0a\x09\x09{{end}}\x0a\x09\x09{{range\x20.Types}}\x0a\x09\x09\x09{{$tname\x20:=\x20.Name}}\x0a\x09\x09\x09{{$tname_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09<h2\x20id=\"{{$tname_html}}\">type\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$tname_html}}</a>\x0a\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"type\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09</h2>\x0a\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x0a\x09\x09\x09{{range\x20.Consts}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{rang
e\x20.Vars}}\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{example_html\x20$\x20$tname}}\x0a\x09\x09\x09{{implements_html\x20$\x20$tname}}\x0a\x09\x09\x09{{methodset_html\x20$\x20$tname}}\x0a\x0a\x09\x09\x09{{range\x20.Funcs}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$name_html}}\">func\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"func\"\x20\"\"\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20.Name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20\"\"\x20.Name}}\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.Methods}}\x0a\x09\x09\x09\x09{{$name_html\x20:=\x20html\x20.Name}}\x0a\x09\x09\x09\x09<h3\x20id=\"{{$tname_html}}.{{$name_html}}\">func\x20({{html\x20.Recv}})\x20<a\x20href=\"{{posLink_url\x20$\x20.Decl}}\">{{$name_html}}</a>\x0a\x09\x09\x09\x09\x09<a\x20class=\"permalink\"\x20href=\"#{{$tname_html}}.{{$name_html}}\">&#xb6;</a>\x0a\x09\x09\x09\x09\x09{{$since\x20:=\x20since\x20\"method\"\x20.Recv\x20.Name\x20$.PDoc.ImportPath}}\x0a\x09\x09\x09\x09\x09{{if\x20$since}}<span\x20title=\"Added\x20in\x20Go\x20{{$since}}\">{{$since}}</span>{{end}}\x0a\x09\x09\x09\x09</h3>\x0a\x09\x09\x09\x09<pre>{{node_html\x20$\x20.Decl\x20true}}</pre>\x0a\x09\x09\x09\x09{{comment_html\x20$\x20.Doc}}\x0a\x09\x09\x09\x09{{$name\x20:=\x20printf\x20\"%s_%s\"\x20$tname\x20.Name}}\x0a\x09\x09\x09\x09{{example_html\x20$\x20$name}}\x0a\x09\x09\x09\x09{{callgraph_html\x20$\x20.Recv\x20.Name}}\x0
a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x0a\x09{{with\x20$.Notes}}\x0a\x09\x09{{range\x20$marker,\x20$content\x20:=\x20.}}\x0a\x09\x09\x09<h2\x20id=\"pkg-note-{{$marker}}\">{{noteTitle\x20$marker\x20|\x20html}}s</h2>\x0a\x09\x09\x09<ul\x20style=\"list-style:\x20none;\x20padding:\x200;\">\x0a\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09<li><a\x20href=\"{{posLink_url\x20$\x20.}}\"\x20style=\"float:\x20left;\">&#x261e;</a>\x20{{comment_html\x20$\x20.Body}}</li>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09</ul>\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09<a\x20href=\"{{$filename|srcLink|html}}\">{{$filename|filename|html}}</a>:<pre>{{node_html\x20$\x20$ast\x20false}}</pre>\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09<h2\x20id=\"pkg-subdirectories\">Subdirectories</h2>\x0a\x09{{end}}\x0a\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09<table>\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09{{if\x20not\x20(or\x20(eq\x20$.Dirname\x20\"/src/cmd\")\x20$.DirFlat)}}\x0a\x09\x09\x09<tr>\x0a\x09\x09\x09\x09<td\x20colspan=\"2\"><a\x20href=\"..\">..</a></td>\x0a\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x0a\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}
px;\">\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09</tr>\x0a\x09\x09\x09{{end}}\x0a\x09\x09</table>\x0a\x09</div>\x0a{{end}}\x0a",
 
 	"packageroot.html": "<!--\x0a\x09Copyright\x202018\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a<!--\x0a\x09Note:\x20Static\x20(i.e.,\x20not\x20template-generated)\x20href\x20and\x20id\x0a\x09attributes\x20start\x20with\x20\"pkg-\"\x20to\x20make\x20it\x20impossible\x20for\x0a\x09them\x20to\x20conflict\x20with\x20generated\x20attributes\x20(some\x20of\x20which\x0a\x09correspond\x20to\x20Go\x20identifiers).\x0a-->\x0a{{with\x20.PAst}}\x0a\x09{{range\x20$filename,\x20$ast\x20:=\x20.}}\x0a\x09\x09<a\x20href=\"{{$filename|srcLink|html}}\">{{$filename|filename|html}}</a>:<pre>{{node_html\x20$\x20$ast\x20false}}</pre>\x0a\x09{{end}}\x0a{{end}}\x0a\x0a{{with\x20.Dirs}}\x0a\x09{{/*\x20DirList\x20entries\x20are\x20numbers\x20and\x20strings\x20-\x20no\x20need\x20for\x20FSet\x20*/}}\x0a\x09{{if\x20$.PDoc}}\x0a\x09\x09<h2\x20id=\"pkg-subdirectories\">Subdirectories</h2>\x0a\x09{{end}}\x0a\x09\x09<div\x20id=\"manual-nav\">\x0a\x09\x09\x09<img\x20alt=\"\"\x20class=\"gopher\"\x20src=\"/lib/godoc/gopher/pkg.png\"/>\x0a\x09\x09\x09<dl>\x0a\x09\x09\x09\x09<dt><a\x20href=\"#stdlib\">Standard\x20library</a></dt>\x0a\x09\x09\x09\x09{{if\x20hasThirdParty\x20.List\x20}}\x0a\x09\x09\x09\x09\x09<dt><a\x20href=\"#thirdparty\">Third\x20party</a></dt>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09<dt><a\x20href=\"#other\">Other\x20packages</a></dt>\x0a\x09\x09\x09\x09<dd><a\x20href=\"#subrepo\">Sub-repositories</a></dd>\x0a\x09\x09\x09\x09<dd><a\x20href=\"#community\">Community</a></dd>\x0a\x09\x09\x09</dl>\x0a\x09\x09</div>\x0a\x0a\x09\x09<div\x20id=\"stdlib\"\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Standard\x20library\x20section\">Standard\x20library\x20\xe2\x96\xb9</h2>\x0a\x09\x09\x09</div>\x0a\x
09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Standard\x20library\x20section\">Standard\x20library\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09\x09\x09\x09<table>\x0a\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09{{if\x20eq\x20.RootType\x20\"GOROOT\"}}\x0a\x09\x09\x09\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09</tr>\x0a\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09</table>\x0a\x09\x09\x09\x09</div>\x20<!--\x20.pkg-dir\x20-->\x0a\x09\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09</div>\x20<!--\x20#stdlib\x20.toggleVisible\x20-->\x0a\x0a\x09{{if\x20hasThirdParty\x20.List\x20}}\x0a\x09\x09<div\x20id=\"thirdparty\"
\x20class=\"toggleVisible\">\x0a\x09\x09\x09<div\x20class=\"collapsed\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20show\x20Third\x20party\x20section\">Third\x20party\x20\xe2\x96\xb9</h2>\x0a\x09\x09\x09</div>\x0a\x09\x09\x09<div\x20class=\"expanded\">\x0a\x09\x09\x09\x09<h2\x20class=\"toggleButton\"\x20title=\"Click\x20to\x20hide\x20Third\x20party\x20section\">Third\x20party\x20\xe2\x96\xbe</h2>\x0a\x09\x09\x09\x09<div\x20class=\"pkg-dir\">\x0a\x09\x09\x09\x09\x09<table>\x0a\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-name\">Name</th>\x0a\x09\x09\x09\x09\x09\x09\x09<th\x20class=\"pkg-synopsis\">Synopsis</th>\x0a\x09\x09\x09\x09\x09\x09</tr>\x0a\x0a\x09\x09\x09\x09\x09\x09{{range\x20.List}}\x0a\x09\x09\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20eq\x20.RootType\x20\"GOPATH\"}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20$.DirFlat}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{if\x20.HasPkg}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-name\"\x20style=\"padding-left:\x20{{multiply\x20.Depth\x2020}}px;\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09<a\x20href=\"{{html\x20.Path}}/{{modeQueryString\x20$.Mode\x20|\x20html}}\">{{html\x20.Name}}</a>\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09<td\x20class=\"pkg-synopsis\">\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09\x09{{html\x20.Synopsis}}\x0a\x09\x09\x09\x09\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09\x09\x09</tr>\x0a\x09\x09\x09
\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09</table>\x0a\x09\x09\x09\x09</div>\x20<!--\x20.pkg-dir\x20-->\x0a\x09\x09\x09</div>\x20<!--\x20.expanded\x20-->\x0a\x09\x09</div>\x20<!--\x20#stdlib\x20.toggleVisible\x20-->\x0a\x09{{end}}\x0a\x0a\x09<h2\x20id=\"other\">Other\x20packages</h2>\x0a\x09<h3\x20id=\"subrepo\">Sub-repositories</h3>\x0a\x09<p>\x0a\x09These\x20packages\x20are\x20part\x20of\x20the\x20Go\x20Project\x20but\x20outside\x20the\x20main\x20Go\x20tree.\x0a\x09They\x20are\x20developed\x20under\x20looser\x20<a\x20href=\"https://golang.org/doc/go1compat\">compatibility\x20requirements</a>\x20than\x20the\x20Go\x20core.\x0a\x09Install\x20them\x20with\x20\"<a\x20href=\"/cmd/go/#hdr-Download_and_install_packages_and_dependencies\">go\x20get</a>\".\x0a\x09</p>\x0a\x09<ul>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/benchmarks\">benchmarks</a>\x20\xe2\x80\x94\x20benchmarks\x20to\x20measure\x20Go\x20as\x20it\x20is\x20developed.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/blog\">blog</a>\x20\xe2\x80\x94\x20<a\x20href=\"//blog.golang.org\">blog.golang.org</a>'s\x20implementation.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/build\">build</a>\x20\xe2\x80\x94\x20<a\x20href=\"//build.golang.org\">build.golang.org</a>'s\x20implementation.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/crypto\">crypto</a>\x20\xe2\x80\x94\x20additional\x20cryptography\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/debug\">debug</a>\x20\xe2\x80\x94\x20an\x20experimental\x20debugger\x20for\x20Go.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/image\">image</a>\x20\xe2\x80\x94\x20additional\x20imaging\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/mobile\">mobile</a>\x20\xe2\x80\x94\x20experimental\x20support\x20for\x20Go\x20on\x20mobile\x20platforms.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/net\">net</a>\x20\xe2\x80\x94\x20additional\x20networking\x20packages.</li>
\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/perf\">perf</a>\x20\xe2\x80\x94\x20packages\x20and\x20tools\x20for\x20performance\x20measurement,\x20storage,\x20and\x20analysis.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/pkgsite\">pkgsite</a>\x20\xe2\x80\x94\x20home\x20of\x20the\x20pkg.go.dev\x20website.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/review\">review</a>\x20\xe2\x80\x94\x20a\x20tool\x20for\x20working\x20with\x20Gerrit\x20code\x20reviews.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/sync\">sync</a>\x20\xe2\x80\x94\x20additional\x20concurrency\x20primitives.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/sys\">sys</a>\x20\xe2\x80\x94\x20packages\x20for\x20making\x20system\x20calls.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/text\">text</a>\x20\xe2\x80\x94\x20packages\x20for\x20working\x20with\x20text.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/time\">time</a>\x20\xe2\x80\x94\x20additional\x20time\x20packages.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/tools\">tools</a>\x20\xe2\x80\x94\x20godoc,\x20goimports,\x20gorename,\x20and\x20other\x20tools.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/tour\">tour</a>\x20\xe2\x80\x94\x20<a\x20href=\"//tour.golang.org\">tour.golang.org</a>'s\x20implementation.</li>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev/golang.org/x/exp\">exp</a>\x20\xe2\x80\x94\x20experimental\x20and\x20deprecated\x20packages\x20(handle\x20with\x20care;\x20may\x20change\x20without\x20warning).</li>\x0a\x09</ul>\x0a\x0a\x09<h3\x20id=\"community\">Community</h3>\x0a\x09<p>\x0a\x09These\x20services\x20can\x20help\x20you\x20find\x20Open\x20Source\x20packages\x20provided\x20by\x20the\x20community.\x0a\x09</p>\x0a\x09<ul>\x0a\x09\x09<li><a\x20href=\"//pkg.go.dev\">Pkg.go.dev</a>\x20-\x20the\x20Go\x20package\x20discovery\x20site.</li>\x0a\x09\x09<li><a\x20href=\"/wiki/Projects\">Projects\x20at\x20the\x20Go\x20Wiki</a>\x20-\x20a\x20
curated\x20list\x20of\x20Go\x20projects.</li>\x0a\x09</ul>\x0a{{end}}\x0a",
 
@@ -95,7 +95,7 @@
 
 	"searchcode.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{$query_url\x20:=\x20urlquery\x20.Query}}\x0a{{if\x20not\x20.Idents}}\x0a\x09{{with\x20.Pak}}\x0a\x09\x09<h2\x20id=\"Packages\">Package\x20{{html\x20$.Query}}</h2>\x0a\x09\x09<p>\x0a\x09\x09<table\x20class=\"layout\">\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09<tr><td><a\x20href=\"/{{$pkg_html}}\">{{$pkg_html}}</a></td></tr>\x0a\x09\x09{{end}}\x0a\x09\x09</table>\x0a\x09\x09</p>\x0a\x09{{end}}\x0a{{end}}\x0a{{with\x20.Hit}}\x0a\x09{{with\x20.Decls}}\x0a\x09\x09<h2\x20id=\"Global\">Package-level\x20declarations</h2>\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09<h3\x20id=\"Global_{{$pkg_html}}\">package\x20<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Pak.Name}}</a></h3>\x0a\x09\x09\x09{{range\x20.Files}}\x0a\x09\x09\x09\x09{{$file\x20:=\x20.File.Path}}\x0a\x09\x09\x09\x09{{range\x20.Groups}}\x0a\x09\x09\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line\x20:=\x20infoLine\x20.}}\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x20$line\x20|\x20html}}\">{{$file}}:{{$line}}</a>\x0a\x09\x09\x09\x09\x09\x09{{infoSnippet_html\x20.}}\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a\x09{{with\x20.Others}}\x0a\x09\x09<h2\x20id=\"Local\">Local\x20declarations\x20and\x20uses</h2>\x0a\x09\x09{{range\x20.}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Pak.Path\x20|\x20html}}\x0a\x09\x09\x09<h3\x20id=\"Local_{{$pkg_html}}\">package\x20<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Pak.Name}}</a></h3>\x0a\x09\x09\x09{{range\x20.Files}}\x0a\x09\x09\x09\x09{{$file\x20:=\x20.Fi
le.Path}}\x0a\x09\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x200\x20|\x20html}}\">{{$file}}</a>\x0a\x09\x09\x09\x09<table\x20class=\"layout\">\x0a\x09\x09\x09\x09{{range\x20.Groups}}\x0a\x09\x09\x09\x09\x09<tr>\x0a\x09\x09\x09\x09\x09<td\x20width=\"25\"></td>\x0a\x09\x09\x09\x09\x09<th\x20align=\"left\"\x20valign=\"top\">{{index\x20.\x200\x20|\x20infoKind_html}}</th>\x0a\x09\x09\x09\x09\x09<td\x20align=\"left\"\x20width=\"4\"></td>\x0a\x09\x09\x09\x09\x09<td>\x0a\x09\x09\x09\x09\x09{{range\x20.}}\x0a\x09\x09\x09\x09\x09\x09{{$line\x20:=\x20infoLine\x20.}}\x0a\x09\x09\x09\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x20$line\x20|\x20html}}\">{{$line}}</a>\x0a\x09\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09\x09</td>\x0a\x09\x09\x09\x09\x09</tr>\x0a\x09\x09\x09\x09{{end}}\x0a\x09\x09\x09\x09</table>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a",
 
-	"searchdoc.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{range\x20$key,\x20$val\x20:=\x20.Idents}}\x0a\x09{{if\x20$val}}\x0a\x09\x09<h2\x20id=\"{{$key.Name}}\">{{$key.Name}}</h2>\x0a\x09\x09{{range\x20$val}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Path\x20|\x20html}}\x0a\x09\x09\x09{{if\x20eq\x20\"Packages\"\x20$key.Name}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09{{$doc_html\x20:=\x20docLink\x20.Path\x20.Name|\x20html}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Package}}</a>.<a\x20href=\"{{$doc_html}}\">{{.Name}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Doc}}\x0a\x09\x09\x09\x09<p>{{comment_html\x20.Doc}}</p>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09<p><em>No\x20documentation\x20available</em></p>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a",
+	"searchdoc.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{range\x20$key,\x20$val\x20:=\x20.Idents}}\x0a\x09{{if\x20$val}}\x0a\x09\x09<h2\x20id=\"{{$key.Name}}\">{{$key.Name}}</h2>\x0a\x09\x09{{range\x20$val}}\x0a\x09\x09\x09{{$pkg_html\x20:=\x20pkgLink\x20.Path\x20|\x20html}}\x0a\x09\x09\x09{{if\x20eq\x20\"Packages\"\x20$key.Name}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Path}}</a>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09{{$doc_html\x20:=\x20docLink\x20.Path\x20.Name|\x20html}}\x0a\x09\x09\x09\x09<a\x20href=\"/{{$pkg_html}}\">{{html\x20.Package}}</a>.<a\x20href=\"{{$doc_html}}\">{{.Name}}</a>\x0a\x09\x09\x09{{end}}\x0a\x09\x09\x09{{if\x20.Doc}}\x0a\x09\x09\x09\x09<p>{{comment_html\x20$\x20.Doc}}</p>\x0a\x09\x09\x09{{else}}\x0a\x09\x09\x09\x09<p><em>No\x20documentation\x20available</em></p>\x0a\x09\x09\x09{{end}}\x0a\x09\x09{{end}}\x0a\x09{{end}}\x0a{{end}}\x0a",
 
 	"searchtxt.html": "<!--\x0a\x09Copyright\x202009\x20The\x20Go\x20Authors.\x20All\x20rights\x20reserved.\x0a\x09Use\x20of\x20this\x20source\x20code\x20is\x20governed\x20by\x20a\x20BSD-style\x0a\x09license\x20that\x20can\x20be\x20found\x20in\x20the\x20LICENSE\x20file.\x0a-->\x0a{{$query_url\x20:=\x20urlquery\x20.Query}}\x0a{{with\x20.Textual}}\x0a\x09{{if\x20$.Complete}}\x0a\x09\x09<h2\x20id=\"Textual\">{{html\x20$.Found}}\x20textual\x20occurrences</h2>\x0a\x09{{else}}\x0a\x09\x09<h2\x20id=\"Textual\">More\x20than\x20{{html\x20$.Found}}\x20textual\x20occurrences</h2>\x0a\x09\x09<p>\x0a\x09\x09<span\x20class=\"alert\"\x20style=\"font-size:120%\">Not\x20all\x20files\x20or\x20lines\x20containing\x20\"{{html\x20$.Query}}\"\x20are\x20shown.</span>\x0a\x09\x09</p>\x0a\x09{{end}}\x0a\x09<p>\x0a\x09<table\x20class=\"layout\">\x0a\x09{{range\x20.}}\x0a\x09\x09{{$file\x20:=\x20.Filename}}\x0a\x09\x09<tr>\x0a\x09\x09<td\x20align=\"left\"\x20valign=\"top\">\x0a\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x200}}\">{{$file}}</a>:\x0a\x09\x09</td>\x0a\x09\x09<td\x20align=\"left\"\x20width=\"4\"></td>\x0a\x09\x09<th\x20align=\"left\"\x20valign=\"top\">{{len\x20.Lines}}</th>\x0a\x09\x09<td\x20align=\"left\"\x20width=\"4\"></td>\x0a\x09\x09<td\x20align=\"left\">\x0a\x09\x09{{range\x20.Lines}}\x0a\x09\x09\x09<a\x20href=\"{{queryLink\x20$file\x20$query_url\x20.}}\">{{html\x20.}}</a>\x0a\x09\x09{{end}}\x0a\x09\x09{{if\x20not\x20$.Complete}}\x0a\x09\x09\x09...\x0a\x09\x09{{end}}\x0a\x09\x09</td>\x0a\x09\x09</tr>\x0a\x09{{end}}\x0a\x09{{if\x20not\x20$.Complete}}\x0a\x09\x09<tr><td\x20align=\"left\">...</td></tr>\x0a\x09{{end}}\x0a\x09</table>\x0a\x09</p>\x0a{{end}}\x0a",
 
diff --git a/godoc/tohtml_go119.go b/godoc/tohtml_go119.go
new file mode 100644
index 0000000..6dbf721
--- /dev/null
+++ b/godoc/tohtml_go119.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package godoc
+
+import (
+	"bytes"
+	"go/doc"
+)
+
+func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) {
+	buf.Write(pkg.HTML(comment))
+}
diff --git a/godoc/tohtml_other.go b/godoc/tohtml_other.go
new file mode 100644
index 0000000..a1dcf2e
--- /dev/null
+++ b/godoc/tohtml_other.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.19
+// +build !go1.19
+
+package godoc
+
+import (
+	"bytes"
+	"go/doc"
+)
+
+func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) {
+	doc.ToHTML(buf, comment, nil)
+}
diff --git a/gopls/doc/analyzers.md b/gopls/doc/analyzers.md
index fd65c3a..90a5118 100644
--- a/gopls/doc/analyzers.md
+++ b/gopls/doc/analyzers.md
@@ -497,6 +497,17 @@
 
 **Enabled by default.**
 
+## **timeformat**
+
+check for calls of (time.Time).Format or time.Parse with 2006-02-01
+
+The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)
+format. Internationally, "yyyy-dd-mm" does not occur in common calendar date
+standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.
+
+
+**Enabled by default.**
+
 ## **unmarshal**
 
 report passing non-pointer or non-interface values to unmarshal
@@ -657,6 +668,15 @@
 
 **Enabled by default.**
 
+## **unusedvariable**
+
+check for unused variables
+
+The unusedvariable analyzer suggests fixes for unused variables errors.
+
+
+**Disabled by default. Enable it by setting `"analyses": {"unusedvariable": true}`.**
+
 ## **fillstruct**
 
 note incomplete struct initializations
diff --git a/gopls/doc/commands.md b/gopls/doc/commands.md
index f868a48..c202b51 100644
--- a/gopls/doc/commands.md
+++ b/gopls/doc/commands.md
@@ -274,33 +274,13 @@
 
 ```
 {
-	// Dir is the directory from which vulncheck will run from.
-	"Dir": string,
+	// Any document in the directory from which govulncheck will run.
+	"URI": string,
 	// Package pattern. E.g. "", ".", "./...".
 	"Pattern": string,
 }
 ```
 
-Result:
-
-```
-{
-	"Vuln": []{
-		"ID": string,
-		"Details": string,
-		"Aliases": []string,
-		"Symbol": string,
-		"PkgPath": string,
-		"ModPath": string,
-		"URL": string,
-		"CurrentVersion": string,
-		"FixedVersion": string,
-		"CallStacks": [][]golang.org/x/tools/internal/lsp/command.StackEntry,
-		"CallStackSummaries": []string,
-	},
-}
-```
-
 ### **Start the gopls debug server**
 Identifier: `gopls.start_debugging`
 
diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md
index 15957b5..2ae9a28 100644
--- a/gopls/doc/inlayHints.md
+++ b/gopls/doc/inlayHints.md
@@ -6,67 +6,74 @@
 ## **assignVariableTypes**
 
 Enable/disable inlay hints for variable types in assign statements:
-
-	i/* int/*, j/* int/* := 0, len(r)-1
+```go
+	i/* int*/, j/* int*/ := 0, len(r)-1
+```
 
 **Disabled by default. Enable it by setting `"hints": {"assignVariableTypes": true}`.**
 
 ## **compositeLiteralFields**
 
 Enable/disable inlay hints for composite literal field names:
-
-	{in: "Hello, world", want: "dlrow ,olleH"}
+```go
+	{/*in: */"Hello, world", /*want: */"dlrow ,olleH"}
+```
 
 **Disabled by default. Enable it by setting `"hints": {"compositeLiteralFields": true}`.**
 
 ## **compositeLiteralTypes**
 
 Enable/disable inlay hints for composite literal types:
-
+```go
 	for _, c := range []struct {
 		in, want string
 	}{
 		/*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"},
 	}
+```
 
 **Disabled by default. Enable it by setting `"hints": {"compositeLiteralTypes": true}`.**
 
 ## **constantValues**
 
 Enable/disable inlay hints for constant values:
-
+```go
 	const (
 		KindNone   Kind = iota/* = 0*/
 		KindPrint/*  = 1*/
 		KindPrintf/* = 2*/
 		KindErrorf/* = 3*/
 	)
+```
 
 **Disabled by default. Enable it by setting `"hints": {"constantValues": true}`.**
 
 ## **functionTypeParameters**
 
 Enable/disable inlay hints for implicit type parameters on generic functions:
-
+```go
 	myFoo/*[int, string]*/(1, "hello")
+```
 
 **Disabled by default. Enable it by setting `"hints": {"functionTypeParameters": true}`.**
 
 ## **parameterNames**
 
 Enable/disable inlay hints for parameter names:
-
+```go
 	parseInt(/* str: */ "123", /* radix: */ 8)
+```
 
 **Disabled by default. Enable it by setting `"hints": {"parameterNames": true}`.**
 
 ## **rangeVariableTypes**
 
 Enable/disable inlay hints for variable types in range statements:
-
-	for k/* int*/, v/* string/* := range []string{} {
+```go
+	for k/* int*/, v/* string*/ := range []string{} {
 		fmt.Println(k, v)
 	}
+```
 
 **Disabled by default. Enable it by setting `"hints": {"rangeVariableTypes": true}`.**
 
diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md
index 0ed0e19..890a0a3 100644
--- a/gopls/doc/settings.md
+++ b/gopls/doc/settings.md
@@ -215,6 +215,22 @@
 
 Default: `false`.
 
+#### **noSemanticString** *bool*
+
+**This setting is experimental and may be deleted.**
+
+noSemanticString turns off the sending of the semantic token 'string'
+
+Default: `false`.
+
+#### **noSemanticNumber** *bool*
+
+**This setting is experimental and may be deleted.**
+
+noSemanticNumber  turns off the sending of the semantic token 'number'
+
+Default: `false`.
+
 #### Completion
 
 ##### **usePlaceholders** *bool*
@@ -363,6 +379,9 @@
 
 If company chooses to use its own `godoc.org`, its address can be used as well.
 
+Modules matching the GOPRIVATE environment variable will not have
+documentation links in hover.
+
 Default: `"pkg.go.dev"`.
 
 ##### **linksInHover** *bool*
@@ -452,6 +471,16 @@
 
 <!-- END User: DO NOT MANUALLY EDIT THIS SECTION -->
 
+#### **newDiff** *string*
+
+newDiff enables the new diff implementation. If this is "both",
+for now both diffs will be run and statistics will be generateted in
+a file in $TMPDIR. This is a risky setting; help in trying it
+is appreciated. If it is "old" the old implementation is used,
+and if it is "new", just the new implementation is used.
+
+Default: 'old'.
+
 ## Code Lenses
 
 These are the code lenses that `gopls` currently supports. They can be enabled
@@ -474,6 +503,11 @@
 Identifier: `regenerate_cgo`
 
 Regenerates cgo definitions.
+### **Run vulncheck (experimental)**
+
+Identifier: `run_vulncheck_exp`
+
+Run vulnerability check (`govulncheck`).
 ### **Run test(s) (legacy)**
 
 Identifier: `test`
diff --git a/gopls/go.mod b/gopls/go.mod
index 8e95f8e..3fcbcb4 100644
--- a/gopls/go.mod
+++ b/gopls/go.mod
@@ -3,24 +3,25 @@
 go 1.18
 
 require (
-	github.com/google/go-cmp v0.5.7
+	github.com/google/go-cmp v0.5.8
 	github.com/jba/printsrc v0.2.2
 	github.com/jba/templatecheck v0.6.0
 	github.com/sergi/go-diff v1.1.0
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20220209214540-3681064d5158
+	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
 	golang.org/x/tools v0.1.12-0.20220713141851-7464a5a40219
-	golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c
+	golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df
 	honnef.co/go/tools v0.3.2
-	mvdan.cc/gofumpt v0.3.0
+	mvdan.cc/gofumpt v0.3.1
 	mvdan.cc/xurls/v2 v2.4.0
 )
 
 require (
-	github.com/BurntSushi/toml v1.0.0 // indirect
+	github.com/BurntSushi/toml v1.2.0 // indirect
 	github.com/google/safehtml v0.0.2 // indirect
-	golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
+	golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e // indirect
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
 	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 )
+
+replace golang.org/x/tools => ../
diff --git a/gopls/go.sum b/gopls/go.sum
index 8409956..ecd3f4d 100644
--- a/gopls/go.sum
+++ b/gopls/go.sum
@@ -1,7 +1,7 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
-github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -13,8 +13,9 @@
 github.com/google/go-cmdtest v0.4.0/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM=
 github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
@@ -40,59 +41,36 @@
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
 golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e h1:7Xs2YCOpMlNqSQSmrrnhlzBXIE/bpMecZplbLePTJvE=
+golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
-golang.org/x/tools v0.1.11-0.20220523181440-ccb10502d1a5/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
-golang.org/x/tools v0.1.12-0.20220713141851-7464a5a40219 h1:Ljlba2fVWOA1049JjsKii44g8nZN2GjpxMlzVc8AnQM=
-golang.org/x/tools v0.1.12-0.20220713141851-7464a5a40219/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
-golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c h1:r5bbIROBQtRRgoutV8Q3sFY58VzHW6jMBYl48ANSyS4=
-golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df h1:BkeW9/QJhcigekDUPS9N9bIb0v7gPKKmLYeczVAqr2s=
+golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -104,8 +82,8 @@
 honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
 honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=
 honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
-mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4=
-mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo=
+mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=
+mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE=
 mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=
 mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY=
 mvdan.cc/xurls/v2 v2.4.0 h1:tzxjVAj+wSBmDcF6zBB7/myTy3gX9xvi8Tyr28AuQgc=
diff --git a/gopls/internal/govulncheck/README.md b/gopls/internal/govulncheck/README.md
index d8339c5..bc10d8a 100644
--- a/gopls/internal/govulncheck/README.md
+++ b/gopls/internal/govulncheck/README.md
@@ -15,3 +15,5 @@
 2. cd to this directory.
 
 3. Run `copy.sh`.
+
+4. Re-add build tags for go1.18
\ No newline at end of file
diff --git a/gopls/internal/govulncheck/source.go b/gopls/internal/govulncheck/source.go
index 23028b9..d51fe8c 100644
--- a/gopls/internal/govulncheck/source.go
+++ b/gopls/internal/govulncheck/source.go
@@ -8,13 +8,11 @@
 package govulncheck
 
 import (
-	"context"
 	"fmt"
 	"sort"
 	"strings"
 
 	"golang.org/x/tools/go/packages"
-	"golang.org/x/vuln/client"
 	"golang.org/x/vuln/vulncheck"
 )
 
@@ -57,26 +55,6 @@
 	return vpkgs, err
 }
 
-// Source calls vulncheck.Source on the Go source in pkgs. It returns the result
-// with Vulns trimmed to those that are actually called.
-//
-// This function is being used by the Go IDE team.
-func Source(ctx context.Context, pkgs []*vulncheck.Package, c client.Client) (*vulncheck.Result, error) {
-	r, err := vulncheck.Source(ctx, pkgs, &vulncheck.Config{Client: c})
-	if err != nil {
-		return nil, err
-	}
-	// Keep only the vulns that are called.
-	var vulns []*vulncheck.Vuln
-	for _, v := range r.Vulns {
-		if v.CallSink != 0 {
-			vulns = append(vulns, v)
-		}
-	}
-	r.Vulns = vulns
-	return r, nil
-}
-
 // CallInfo is information about calls to vulnerable functions.
 type CallInfo struct {
 	// CallStacks contains all call stacks to vulnerable functions.
diff --git a/gopls/internal/hooks/diff.go b/gopls/internal/hooks/diff.go
index a307ba7..e0461a1 100644
--- a/gopls/internal/hooks/diff.go
+++ b/gopls/internal/hooks/diff.go
@@ -5,13 +5,177 @@
 package hooks
 
 import (
+	"crypto/rand"
+	"encoding/json"
 	"fmt"
+	"io"
+	"log"
+	"math/big"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+	"unicode"
 
 	"github.com/sergi/go-diff/diffmatchpatch"
 	"golang.org/x/tools/internal/lsp/diff"
 	"golang.org/x/tools/internal/span"
 )
 
+// structure for saving information about diffs
+// while the new code is being rolled out
+type diffstat struct {
+	Before, After      int
+	Oldedits, Newedits int
+	Oldtime, Newtime   time.Duration
+	Stack              string
+	Msg                string `json:",omitempty"` // for errors
+	Ignored            int    `json:",omitempty"` // numbr of skipped records with 0 edits
+}
+
+var (
+	mu      sync.Mutex // serializes writes and protects ignored
+	difffd  io.Writer
+	ignored int // lots of the diff calls have 0 diffs
+)
+
+var fileonce sync.Once
+
+func (s *diffstat) save() {
+	// save log records in a file in os.TempDir().
+	// diff is frequently called with identical strings, so
+	// these are somewhat compressed out
+	fileonce.Do(func() {
+		fname := filepath.Join(os.TempDir(), fmt.Sprintf("gopls-diff-%x", os.Getpid()))
+		fd, err := os.Create(fname)
+		if err != nil {
+			// now what?
+		}
+		difffd = fd
+	})
+
+	mu.Lock()
+	defer mu.Unlock()
+	if s.Oldedits == 0 && s.Newedits == 0 {
+		if ignored < 15 {
+			// keep track of repeated instances of no diffs
+			// but only print every 15th
+			ignored++
+			return
+		}
+		s.Ignored = ignored + 1
+	} else {
+		s.Ignored = ignored
+	}
+	ignored = 0
+	// it would be really nice to see why diff was called
+	_, f, l, ok := runtime.Caller(2)
+	if ok {
+		var fname string
+		fname = filepath.Base(f) // diff is only called from a few places
+		s.Stack = fmt.Sprintf("%s:%d", fname, l)
+	}
+	x, err := json.Marshal(s)
+	if err != nil {
+		log.Print(err) // failure to print statistics should not stop gopls
+	}
+	fmt.Fprintf(difffd, "%s\n", x)
+}
+
+// save encrypted versions of the broken input and return the file name
+// (the saved strings will have the same diff behavior as the user's strings)
+func disaster(before, after string) string {
+	// encrypt before and after for privacy. (randomized monoalphabetic cipher)
+	// got will contain the substitution cipher
+	// for the runes in before and after
+	got := map[rune]rune{}
+	for _, r := range before {
+		got[r] = ' ' // value doesn't matter
+	}
+	for _, r := range after {
+		got[r] = ' '
+	}
+	repl := initrepl(len(got))
+	i := 0
+	for k := range got { // randomized
+		got[k] = repl[i]
+		i++
+	}
+	// use got to encrypt before and after
+	subst := func(r rune) rune { return got[r] }
+	first := strings.Map(subst, before)
+	second := strings.Map(subst, after)
+
+	// one failure per session is enough, and more private.
+	// this saves the last one.
+	fname := fmt.Sprintf("%s/gopls-failed-%x", os.TempDir(), os.Getpid())
+	fd, err := os.Create(fname)
+	defer fd.Close()
+	_, err = fd.Write([]byte(fmt.Sprintf("%s\n%s\n", string(first), string(second))))
+	if err != nil {
+		// what do we tell the user?
+		return ""
+	}
+	// ask the user to send us the file, somehow
+	return fname
+}
+
+func initrepl(n int) []rune {
+	repl := make([]rune, 0, n)
+	for r := rune(0); len(repl) < n; r++ {
+		if unicode.IsLetter(r) || unicode.IsNumber(r) {
+			repl = append(repl, r)
+		}
+	}
+	// randomize repl
+	rdr := rand.Reader
+	lim := big.NewInt(int64(len(repl)))
+	for i := 1; i < n; i++ {
+		v, _ := rand.Int(rdr, lim)
+		k := v.Int64()
+		repl[i], repl[k] = repl[k], repl[i]
+	}
+	return repl
+}
+
+// BothDiffs edits calls both the new and old diffs, checks that the new diffs
+// change before into after, and attempts to preserve some statistics.
+func BothDiffs(uri span.URI, before, after string) (edits []diff.TextEdit, err error) {
+	// The new diff code contains a lot of internal checks that panic when they
+	// fail. This code catches the panics, or other failures, tries to save
+	// the failing example (and ut wiykd ask the user to send it back to us, and
+	// changes options.newDiff to 'old', if only we could figure out how.)
+	stat := diffstat{Before: len(before), After: len(after)}
+	now := time.Now()
+	Oldedits, oerr := ComputeEdits(uri, before, after)
+	if oerr != nil {
+		stat.Msg += fmt.Sprintf("old:%v", oerr)
+	}
+	stat.Oldedits = len(Oldedits)
+	stat.Oldtime = time.Since(now)
+	defer func() {
+		if r := recover(); r != nil {
+			disaster(before, after)
+			edits, err = Oldedits, oerr
+		}
+	}()
+	now = time.Now()
+	Newedits, rerr := diff.NComputeEdits(uri, before, after)
+	stat.Newedits = len(Newedits)
+	stat.Newtime = time.Now().Sub(now)
+	got := diff.ApplyEdits(before, Newedits)
+	if got != after {
+		stat.Msg += "FAIL"
+		disaster(before, after)
+		stat.save()
+		return Oldedits, oerr
+	}
+	stat.save()
+	return Newedits, rerr
+}
+
 func ComputeEdits(uri span.URI, before, after string) (edits []diff.TextEdit, err error) {
 	// The go-diff library has an unresolved panic (see golang/go#278774).
 	// TODO(rstambler): Remove the recover once the issue has been fixed
diff --git a/gopls/internal/hooks/diff_test.go b/gopls/internal/hooks/diff_test.go
index d979be7..a9e5367 100644
--- a/gopls/internal/hooks/diff_test.go
+++ b/gopls/internal/hooks/diff_test.go
@@ -2,15 +2,56 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package hooks_test
+package hooks
 
 import (
+	"fmt"
+	"io/ioutil"
+	"os"
 	"testing"
+	"unicode/utf8"
 
-	"golang.org/x/tools/gopls/internal/hooks"
 	"golang.org/x/tools/internal/lsp/diff/difftest"
 )
 
 func TestDiff(t *testing.T) {
-	difftest.DiffTest(t, hooks.ComputeEdits)
+	difftest.DiffTest(t, ComputeEdits)
+}
+
+func TestRepl(t *testing.T) {
+	t.Skip("just for checking repl by looking at it")
+	repl := initrepl(800)
+	t.Errorf("%q", string(repl))
+	t.Errorf("%d", len(repl))
+}
+
+func TestDisaster(t *testing.T) {
+	a := "This is a string,(\u0995) just for basic functionality"
+	b := "Ths is another string, (\u0996) to see if disaster will store stuff correctly"
+	fname := disaster(a, b)
+	buf, err := ioutil.ReadFile(fname)
+	if err != nil {
+		t.Errorf("error %v reading %s", err, fname)
+	}
+	var x, y string
+	n, err := fmt.Sscanf(string(buf), "%s\n%s\n", &x, &y)
+	if n != 2 {
+		t.Errorf("got %d, expected 2", n)
+		t.Logf("read %q", string(buf))
+	}
+	if a == x || b == y {
+		t.Error("failed to encrypt")
+	}
+	err = os.Remove(fname)
+	if err != nil {
+		t.Errorf("%v removing %s", err, fname)
+	}
+	alen, blen := utf8.RuneCount([]byte(a)), utf8.RuneCount([]byte(b))
+	xlen, ylen := utf8.RuneCount([]byte(x)), utf8.RuneCount([]byte(y))
+	if alen != xlen {
+		t.Errorf("a; got %d, expected %d", xlen, alen)
+	}
+	if blen != ylen {
+		t.Errorf("b: got %d expected %d", ylen, blen)
+	}
 }
diff --git a/gopls/internal/hooks/hooks.go b/gopls/internal/hooks/hooks.go
index 023aefe..b55917e 100644
--- a/gopls/internal/hooks/hooks.go
+++ b/gopls/internal/hooks/hooks.go
@@ -11,6 +11,7 @@
 	"context"
 
 	"golang.org/x/tools/gopls/internal/vulncheck"
+	"golang.org/x/tools/internal/lsp/diff"
 	"golang.org/x/tools/internal/lsp/source"
 	"mvdan.cc/gofumpt/format"
 	"mvdan.cc/xurls/v2"
@@ -19,7 +20,14 @@
 func Options(options *source.Options) {
 	options.LicensesText = licensesText
 	if options.GoDiff {
-		options.ComputeEdits = ComputeEdits
+		switch options.NewDiff {
+		case "old":
+			options.ComputeEdits = ComputeEdits
+		case "new":
+			options.ComputeEdits = diff.NComputeEdits
+		default:
+			options.ComputeEdits = BothDiffs
+		}
 	}
 	options.URLRegexp = xurls.Relaxed()
 	options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) {
diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go
index 22f157f..a3780f0 100644
--- a/gopls/internal/regtest/bench/bench_test.go
+++ b/gopls/internal/regtest/bench/bench_test.go
@@ -5,220 +5,209 @@
 package bench
 
 import (
+	"context"
 	"flag"
 	"fmt"
+	"io/ioutil"
+	"log"
 	"os"
-	"runtime"
-	"runtime/pprof"
+	"os/exec"
+	"sync"
 	"testing"
+	"time"
 
 	"golang.org/x/tools/gopls/internal/hooks"
+	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/fakenet"
+	"golang.org/x/tools/internal/jsonrpc2"
+	"golang.org/x/tools/internal/jsonrpc2/servertest"
 	"golang.org/x/tools/internal/lsp/bug"
+	"golang.org/x/tools/internal/lsp/cache"
 	"golang.org/x/tools/internal/lsp/fake"
-	. "golang.org/x/tools/internal/lsp/regtest"
+	"golang.org/x/tools/internal/lsp/lsprpc"
+	"golang.org/x/tools/internal/lsp/regtest"
 
-	"golang.org/x/tools/internal/lsp/protocol"
+	. "golang.org/x/tools/internal/lsp/regtest"
 )
 
+// This package implements benchmarks that share a common editor session.
+//
+// It is a work-in-progress.
+//
+// Remaining TODO(rfindley):
+//   - add detailed documentation for how to write a benchmark, as a package doc
+//   - add benchmarks for more features
+//   - eliminate flags, and just run benchmarks on with a predefined set of
+//     arguments
+
 func TestMain(m *testing.M) {
 	bug.PanicOnBugs = true
-	Main(m, hooks.Options)
+	event.SetExporter(nil) // don't log to stderr
+	code := doMain(m)
+	os.Exit(code)
 }
 
-func benchmarkOptions(dir string) []RunOption {
-	return []RunOption{
-		// Run in an existing directory, since we're trying to simulate known cases
-		// that cause gopls memory problems.
-		InExistingDir(dir),
-		// Skip logs as they buffer up memory unnaturally.
-		SkipLogs(),
-		// The Debug server only makes sense if running in singleton mode.
-		Modes(Singleton),
-		// Remove the default timeout. Individual tests should control their
-		// own graceful termination.
-		NoDefaultTimeout(),
-
-		// Use the actual proxy, since we want our builds to succeed.
-		GOPROXY("https://proxy.golang.org"),
-	}
-}
-
-func printBenchmarkResults(result testing.BenchmarkResult) {
-	fmt.Printf("BenchmarkStatistics\t%s\t%s\n", result.String(), result.MemString())
-}
-
-var iwlOptions struct {
-	workdir string
-}
-
-func init() {
-	flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory")
-}
-
-func TestBenchmarkIWL(t *testing.T) {
-	if iwlOptions.workdir == "" {
-		t.Skip("-iwl_workdir not configured")
-	}
-
-	opts := stressTestOptions(iwlOptions.workdir)
-	// Don't skip hooks, so that we can wait for IWL.
-	opts = append(opts, SkipHooks(false))
-
-	results := testing.Benchmark(func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {})
-
-		}
-	})
-
-	printBenchmarkResults(results)
-}
-
-var symbolOptions struct {
-	workdir, query, matcher, style string
-	printResults                   bool
-}
-
-func init() {
-	flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory")
-	flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark")
-	flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark")
-	flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark")
-	flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results")
-}
-
-func TestBenchmarkSymbols(t *testing.T) {
-	if symbolOptions.workdir == "" {
-		t.Skip("-symbol_workdir not configured")
-	}
-
-	opts := benchmarkOptions(symbolOptions.workdir)
-	conf := EditorConfig{}
-	if symbolOptions.matcher != "" {
-		conf.SymbolMatcher = &symbolOptions.matcher
-	}
-	if symbolOptions.style != "" {
-		conf.SymbolStyle = &symbolOptions.style
-	}
-	opts = append(opts, conf)
-
-	WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
-		// We can't Await in this test, since we have disabled hooks. Instead, run
-		// one symbol request to completion to ensure all necessary cache entries
-		// are populated.
-		symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
-			Query: symbolOptions.query,
-		})
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if symbolOptions.printResults {
-			fmt.Println("Results:")
-			for i := 0; i < len(symbols); i++ {
-				fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
-			}
-		}
-
-		results := testing.Benchmark(func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
-					Query: symbolOptions.query,
-				}); err != nil {
-					t.Fatal(err)
+func doMain(m *testing.M) (code int) {
+	defer func() {
+		if editor != nil {
+			if err := editor.Close(context.Background()); err != nil {
+				fmt.Fprintf(os.Stderr, "closing editor: %v", err)
+				if code == 0 {
+					code = 1
 				}
 			}
-		})
-		printBenchmarkResults(results)
-	})
+		}
+		if tempDir != "" {
+			if err := os.RemoveAll(tempDir); err != nil {
+				fmt.Fprintf(os.Stderr, "cleaning temp dir: %v", err)
+				if code == 0 {
+					code = 1
+				}
+			}
+		}
+	}()
+	return m.Run()
 }
 
 var (
-	benchDir     = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set didchange_file.")
-	benchFile    = flag.String("didchange_file", "", "The file to modify")
-	benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
+	workdir   = flag.String("workdir", "", "if set, working directory to use for benchmarks; overrides -repo and -commit")
+	repo      = flag.String("repo", "https://go.googlesource.com/tools", "if set (and -workdir is unset), run benchmarks in this repo")
+	file      = flag.String("file", "go/ast/astutil/util.go", "active file, for benchmarks that operate on a file")
+	commitish = flag.String("commit", "gopls/v0.9.0", "if set (and -workdir is unset), run benchmarks at this commit")
+
+	goplsPath = flag.String("gopls", "", "if set, use this gopls for testing")
+
+	// If non-empty, tempDir is a temporary working dir that was created by this
+	// test suite.
+	setupDirOnce sync.Once
+	tempDir      string
+
+	setupEditorOnce sync.Once
+	sandbox         *fake.Sandbox
+	editor          *fake.Editor
+	awaiter         *regtest.Awaiter
 )
 
-// TestBenchmarkDidChange benchmarks modifications of a single file by making
-// synthetic modifications in a comment. It controls pacing by waiting for the
-// server to actually start processing the didChange notification before
-// proceeding. Notably it does not wait for diagnostics to complete.
+// benchmarkDir returns the directory to use for benchmarks.
 //
-// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir
-// is the path to a workspace root, and -didchange_file is the
-// workspace-relative path to a file to modify. e.g.:
-//
-//	go test -run=TestBenchmarkDidChange \
-//	 -didchange_dir=path/to/kubernetes \
-//	 -didchange_file=pkg/util/hash/hash.go
-func TestBenchmarkDidChange(t *testing.T) {
-	if *benchDir == "" {
-		t.Skip("-didchange_dir is not set")
+// If -workdir is set, just use that directory. Otherwise, check out a shallow
+// copy of -repo at the given -commit, and clean up when the test suite exits.
+func benchmarkDir() string {
+	if *workdir != "" {
+		return *workdir
 	}
-	if *benchFile == "" {
-		t.Fatal("-didchange_file must be set if -didchange_dir is set")
-	}
-
-	opts := benchmarkOptions(*benchDir)
-	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
-		env.OpenFile(*benchFile)
-		env.Await(env.DoneWithOpen())
-		// Insert the text we'll be modifying at the top of the file.
-		env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
-
-		// Run the profiler after the initial load,
-		// across all benchmark iterations.
-		if *benchProfile != "" {
-			profile, err := os.Create(*benchProfile)
-			if err != nil {
-				t.Fatal(err)
-			}
-			defer profile.Close()
-			if err := pprof.StartCPUProfile(profile); err != nil {
-				t.Fatal(err)
-			}
-			defer pprof.StopCPUProfile()
+	setupDirOnce.Do(func() {
+		if *repo == "" {
+			log.Fatal("-repo must be provided")
 		}
 
-		result := testing.Benchmark(func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				env.EditBuffer(*benchFile, fake.Edit{
-					Start: fake.Pos{Line: 0, Column: 0},
-					End:   fake.Pos{Line: 1, Column: 0},
-					// Increment
-					Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
-				})
-				env.Await(StartedChange(uint64(i + 1)))
-			}
-		})
-		printBenchmarkResults(result)
+		if *commitish == "" {
+			log.Fatal("-commit must be provided")
+		}
+
+		var err error
+		tempDir, err = ioutil.TempDir("", "gopls-bench")
+		if err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("checking out %s@%s to %s\n", *repo, *commitish, tempDir)
+
+		// Set a timeout for git fetch. If this proves flaky, it can be removed.
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+		defer cancel()
+
+		// Use a shallow fetch to download just the releveant commit.
+		shInit := fmt.Sprintf("git init && git fetch --depth=1 %q %q && git checkout FETCH_HEAD", *repo, *commitish)
+		initCmd := exec.CommandContext(ctx, "/bin/sh", "-c", shInit)
+		initCmd.Dir = tempDir
+		if err := initCmd.Run(); err != nil {
+			log.Fatalf("checking out %s: %v", *repo, err)
+		}
 	})
+	return tempDir
 }
 
-// TestPrintMemStats measures the memory usage of loading a project.
-// It uses the same -didchange_dir flag as above.
-// Always run it in isolation since it measures global heap usage.
-//
-// Kubernetes example:
-//   $ go test -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes
-//   TotalAlloc:      5766 MB
-//   HeapAlloc:       1984 MB
-//
-// Both figures exhibit variance of less than 1%.
-func TestPrintMemStats(t *testing.T) {
-	if *benchDir == "" {
-		t.Skip("-didchange_dir is not set")
+// benchmarkEnv returns a shared benchmark environment
+func benchmarkEnv(tb testing.TB) *Env {
+	setupEditorOnce.Do(func() {
+		dir := benchmarkDir()
+
+		var err error
+		sandbox, editor, awaiter, err = connectEditor(dir)
+		if err != nil {
+			log.Fatalf("connecting editor: %v", err)
+		}
+
+		if err := awaiter.Await(context.Background(), InitialWorkspaceLoad); err != nil {
+			panic(err)
+		}
+	})
+
+	return &Env{
+		T:       tb,
+		Ctx:     context.Background(),
+		Editor:  editor,
+		Sandbox: sandbox,
+		Awaiter: awaiter,
+	}
+}
+
+// connectEditor connects a fake editor session in the given dir, using the
+// given editor config.
+func connectEditor(dir string) (*fake.Sandbox, *fake.Editor, *regtest.Awaiter, error) {
+	s, err := fake.NewSandbox(&fake.SandboxConfig{
+		Workdir: dir,
+		GOPROXY: "https://proxy.golang.org",
+	})
+	if err != nil {
+		return nil, nil, nil, err
 	}
 
-	// Load the program...
-	opts := benchmarkOptions(*benchDir)
-	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
-		// ...and print the memory usage.
-		runtime.GC()
-		runtime.GC()
-		var mem runtime.MemStats
-		runtime.ReadMemStats(&mem)
-		t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
-		t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
-	})
+	a := regtest.NewAwaiter(s.Workdir)
+	ts := getServer()
+	e, err := fake.NewEditor(s, fake.EditorConfig{}).Connect(context.Background(), ts, a.Hooks())
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	return s, e, a, nil
+}
+
+// getServer returns a server connector that either starts a new in-process
+// server, or starts a separate gopls process.
+func getServer() servertest.Connector {
+	if *goplsPath != "" {
+		return &SidecarServer{*goplsPath}
+	}
+	server := lsprpc.NewStreamServer(cache.New(nil, nil, hooks.Options), false)
+	return servertest.NewPipeServer(server, jsonrpc2.NewRawStream)
+}
+
+// A SidecarServer starts (and connects to) a separate gopls process at the
+// given path.
+type SidecarServer struct {
+	goplsPath string
+}
+
+// Connect creates new io.Pipes and binds them to the underlying StreamServer.
+func (s *SidecarServer) Connect(ctx context.Context) jsonrpc2.Conn {
+	cmd := exec.CommandContext(ctx, *goplsPath, "serve")
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		log.Fatal(err)
+	}
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		log.Fatal(err)
+	}
+	cmd.Stderr = os.Stdout
+	if err := cmd.Start(); err != nil {
+		log.Fatalf("starting gopls: %v", err)
+	}
+
+	go cmd.Wait() // to free resources; error is ignored
+
+	clientStream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", stdout, stdin))
+	clientConn := jsonrpc2.NewConn(clientStream)
+	return clientConn
 }
diff --git a/gopls/internal/regtest/bench/completion_bench_test.go b/gopls/internal/regtest/bench/completion_bench_test.go
deleted file mode 100644
index f9b8445..0000000
--- a/gopls/internal/regtest/bench/completion_bench_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bench
-
-import (
-	"flag"
-	"fmt"
-	"strings"
-	"testing"
-
-	. "golang.org/x/tools/internal/lsp/regtest"
-
-	"golang.org/x/tools/internal/lsp/fake"
-)
-
-// dummyCompletionFunction to test manually configured completion using CLI.
-func dummyCompletionFunction() { const s = "placeholder"; fmt.Printf("%s", s) }
-
-type completionBenchOptions struct {
-	workdir, file, locationRegexp string
-	printResults                  bool
-	// hook to run edits before initial completion, not supported for manually
-	// configured completions.
-	preCompletionEdits func(*Env)
-}
-
-var completionOptions = completionBenchOptions{}
-
-func init() {
-	flag.StringVar(&completionOptions.workdir, "completion_workdir", "", "directory to run completion benchmarks in")
-	flag.StringVar(&completionOptions.file, "completion_file", "", "relative path to the file to complete in")
-	flag.StringVar(&completionOptions.locationRegexp, "completion_regexp", "", "regexp location to complete at")
-	flag.BoolVar(&completionOptions.printResults, "completion_print_results", false, "whether to print completion results")
-}
-
-func benchmarkCompletion(options completionBenchOptions, t *testing.T) {
-	if completionOptions.workdir == "" {
-		t.Skip("-completion_workdir not configured, skipping benchmark")
-	}
-
-	opts := stressTestOptions(options.workdir)
-
-	// Completion gives bad results if IWL is not yet complete, so we must await
-	// it first (and therefore need hooks).
-	opts = append(opts, SkipHooks(false))
-
-	WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
-		env.OpenFile(options.file)
-
-		// Run edits required for this completion.
-		if options.preCompletionEdits != nil {
-			options.preCompletionEdits(env)
-		}
-
-		// Run a completion to make sure the system is warm.
-		pos := env.RegexpSearch(options.file, options.locationRegexp)
-		completions := env.Completion(options.file, pos)
-
-		if options.printResults {
-			fmt.Println("Results:")
-			for i := 0; i < len(completions.Items); i++ {
-				fmt.Printf("\t%d. %v\n", i, completions.Items[i])
-			}
-		}
-
-		results := testing.Benchmark(func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				env.Completion(options.file, pos)
-			}
-		})
-
-		printBenchmarkResults(results)
-	})
-}
-
-// endPosInBuffer returns the position for last character in the buffer for
-// the given file.
-func endPosInBuffer(env *Env, name string) fake.Pos {
-	buffer := env.Editor.BufferText(name)
-	lines := strings.Split(buffer, "\n")
-	numLines := len(lines)
-
-	return fake.Pos{
-		Line:   numLines - 1,
-		Column: len([]rune(lines[numLines-1])),
-	}
-}
-
-// Benchmark completion at a specified file and location. When no CLI options
-// are specified, this test is skipped.
-// To Run (from x/tools/gopls) against the dummy function above:
-//
-//	go test -v ./internal/regtest/bench -run=TestBenchmarkConfiguredCompletion
-//	-completion_workdir="$HOME/Developer/tools"
-//	-completion_file="gopls/internal/regtest/completion_bench_test.go"
-//	-completion_regexp="dummyCompletionFunction.*fmt\.Printf\(\"%s\", s(\))"
-func TestBenchmarkConfiguredCompletion(t *testing.T) {
-	benchmarkCompletion(completionOptions, t)
-}
-
-// To run (from x/tools/gopls):
-// 	go test -v ./internal/regtest/bench -run TestBenchmark<>Completion
-//	-completion_workdir="$HOME/Developer/tools"
-// where <> is one of the tests below. completion_workdir should be path to
-// x/tools on your system.
-
-// Benchmark struct completion in tools codebase.
-func TestBenchmarkStructCompletion(t *testing.T) {
-	file := "internal/lsp/cache/session.go"
-
-	preCompletionEdits := func(env *Env) {
-		env.OpenFile(file)
-		originalBuffer := env.Editor.BufferText(file)
-		env.EditBuffer(file, fake.Edit{
-			End:  endPosInBuffer(env, file),
-			Text: originalBuffer + "\nvar testVariable map[string]bool = Session{}.\n",
-		})
-	}
-
-	benchmarkCompletion(completionBenchOptions{
-		workdir:            completionOptions.workdir,
-		file:               file,
-		locationRegexp:     `var testVariable map\[string\]bool = Session{}(\.)`,
-		preCompletionEdits: preCompletionEdits,
-		printResults:       completionOptions.printResults,
-	}, t)
-}
-
-// Benchmark import completion in tools codebase.
-func TestBenchmarkImportCompletion(t *testing.T) {
-	benchmarkCompletion(completionBenchOptions{
-		workdir:        completionOptions.workdir,
-		file:           "internal/lsp/source/completion/completion.go",
-		locationRegexp: `go\/()`,
-		printResults:   completionOptions.printResults,
-	}, t)
-}
-
-// Benchmark slice completion in tools codebase.
-func TestBenchmarkSliceCompletion(t *testing.T) {
-	file := "internal/lsp/cache/session.go"
-
-	preCompletionEdits := func(env *Env) {
-		env.OpenFile(file)
-		originalBuffer := env.Editor.BufferText(file)
-		env.EditBuffer(file, fake.Edit{
-			End:  endPosInBuffer(env, file),
-			Text: originalBuffer + "\nvar testVariable []byte = \n",
-		})
-	}
-
-	benchmarkCompletion(completionBenchOptions{
-		workdir:            completionOptions.workdir,
-		file:               file,
-		locationRegexp:     `var testVariable \[\]byte (=)`,
-		preCompletionEdits: preCompletionEdits,
-		printResults:       completionOptions.printResults,
-	}, t)
-}
-
-// Benchmark deep completion in function call in tools codebase.
-func TestBenchmarkFuncDeepCompletion(t *testing.T) {
-	file := "internal/lsp/source/completion/completion.go"
-	fileContent := `
-func (c *completer) _() {
-	c.inference.kindMatches(c.)
-}
-`
-	preCompletionEdits := func(env *Env) {
-		env.OpenFile(file)
-		originalBuffer := env.Editor.BufferText(file)
-		env.EditBuffer(file, fake.Edit{
-			End:  endPosInBuffer(env, file),
-			Text: originalBuffer + fileContent,
-		})
-	}
-
-	benchmarkCompletion(completionBenchOptions{
-		workdir:            completionOptions.workdir,
-		file:               file,
-		locationRegexp:     `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`,
-		preCompletionEdits: preCompletionEdits,
-		printResults:       completionOptions.printResults,
-	}, t)
-}
diff --git a/gopls/internal/regtest/bench/completion_test.go b/gopls/internal/regtest/bench/completion_test.go
new file mode 100644
index 0000000..cdafb08
--- /dev/null
+++ b/gopls/internal/regtest/bench/completion_test.go
@@ -0,0 +1,161 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+
+	"golang.org/x/tools/internal/lsp/fake"
+)
+
+type completionBenchOptions struct {
+	file, locationRegexp string
+
+	// hook to run edits before initial completion
+	preCompletionEdits func(*Env)
+}
+
+func benchmarkCompletion(options completionBenchOptions, b *testing.B) {
+	dir := benchmarkDir()
+
+	// Use a new environment for each test, to avoid any existing state from the
+	// previous session.
+	sandbox, editor, awaiter, err := connectEditor(dir)
+	if err != nil {
+		b.Fatal(err)
+	}
+	ctx := context.Background()
+	defer func() {
+		if err := editor.Close(ctx); err != nil {
+			b.Errorf("closing editor: %v", err)
+		}
+	}()
+
+	env := &Env{
+		T:       b,
+		Ctx:     ctx,
+		Editor:  editor,
+		Sandbox: sandbox,
+		Awaiter: awaiter,
+	}
+	env.OpenFile(options.file)
+
+	// Run edits required for this completion.
+	if options.preCompletionEdits != nil {
+		options.preCompletionEdits(env)
+	}
+
+	// Run a completion to make sure the system is warm.
+	pos := env.RegexpSearch(options.file, options.locationRegexp)
+	completions := env.Completion(options.file, pos)
+
+	if testing.Verbose() {
+		fmt.Println("Results:")
+		for i := 0; i < len(completions.Items); i++ {
+			fmt.Printf("\t%d. %v\n", i, completions.Items[i])
+		}
+	}
+
+	b.ResetTimer()
+
+	// Use a subtest to ensure that benchmarkCompletion does not itself get
+	// executed multiple times (as it is doing expensive environment
+	// initialization).
+	b.Run("completion", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			env.Completion(options.file, pos)
+		}
+	})
+}
+
+// endPosInBuffer returns the position for last character in the buffer for
+// the given file.
+func endPosInBuffer(env *Env, name string) fake.Pos {
+	buffer := env.Editor.BufferText(name)
+	lines := strings.Split(buffer, "\n")
+	numLines := len(lines)
+
+	return fake.Pos{
+		Line:   numLines - 1,
+		Column: len([]rune(lines[numLines-1])),
+	}
+}
+
+// Benchmark struct completion in tools codebase.
+func BenchmarkStructCompletion(b *testing.B) {
+	file := "internal/lsp/cache/session.go"
+
+	preCompletionEdits := func(env *Env) {
+		env.OpenFile(file)
+		originalBuffer := env.Editor.BufferText(file)
+		env.EditBuffer(file, fake.Edit{
+			End:  endPosInBuffer(env, file),
+			Text: originalBuffer + "\nvar testVariable map[string]bool = Session{}.\n",
+		})
+	}
+
+	benchmarkCompletion(completionBenchOptions{
+		file:               file,
+		locationRegexp:     `var testVariable map\[string\]bool = Session{}(\.)`,
+		preCompletionEdits: preCompletionEdits,
+	}, b)
+}
+
+// Benchmark import completion in tools codebase.
+func BenchmarkImportCompletion(b *testing.B) {
+	benchmarkCompletion(completionBenchOptions{
+		file:           "internal/lsp/source/completion/completion.go",
+		locationRegexp: `go\/()`,
+	}, b)
+}
+
+// Benchmark slice completion in tools codebase.
+func BenchmarkSliceCompletion(b *testing.B) {
+	file := "internal/lsp/cache/session.go"
+
+	preCompletionEdits := func(env *Env) {
+		env.OpenFile(file)
+		originalBuffer := env.Editor.BufferText(file)
+		env.EditBuffer(file, fake.Edit{
+			End:  endPosInBuffer(env, file),
+			Text: originalBuffer + "\nvar testVariable []byte = \n",
+		})
+	}
+
+	benchmarkCompletion(completionBenchOptions{
+		file:               file,
+		locationRegexp:     `var testVariable \[\]byte (=)`,
+		preCompletionEdits: preCompletionEdits,
+	}, b)
+}
+
+// Benchmark deep completion in function call in tools codebase.
+func BenchmarkFuncDeepCompletion(b *testing.B) {
+	file := "internal/lsp/source/completion/completion.go"
+	fileContent := `
+func (c *completer) _() {
+	c.inference.kindMatches(c.)
+}
+`
+	preCompletionEdits := func(env *Env) {
+		env.OpenFile(file)
+		originalBuffer := env.Editor.BufferText(file)
+		env.EditBuffer(file, fake.Edit{
+			End:  endPosInBuffer(env, file),
+			Text: originalBuffer + fileContent,
+		})
+	}
+
+	benchmarkCompletion(completionBenchOptions{
+		file:               file,
+		locationRegexp:     `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`,
+		preCompletionEdits: preCompletionEdits,
+	}, b)
+}
diff --git a/gopls/internal/regtest/bench/didchange_test.go b/gopls/internal/regtest/bench/didchange_test.go
new file mode 100644
index 0000000..8fcf253
--- /dev/null
+++ b/gopls/internal/regtest/bench/didchange_test.go
@@ -0,0 +1,40 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"fmt"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp/fake"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+// BenchmarkDidChange benchmarks modifications of a single file by making
+// synthetic modifications in a comment. It controls pacing by waiting for the
+// server to actually start processing the didChange notification before
+// proceeding. Notably it does not wait for diagnostics to complete.
+//
+// Uses -workdir and -file to control where the edits occur.
+func BenchmarkDidChange(b *testing.B) {
+	env := benchmarkEnv(b)
+	env.OpenFile(*file)
+	env.Await(env.DoneWithOpen())
+
+	// Insert the text we'll be modifying at the top of the file.
+	env.EditBuffer(*file, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		env.EditBuffer(*file, fake.Edit{
+			Start: fake.Pos{Line: 0, Column: 0},
+			End:   fake.Pos{Line: 1, Column: 0},
+			// Increment the placeholder text, to ensure cache misses.
+			Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
+		})
+		env.Await(StartedChange(uint64(i + 1)))
+	}
+}
diff --git a/gopls/internal/regtest/bench/iwl_test.go b/gopls/internal/regtest/bench/iwl_test.go
new file mode 100644
index 0000000..e262a39
--- /dev/null
+++ b/gopls/internal/regtest/bench/iwl_test.go
@@ -0,0 +1,35 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"context"
+	"testing"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+// BenchmarkIWL benchmarks the initial workspace load time for a new editing
+// session.
+func BenchmarkIWL(b *testing.B) {
+	dir := benchmarkDir()
+	b.ResetTimer()
+
+	ctx := context.Background()
+	for i := 0; i < b.N; i++ {
+		_, editor, awaiter, err := connectEditor(dir)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if err := awaiter.Await(ctx, InitialWorkspaceLoad); err != nil {
+			b.Fatal(err)
+		}
+		b.StopTimer()
+		if err := editor.Close(ctx); err != nil {
+			b.Fatal(err)
+		}
+		b.StartTimer()
+	}
+}
diff --git a/gopls/internal/regtest/bench/mem_test.go b/gopls/internal/regtest/bench/mem_test.go
new file mode 100644
index 0000000..1962678
--- /dev/null
+++ b/gopls/internal/regtest/bench/mem_test.go
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"runtime"
+	"testing"
+)
+
+// TestPrintMemStats measures the memory usage of loading a project.
+// It uses the same -workdir flag as the benchmarks above.
+// Always run it in isolation since it measures global heap usage.
+//
+// Kubernetes example:
+//
+//	$ go test -v -run=TestPrintMemStats -workdir=$HOME/w/kubernetes
+//	TotalAlloc:      5766 MB
+//	HeapAlloc:       1984 MB
+//
+// Both figures exhibit variance of less than 1%.
+func TestPrintMemStats(t *testing.T) {
+	// This test only makes sense when run in isolation, so for now it is
+	// manually skipped.
+	//
+	// TODO(rfindley): figure out a better way to capture memstats as a benchmark
+	// metric.
+	t.Skip("unskip to run this test manually")
+
+	_ = benchmarkEnv(t)
+
+	runtime.GC()
+	runtime.GC()
+	var mem runtime.MemStats
+	runtime.ReadMemStats(&mem)
+	t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
+	t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
+}
diff --git a/gopls/internal/regtest/bench/stress_test.go b/gopls/internal/regtest/bench/stress_test.go
index f7e59fa..a410c30 100644
--- a/gopls/internal/regtest/bench/stress_test.go
+++ b/gopls/internal/regtest/bench/stress_test.go
@@ -11,56 +11,83 @@
 	"testing"
 	"time"
 
-	. "golang.org/x/tools/internal/lsp/regtest"
+	"golang.org/x/tools/gopls/internal/hooks"
+	"golang.org/x/tools/internal/jsonrpc2"
+	"golang.org/x/tools/internal/jsonrpc2/servertest"
+	"golang.org/x/tools/internal/lsp/cache"
+	"golang.org/x/tools/internal/lsp/fake"
+	"golang.org/x/tools/internal/lsp/lsprpc"
 )
 
-// Pilosa is a repository that has historically caused significant memory
-// problems for Gopls. We use it for a simple stress test that types
-// arbitrarily in a file with lots of dependents.
+// github.com/pilosa/pilosa is a repository that has historically caused
+// significant memory problems for Gopls. We use it for a simple stress test
+// that types arbitrarily in a file with lots of dependents.
 
 var pilosaPath = flag.String("pilosa_path", "", "Path to a directory containing "+
 	"github.com/pilosa/pilosa, for stress testing. Do not set this unless you "+
 	"know what you're doing!")
 
-func stressTestOptions(dir string) []RunOption {
-	opts := benchmarkOptions(dir)
-	opts = append(opts, SkipHooks(true), DebugAddress(":8087"))
-	return opts
-}
-
 func TestPilosaStress(t *testing.T) {
+	// TODO(rfindley): revisit this test and make it is hermetic: it should check
+	// out pilosa into a directory.
+	//
+	// Note: This stress test has not been run recently, and may no longer
+	// function properly.
 	if *pilosaPath == "" {
 		t.Skip("-pilosa_path not configured")
 	}
-	opts := stressTestOptions(*pilosaPath)
 
-	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
-		files := []string{
-			"cmd.go",
-			"internal/private.pb.go",
-			"roaring/roaring.go",
-			"roaring/roaring_internal_test.go",
-			"server/handler_test.go",
-		}
-		for _, file := range files {
-			env.OpenFile(file)
-		}
-		ctx, cancel := context.WithTimeout(env.Ctx, 10*time.Minute)
-		defer cancel()
-
-		i := 1
-		// MagicNumber is an identifier that occurs in roaring.go. Just change it
-		// arbitrarily.
-		env.RegexpReplace("roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1))
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			default:
-			}
-			env.RegexpReplace("roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1))
-			time.Sleep(20 * time.Millisecond)
-			i++
-		}
+	sandbox, err := fake.NewSandbox(&fake.SandboxConfig{
+		Workdir: *pilosaPath,
+		GOPROXY: "https://proxy.golang.org",
 	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	server := lsprpc.NewStreamServer(cache.New(nil, nil, hooks.Options), false)
+	ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream)
+	ctx := context.Background()
+
+	editor, err := fake.NewEditor(sandbox, fake.EditorConfig{}).Connect(ctx, ts, fake.ClientHooks{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	files := []string{
+		"cmd.go",
+		"internal/private.pb.go",
+		"roaring/roaring.go",
+		"roaring/roaring_internal_test.go",
+		"server/handler_test.go",
+	}
+	for _, file := range files {
+		if err := editor.OpenFile(ctx, file); err != nil {
+			t.Fatal(err)
+		}
+	}
+	ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
+	defer cancel()
+
+	i := 1
+	// MagicNumber is an identifier that occurs in roaring.go. Just change it
+	// arbitrarily.
+	if err := editor.RegexpReplace(ctx, "roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1)); err != nil {
+		t.Fatal(err)
+	}
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+		if err := editor.RegexpReplace(ctx, "roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1)); err != nil {
+			t.Fatal(err)
+		}
+		// Simulate (very fast) typing.
+		//
+		// Typing 80 wpm ~150ms per keystroke.
+		time.Sleep(150 * time.Millisecond)
+		i++
+	}
 }
diff --git a/gopls/internal/regtest/bench/workspace_symbols_test.go b/gopls/internal/regtest/bench/workspace_symbols_test.go
new file mode 100644
index 0000000..fccc818
--- /dev/null
+++ b/gopls/internal/regtest/bench/workspace_symbols_test.go
@@ -0,0 +1,35 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bench
+
+import (
+	"flag"
+	"fmt"
+	"testing"
+)
+
+var symbolQuery = flag.String("symbol_query", "test", "symbol query to use in benchmark")
+
+// BenchmarkWorkspaceSymbols benchmarks the time to execute a workspace symbols
+// request (controlled by the -symbol_query flag).
+func BenchmarkWorkspaceSymbols(b *testing.B) {
+	env := benchmarkEnv(b)
+
+	// Make an initial symbol query to warm the cache.
+	symbols := env.WorkspaceSymbol(*symbolQuery)
+
+	if testing.Verbose() {
+		fmt.Println("Results:")
+		for i := 0; i < len(symbols); i++ {
+			fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
+		}
+	}
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		env.WorkspaceSymbol(*symbolQuery)
+	}
+}
diff --git a/gopls/internal/regtest/codelens/codelens_test.go b/gopls/internal/regtest/codelens/codelens_test.go
index a64f9c4..4d2be8e 100644
--- a/gopls/internal/regtest/codelens/codelens_test.go
+++ b/gopls/internal/regtest/codelens/codelens_test.go
@@ -6,8 +6,6 @@
 
 import (
 	"fmt"
-	"runtime"
-	"strings"
 	"testing"
 
 	"golang.org/x/tools/gopls/internal/hooks"
@@ -15,7 +13,6 @@
 	. "golang.org/x/tools/internal/lsp/regtest"
 
 	"golang.org/x/tools/internal/lsp/command"
-	"golang.org/x/tools/internal/lsp/fake"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/tests"
 	"golang.org/x/tools/internal/testenv"
@@ -63,9 +60,7 @@
 	for _, test := range tests {
 		t.Run(test.label, func(t *testing.T) {
 			WithOptions(
-				EditorConfig{
-					CodeLenses: test.enabled,
-				},
+				Settings{"codelenses": test.enabled},
 			).Run(t, workspace, func(t *testing.T, env *Env) {
 				env.OpenFile("lib.go")
 				lens := env.CodeLens("lib.go")
@@ -286,70 +281,3 @@
 		env.Await(EmptyDiagnostics("cgo.go"))
 	})
 }
-
-func TestGCDetails(t *testing.T) {
-	testenv.NeedsGo1Point(t, 15)
-	if runtime.GOOS == "android" {
-		t.Skipf("the gc details code lens doesn't work on Android")
-	}
-
-	const mod = `
--- go.mod --
-module mod.com
-
-go 1.15
--- main.go --
-package main
-
-import "fmt"
-
-func main() {
-	fmt.Println(42)
-}
-`
-	WithOptions(
-		EditorConfig{
-			CodeLenses: map[string]bool{
-				"gc_details": true,
-			}},
-	).Run(t, mod, func(t *testing.T, env *Env) {
-		env.OpenFile("main.go")
-		env.ExecuteCodeLensCommand("main.go", command.GCDetails)
-		d := &protocol.PublishDiagnosticsParams{}
-		env.Await(
-			OnceMet(
-				DiagnosticAt("main.go", 5, 13),
-				ReadDiagnostics("main.go", d),
-			),
-		)
-		// Confirm that the diagnostics come from the gc details code lens.
-		var found bool
-		for _, d := range d.Diagnostics {
-			if d.Severity != protocol.SeverityInformation {
-				t.Fatalf("unexpected diagnostic severity %v, wanted Information", d.Severity)
-			}
-			if strings.Contains(d.Message, "42 escapes") {
-				found = true
-			}
-		}
-		if !found {
-			t.Fatalf(`expected to find diagnostic with message "escape(42 escapes to heap)", found none`)
-		}
-
-		// Editing a buffer should cause gc_details diagnostics to disappear, since
-		// they only apply to saved buffers.
-		env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, "\n\n"))
-		env.Await(EmptyDiagnostics("main.go"))
-
-		// Saving a buffer should re-format back to the original state, and
-		// re-enable the gc_details diagnostics.
-		env.SaveBuffer("main.go")
-		env.Await(DiagnosticAt("main.go", 5, 13))
-
-		// Toggle the GC details code lens again so now it should be off.
-		env.ExecuteCodeLensCommand("main.go", command.GCDetails)
-		env.Await(
-			EmptyDiagnostics("main.go"),
-		)
-	})
-}
diff --git a/gopls/internal/regtest/codelens/gcdetails_test.go b/gopls/internal/regtest/codelens/gcdetails_test.go
new file mode 100644
index 0000000..3764888
--- /dev/null
+++ b/gopls/internal/regtest/codelens/gcdetails_test.go
@@ -0,0 +1,137 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package codelens
+
+import (
+	"runtime"
+	"strings"
+	"testing"
+
+	"golang.org/x/tools/internal/testenv"
+
+	"golang.org/x/tools/internal/lsp/bug"
+	"golang.org/x/tools/internal/lsp/command"
+	"golang.org/x/tools/internal/lsp/fake"
+	"golang.org/x/tools/internal/lsp/protocol"
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+func TestGCDetails_Toggle(t *testing.T) {
+	testenv.NeedsGo1Point(t, 15)
+	if runtime.GOOS == "android" {
+		t.Skipf("the gc details code lens doesn't work on Android")
+	}
+
+	const mod = `
+-- go.mod --
+module mod.com
+
+go 1.15
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+	fmt.Println(42)
+}
+`
+	WithOptions(
+		Settings{
+			"codelenses": map[string]bool{
+				"gc_details": true,
+			},
+		},
+	).Run(t, mod, func(t *testing.T, env *Env) {
+		env.OpenFile("main.go")
+		env.ExecuteCodeLensCommand("main.go", command.GCDetails)
+		d := &protocol.PublishDiagnosticsParams{}
+		env.Await(
+			OnceMet(
+				DiagnosticAt("main.go", 5, 13),
+				ReadDiagnostics("main.go", d),
+			),
+		)
+		// Confirm that the diagnostics come from the gc details code lens.
+		var found bool
+		for _, d := range d.Diagnostics {
+			if d.Severity != protocol.SeverityInformation {
+				t.Fatalf("unexpected diagnostic severity %v, wanted Information", d.Severity)
+			}
+			if strings.Contains(d.Message, "42 escapes") {
+				found = true
+			}
+		}
+		if !found {
+			t.Fatalf(`expected to find diagnostic with message "escape(42 escapes to heap)", found none`)
+		}
+
+		// Editing a buffer should cause gc_details diagnostics to disappear, since
+		// they only apply to saved buffers.
+		env.EditBuffer("main.go", fake.NewEdit(0, 0, 0, 0, "\n\n"))
+		env.Await(EmptyDiagnostics("main.go"))
+
+		// Saving a buffer should re-format back to the original state, and
+		// re-enable the gc_details diagnostics.
+		env.SaveBuffer("main.go")
+		env.Await(DiagnosticAt("main.go", 5, 13))
+
+		// Toggle the GC details code lens again so now it should be off.
+		env.ExecuteCodeLensCommand("main.go", command.GCDetails)
+		env.Await(
+			EmptyDiagnostics("main.go"),
+		)
+	})
+}
+
+// Test for the crasher in golang/go#54199
+func TestGCDetails_NewFile(t *testing.T) {
+	bug.PanicOnBugs = false
+	// It appears that older Go versions don't even see p.go from the initial
+	// workspace load.
+	testenv.NeedsGo1Point(t, 15)
+	const src = `
+-- go.mod --
+module mod.test
+
+go 1.12
+`
+
+	WithOptions(
+		Settings{
+			"codelenses": map[string]bool{
+				"gc_details": true,
+			},
+		},
+	).Run(t, src, func(t *testing.T, env *Env) {
+		env.CreateBuffer("p_test.go", "")
+
+		const gcDetailsCommand = "gopls." + string(command.GCDetails)
+
+		hasGCDetails := func() bool {
+			lenses := env.CodeLens("p_test.go") // should not crash
+			for _, lens := range lenses {
+				if lens.Command.Command == gcDetailsCommand {
+					return true
+				}
+			}
+			return false
+		}
+
+		// With an empty file, we shouldn't get the gc_details codelens because
+		// there is nowhere to position it (it needs a package name).
+		if hasGCDetails() {
+			t.Errorf("got the gc_details codelens for an empty file")
+		}
+
+		// Edit to provide a package name.
+		env.EditBuffer("p_test.go", fake.NewEdit(0, 0, 0, 0, "package p"))
+
+		// Now we should get the gc_details codelens.
+		if !hasGCDetails() {
+			t.Errorf("didn't get the gc_details codelens for a valid non-empty Go file")
+		}
+	})
+}
diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go
index 1ffb000..51a54c4 100644
--- a/gopls/internal/regtest/completion/completion_test.go
+++ b/gopls/internal/regtest/completion/completion_test.go
@@ -529,7 +529,7 @@
 }
 `
 	WithOptions(
-		EditorConfig{WindowsLineEndings: true},
+		WindowsLineEndings(),
 	).Run(t, src, func(t *testing.T, env *Env) {
 		// Trigger unimported completions for the example.com/blah package.
 		env.OpenFile("main.go")
diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go
index 2674d55..5486047 100644
--- a/gopls/internal/regtest/completion/postfix_snippet_test.go
+++ b/gopls/internal/regtest/completion/postfix_snippet_test.go
@@ -9,12 +9,9 @@
 	"testing"
 
 	. "golang.org/x/tools/internal/lsp/regtest"
-	"golang.org/x/tools/internal/lsp/source"
 )
 
 func TestPostfixSnippetCompletion(t *testing.T) {
-	t.Skipf("skipping test due to suspected synchronization bug; see https://go.dev/issue/50707")
-
 	const mod = `
 -- go.mod --
 module mod.com
@@ -268,6 +265,27 @@
 `,
 		},
 		{
+			name: "channel_range",
+			before: `
+package foo
+
+func _() {
+	foo := make(chan int)
+	foo.range
+}
+`,
+			after: `
+package foo
+
+func _() {
+	foo := make(chan int)
+	for e := range foo {
+	$0
+}
+}
+`,
+		},
+		{
 			name: "var",
 			before: `
 package foo
@@ -379,7 +397,7 @@
 			before: `
 package foo
 
-func foo() []string { 
+func foo() []string {
 	x := "test"
 	return x.split
 }`,
@@ -388,7 +406,7 @@
 
 import "strings"
 
-func foo() []string { 
+func foo() []string {
 	x := "test"
 	return strings.Split(x, "$0")
 }`,
@@ -414,9 +432,11 @@
 		},
 	}
 
-	r := WithOptions(Options(func(o *source.Options) {
-		o.ExperimentalPostfixCompletions = true
-	}))
+	r := WithOptions(
+		Settings{
+			"experimentalPostfixCompletions": true,
+		},
+	)
 	r.Run(t, mod, func(t *testing.T, env *Env) {
 		for _, c := range cases {
 			t.Run(c.name, func(t *testing.T) {
diff --git a/gopls/internal/regtest/debug/debug_test.go b/gopls/internal/regtest/debug/debug_test.go
index d60b3f7..bae1480 100644
--- a/gopls/internal/regtest/debug/debug_test.go
+++ b/gopls/internal/regtest/debug/debug_test.go
@@ -20,12 +20,8 @@
 	// Verify that a properly configured session gets notified of a bug on the
 	// server.
 	WithOptions(
-		Modes(Singleton), // must be in-process to receive the bug report below
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"showBugReports": true,
-			},
-		},
+		Modes(Default), // must be in-process to receive the bug report below
+		Settings{"showBugReports": true},
 	).Run(t, "", func(t *testing.T, env *Env) {
 		const desc = "got a bug"
 		bug.Report(desc, nil)
diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go
index 6f5db4c..d7246ae 100644
--- a/gopls/internal/regtest/diagnostics/diagnostics_test.go
+++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go
@@ -298,7 +298,7 @@
 
 	t.Run("without workspace module", func(t *testing.T) {
 		WithOptions(
-			Modes(Singleton),
+			Modes(Default),
 		).Run(t, noMod, func(t *testing.T, env *Env) {
 			env.Await(
 				env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`),
@@ -384,7 +384,7 @@
 			// completed.
 			OnceMet(
 				env.DoneWithChange(),
-				NoDiagnostics("a.go"),
+				EmptyDiagnostics("a.go"),
 			),
 		)
 	})
@@ -471,12 +471,11 @@
 }
 `
 	WithOptions(
-		EditorConfig{
-			Env: map[string]string{
-				"GOPATH":      "",
-				"GO111MODULE": "off",
-			},
-		}).Run(t, files, func(t *testing.T, env *Env) {
+		EnvVars{
+			"GOPATH":      "",
+			"GO111MODULE": "off",
+		},
+	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		env.Await(env.DiagnosticAtRegexp("main.go", "fmt"))
 		env.SaveBuffer("main.go")
@@ -500,8 +499,9 @@
 
 var X = 0
 `
-	editorConfig := EditorConfig{Env: map[string]string{"GOFLAGS": "-tags=foo"}}
-	WithOptions(editorConfig).Run(t, files, func(t *testing.T, env *Env) {
+	WithOptions(
+		EnvVars{"GOFLAGS": "-tags=foo"},
+	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		env.OrganizeImports("main.go")
 		env.Await(EmptyDiagnostics("main.go"))
@@ -543,7 +543,7 @@
 
 // Expect a module/GOPATH error if there is an error in the file at startup.
 // Tests golang/go#37279.
-func TestShowCriticalError_Issue37279(t *testing.T) {
+func TestBrokenWorkspace_OutsideModule(t *testing.T) {
 	const noModule = `
 -- a.go --
 package foo
@@ -573,9 +573,9 @@
 `
 	for _, go111module := range []string{"on", "off", ""} {
 		t.Run(fmt.Sprintf("GO111MODULE_%v", go111module), func(t *testing.T) {
-			WithOptions(EditorConfig{
-				Env: map[string]string{"GO111MODULE": go111module},
-			}).Run(t, files, func(t *testing.T, env *Env) {
+			WithOptions(
+				EnvVars{"GO111MODULE": go111module},
+			).Run(t, files, func(t *testing.T, env *Env) {
 				env.Await(
 					NoOutstandingWork(),
 				)
@@ -605,11 +605,7 @@
 `
 	WithOptions(
 		InGOPATH(),
-		EditorConfig{
-			Env: map[string]string{
-				"GO111MODULE": "off",
-			},
-		},
+		EnvVars{"GO111MODULE": "off"},
 	).Run(t, collision, func(t *testing.T, env *Env) {
 		env.OpenFile("x/x.go")
 		env.Await(
@@ -640,8 +636,6 @@
 
 // Test for golang/go#38211.
 func Test_Issue38211(t *testing.T) {
-	t.Skipf("Skipping flaky test: https://golang.org/issue/44098")
-
 	testenv.NeedsGo1Point(t, 14)
 	const ardanLabs = `
 -- go.mod --
@@ -707,7 +701,8 @@
 
 // Test for golang/go#38207.
 func TestNewModule_Issue38207(t *testing.T) {
-	testenv.NeedsGo1Point(t, 14)
+	// Fails at Go 1.14 following CL 417576. Not investigated.
+	testenv.NeedsGo1Point(t, 15)
 	const emptyFile = `
 -- go.mod --
 module mod.com
@@ -762,15 +757,20 @@
 		env.OpenFile("a/a1.go")
 		env.CreateBuffer("a/a2.go", ``)
 		env.SaveBufferWithoutActions("a/a2.go")
+		// We can't use OnceMet here (at least, not easily) because the didSave
+		// races with the didChangeWatchedFiles.
+		//
+		// TODO(rfindley): add an AllOf expectation combinator, or an expectation
+		// that all notifications have been processed.
 		env.Await(
-			OnceMet(
-				env.DoneWithSave(),
-				NoDiagnostics("a/a1.go"),
-			),
+			EmptyDiagnostics("a/a1.go"),
 		)
 		env.EditBuffer("a/a2.go", fake.NewEdit(0, 0, 0, 0, `package a`))
 		env.Await(
-			OnceMet(env.DoneWithChange(), NoDiagnostics("a/a1.go")),
+			OnceMet(
+				env.DoneWithChange(),
+				EmptyDiagnostics("a/a1.go"),
+			),
 		)
 	})
 }
@@ -878,7 +878,7 @@
 }
 
 func TestChangePackageName(t *testing.T) {
-	t.Skip("This issue hasn't been fixed yet. See golang.org/issue/41061.")
+	testenv.NeedsGo1Point(t, 16) // needs native overlay support
 
 	const mod = `
 -- go.mod --
@@ -893,15 +893,11 @@
 	Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("foo/bar_test.go")
 		env.RegexpReplace("foo/bar_test.go", "package foo_", "package foo_test")
-		env.SaveBuffer("foo/bar_test.go")
 		env.Await(
 			OnceMet(
-				env.DoneWithSave(),
-				NoDiagnostics("foo/bar_test.go"),
-			),
-			OnceMet(
-				env.DoneWithSave(),
-				NoDiagnostics("foo/foo.go"),
+				env.DoneWithChange(),
+				EmptyOrNoDiagnostics("foo/bar_test.go"),
+				EmptyOrNoDiagnostics("foo/foo.go"),
 			),
 		)
 	})
@@ -923,7 +919,7 @@
 		env.Await(
 			OnceMet(
 				env.DoneWithOpen(),
-				NoDiagnostics("_foo/x.go"),
+				EmptyDiagnostics("_foo/x.go"),
 			))
 	})
 }
@@ -971,8 +967,6 @@
 // This is a copy of the scenario_default/quickfix_empty_files.txt test from
 // govim. Reproduces golang/go#39646.
 func TestQuickFixEmptyFiles(t *testing.T) {
-	t.Skip("too flaky: golang/go#48773")
-
 	testenv.NeedsGo1Point(t, 15)
 
 	const mod = `
@@ -1236,7 +1230,7 @@
 	})
 	WithOptions(
 		WorkspaceFolders("a"),
-		LimitWorkspaceScope(),
+		Settings{"expandWorkspaceToModule": false},
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("a/main.go")
 		env.Await(
@@ -1267,11 +1261,7 @@
 `
 
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"staticcheck": true,
-			},
-		},
+		Settings{"staticcheck": true},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		var d protocol.PublishDiagnosticsParams
@@ -1306,7 +1296,7 @@
 	Run(t, dir, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		env.OpenFile("other.go")
-		x := env.DiagnosticsFor("main.go")
+		x := env.Awaiter.DiagnosticsFor("main.go")
 		if x == nil {
 			t.Fatalf("expected 1 diagnostic, got none")
 		}
@@ -1314,7 +1304,7 @@
 			t.Fatalf("main.go, got %d diagnostics, expected 1", len(x.Diagnostics))
 		}
 		keep := x.Diagnostics[0]
-		y := env.DiagnosticsFor("other.go")
+		y := env.Awaiter.DiagnosticsFor("other.go")
 		if len(y.Diagnostics) != 1 {
 			t.Fatalf("other.go: got %d diagnostics, expected 1", len(y.Diagnostics))
 		}
@@ -1381,9 +1371,7 @@
 }
 `
 	WithOptions(
-		EditorConfig{
-			AllExperiments: true,
-		},
+		Settings{"allExperiments": true},
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		// Confirm that the setting doesn't cause any warnings.
 		env.Await(NoShowMessage())
@@ -1495,11 +1483,7 @@
 	WithOptions(
 		ProxyFiles(proxy),
 		InGOPATH(),
-		EditorConfig{
-			Env: map[string]string{
-				"GO111MODULE": "off",
-			},
-		},
+		EnvVars{"GO111MODULE": "off"},
 	).Run(t, contents, func(t *testing.T, env *Env) {
 		// Simulate typing character by character.
 		env.OpenFile("foo/foo_test.go")
@@ -1698,10 +1682,8 @@
 	t.Run("GOPATH", func(t *testing.T) {
 		WithOptions(
 			InGOPATH(),
-			EditorConfig{
-				Env: map[string]string{"GO111MODULE": "off"},
-			},
-			Modes(Singleton),
+			EnvVars{"GO111MODULE": "off"},
+			Modes(Default),
 		).Run(t, mod, func(t *testing.T, env *Env) {
 			env.Await(
 				env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `cannot find package "nosuchpkg" in any of`),
@@ -1728,12 +1710,8 @@
 	for _, go111module := range []string{"on", "auto"} {
 		t.Run("GO111MODULE="+go111module, func(t *testing.T) {
 			WithOptions(
-				Modes(Singleton),
-				EditorConfig{
-					Env: map[string]string{
-						"GO111MODULE": go111module,
-					},
-				},
+				Modes(Default),
+				EnvVars{"GO111MODULE": go111module},
 			).Run(t, modules, func(t *testing.T, env *Env) {
 				env.OpenFile("a/a.go")
 				env.OpenFile("b/go.mod")
@@ -1749,19 +1727,15 @@
 	// Expect no warning if GO111MODULE=auto in a directory in GOPATH.
 	t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) {
 		WithOptions(
-			Modes(Singleton),
-			EditorConfig{
-				Env: map[string]string{
-					"GO111MODULE": "auto",
-				},
-			},
+			Modes(Default),
+			EnvVars{"GO111MODULE": "auto"},
 			InGOPATH(),
 		).Run(t, modules, func(t *testing.T, env *Env) {
 			env.OpenFile("a/a.go")
 			env.Await(
 				OnceMet(
 					env.DoneWithOpen(),
-					NoDiagnostics("a/a.go"),
+					EmptyDiagnostics("a/a.go"),
 				),
 				NoOutstandingWork(),
 			)
@@ -1815,7 +1789,7 @@
 `
 	WithOptions(
 		ProxyFiles(proxy),
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, nested, func(t *testing.T, env *Env) {
 		// Expect a diagnostic in a nested module.
 		env.OpenFile("nested/hello/hello.go")
@@ -2026,10 +2000,8 @@
 func Hello() {}
 `
 	WithOptions(
-		EditorConfig{
-			ExperimentalUseInvalidMetadata: true,
-		},
-		Modes(Singleton),
+		Settings{"experimentalUseInvalidMetadata": true},
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("go.mod")
 		env.RegexpReplace("go.mod", "module mod.com", "modul mod.com") // break the go.mod file
@@ -2082,12 +2054,10 @@
 func _() {}
 `
 	WithOptions(
-		EditorConfig{
-			ExperimentalUseInvalidMetadata: true,
-		},
+		Settings{"experimentalUseInvalidMetadata": true},
 		// ExperimentalWorkspaceModule has a different failure mode for this
 		// case.
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.Await(
 			OnceMet(
diff --git a/gopls/internal/regtest/diagnostics/invalidation_test.go b/gopls/internal/regtest/diagnostics/invalidation_test.go
new file mode 100644
index 0000000..ea65037
--- /dev/null
+++ b/gopls/internal/regtest/diagnostics/invalidation_test.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diagnostics
+
+import (
+	"fmt"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp/protocol"
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+// Test for golang/go#50267: diagnostics should be re-sent after a file is
+// opened.
+func TestDiagnosticsAreResentAfterCloseOrOpen(t *testing.T) {
+	const files = `
+-- go.mod --
+module mod.com
+
+go 1.16
+-- main.go --
+package main
+
+func _() {
+	x := 2
+}
+`
+	Run(t, files, func(_ *testing.T, env *Env) {
+		env.OpenFile("main.go")
+		var afterOpen protocol.PublishDiagnosticsParams
+		env.Await(
+			OnceMet(
+				env.DoneWithOpen(),
+				ReadDiagnostics("main.go", &afterOpen),
+			),
+		)
+		env.CloseBuffer("main.go")
+		var afterClose protocol.PublishDiagnosticsParams
+		env.Await(
+			OnceMet(
+				env.DoneWithClose(),
+				ReadDiagnostics("main.go", &afterClose),
+			),
+		)
+		if afterOpen.Version == afterClose.Version {
+			t.Errorf("publishDiagnostics: got the same version after closing (%d) as after opening", afterOpen.Version)
+		}
+		env.OpenFile("main.go")
+		var afterReopen protocol.PublishDiagnosticsParams
+		env.Await(
+			OnceMet(
+				env.DoneWithOpen(),
+				ReadDiagnostics("main.go", &afterReopen),
+			),
+		)
+		if afterReopen.Version == afterClose.Version {
+			t.Errorf("publishDiagnostics: got the same version after reopening (%d) as after closing", afterClose.Version)
+		}
+	})
+}
+
+// Test for the "chattyDiagnostics" setting: we should get re-published
+// diagnostics after every file change, even if diagnostics did not change.
+func TestChattyDiagnostics(t *testing.T) {
+	const files = `
+-- go.mod --
+module mod.com
+
+go 1.16
+-- main.go --
+package main
+
+func _() {
+	x := 2
+}
+
+// Irrelevant comment #0
+`
+
+	WithOptions(
+		Settings{
+			"chattyDiagnostics": true,
+		},
+	).Run(t, files, func(_ *testing.T, env *Env) {
+
+		env.OpenFile("main.go")
+		var d protocol.PublishDiagnosticsParams
+		env.Await(
+			OnceMet(
+				env.DoneWithOpen(),
+				ReadDiagnostics("main.go", &d),
+			),
+		)
+
+		if len(d.Diagnostics) != 1 {
+			t.Fatalf("len(Diagnostics) = %d, want 1", len(d.Diagnostics))
+		}
+		msg := d.Diagnostics[0].Message
+
+		for i := 0; i < 5; i++ {
+			before := d.Version
+			env.RegexpReplace("main.go", "Irrelevant comment #.", fmt.Sprintf("Irrelevant comment #%d", i))
+			env.Await(
+				OnceMet(
+					env.DoneWithChange(),
+					ReadDiagnostics("main.go", &d),
+				),
+			)
+
+			if d.Version == before {
+				t.Errorf("after change, got version %d, want new version", d.Version)
+			}
+
+			// As a sanity check, make sure we have the same diagnostic.
+			if len(d.Diagnostics) != 1 {
+				t.Fatalf("len(Diagnostics) = %d, want 1", len(d.Diagnostics))
+			}
+			newMsg := d.Diagnostics[0].Message
+			if newMsg != msg {
+				t.Errorf("after change, got message %q, want %q", newMsg, msg)
+			}
+		}
+	})
+}
diff --git a/gopls/internal/regtest/diagnostics/undeclared_test.go b/gopls/internal/regtest/diagnostics/undeclared_test.go
index 79f7d42..ed2b1d0 100644
--- a/gopls/internal/regtest/diagnostics/undeclared_test.go
+++ b/gopls/internal/regtest/diagnostics/undeclared_test.go
@@ -45,7 +45,7 @@
 		// 'x' is undeclared, but still necessary.
 		env.OpenFile("a/a.go")
 		env.Await(env.DiagnosticAtRegexp("a/a.go", "x"))
-		diags := env.DiagnosticsFor("a/a.go")
+		diags := env.Awaiter.DiagnosticsFor("a/a.go")
 		if got := len(diags.Diagnostics); got != 1 {
 			t.Errorf("len(Diagnostics) = %d, want 1", got)
 		}
@@ -56,7 +56,7 @@
 		// 'y = y' is pointless, and should be detected as unnecessary.
 		env.OpenFile("b/b.go")
 		env.Await(env.DiagnosticAtRegexp("b/b.go", "y = y"))
-		diags = env.DiagnosticsFor("b/b.go")
+		diags = env.Awaiter.DiagnosticsFor("b/b.go")
 		if got := len(diags.Diagnostics); got != 1 {
 			t.Errorf("len(Diagnostics) = %d, want 1", got)
 		}
diff --git a/gopls/internal/regtest/inlayHints/inlayHints_test.go b/gopls/internal/regtest/inlayhints/inlayhints_test.go
similarity index 92%
rename from gopls/internal/regtest/inlayHints/inlayHints_test.go
rename to gopls/internal/regtest/inlayhints/inlayhints_test.go
index 67931fb..1ca1dfb 100644
--- a/gopls/internal/regtest/inlayHints/inlayHints_test.go
+++ b/gopls/internal/regtest/inlayhints/inlayhints_test.go
@@ -1,7 +1,7 @@
 // Copyright 2022 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-package inlayHint
+package inlayhint
 
 import (
 	"testing"
@@ -17,6 +17,7 @@
 	bug.PanicOnBugs = true
 	Main(m, hooks.Options)
 }
+
 func TestEnablingInlayHints(t *testing.T) {
 	testenv.NeedsGo1Point(t, 14) // Test fails on 1.13.
 	const workspace = `
@@ -55,10 +56,8 @@
 	for _, test := range tests {
 		t.Run(test.label, func(t *testing.T) {
 			WithOptions(
-				EditorConfig{
-					Settings: map[string]interface{}{
-						"hints": test.enabled,
-					},
+				Settings{
+					"hints": test.enabled,
 				},
 			).Run(t, workspace, func(t *testing.T, env *Env) {
 				env.OpenFile("lib.go")
diff --git a/gopls/internal/regtest/misc/configuration_test.go b/gopls/internal/regtest/misc/configuration_test.go
index d9cce96..433f96e 100644
--- a/gopls/internal/regtest/misc/configuration_test.go
+++ b/gopls/internal/regtest/misc/configuration_test.go
@@ -9,7 +9,6 @@
 
 	. "golang.org/x/tools/internal/lsp/regtest"
 
-	"golang.org/x/tools/internal/lsp/fake"
 	"golang.org/x/tools/internal/testenv"
 )
 
@@ -38,14 +37,13 @@
 		env.OpenFile("a/a.go")
 		env.Await(
 			env.DoneWithOpen(),
-			NoDiagnostics("a/a.go"),
+			EmptyDiagnostics("a/a.go"),
 		)
-		cfg := &fake.EditorConfig{}
-		*cfg = env.Editor.Config
+		cfg := env.Editor.Config()
 		cfg.Settings = map[string]interface{}{
 			"staticcheck": true,
 		}
-		env.ChangeConfiguration(t, cfg)
+		env.ChangeConfiguration(cfg)
 		env.Await(
 			DiagnosticAt("a/a.go", 5, 4),
 		)
@@ -70,11 +68,9 @@
 var FooErr = errors.New("foo")
 `
 
-	WithOptions(EditorConfig{
-		Settings: map[string]interface{}{
-			"staticcheck": true,
-		},
-	}).Run(t, files, func(t *testing.T, env *Env) {
+	WithOptions(
+		Settings{"staticcheck": true},
+	).Run(t, files, func(t *testing.T, env *Env) {
 		env.Await(ShownMessage("staticcheck is not supported"))
 	})
 }
diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go
index 2f5a548..b71cf23 100644
--- a/gopls/internal/regtest/misc/definition_test.go
+++ b/gopls/internal/regtest/misc/definition_test.go
@@ -162,9 +162,7 @@
 	} {
 		t.Run(tt.importShortcut, func(t *testing.T) {
 			WithOptions(
-				EditorConfig{
-					ImportShortcut: tt.importShortcut,
-				},
+				Settings{"importShortcut": tt.importShortcut},
 			).Run(t, mod, func(t *testing.T, env *Env) {
 				env.OpenFile("main.go")
 				file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"fmt"`))
diff --git a/gopls/internal/regtest/misc/failures_test.go b/gopls/internal/regtest/misc/failures_test.go
index 23fccfd..86c9b22 100644
--- a/gopls/internal/regtest/misc/failures_test.go
+++ b/gopls/internal/regtest/misc/failures_test.go
@@ -29,7 +29,7 @@
 	var err error
 	err.Error()
 }`
-	WithOptions(SkipLogs()).Run(t, mod, func(t *testing.T, env *Env) {
+	Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "Error"))
 		// without the //line comment content would be non-nil
diff --git a/gopls/internal/regtest/misc/formatting_test.go b/gopls/internal/regtest/misc/formatting_test.go
index 75d8f62..71b8cad 100644
--- a/gopls/internal/regtest/misc/formatting_test.go
+++ b/gopls/internal/regtest/misc/formatting_test.go
@@ -352,10 +352,8 @@
 `
 
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"gofumpt": true,
-			},
+		Settings{
+			"gofumpt": true,
 		},
 	).Run(t, input, func(t *testing.T, env *Env) {
 		env.OpenFile("foo.go")
diff --git a/gopls/internal/regtest/misc/imports_test.go b/gopls/internal/regtest/misc/imports_test.go
index 1250e78..c0e213e 100644
--- a/gopls/internal/regtest/misc/imports_test.go
+++ b/gopls/internal/regtest/misc/imports_test.go
@@ -153,9 +153,8 @@
 		t.Fatal(err)
 	}
 	defer os.RemoveAll(modcache)
-	editorConfig := EditorConfig{Env: map[string]string{"GOMODCACHE": modcache}}
 	WithOptions(
-		editorConfig,
+		EnvVars{"GOMODCACHE": modcache},
 		ProxyFiles(proxy),
 	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
diff --git a/gopls/internal/regtest/misc/link_test.go b/gopls/internal/regtest/misc/link_test.go
index e84f637..1005de9 100644
--- a/gopls/internal/regtest/misc/link_test.go
+++ b/gopls/internal/regtest/misc/link_test.go
@@ -75,7 +75,9 @@
 		}
 
 		// Then change the environment to make these links private.
-		env.ChangeEnv(map[string]string{"GOPRIVATE": "import.test"})
+		cfg := env.Editor.Config()
+		cfg.Env = map[string]string{"GOPRIVATE": "import.test"}
+		env.ChangeConfiguration(cfg)
 
 		// Finally, verify that the links are gone.
 		content, _ = env.Hover("main.go", env.RegexpSearch("main.go", "pkg.Hello"))
diff --git a/gopls/internal/regtest/misc/rename_test.go b/gopls/internal/regtest/misc/rename_test.go
index 121b707..1d980d9 100644
--- a/gopls/internal/regtest/misc/rename_test.go
+++ b/gopls/internal/regtest/misc/rename_test.go
@@ -8,9 +8,85 @@
 	"strings"
 	"testing"
 
+	"golang.org/x/tools/internal/lsp/protocol"
 	. "golang.org/x/tools/internal/lsp/regtest"
+	"golang.org/x/tools/internal/testenv"
 )
 
+func TestPrepareRenamePackage(t *testing.T) {
+	const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	fmt.Println(1)
+}
+`
+	const wantErr = "can't rename packages: LSP client does not support file renaming"
+	Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("main.go")
+		pos := env.RegexpSearch("main.go", `main`)
+		tdpp := protocol.TextDocumentPositionParams{
+			TextDocument: env.Editor.TextDocumentIdentifier("main.go"),
+			Position:     pos.ToProtocolPosition(),
+		}
+		params := &protocol.PrepareRenameParams{
+			TextDocumentPositionParams: tdpp,
+		}
+		_, err := env.Editor.Server.PrepareRename(env.Ctx, params)
+		if err == nil {
+			t.Errorf("missing can't rename package error from PrepareRename")
+		}
+
+		if err.Error() != wantErr {
+			t.Errorf("got %v, want %v", err.Error(), wantErr)
+		}
+	})
+}
+
+func TestRenamePackageInRenamedPackage(t *testing.T) {
+	// Failed at Go 1.13; not investigated
+	testenv.NeedsGo1Point(t, 14)
+	const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+	"fmt"
+	"a.go"
+)
+
+func main() {
+	fmt.Println(a.C)
+}
+-- a.go --
+package main
+
+const C = 1
+`
+	Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("main.go")
+		pos := env.RegexpSearch("main.go", "main")
+		env.Rename("main.go", pos, "pkg")
+
+		// Check if the new package name exists.
+		env.RegexpSearch("main.go", "package pkg")
+		env.RegexpSearch("a.go", "package pkg")
+	})
+}
+
 // Test for golang/go#47564.
 func TestRenameInTestVariant(t *testing.T) {
 	const files = `
diff --git a/gopls/internal/regtest/misc/semantictokens_test.go b/gopls/internal/regtest/misc/semantictokens_test.go
index 7950787..4437d40 100644
--- a/gopls/internal/regtest/misc/semantictokens_test.go
+++ b/gopls/internal/regtest/misc/semantictokens_test.go
@@ -25,10 +25,8 @@
 
 `
 	WithOptions(
-		Modes(Singleton),
-		EditorConfig{
-			AllExperiments: true,
-		},
+		Modes(Default),
+		Settings{"allExperiments": true},
 	).Run(t, src, func(t *testing.T, env *Env) {
 		params := &protocol.SemanticTokensParams{}
 		const badURI = "http://foo"
diff --git a/gopls/internal/regtest/misc/settings_test.go b/gopls/internal/regtest/misc/settings_test.go
index 7704c3c..62d3d90 100644
--- a/gopls/internal/regtest/misc/settings_test.go
+++ b/gopls/internal/regtest/misc/settings_test.go
@@ -24,11 +24,7 @@
 `
 
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"directoryFilters": []string{""},
-			},
-		},
+		Settings{"directoryFilters": []string{""}},
 	).Run(t, src, func(t *testing.T, env *Env) {
 		// No need to do anything. Issue golang/go#51843 is triggered by the empty
 		// directory filter above.
diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go
index a6b0cd8..64e0720 100644
--- a/gopls/internal/regtest/misc/shared_test.go
+++ b/gopls/internal/regtest/misc/shared_test.go
@@ -7,10 +7,13 @@
 import (
 	"testing"
 
+	"golang.org/x/tools/internal/lsp/fake"
 	. "golang.org/x/tools/internal/lsp/regtest"
 )
 
-const sharedProgram = `
+// Smoke test that simultaneous editing sessions in the same workspace works.
+func TestSimultaneousEdits(t *testing.T) {
+	const sharedProgram = `
 -- go.mod --
 module mod
 
@@ -24,21 +27,25 @@
 	fmt.Println("Hello World.")
 }`
 
-func runShared(t *testing.T, testFunc func(env1 *Env, env2 *Env)) {
-	// Only run these tests in forwarded modes.
-	modes := DefaultModes() & (Forwarded | SeparateProcess)
-	WithOptions(Modes(modes)).Run(t, sharedProgram, func(t *testing.T, env1 *Env) {
+	WithOptions(
+		Modes(DefaultModes()&(Forwarded|SeparateProcess)),
+	).Run(t, sharedProgram, func(t *testing.T, env1 *Env) {
 		// Create a second test session connected to the same workspace and server
 		// as the first.
-		env2, cleanup := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config, true)
-		defer cleanup()
+		awaiter := NewAwaiter(env1.Sandbox.Workdir)
+		editor, err := fake.NewEditor(env1.Sandbox, env1.Editor.Config()).Connect(env1.Ctx, env1.Server, awaiter.Hooks())
+		if err != nil {
+			t.Fatal(err)
+		}
+		env2 := &Env{
+			T:       t,
+			Ctx:     env1.Ctx,
+			Sandbox: env1.Sandbox,
+			Server:  env1.Server,
+			Editor:  editor,
+			Awaiter: awaiter,
+		}
 		env2.Await(InitialWorkspaceLoad)
-		testFunc(env1, env2)
-	})
-}
-
-func TestSimultaneousEdits(t *testing.T) {
-	runShared(t, func(env1 *Env, env2 *Env) {
 		// In editor #1, break fmt.Println as before.
 		env1.OpenFile("main.go")
 		env1.RegexpReplace("main.go", "Printl(n)", "")
@@ -49,17 +56,19 @@
 		// Now check that we got different diagnostics in each environment.
 		env1.Await(env1.DiagnosticAtRegexp("main.go", "Printl"))
 		env2.Await(env2.DiagnosticAtRegexp("main.go", "$"))
-	})
-}
 
-func TestShutdown(t *testing.T) {
-	runShared(t, func(env1 *Env, env2 *Env) {
-		if err := env1.Editor.Close(env1.Ctx); err != nil {
-			t.Errorf("closing first editor: %v", err)
+		// Now close editor #2, and verify that operation in editor #1 is
+		// unaffected.
+		if err := env2.Editor.Close(env2.Ctx); err != nil {
+			t.Errorf("closing second editor: %v", err)
 		}
-		// Now make an edit in editor #2 to trigger diagnostics.
-		env2.OpenFile("main.go")
-		env2.RegexpReplace("main.go", "\\)\n(})", "")
-		env2.Await(env2.DiagnosticAtRegexp("main.go", "$"))
+
+		env1.RegexpReplace("main.go", "Printl", "Println")
+		env1.Await(
+			OnceMet(
+				env1.DoneWithChange(),
+				EmptyDiagnostics("main.go"),
+			),
+		)
 	})
 }
diff --git a/gopls/internal/regtest/misc/staticcheck_test.go b/gopls/internal/regtest/misc/staticcheck_test.go
index 94bb399..6f1bda3 100644
--- a/gopls/internal/regtest/misc/staticcheck_test.go
+++ b/gopls/internal/regtest/misc/staticcheck_test.go
@@ -60,11 +60,9 @@
 var FooErr error = errors.New("foo")
 `
 
-	WithOptions(EditorConfig{
-		Settings: map[string]interface{}{
-			"staticcheck": true,
-		},
-	}).Run(t, files, func(t *testing.T, env *Env) {
+	WithOptions(
+		Settings{"staticcheck": true},
+	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("a/a.go")
 		env.Await(
 			env.DiagnosticAtRegexpFromSource("a/a.go", "sort.Slice", "sortslice"),
diff --git a/gopls/internal/regtest/misc/testdata/vulndb/golang.org/x/crypto.json b/gopls/internal/regtest/misc/testdata/vulndb/golang.org/x/crypto.json
new file mode 100644
index 0000000..e0a2791
--- /dev/null
+++ b/gopls/internal/regtest/misc/testdata/vulndb/golang.org/x/crypto.json
@@ -0,0 +1 @@
+[{"id":"GO-2020-0012","published":"2021-04-14T20:04:52Z","modified":"2021-04-14T20:04:52Z","aliases":["CVE-2020-9283"],"details":"An attacker can craft an ssh-ed25519 or sk-ssh-ed25519@openssh.com public\nkey, such that the library will panic when trying to verify a signature\nwith it. If verifying signatures using user supplied public keys, this\nmay be used as a denial of service vector.\n","affected":[{"package":{"name":"golang.org/x/crypto/ssh","ecosystem":"Go"},"ranges":[{"type":"SEMVER","events":[{"introduced":"0"},{"fixed":"0.0.0-20200220183623-bac4c82f6975"}]}],"database_specific":{"url":"https://pkg.go.dev/vuln/GO-2020-0012"},"ecosystem_specific":{"symbols":["parseED25519","ed25519PublicKey.Verify","parseSKEd25519","skEd25519PublicKey.Verify","NewPublicKey"]}}],"references":[{"type":"FIX","url":"https://go-review.googlesource.com/c/crypto/+/220357"},{"type":"FIX","url":"https://go.googlesource.com/crypto/+/bac4c82f69751a6dd76e702d54b3ceb88adab236"},{"type":"WEB","url":"https://groups.google.com/g/golang-announce/c/3L45YRc91SY"}]},{"id":"GO-2020-0013","published":"2021-04-14T20:04:52Z","modified":"2021-04-14T20:04:52Z","aliases":["CVE-2017-3204"],"details":"By default host key verification is disabled which allows for\nman-in-the-middle attacks against SSH clients if\nClientConfig.HostKeyCallback is not 
set.\n","affected":[{"package":{"name":"golang.org/x/crypto/ssh","ecosystem":"Go"},"ranges":[{"type":"SEMVER","events":[{"introduced":"0"},{"fixed":"0.0.0-20170330155735-e4e2799dd7aa"}]}],"database_specific":{"url":"https://pkg.go.dev/vuln/GO-2020-0013"},"ecosystem_specific":{"symbols":["NewClientConn"]}}],"references":[{"type":"FIX","url":"https://go-review.googlesource.com/38701"},{"type":"FIX","url":"https://go.googlesource.com/crypto/+/e4e2799dd7aab89f583e1d898300d96367750991"},{"type":"WEB","url":"https://go.dev/issue/19767"},{"type":"WEB","url":"https://bridge.grumpy-troll.org/2017/04/golang-ssh-security/"}]},{"id":"GO-2021-0227","published":"2022-02-17T17:35:32Z","modified":"2022-02-17T17:35:32Z","aliases":["CVE-2020-29652"],"details":"Clients can cause a panic in SSH servers. An attacker can craft\nan authentication request message for the “gssapi-with-mic” method\nwhich will cause NewServerConn to panic via a nil pointer dereference\nif ServerConfig.GSSAPIWithMICConfig is nil.\n","affected":[{"package":{"name":"golang.org/x/crypto/ssh","ecosystem":"Go"},"ranges":[{"type":"SEMVER","events":[{"introduced":"0"},{"fixed":"0.0.0-20201216223049-8b5274cf687f"}]}],"database_specific":{"url":"https://pkg.go.dev/vuln/GO-2021-0227"},"ecosystem_specific":{"symbols":["connection.serverAuthenticate"]}}],"references":[{"type":"FIX","url":"https://go-review.googlesource.com/c/crypto/+/278852"},{"type":"FIX","url":"https://go.googlesource.com/crypto/+/8b5274cf687fd9316b4108863654cc57385531e8"},{"type":"WEB","url":"https://groups.google.com/g/golang-announce/c/ouZIlBimOsE?pli=1"}]}]
\ No newline at end of file
diff --git a/gopls/internal/regtest/misc/testdata/vulndb/golang.org/x/text.json b/gopls/internal/regtest/misc/testdata/vulndb/golang.org/x/text.json
new file mode 100644
index 0000000..eee052f
--- /dev/null
+++ b/gopls/internal/regtest/misc/testdata/vulndb/golang.org/x/text.json
@@ -0,0 +1 @@
+[{"id":"GO-2020-0015","published":"2021-04-14T20:04:52Z","modified":"2021-06-07T12:00:00Z","aliases":["CVE-2020-14040"],"details":"An attacker could provide a single byte to a UTF16 decoder instantiated with\nUseBOM or ExpectBOM to trigger an infinite loop if the String function on\nthe Decoder is called, or the Decoder is passed to transform.String.\nIf used to parse user supplied input, this may be used as a denial of service\nvector.\n","affected":[{"package":{"name":"golang.org/x/text/encoding/unicode","ecosystem":"Go"},"ranges":[{"type":"SEMVER","events":[{"introduced":"0"},{"fixed":"0.3.3"}]}],"database_specific":{"url":"https://pkg.go.dev/vuln/GO-2020-0015"},"ecosystem_specific":{"symbols":["utf16Decoder.Transform","bomOverride.Transform"]}},{"package":{"name":"golang.org/x/text/transform","ecosystem":"Go"},"ranges":[{"type":"SEMVER","events":[{"introduced":"0"},{"fixed":"0.3.3"}]}],"database_specific":{"url":"https://pkg.go.dev/vuln/GO-2020-0015"},"ecosystem_specific":{"symbols":["Transform"]}}],"references":[{"type":"FIX","url":"https://go-review.googlesource.com/c/text/+/238238"},{"type":"FIX","url":"https://go.googlesource.com/text/+/23ae387dee1f90d29a23c0e87ee0b46038fbed0e"},{"type":"WEB","url":"https://go.dev/issue/39491"},{"type":"WEB","url":"https://groups.google.com/g/golang-announce/c/bXVeAmGOqz0"}]},{"id":"GO-2021-0113","published":"2021-10-06T17:51:21Z","modified":"2021-10-06T17:51:21Z","aliases":["CVE-2021-38561"],"details":"Due to improper index calculation, an incorrectly formatted language tag can cause Parse\nto panic via an out of bounds read. 
If Parse is used to process untrusted user inputs,\nthis may be used as a vector for a denial of service attack.\n","affected":[{"package":{"name":"golang.org/x/text/language","ecosystem":"Go"},"ranges":[{"type":"SEMVER","events":[{"introduced":"0"},{"fixed":"0.3.7"}]}],"database_specific":{"url":"https://pkg.go.dev/vuln/GO-2021-0113"},"ecosystem_specific":{"symbols":["Parse","MatchStrings","MustParse","ParseAcceptLanguage"]}}],"references":[{"type":"FIX","url":"https://go-review.googlesource.com/c/text/+/340830"},{"type":"FIX","url":"https://go.googlesource.com/text/+/383b2e75a7a4198c42f8f87833eefb772868a56f"}]}]
\ No newline at end of file
diff --git a/gopls/internal/regtest/misc/testdata/vulndb/stdlib.json b/gopls/internal/regtest/misc/testdata/vulndb/stdlib.json
new file mode 100644
index 0000000..7cbfafc
--- /dev/null
+++ b/gopls/internal/regtest/misc/testdata/vulndb/stdlib.json
@@ -0,0 +1 @@
+[{"id":"GO-0000-001","affected":[{"package":{"name":"archive/zip"},"ranges":[{"type":"SEMVER","events":[{"introduced":"1.18.0"}]}],"ecosystem_specific":{"symbols":["OpenReader"]}}]}]
diff --git a/gopls/internal/regtest/misc/vendor_test.go b/gopls/internal/regtest/misc/vendor_test.go
index 324a800..b0f507a 100644
--- a/gopls/internal/regtest/misc/vendor_test.go
+++ b/gopls/internal/regtest/misc/vendor_test.go
@@ -27,16 +27,6 @@
 func TestInconsistentVendoring(t *testing.T) {
 	testenv.NeedsGo1Point(t, 14)
 
-	// TODO(golang/go#49646): delete this comment once this test is stable.
-	//
-	// In golang/go#49646, this test is reported as flaky on Windows. We believe
-	// this is due to file contention from go mod vendor that should be resolved.
-	// If this test proves to still be flaky, skip it.
-	//
-	// if runtime.GOOS == "windows" {
-	// 	t.Skipf("skipping test due to flakiness on Windows: https://golang.org/issue/49646")
-	// }
-
 	const pkgThatUsesVendoring = `
 -- go.mod --
 module mod.com
@@ -59,7 +49,7 @@
 }
 `
 	WithOptions(
-		Modes(Singleton),
+		Modes(Default),
 		ProxyFiles(basicProxy),
 	).Run(t, pkgThatUsesVendoring, func(t *testing.T, env *Env) {
 		env.OpenFile("a/a1.go")
diff --git a/gopls/internal/regtest/misc/vuln_test.go b/gopls/internal/regtest/misc/vuln_test.go
index 94fde71..91fef3f 100644
--- a/gopls/internal/regtest/misc/vuln_test.go
+++ b/gopls/internal/regtest/misc/vuln_test.go
@@ -5,11 +5,14 @@
 package misc
 
 import (
+	"os"
+	"path/filepath"
 	"testing"
 
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/protocol"
 	. "golang.org/x/tools/internal/lsp/regtest"
+	"golang.org/x/tools/internal/testenv"
 )
 
 func TestRunVulncheckExpError(t *testing.T) {
@@ -23,7 +26,7 @@
 `
 	Run(t, files, func(t *testing.T, env *Env) {
 		cmd, err := command.NewRunVulncheckExpCommand("Run Vulncheck Exp", command.VulncheckArgs{
-			Dir: "/invalid/file/url", // invalid arg
+			URI: "/invalid/file/url", // invalid arg
 		})
 		if err != nil {
 			t.Fatal(err)
@@ -41,3 +44,74 @@
 		}
 	})
 }
+
+func TestRunVulncheckExp(t *testing.T) {
+	testenv.NeedsGo1Point(t, 18)
+	const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- main.go --
+package main
+
+import (
+        "archive/zip"
+        "fmt"
+)
+
+func main() {
+        _, err := zip.OpenReader("file.zip")  // vulnerability GO-0000-001
+        fmt.Println(err)
+}
+`
+
+	cwd, _ := os.Getwd()
+	WithOptions(
+		EnvVars{
+			// Let the analyzer read vulnerability data from the testdata/vulndb.
+			"GOVULNDB": "file://" + filepath.Join(cwd, "testdata", "vulndb"),
+			// When fetching stdlib package vulnerability info,
+			// behave as if our go version is go1.18 for this test.
+			// The default behavior is to run `go env GOVERSION` (which isn't a mutable env var).
+			// See gopls/internal/vulncheck.goVersion
+			// which follows the convention used in golang.org/x/vuln/cmd/govulncheck.
+			"GOVERSION":                       "go1.18",
+			"_GOPLS_TEST_BINARY_RUN_AS_GOPLS": "true",
+		},
+		Settings{
+			"codelenses": map[string]bool{
+				"run_vulncheck_exp": true,
+			},
+		},
+	).Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("go.mod")
+
+		// Test CodeLens is present.
+		lenses := env.CodeLens("go.mod")
+
+		const wantCommand = "gopls." + string(command.RunVulncheckExp)
+		var gotCodelens = false
+		var lens protocol.CodeLens
+		for _, l := range lenses {
+			if l.Command.Command == wantCommand {
+				gotCodelens = true
+				lens = l
+				break
+			}
+		}
+		if !gotCodelens {
+			t.Fatal("got no vulncheck codelens")
+		}
+		// Run Command included in the codelens.
+		env.ExecuteCommand(&protocol.ExecuteCommandParams{
+			Command:   lens.Command.Command,
+			Arguments: lens.Command.Arguments,
+		}, nil)
+		env.Await(
+			CompletedWork("govulncheck", 1, true),
+			// TODO(hyangah): once the diagnostics are published, wait for diagnostics.
+			ShownMessage("Found GO-0000-001"),
+		)
+	})
+}
diff --git a/gopls/internal/regtest/misc/workspace_symbol_test.go b/gopls/internal/regtest/misc/workspace_symbol_test.go
index a21d473..2dc3a1b 100644
--- a/gopls/internal/regtest/misc/workspace_symbol_test.go
+++ b/gopls/internal/regtest/misc/workspace_symbol_test.go
@@ -72,9 +72,7 @@
 
 	var symbolMatcher = string(source.SymbolFastFuzzy)
 	WithOptions(
-		EditorConfig{
-			SymbolMatcher: &symbolMatcher,
-		},
+		Settings{"symbolMatcher": symbolMatcher},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		want := []string{
 			"Foo",    // prefer exact segment matches first
@@ -105,9 +103,7 @@
 
 	var symbolMatcher = string(source.SymbolFastFuzzy)
 	WithOptions(
-		EditorConfig{
-			SymbolMatcher: &symbolMatcher,
-		},
+		Settings{"symbolMatcher": symbolMatcher},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		compareSymbols(t, env.WorkspaceSymbol("ABC"), []string{"ABC", "AxxBxxCxx"})
 		compareSymbols(t, env.WorkspaceSymbol("'ABC"), []string{"ABC"})
diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go
index 93d4325..a32a06a 100644
--- a/gopls/internal/regtest/modfile/modfile_test.go
+++ b/gopls/internal/regtest/modfile/modfile_test.go
@@ -95,7 +95,10 @@
 			goModContent := env.ReadWorkspaceFile("a/go.mod")
 			env.OpenFile("a/main.go")
 			env.Await(
-				env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""),
+				OnceMet(
+					env.DoneWithOpen(),
+					env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""),
+				),
 			)
 			if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent {
 				t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got))
@@ -114,26 +117,43 @@
 
 	// Reproduce golang/go#40269 by deleting and recreating main.go.
 	t.Run("delete main.go", func(t *testing.T) {
-		t.Skip("This test will be flaky until golang/go#40269 is resolved.")
-
 		runner.Run(t, untidyModule, func(t *testing.T, env *Env) {
 			goModContent := env.ReadWorkspaceFile("a/go.mod")
 			mainContent := env.ReadWorkspaceFile("a/main.go")
 			env.OpenFile("a/main.go")
 			env.SaveBuffer("a/main.go")
 
+			// Ensure that we're done processing all the changes caused by opening
+			// and saving above. If not, we may run into a file locking issue on
+			// windows.
+			//
+			// If this proves insufficient, env.RemoveWorkspaceFile can be updated to
+			// retry file lock errors on windows.
+			env.Await(
+				env.DoneWithOpen(),
+				env.DoneWithSave(),
+				env.DoneWithChangeWatchedFiles(),
+			)
 			env.RemoveWorkspaceFile("a/main.go")
+
+			// TODO(rfindley): awaiting here shouldn't really be necessary. We should
+			// be consistent eventually.
+			//
+			// Probably this was meant to exercise a race with the change below.
 			env.Await(
 				env.DoneWithOpen(),
 				env.DoneWithSave(),
 				env.DoneWithChangeWatchedFiles(),
 			)
 
-			env.WriteWorkspaceFile("main.go", mainContent)
+			env.WriteWorkspaceFile("a/main.go", mainContent)
 			env.Await(
-				env.DiagnosticAtRegexp("main.go", "\"example.com/blah\""),
+				OnceMet(
+					env.DoneWithChangeWatchedFiles(),
+					env.DiagnosticAtRegexp("a/main.go", "\"example.com/blah\""),
+				),
 			)
-			if got := env.ReadWorkspaceFile("go.mod"); got != goModContent {
+			if got := env.ReadWorkspaceFile("a/go.mod"); got != goModContent {
 				t.Fatalf("go.mod changed on disk:\n%s", tests.Diff(t, goModContent, got))
 			}
 		})
@@ -552,7 +572,7 @@
 			env.DiagnosticAtRegexpWithMessage("a/main.go", `"example.com/blah/v2"`, "cannot find module providing"),
 			env.DiagnosticAtRegexpWithMessage("a/go.mod", `require example.com/blah/v2`, "cannot find module providing"),
 		)
-		env.ApplyQuickFixes("a/go.mod", env.DiagnosticsFor("a/go.mod").Diagnostics)
+		env.ApplyQuickFixes("a/go.mod", env.Awaiter.DiagnosticsFor("a/go.mod").Diagnostics)
 		const want = `module mod.com
 
 go 1.12
@@ -576,7 +596,9 @@
 		t.Skipf("skipping test that fails for unknown reasons on plan9; see https://go.dev/issue/50477")
 	}
 
-	testenv.NeedsGo1Point(t, 14)
+	// This test fails at go1.14 and go1.15 due to differing Go command behavior.
+	// This was not significantly investigated.
+	testenv.NeedsGo1Point(t, 16)
 
 	const unknown = `
 -- a/go.mod --
@@ -740,13 +762,9 @@
 }
 `
 	WithOptions(
-		EditorConfig{
-			Env: map[string]string{
-				"GOFLAGS": "-mod=readonly",
-			},
-		},
+		EnvVars{"GOFLAGS": "-mod=readonly"},
 		ProxyFiles(proxy),
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		original := env.ReadWorkspaceFile("go.mod")
@@ -830,9 +848,7 @@
 `
 	WithOptions(
 		ProxyFiles(workspaceProxy),
-		EditorConfig{
-			BuildFlags: []string{"-tags", "bob"},
-		},
+		Settings{"buildFlags": []string{"-tags", "bob"}},
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.Await(
 			env.DiagnosticAtRegexp("main.go", `"example.com/blah"`),
@@ -928,7 +944,7 @@
 		// TODO(rFindley) this doesn't work in multi-module workspace mode, because
 		// it keeps around the last parsing modfile. Update this test to also
 		// exercise the workspace module.
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("go.mod")
 		env.Await(env.DoneWithOpen())
@@ -1096,7 +1112,7 @@
 `
 	WithOptions(
 		ProxyFiles(workspaceProxy),
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("go.mod")
 		params := &protocol.PublishDiagnosticsParams{}
@@ -1165,7 +1181,7 @@
 `
 	WithOptions(
 		ProxyFiles(proxy),
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.OpenFile("main.go")
 		d := &protocol.PublishDiagnosticsParams{}
diff --git a/gopls/internal/regtest/template/template_test.go b/gopls/internal/regtest/template/template_test.go
index 9489e9b..ade9ac9 100644
--- a/gopls/internal/regtest/template/template_test.go
+++ b/gopls/internal/regtest/template/template_test.go
@@ -35,11 +35,9 @@
 {{end}}
 `
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"templateExtensions": []string{"tmpl"},
-				"semanticTokens":     true,
-			},
+		Settings{
+			"templateExtensions": []string{"tmpl"},
+			"semanticTokens":     true,
 		},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		var p protocol.SemanticTokensParams
@@ -66,16 +64,14 @@
 {{end}}
 `
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"templateExtensions": []string{"tmpl"},
-				"semanticTokens":     true,
-			},
+		Settings{
+			"templateExtensions": []string{"tmpl"},
+			"semanticTokens":     true,
 		},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		// TODO: can we move this diagnostic onto {{}}?
 		env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}"))
-		d := env.DiagnosticsFor("hello.tmpl").Diagnostics // issue 50786: check for Source
+		d := env.Awaiter.DiagnosticsFor("hello.tmpl").Diagnostics // issue 50786: check for Source
 		if len(d) != 1 {
 			t.Errorf("expected 1 diagnostic, got %d", len(d))
 			return
@@ -112,11 +108,9 @@
 `
 
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"templateExtensions": []string{"tmpl"},
-			},
-			DirectoryFilters: []string{"-b"},
+		Settings{
+			"directoryFilters":   []string{"-b"},
+			"templateExtensions": []string{"tmpl"},
 		},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		env.Await(
@@ -139,7 +133,7 @@
 		env.Await(
 			OnceMet(
 				env.DoneWithOpen(),
-				NoDiagnostics("hello.tmpl"), // Don't get spurious errors for empty templates.
+				EmptyDiagnostics("hello.tmpl"), // Don't get spurious errors for empty templates.
 			),
 		)
 		env.SetBufferContent("hello.tmpl", "{{range .Planets}}\nHello {{}}\n{{end}}")
@@ -184,10 +178,8 @@
 `
 
 	WithOptions(
-		EditorConfig{
-			Settings: map[string]interface{}{
-				"templateExtensions": []string{"tmpl", "gotmpl"},
-			},
+		Settings{
+			"templateExtensions": []string{"tmpl", "gotmpl"},
 		},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("a.tmpl")
diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go
index e66d08a..3165595 100644
--- a/gopls/internal/regtest/watch/watch_test.go
+++ b/gopls/internal/regtest/watch/watch_test.go
@@ -139,7 +139,7 @@
 		})
 		env.Await(
 			EmptyDiagnostics("a/a.go"),
-			NoDiagnostics("b/b.go"),
+			EmptyOrNoDiagnostics("b/b.go"),
 		)
 	})
 }
@@ -199,14 +199,12 @@
 }
 `
 	Run(t, missing, func(t *testing.T, env *Env) {
-		t.Skip("the initial workspace load fails and never retries")
-
 		env.Await(
 			env.DiagnosticAtRegexp("a/a.go", "\"mod.com/c\""),
 		)
 		env.WriteWorkspaceFile("c/c.go", `package c; func C() {};`)
 		env.Await(
-			EmptyDiagnostics("c/c.go"),
+			EmptyDiagnostics("a/a.go"),
 		)
 	})
 }
@@ -343,12 +341,12 @@
 			env.Await(
 				OnceMet(
 					env.DoneWithChangeWatchedFiles(),
-					NoDiagnostics("a/a.go"),
+					EmptyOrNoDiagnostics("a/a.go"),
 				),
 			)
 			env.WriteWorkspaceFile("b/b.go", newMethod)
 			env.Await(
-				NoDiagnostics("a/a.go"),
+				EmptyOrNoDiagnostics("a/a.go"),
 			)
 		})
 	})
@@ -362,9 +360,9 @@
 			env.Await(
 				OnceMet(
 					env.DoneWithChangeWatchedFiles(),
-					NoDiagnostics("a/a.go"),
+					EmptyOrNoDiagnostics("a/a.go"),
 				),
-				NoDiagnostics("b/b.go"),
+				EmptyOrNoDiagnostics("b/b.go"),
 			)
 		})
 	})
@@ -389,9 +387,9 @@
 package a
 `
 	t.Run("close then delete", func(t *testing.T) {
-		WithOptions(EditorConfig{
-			VerboseOutput: true,
-		}).Run(t, pkg, func(t *testing.T, env *Env) {
+		WithOptions(
+			Settings{"verboseOutput": true},
+		).Run(t, pkg, func(t *testing.T, env *Env) {
 			env.OpenFile("a/a.go")
 			env.OpenFile("a/a_unneeded.go")
 			env.Await(
@@ -424,7 +422,7 @@
 
 	t.Run("delete then close", func(t *testing.T) {
 		WithOptions(
-			EditorConfig{VerboseOutput: true},
+			Settings{"verboseOutput": true},
 		).Run(t, pkg, func(t *testing.T, env *Env) {
 			env.OpenFile("a/a.go")
 			env.OpenFile("a/a_unneeded.go")
@@ -620,11 +618,7 @@
 `
 	WithOptions(
 		InGOPATH(),
-		EditorConfig{
-			Env: map[string]string{
-				"GO111MODULE": "auto",
-			},
-		},
+		EnvVars{"GO111MODULE": "auto"},
 		Modes(Experimental), // module is in a subdirectory
 	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("foo/main.go")
@@ -663,11 +657,7 @@
 `
 	WithOptions(
 		InGOPATH(),
-		EditorConfig{
-			Env: map[string]string{
-				"GO111MODULE": "auto",
-			},
-		},
+		EnvVars{"GO111MODULE": "auto"},
 	).Run(t, files, func(t *testing.T, env *Env) {
 		env.OpenFile("foo/main.go")
 		env.RemoveWorkspaceFile("foo/go.mod")
@@ -725,11 +715,11 @@
 		env.Await(
 			OnceMet(
 				env.DoneWithChangeWatchedFiles(),
-				NoDiagnostics("a/a.go"),
+				EmptyOrNoDiagnostics("a/a.go"),
 			),
 			OnceMet(
 				env.DoneWithChangeWatchedFiles(),
-				NoDiagnostics("a/a_test.go"),
+				EmptyOrNoDiagnostics("a/a_test.go"),
 			),
 		)
 		// Now, add a new file to the test variant and use its symbol in the
@@ -757,11 +747,11 @@
 		env.Await(
 			OnceMet(
 				env.DoneWithChangeWatchedFiles(),
-				NoDiagnostics("a/a_test.go"),
+				EmptyOrNoDiagnostics("a/a_test.go"),
 			),
 			OnceMet(
 				env.DoneWithChangeWatchedFiles(),
-				NoDiagnostics("a/a2_test.go"),
+				EmptyOrNoDiagnostics("a/a2_test.go"),
 			),
 		)
 	})
diff --git a/gopls/internal/regtest/workspace/broken_test.go b/gopls/internal/regtest/workspace/broken_test.go
new file mode 100644
index 0000000..e88b98b
--- /dev/null
+++ b/gopls/internal/regtest/workspace/broken_test.go
@@ -0,0 +1,169 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+	"strings"
+	"testing"
+
+	"golang.org/x/tools/internal/lsp"
+	. "golang.org/x/tools/internal/lsp/regtest"
+	"golang.org/x/tools/internal/testenv"
+)
+
+// This file holds various tests for UX with respect to broken workspaces.
+//
+// TODO: consolidate other tests here.
+
+// Test for golang/go#53933
+func TestBrokenWorkspace_DuplicateModules(t *testing.T) {
+	testenv.NeedsGo1Point(t, 18)
+
+	// This proxy module content is replaced by the workspace, but is still
+	// required for module resolution to function in the Go command.
+	const proxy = `
+-- example.com/foo@v0.0.1/go.mod --
+module example.com/foo
+
+go 1.12
+-- example.com/foo@v1.2.3/foo.go --
+package foo
+`
+
+	const src = `
+-- go.work --
+go 1.18
+
+use (
+	./package1
+	./package1/vendor/example.com/foo
+	./package2
+	./package2/vendor/example.com/foo
+)
+
+-- package1/go.mod --
+module mod.test
+
+go 1.18
+
+require example.com/foo v0.0.1
+-- package1/main.go --
+package main
+
+import "example.com/foo"
+
+func main() {
+	_ = foo.CompleteMe
+}
+-- package1/vendor/example.com/foo/go.mod --
+module example.com/foo
+
+go 1.18
+-- package1/vendor/example.com/foo/foo.go --
+package foo
+
+const CompleteMe = 111
+-- package2/go.mod --
+module mod2.test
+
+go 1.18
+
+require example.com/foo v0.0.1
+-- package2/main.go --
+package main
+
+import "example.com/foo"
+
+func main() {
+	_ = foo.CompleteMe
+}
+-- package2/vendor/example.com/foo/go.mod --
+module example.com/foo
+
+go 1.18
+-- package2/vendor/example.com/foo/foo.go --
+package foo
+
+const CompleteMe = 222
+`
+
+	WithOptions(
+		ProxyFiles(proxy),
+	).Run(t, src, func(t *testing.T, env *Env) {
+		env.OpenFile("package1/main.go")
+		env.Await(
+			OutstandingWork(lsp.WorkspaceLoadFailure, `found module "example.com/foo" multiple times in the workspace`),
+		)
+
+		// Remove the redundant vendored copy of example.com.
+		env.WriteWorkspaceFile("go.work", `go 1.18
+		use (
+			./package1
+			./package2
+			./package2/vendor/example.com/foo
+		)
+		`)
+		env.Await(NoOutstandingWork())
+
+		// Check that definitions in package1 go to the copy vendored in package2.
+		location, _ := env.GoToDefinition("package1/main.go", env.RegexpSearch("package1/main.go", "CompleteMe"))
+		const wantLocation = "package2/vendor/example.com/foo/foo.go"
+		if !strings.HasSuffix(location, wantLocation) {
+			t.Errorf("got definition of CompleteMe at %q, want %q", location, wantLocation)
+		}
+	})
+}
+
+// Test for golang/go#43186: correcting the module path should fix errors
+// without restarting gopls.
+func TestBrokenWorkspace_WrongModulePath(t *testing.T) {
+	const files = `
+-- go.mod --
+module mod.testx
+
+go 1.18
+-- p/internal/foo/foo.go --
+package foo
+
+const C = 1
+-- p/internal/bar/bar.go --
+package bar
+
+import "mod.test/p/internal/foo"
+
+const D = foo.C + 1
+-- p/internal/bar/bar_test.go --
+package bar_test
+
+import (
+	"mod.test/p/internal/foo"
+	. "mod.test/p/internal/bar"
+)
+
+const E = D + foo.C
+-- p/internal/baz/baz_test.go --
+package baz_test
+
+import (
+	named "mod.test/p/internal/bar"
+)
+
+const F = named.D - 3
+`
+
+	Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("p/internal/bar/bar.go")
+		env.Await(
+			OnceMet(
+				env.DoneWithOpen(),
+				env.DiagnosticAtRegexp("p/internal/bar/bar.go", "\"mod.test/p/internal/foo\""),
+			),
+		)
+		env.OpenFile("go.mod")
+		env.RegexpReplace("go.mod", "mod.testx", "mod.test")
+		env.SaveBuffer("go.mod") // saving triggers a reload
+		env.Await(NoOutstandingDiagnostics())
+	})
+}
diff --git a/gopls/internal/regtest/workspace/directoryfilters_test.go b/gopls/internal/regtest/workspace/directoryfilters_test.go
new file mode 100644
index 0000000..bdc60a0
--- /dev/null
+++ b/gopls/internal/regtest/workspace/directoryfilters_test.go
@@ -0,0 +1,252 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workspace
+
+import (
+	"sort"
+	"strings"
+	"testing"
+
+	. "golang.org/x/tools/internal/lsp/regtest"
+)
+
+// This file contains regression tests for the directoryFilters setting.
+//
+// TODO:
+//  - consolidate some of these tests into a single test
+//  - add more tests for changing directory filters
+
+func TestDirectoryFilters(t *testing.T) {
+	WithOptions(
+		ProxyFiles(workspaceProxy),
+		WorkspaceFolders("pkg"),
+		Settings{
+			"directoryFilters": []string{"-inner"},
+		},
+	).Run(t, workspaceModule, func(t *testing.T, env *Env) {
+		syms := env.WorkspaceSymbol("Hi")
+		sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName })
+		for _, s := range syms {
+			if strings.Contains(s.ContainerName, "inner") {
+				t.Errorf("WorkspaceSymbol: found symbol %q with container %q, want \"inner\" excluded", s.Name, s.ContainerName)
+			}
+		}
+	})
+}
+
+func TestDirectoryFiltersLoads(t *testing.T) {
+	// exclude, and its error, should be excluded from the workspace.
+	const files = `
+-- go.mod --
+module example.com
+
+go 1.12
+-- exclude/exclude.go --
+package exclude
+
+const _ = Nonexistant
+`
+
+	WithOptions(
+		Settings{"directoryFilters": []string{"-exclude"}},
+	).Run(t, files, func(t *testing.T, env *Env) {
+		env.Await(NoDiagnostics("exclude/x.go"))
+	})
+}
+
+func TestDirectoryFiltersTransitiveDep(t *testing.T) {
+	// Even though exclude is excluded from the workspace, it should
+	// still be importable as a non-workspace package.
+	const files = `
+-- go.mod --
+module example.com
+
+go 1.12
+-- include/include.go --
+package include
+import "example.com/exclude"
+
+const _ = exclude.X
+-- exclude/exclude.go --
+package exclude
+
+const _ = Nonexistant // should be ignored, since this is a non-workspace package
+const X = 1
+`
+
+	WithOptions(
+		Settings{"directoryFilters": []string{"-exclude"}},
+	).Run(t, files, func(t *testing.T, env *Env) {
+		env.Await(
+			NoDiagnostics("exclude/exclude.go"), // filtered out
+			NoDiagnostics("include/include.go"), // successfully builds
+		)
+	})
+}
+
+func TestDirectoryFiltersWorkspaceModules(t *testing.T) {
+	// Define a module include.com which should be in the workspace, plus a
+	// module exclude.com which should be excluded and therefore come from
+	// the proxy.
+	const files = `
+-- include/go.mod --
+module include.com
+
+go 1.12
+
+require exclude.com v1.0.0
+
+-- include/go.sum --
+exclude.com v1.0.0 h1:Q5QSfDXY5qyNCBeUiWovUGqcLCRZKoTs9XdBeVz+w1I=
+exclude.com v1.0.0/go.mod h1:hFox2uDlNB2s2Jfd9tHlQVfgqUiLVTmh6ZKat4cvnj4=
+
+-- include/include.go --
+package include
+
+import "exclude.com"
+
+var _ = exclude.X // satisfied only by the workspace version
+-- exclude/go.mod --
+module exclude.com
+
+go 1.12
+-- exclude/exclude.go --
+package exclude
+
+const X = 1
+`
+	const proxy = `
+-- exclude.com@v1.0.0/go.mod --
+module exclude.com
+
+go 1.12
+-- exclude.com@v1.0.0/exclude.go --
+package exclude
+`
+	WithOptions(
+		Modes(Experimental),
+		ProxyFiles(proxy),
+		Settings{"directoryFilters": []string{"-exclude"}},
+	).Run(t, files, func(t *testing.T, env *Env) {
+		env.Await(env.DiagnosticAtRegexp("include/include.go", `exclude.(X)`))
+	})
+}
+
+// Test for golang/go#46438: support for '**' in directory filters.
+func TestDirectoryFilters_Wildcard(t *testing.T) {
+	filters := []string{"-**/bye"}
+	WithOptions(
+		ProxyFiles(workspaceProxy),
+		WorkspaceFolders("pkg"),
+		Settings{
+			"directoryFilters": filters,
+		},
+	).Run(t, workspaceModule, func(t *testing.T, env *Env) {
+		syms := env.WorkspaceSymbol("Bye")
+		sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName })
+		for _, s := range syms {
+			if strings.Contains(s.ContainerName, "bye") {
+				t.Errorf("WorkspaceSymbol: found symbol %q with container %q with filters %v", s.Name, s.ContainerName, filters)
+			}
+		}
+	})
+}
+
+// Test for golang/go#52993: wildcard directoryFilters should apply to
+// goimports scanning as well.
+func TestDirectoryFilters_ImportScanning(t *testing.T) {
+	const files = `
+-- go.mod --
+module mod.test
+
+go 1.12
+-- main.go --
+package main
+
+func main() {
+	bye.Goodbye()
+}
+-- p/bye/bye.go --
+package bye
+
+func Goodbye() {}
+`
+
+	WithOptions(
+		Settings{
+			"directoryFilters": []string{"-**/bye"},
+		},
+		// This test breaks in 'Experimental' mode, because with
+		// experimentalWorkspaceModule set, the goimports scan behaves
+		// differently.
+		//
+		// Since this feature is going away (golang/go#52897), don't investigate.
+		Modes(Default),
+	).Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("main.go")
+		beforeSave := env.Editor.BufferText("main.go")
+		env.OrganizeImports("main.go")
+		got := env.Editor.BufferText("main.go")
+		if got != beforeSave {
+			t.Errorf("after organizeImports code action, got modified buffer:\n%s", got)
+		}
+	})
+}
+
+// Test for golang/go#52993: non-wildcard directoryFilters should still be
+// applied relative to the workspace folder, not the module root.
+func TestDirectoryFilters_MultiRootImportScanning(t *testing.T) {
+	const files = `
+-- go.work --
+go 1.18
+
+use (
+	a
+	b
+)
+-- a/go.mod --
+module mod1.test
+
+go 1.18
+-- a/main.go --
+package main
+
+func main() {
+	hi.Hi()
+}
+-- a/hi/hi.go --
+package hi
+
+func Hi() {}
+-- b/go.mod --
+module mod2.test
+
+go 1.18
+-- b/main.go --
+package main
+
+func main() {
+	hi.Hi()
+}
+-- b/hi/hi.go --
+package hi
+
+func Hi() {}
+`
+
+	WithOptions(
+		Settings{
+			"directoryFilters": []string{"-hi"}, // this test fails with -**/hi
+		},
+	).Run(t, files, func(t *testing.T, env *Env) {
+		env.OpenFile("a/main.go")
+		beforeSave := env.Editor.BufferText("a/main.go")
+		env.OrganizeImports("a/main.go")
+		got := env.Editor.BufferText("a/main.go")
+		if got == beforeSave {
+			t.Errorf("after organizeImports code action, got identical buffer:\n%s", got)
+		}
+	})
+}
diff --git a/gopls/internal/regtest/workspace/metadata_test.go b/gopls/internal/regtest/workspace/metadata_test.go
index 28291a2..4c3f46b 100644
--- a/gopls/internal/regtest/workspace/metadata_test.go
+++ b/gopls/internal/regtest/workspace/metadata_test.go
@@ -41,3 +41,63 @@
 		))
 	})
 }
+
+// Test that ignoring a file via build constraints causes diagnostics to
+// be resolved.
+func TestIgnoreFile(t *testing.T) {
+	testenv.NeedsGo1Point(t, 16) // needs native overlays
+
+	const src = `
+-- go.mod --
+module mod.test
+
+go 1.12
+-- foo.go --
+package main
+
+func main() {}
+-- bar.go --
+package main
+
+func main() {}
+	`
+
+	WithOptions(
+		// TODO(golang/go#54180): we don't run in 'experimental' mode here, because
+		// with "experimentalUseInvalidMetadata", this test fails because the
+		// orphaned bar.go is diagnosed using stale metadata, and then not
+		// re-diagnosed when new metadata arrives.
+		//
+		// We could fix this by re-running diagnostics after a load, but should
+		// consider whether that is worthwhile.
+		Modes(Default),
+	).Run(t, src, func(t *testing.T, env *Env) {
+		env.OpenFile("foo.go")
+		env.OpenFile("bar.go")
+		env.Await(
+			OnceMet(
+				env.DoneWithOpen(),
+				env.DiagnosticAtRegexp("foo.go", "func (main)"),
+				env.DiagnosticAtRegexp("bar.go", "func (main)"),
+			),
+		)
+		// Ignore bar.go. This should resolve diagnostics.
+		env.RegexpReplace("bar.go", "package main", "// +build ignore\n\npackage main")
+
+		// To make this test pass with experimentalUseInvalidMetadata, we could make
+		// an arbitrary edit that invalidates the snapshot, at which point the
+		// orphaned diagnostics will be invalidated.
+		//
+		// But of course, this should not be necessary: we should invalidate stale
+		// information when fresh metadata arrives.
+		// env.RegexpReplace("foo.go", "package main", "package main // test")
+		env.Await(
+			OnceMet(
+				env.DoneWithChange(),
+				EmptyDiagnostics("foo.go"),
+				env.DiagnosticAtRegexpWithMessage("bar.go", "package (main)", "No packages"),
+				env.NoDiagnosticAtRegexp("bar.go", "func (main)"),
+			),
+		)
+	})
+}
diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go
index 9e4b85f..86da9d1 100644
--- a/gopls/internal/regtest/workspace/workspace_test.go
+++ b/gopls/internal/regtest/workspace/workspace_test.go
@@ -7,7 +7,6 @@
 import (
 	"fmt"
 	"path/filepath"
-	"sort"
 	"strings"
 	"testing"
 
@@ -15,7 +14,6 @@
 	"golang.org/x/tools/internal/lsp/bug"
 	"golang.org/x/tools/internal/lsp/fake"
 	"golang.org/x/tools/internal/lsp/protocol"
-	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/testenv"
 
 	. "golang.org/x/tools/internal/lsp/regtest"
@@ -138,38 +136,6 @@
 	}
 }
 
-// make sure that directory filters work
-func TestFilters(t *testing.T) {
-	for _, tt := range []struct {
-		name, rootPath string
-	}{
-		{
-			name:     "module root",
-			rootPath: "pkg",
-		},
-	} {
-		t.Run(tt.name, func(t *testing.T) {
-			opts := []RunOption{ProxyFiles(workspaceProxy)}
-			if tt.rootPath != "" {
-				opts = append(opts, WorkspaceFolders(tt.rootPath))
-			}
-			f := func(o *source.Options) {
-				o.DirectoryFilters = append(o.DirectoryFilters, "-inner")
-			}
-			opts = append(opts, Options(f))
-			WithOptions(opts...).Run(t, workspaceModule, func(t *testing.T, env *Env) {
-				syms := env.WorkspaceSymbol("Hi")
-				sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName })
-				for i, s := range syms {
-					if strings.Contains(s.ContainerName, "/inner") {
-						t.Errorf("%s %v %s %s %d\n", s.Name, s.Kind, s.ContainerName, tt.name, i)
-					}
-				}
-			})
-		})
-	}
-}
-
 // Make sure that analysis diagnostics are cleared for the whole package when
 // the only opened file is closed. This test was inspired by the experience in
 // VS Code, where clicking on a reference result triggers a
@@ -305,10 +271,6 @@
 module b.com
 
 go 1.12
--- b.com@v1.2.4/b/b.go --
-package b
-
-func Hello() {}
 `
 	const multiModule = `
 -- go.mod --
@@ -595,21 +557,16 @@
 replace a.com => %s/moda/a
 replace b.com => %s/modb
 `, workdir, workdir))
-		env.Await(env.DoneWithChangeWatchedFiles())
-		// Check that go.mod diagnostics picked up the newly active mod file.
-		// The local version of modb has an extra dependency we need to download.
-		env.OpenFile("modb/go.mod")
-		env.Await(env.DoneWithOpen())
 
-		var d protocol.PublishDiagnosticsParams
+		// As of golang/go#54069, writing a gopls.mod to the workspace triggers a
+		// workspace reload.
 		env.Await(
 			OnceMet(
-				env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"),
-				ReadDiagnostics("modb/go.mod", &d),
+				env.DoneWithChangeWatchedFiles(),
+				env.DiagnosticAtRegexp("modb/b/b.go", "x"),
 			),
 		)
-		env.ApplyQuickFixes("modb/go.mod", d.Diagnostics)
-		env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x"))
+
 		// Jumping to definition should now go to b.com in the workspace.
 		if err := checkHelloLocation("modb/b/b.go"); err != nil {
 			t.Fatal(err)
@@ -736,21 +693,15 @@
 	./modb
 )
 `)
-		env.Await(env.DoneWithChangeWatchedFiles())
-		// Check that go.mod diagnostics picked up the newly active mod file.
-		// The local version of modb has an extra dependency we need to download.
-		env.OpenFile("modb/go.mod")
-		env.Await(env.DoneWithOpen())
 
-		var d protocol.PublishDiagnosticsParams
+		// As of golang/go#54069, writing go.work to the workspace triggers a
+		// workspace reload.
 		env.Await(
 			OnceMet(
-				env.DiagnosticAtRegexpWithMessage("modb/go.mod", `require example.com v1.2.3`, "has not been downloaded"),
-				ReadDiagnostics("modb/go.mod", &d),
+				env.DoneWithChangeWatchedFiles(),
+				env.DiagnosticAtRegexp("modb/b/b.go", "x"),
 			),
 		)
-		env.ApplyQuickFixes("modb/go.mod", d.Diagnostics)
-		env.Await(env.DiagnosticAtRegexp("modb/b/b.go", "x"))
 
 		// Jumping to definition should now go to b.com in the workspace.
 		if err := checkHelloLocation("modb/b/b.go"); err != nil {
@@ -1024,104 +975,6 @@
 	})
 }
 
-func TestDirectoryFiltersLoads(t *testing.T) {
-	// exclude, and its error, should be excluded from the workspace.
-	const files = `
--- go.mod --
-module example.com
-
-go 1.12
--- exclude/exclude.go --
-package exclude
-
-const _ = Nonexistant
-`
-	cfg := EditorConfig{
-		DirectoryFilters: []string{"-exclude"},
-	}
-	WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) {
-		env.Await(NoDiagnostics("exclude/x.go"))
-	})
-}
-
-func TestDirectoryFiltersTransitiveDep(t *testing.T) {
-	// Even though exclude is excluded from the workspace, it should
-	// still be importable as a non-workspace package.
-	const files = `
--- go.mod --
-module example.com
-
-go 1.12
--- include/include.go --
-package include
-import "example.com/exclude"
-
-const _ = exclude.X
--- exclude/exclude.go --
-package exclude
-
-const _ = Nonexistant // should be ignored, since this is a non-workspace package
-const X = 1
-`
-
-	cfg := EditorConfig{
-		DirectoryFilters: []string{"-exclude"},
-	}
-	WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) {
-		env.Await(
-			NoDiagnostics("exclude/exclude.go"), // filtered out
-			NoDiagnostics("include/include.go"), // successfully builds
-		)
-	})
-}
-
-func TestDirectoryFiltersWorkspaceModules(t *testing.T) {
-	// Define a module include.com which should be in the workspace, plus a
-	// module exclude.com which should be excluded and therefore come from
-	// the proxy.
-	const files = `
--- include/go.mod --
-module include.com
-
-go 1.12
-
-require exclude.com v1.0.0
-
--- include/go.sum --
-exclude.com v1.0.0 h1:Q5QSfDXY5qyNCBeUiWovUGqcLCRZKoTs9XdBeVz+w1I=
-exclude.com v1.0.0/go.mod h1:hFox2uDlNB2s2Jfd9tHlQVfgqUiLVTmh6ZKat4cvnj4=
-
--- include/include.go --
-package include
-
-import "exclude.com"
-
-var _ = exclude.X // satisfied only by the workspace version
--- exclude/go.mod --
-module exclude.com
-
-go 1.12
--- exclude/exclude.go --
-package exclude
-
-const X = 1
-`
-	const proxy = `
--- exclude.com@v1.0.0/go.mod --
-module exclude.com
-
-go 1.12
--- exclude.com@v1.0.0/exclude.go --
-package exclude
-`
-	cfg := EditorConfig{
-		DirectoryFilters: []string{"-exclude"},
-	}
-	WithOptions(cfg, Modes(Experimental), ProxyFiles(proxy)).Run(t, files, func(t *testing.T, env *Env) {
-		env.Await(env.DiagnosticAtRegexp("include/include.go", `exclude.(X)`))
-	})
-}
-
 // Confirm that a fix for a tidy module will correct all modules in the
 // workspace.
 func TestMultiModule_OneBrokenModule(t *testing.T) {
@@ -1204,10 +1057,8 @@
 package main
 `
 	WithOptions(
-		EditorConfig{Env: map[string]string{
-			"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath"),
-		}},
-		Modes(Singleton),
+		EnvVars{"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath")},
+		Modes(Default),
 	).Run(t, mod, func(t *testing.T, env *Env) {
 		env.Await(
 			// Confirm that the build configuration is seen as valid,
@@ -1238,7 +1089,7 @@
 func main() {}
 `
 	WithOptions(
-		Modes(Singleton),
+		Modes(Default),
 	).Run(t, nomod, func(t *testing.T, env *Env) {
 		env.OpenFile("a/main.go")
 		env.OpenFile("b/main.go")
diff --git a/gopls/internal/vulncheck/command.go b/gopls/internal/vulncheck/command.go
index 53bf0f0..60d582c 100644
--- a/gopls/internal/vulncheck/command.go
+++ b/gopls/internal/vulncheck/command.go
@@ -26,9 +26,9 @@
 	Govulncheck = govulncheck
 }
 
-func govulncheck(ctx context.Context, cfg *packages.Config, args command.VulncheckArgs) (res command.VulncheckResult, _ error) {
-	if args.Pattern == "" {
-		args.Pattern = "."
+func govulncheck(ctx context.Context, cfg *packages.Config, patterns string) (res command.VulncheckResult, _ error) {
+	if patterns == "" {
+		patterns = "."
 	}
 
 	dbClient, err := client.NewClient(findGOVULNDB(cfg), client.Options{HTTPCache: gvc.DefaultCache()})
@@ -37,7 +37,7 @@
 	}
 
 	c := cmd{Client: dbClient}
-	vulns, err := c.Run(ctx, cfg, args.Pattern)
+	vulns, err := c.Run(ctx, cfg, patterns)
 	if err != nil {
 		return res, err
 	}
@@ -70,27 +70,30 @@
 
 // Run runs the govulncheck after loading packages using the provided packages.Config.
 func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) (_ []Vuln, err error) {
+	logger := log.New(log.Default().Writer(), "", 0)
 	cfg.Mode |= packages.NeedModule | packages.NeedName | packages.NeedFiles |
 		packages.NeedCompiledGoFiles | packages.NeedImports | packages.NeedTypes |
 		packages.NeedTypesSizes | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedDeps
 
-	log.Println("loading packages...")
+	logger.Println("loading packages...")
 	loadedPkgs, err := gvc.LoadPackages(cfg, patterns...)
 	if err != nil {
-		log.Printf("package load failed: %v", err)
+		logger.Printf("package load failed: %v", err)
 		return nil, err
 	}
-	log.Printf("loaded %d packages\n", len(loadedPkgs))
 
-	log.Printf("analyzing %d packages...\n", len(loadedPkgs))
+	logger.Printf("analyzing %d packages...\n", len(loadedPkgs))
 
-	r, err := vulncheck.Source(ctx, loadedPkgs, &vulncheck.Config{Client: c.Client})
+	r, err := vulncheck.Source(ctx, loadedPkgs, &vulncheck.Config{Client: c.Client, SourceGoVersion: goVersion()})
 	if err != nil {
 		return nil, err
 	}
+
+	logger.Printf("selecting affecting vulnerabilities from %d findings...\n", len(r.Vulns))
 	unaffectedMods := filterUnaffected(r.Vulns)
 	r.Vulns = filterCalled(r)
 
+	logger.Printf("found %d vulnerabilities.\n", len(r.Vulns))
 	callInfo := gvc.GetCallInfo(r, loadedPkgs)
 	return toVulns(callInfo, unaffectedMods)
 	// TODO: add import graphs.
diff --git a/gopls/internal/vulncheck/command_test.go b/gopls/internal/vulncheck/command_test.go
index f6e2d1b..71eaf4a 100644
--- a/gopls/internal/vulncheck/command_test.go
+++ b/gopls/internal/vulncheck/command_test.go
@@ -293,7 +293,7 @@
 		t.Fatal(err)
 	}
 
-	cache := cache.New(nil)
+	cache := cache.New(nil, nil, nil)
 	session := cache.NewSession(ctx)
 	options := source.DefaultOptions().Clone()
 	tests.DefaultOptions(options)
@@ -309,8 +309,13 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer release()
-	defer view.Shutdown(ctx)
+
+	defer func() {
+		// The snapshot must be released before calling view.Shutdown, to avoid a
+		// deadlock.
+		release()
+		view.Shutdown(ctx)
+	}()
 
 	test(ctx, snapshot)
 }
diff --git a/gopls/internal/vulncheck/util.go b/gopls/internal/vulncheck/util.go
index c329461..05332d3 100644
--- a/gopls/internal/vulncheck/util.go
+++ b/gopls/internal/vulncheck/util.go
@@ -8,8 +8,11 @@
 package vulncheck
 
 import (
+	"bytes"
 	"fmt"
 	"go/token"
+	"os"
+	"os/exec"
 
 	gvc "golang.org/x/tools/gopls/internal/govulncheck"
 	"golang.org/x/tools/internal/lsp/protocol"
@@ -80,3 +83,16 @@
 	}
 	return p
 }
+
+func goVersion() string {
+	if v := os.Getenv("GOVERSION"); v != "" {
+		// Unlikely to happen in practice, mostly used for testing.
+		return v
+	}
+	out, err := exec.Command("go", "env", "GOVERSION").Output()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to determine go version; skipping stdlib scanning: %v\n", err)
+		return ""
+	}
+	return string(bytes.TrimSpace(out))
+}
diff --git a/gopls/internal/vulncheck/vulncheck.go b/gopls/internal/vulncheck/vulncheck.go
index 2c4d0d2..7fc05ae 100644
--- a/gopls/internal/vulncheck/vulncheck.go
+++ b/gopls/internal/vulncheck/vulncheck.go
@@ -18,6 +18,6 @@
 
 // Govulncheck runs the in-process govulncheck implementation.
 // With go1.18+, this is swapped with the real implementation.
-var Govulncheck = func(ctx context.Context, cfg *packages.Config, args command.VulncheckArgs) (res command.VulncheckResult, _ error) {
+var Govulncheck = func(ctx context.Context, cfg *packages.Config, patterns string) (res command.VulncheckResult, _ error) {
 	return res, errors.New("not implemented")
 }
diff --git a/internal/analysisinternal/analysis.go b/internal/analysisinternal/analysis.go
index 3f1e573..e32152a 100644
--- a/internal/analysisinternal/analysis.go
+++ b/internal/analysisinternal/analysis.go
@@ -12,8 +12,6 @@
 	"go/token"
 	"go/types"
 	"strconv"
-
-	"golang.org/x/tools/internal/lsp/fuzzy"
 )
 
 // Flag to gate diagnostics for fuzz tests in 1.18.
@@ -397,30 +395,3 @@
 	}
 	return types.AssignableTo(want, got)
 }
-
-// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the
-// given pattern. We return the identifier whose name is most similar to the pattern.
-func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr {
-	fuzz := fuzzy.NewMatcher(pattern)
-	var bestFuzz ast.Expr
-	highScore := float32(0) // minimum score is 0 (no match)
-	for _, ident := range idents {
-		// TODO: Improve scoring algorithm.
-		score := fuzz.Score(ident.Name)
-		if score > highScore {
-			highScore = score
-			bestFuzz = ident
-		} else if score == 0 {
-			// Order matters in the fuzzy matching algorithm. If we find no match
-			// when matching the target to the identifier, try matching the identifier
-			// to the target.
-			revFuzz := fuzzy.NewMatcher(ident.Name)
-			revScore := revFuzz.Score(pattern)
-			if revScore > highScore {
-				highScore = revScore
-				bestFuzz = ident
-			}
-		}
-	}
-	return bestFuzz
-}
diff --git a/internal/imports/fix.go b/internal/imports/fix.go
index 9e373d6..45a492e 100644
--- a/internal/imports/fix.go
+++ b/internal/imports/fix.go
@@ -807,6 +807,11 @@
 	ModFlag    string
 	ModFile    string
 
+	// SkipPathInScan returns true if the path should be skipped from scans of
+	// the RootCurrentModule root type. The function argument is a clean,
+	// absolute path.
+	SkipPathInScan func(string) bool
+
 	// Env overrides the OS environment, and can be used to specify
 	// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
 	// exec.Command will not honor it.
diff --git a/internal/imports/mod.go b/internal/imports/mod.go
index 46693f2..dec388b 100644
--- a/internal/imports/mod.go
+++ b/internal/imports/mod.go
@@ -466,6 +466,16 @@
 	// We assume cached directories are fully cached, including all their
 	// children, and have not changed. We can skip them.
 	skip := func(root gopathwalk.Root, dir string) bool {
+		if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule {
+			if root.Path == dir {
+				return false
+			}
+
+			if r.env.SkipPathInScan(filepath.Clean(dir)) {
+				return true
+			}
+		}
+
 		info, ok := r.cacheLoad(dir)
 		if !ok {
 			return false
diff --git a/internal/jsonrpc2/servertest/servertest.go b/internal/jsonrpc2/servertest/servertest.go
index b879ebd..37f8475 100644
--- a/internal/jsonrpc2/servertest/servertest.go
+++ b/internal/jsonrpc2/servertest/servertest.go
@@ -50,7 +50,7 @@
 
 // Connect dials the test server and returns a jsonrpc2 Connection that is
 // ready for use.
-func (s *TCPServer) Connect(ctx context.Context) jsonrpc2.Conn {
+func (s *TCPServer) Connect(_ context.Context) jsonrpc2.Conn {
 	netConn, err := net.Dial("tcp", s.Addr)
 	if err != nil {
 		panic(fmt.Sprintf("servertest: failed to connect to test instance: %v", err))
diff --git a/internal/lsp/analysis/fillreturns/fillreturns.go b/internal/lsp/analysis/fillreturns/fillreturns.go
index 72fe65d..705ae12 100644
--- a/internal/lsp/analysis/fillreturns/fillreturns.go
+++ b/internal/lsp/analysis/fillreturns/fillreturns.go
@@ -19,6 +19,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/lsp/fuzzy"
 	"golang.org/x/tools/internal/typeparams"
 )
 
@@ -112,7 +113,7 @@
 				break
 			}
 		}
-		if enclosingFunc == nil {
+		if enclosingFunc == nil || enclosingFunc.Results == nil {
 			continue
 		}
 
@@ -191,7 +192,7 @@
 				// Find the identifier whose name is most similar to the return type.
 				// If we do not find any identifier that matches the pattern,
 				// generate a zero value.
-				value := analysisinternal.FindBestMatch(retTyp.String(), idents)
+				value := fuzzy.FindBestMatch(retTyp.String(), idents)
 				if value == nil {
 					value = analysisinternal.ZeroValue(file, pass.Pkg, retTyp)
 				}
diff --git a/internal/lsp/analysis/fillstruct/fillstruct.go b/internal/lsp/analysis/fillstruct/fillstruct.go
index f160d44..2c0084f 100644
--- a/internal/lsp/analysis/fillstruct/fillstruct.go
+++ b/internal/lsp/analysis/fillstruct/fillstruct.go
@@ -21,6 +21,7 @@
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/lsp/fuzzy"
 	"golang.org/x/tools/internal/span"
 	"golang.org/x/tools/internal/typeparams"
 )
@@ -67,14 +68,6 @@
 			return
 		}
 
-		// Ignore types that have type parameters for now.
-		// TODO: support type params.
-		if typ, ok := typ.(*types.Named); ok {
-			if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 {
-				return
-			}
-		}
-
 		// Find reference to the type declaration of the struct being initialized.
 		for {
 			p, ok := typ.Underlying().(*types.Pointer)
@@ -254,7 +247,7 @@
 			// Find the identifier whose name is most similar to the name of the field's key.
 			// If we do not find any identifier that matches the pattern, generate a new value.
 			// NOTE: We currently match on the name of the field key rather than the field type.
-			value := analysisinternal.FindBestMatch(obj.Field(i).Name(), idents)
+			value := fuzzy.FindBestMatch(obj.Field(i).Name(), idents)
 			if value == nil {
 				value = populateValue(file, pkg, fieldTyp)
 			}
diff --git a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
index 9029061..7972bd3 100644
--- a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
+++ b/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go
@@ -12,18 +12,16 @@
 	foo T
 }
 
-var _ = basicStruct[int]{}
-
-type fooType[T any] T
+var _ = basicStruct[int]{} // want ""
 
 type twoArgStruct[F, B any] struct {
-	foo fooType[F]
-	bar fooType[B]
+	foo F
+	bar B
 }
 
-var _ = twoArgStruct[string, int]{}
+var _ = twoArgStruct[string, int]{} // want ""
 
-var _ = twoArgStruct[int, string]{
+var _ = twoArgStruct[int, string]{ // want ""
 	bar: "bar",
 }
 
diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go
new file mode 100644
index 0000000..eccfe14
--- /dev/null
+++ b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go
@@ -0,0 +1,74 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+	"fmt"
+	"os"
+)
+
+type A struct {
+	b int
+}
+
+func singleAssignment() {
+	v := "s" // want `v declared but not used`
+
+	s := []int{ // want `s declared but not used`
+		1,
+		2,
+	}
+
+	a := func(s string) bool { // want `a declared but not used`
+		return false
+	}
+
+	if 1 == 1 {
+		s := "v" // want `s declared but not used`
+	}
+
+	panic("I should survive")
+}
+
+func noOtherStmtsInBlock() {
+	v := "s" // want `v declared but not used`
+}
+
+func partOfMultiAssignment() {
+	f, err := os.Open("file") // want `f declared but not used`
+	panic(err)
+}
+
+func sideEffects(cBool chan bool, cInt chan int) {
+	b := <-c            // want `b declared but not used`
+	s := fmt.Sprint("") // want `s declared but not used`
+	a := A{             // want `a declared but not used`
+		b: func() int {
+			return 1
+		}(),
+	}
+	c := A{<-cInt}          // want `c declared but not used`
+	d := fInt() + <-cInt    // want `d declared but not used`
+	e := fBool() && <-cBool // want `e declared but not used`
+	f := map[int]int{       // want `f declared but not used`
+		fInt(): <-cInt,
+	}
+	g := []int{<-cInt}       // want `g declared but not used`
+	h := func(s string) {}   // want `h declared but not used`
+	i := func(s string) {}() // want `i declared but not used`
+}
+
+func commentAbove() {
+	// v is a variable
+	v := "s" // want `v declared but not used`
+}
+
+func fBool() bool {
+	return true
+}
+
+func fInt() int {
+	return 1
+}
diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden
new file mode 100644
index 0000000..8d6e561
--- /dev/null
+++ b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden
@@ -0,0 +1,59 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+	"fmt"
+	"os"
+)
+
+type A struct {
+	b int
+}
+
+func singleAssignment() {
+	if 1 == 1 {
+	}
+
+	panic("I should survive")
+}
+
+func noOtherStmtsInBlock() {
+}
+
+func partOfMultiAssignment() {
+	_, err := os.Open("file") // want `f declared but not used`
+	panic(err)
+}
+
+func sideEffects(cBool chan bool, cInt chan int) {
+	<-c            // want `b declared but not used`
+	fmt.Sprint("") // want `s declared but not used`
+	A{             // want `a declared but not used`
+		b: func() int {
+			return 1
+		}(),
+	}
+	A{<-cInt}          // want `c declared but not used`
+	fInt() + <-cInt    // want `d declared but not used`
+	fBool() && <-cBool // want `e declared but not used`
+	map[int]int{       // want `f declared but not used`
+		fInt(): <-cInt,
+	}
+	[]int{<-cInt}       // want `g declared but not used`
+	func(s string) {}() // want `i declared but not used`
+}
+
+func commentAbove() {
+	// v is a variable
+}
+
+func fBool() bool {
+	return true
+}
+
+func fInt() int {
+	return 1
+}
diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go
new file mode 100644
index 0000000..024e49d
--- /dev/null
+++ b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decl
+
+func a() {
+	var b, c bool // want `b declared but not used`
+	panic(c)
+
+	if 1 == 1 {
+		var s string // want `s declared but not used`
+	}
+}
+
+func b() {
+	// b is a variable
+	var b bool // want `b declared but not used`
+}
+
+func c() {
+	var (
+		d string
+
+		// some comment for c
+		c bool // want `c declared but not used`
+	)
+
+	panic(d)
+}
diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden
new file mode 100644
index 0000000..a589a47
--- /dev/null
+++ b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decl
+
+func a() {
+	var c bool // want `b declared but not used`
+	panic(c)
+
+	if 1 == 1 {
+	}
+}
+
+func b() {
+	// b is a variable
+}
+
+func c() {
+	var (
+		d string
+	)
+	panic(d)
+}
diff --git a/internal/lsp/analysis/unusedvariable/unusedvariable.go b/internal/lsp/analysis/unusedvariable/unusedvariable.go
new file mode 100644
index 0000000..47564f1
--- /dev/null
+++ b/internal/lsp/analysis/unusedvariable/unusedvariable.go
@@ -0,0 +1,300 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unusedvariable defines an analyzer that checks for unused variables.
+package unusedvariable
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/format"
+	"go/token"
+	"go/types"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/ast/astutil"
+	"golang.org/x/tools/internal/analysisinternal"
+)
+
+const Doc = `check for unused variables
+
+The unusedvariable analyzer suggests fixes for unused variables errors.
+`
+
+var Analyzer = &analysis.Analyzer{
+	Name:             "unusedvariable",
+	Doc:              Doc,
+	Requires:         []*analysis.Analyzer{},
+	Run:              run,
+	RunDespiteErrors: true, // an unusedvariable diagnostic is a compile error
+}
+
+type fixesForError map[types.Error][]analysis.SuggestedFix
+
+const unusedVariableSuffix = " declared but not used"
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	for _, typeErr := range analysisinternal.GetTypeErrors(pass) {
+		if strings.HasSuffix(typeErr.Msg, unusedVariableSuffix) {
+			varName := strings.TrimSuffix(typeErr.Msg, unusedVariableSuffix)
+			err := runForError(pass, typeErr, varName)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return nil, nil
+}
+
+func runForError(pass *analysis.Pass, err types.Error, name string) error {
+	var file *ast.File
+	for _, f := range pass.Files {
+		if f.Pos() <= err.Pos && err.Pos < f.End() {
+			file = f
+			break
+		}
+	}
+	if file == nil {
+		return nil
+	}
+
+	path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos)
+	if len(path) < 2 {
+		return nil
+	}
+
+	ident, ok := path[0].(*ast.Ident)
+	if !ok || ident.Name != name {
+		return nil
+	}
+
+	diag := analysis.Diagnostic{
+		Pos:     ident.Pos(),
+		End:     ident.End(),
+		Message: err.Msg,
+	}
+
+	for i := range path {
+		switch stmt := path[i].(type) {
+		case *ast.ValueSpec:
+			// Find GenDecl to which offending ValueSpec belongs.
+			if decl, ok := path[i+1].(*ast.GenDecl); ok {
+				fixes := removeVariableFromSpec(pass, path, stmt, decl, ident)
+				// fixes may be nil
+				if len(fixes) > 0 {
+					diag.SuggestedFixes = fixes
+					pass.Report(diag)
+				}
+			}
+
+		case *ast.AssignStmt:
+			if stmt.Tok != token.DEFINE {
+				continue
+			}
+
+			containsIdent := false
+			for _, expr := range stmt.Lhs {
+				if expr == ident {
+					containsIdent = true
+				}
+			}
+			if !containsIdent {
+				continue
+			}
+
+			fixes := removeVariableFromAssignment(pass, path, stmt, ident)
+			// fixes may be nil
+			if len(fixes) > 0 {
+				diag.SuggestedFixes = fixes
+				pass.Report(diag)
+			}
+		}
+	}
+
+	return nil
+}
+
+func removeVariableFromSpec(pass *analysis.Pass, path []ast.Node, stmt *ast.ValueSpec, decl *ast.GenDecl, ident *ast.Ident) []analysis.SuggestedFix {
+	newDecl := new(ast.GenDecl)
+	*newDecl = *decl
+	newDecl.Specs = nil
+
+	for _, spec := range decl.Specs {
+		if spec != stmt {
+			newDecl.Specs = append(newDecl.Specs, spec)
+			continue
+		}
+
+		newSpec := new(ast.ValueSpec)
+		*newSpec = *stmt
+		newSpec.Names = nil
+
+		for _, n := range stmt.Names {
+			if n != ident {
+				newSpec.Names = append(newSpec.Names, n)
+			}
+		}
+
+		if len(newSpec.Names) > 0 {
+			newDecl.Specs = append(newDecl.Specs, newSpec)
+		}
+	}
+
+	// decl.End() does not include any comments, so if a comment is present we
+	// need to account for it when we delete the statement
+	end := decl.End()
+	if stmt.Comment != nil && stmt.Comment.End() > end {
+		end = stmt.Comment.End()
+	}
+
+	// There are no other specs left in the declaration, the whole statement can
+	// be deleted
+	if len(newDecl.Specs) == 0 {
+		// Find parent DeclStmt and delete it
+		for _, node := range path {
+			if declStmt, ok := node.(*ast.DeclStmt); ok {
+				return []analysis.SuggestedFix{
+					{
+						Message:   suggestedFixMessage(ident.Name),
+						TextEdits: deleteStmtFromBlock(path, declStmt),
+					},
+				}
+			}
+		}
+	}
+
+	var b bytes.Buffer
+	if err := format.Node(&b, pass.Fset, newDecl); err != nil {
+		return nil
+	}
+
+	return []analysis.SuggestedFix{
+		{
+			Message: suggestedFixMessage(ident.Name),
+			TextEdits: []analysis.TextEdit{
+				{
+					Pos: decl.Pos(),
+					// Avoid adding a new empty line
+					End:     end + 1,
+					NewText: b.Bytes(),
+				},
+			},
+		},
+	}
+}
+
+func removeVariableFromAssignment(pass *analysis.Pass, path []ast.Node, stmt *ast.AssignStmt, ident *ast.Ident) []analysis.SuggestedFix {
+	// The only variable in the assignment is unused
+	if len(stmt.Lhs) == 1 {
+		// If the LHS has only one expression then, to be valid, the
+		// assignment must have exactly one expression on the RHS
+		//
+		// RHS may have side effects, preserve RHS
+		if exprMayHaveSideEffects(stmt.Rhs[0]) {
+			// Delete until RHS
+			return []analysis.SuggestedFix{
+				{
+					Message: suggestedFixMessage(ident.Name),
+					TextEdits: []analysis.TextEdit{
+						{
+							Pos: ident.Pos(),
+							End: stmt.Rhs[0].Pos(),
+						},
+					},
+				},
+			}
+		}
+
+		// RHS does not have any side effects, delete the whole statement
+		return []analysis.SuggestedFix{
+			{
+				Message:   suggestedFixMessage(ident.Name),
+				TextEdits: deleteStmtFromBlock(path, stmt),
+			},
+		}
+	}
+
+	// Otherwise replace ident with `_`
+	return []analysis.SuggestedFix{
+		{
+			Message: suggestedFixMessage(ident.Name),
+			TextEdits: []analysis.TextEdit{
+				{
+					Pos:     ident.Pos(),
+					End:     ident.End(),
+					NewText: []byte("_"),
+				},
+			},
+		},
+	}
+}
+
+func suggestedFixMessage(name string) string {
+	return fmt.Sprintf("Remove variable %s", name)
+}
+
+func deleteStmtFromBlock(path []ast.Node, stmt ast.Stmt) []analysis.TextEdit {
+	// Find innermost enclosing BlockStmt.
+	var block *ast.BlockStmt
+	for i := range path {
+		if blockStmt, ok := path[i].(*ast.BlockStmt); ok {
+			block = blockStmt
+			break
+		}
+	}
+
+	nodeIndex := -1
+	for i, blockStmt := range block.List {
+		if blockStmt == stmt {
+			nodeIndex = i
+			break
+		}
+	}
+
+	// The statement we need to delete was not found in BlockStmt
+	if nodeIndex == -1 {
+		return nil
+	}
+
+	// Delete until the end of the block unless there is another statement after
+	// the one we are trying to delete
+	end := block.Rbrace
+	if nodeIndex < len(block.List)-1 {
+		end = block.List[nodeIndex+1].Pos()
+	}
+
+	return []analysis.TextEdit{
+		{
+			Pos: stmt.Pos(),
+			End: end,
+		},
+	}
+}
+
+// exprMayHaveSideEffects reports whether the expression may have side effects
+// (because it contains a function call or channel receive). We disregard
+// runtime panics as well written programs should not encounter them.
+func exprMayHaveSideEffects(expr ast.Expr) bool {
+	var mayHaveSideEffects bool
+	ast.Inspect(expr, func(n ast.Node) bool {
+		switch n := n.(type) {
+		case *ast.CallExpr: // possible function call
+			mayHaveSideEffects = true
+			return false
+		case *ast.UnaryExpr:
+			if n.Op == token.ARROW { // channel receive
+				mayHaveSideEffects = true
+				return false
+			}
+		case *ast.FuncLit:
+			return false // evaluating what's inside a FuncLit has no effect
+		}
+		return true
+	})
+
+	return mayHaveSideEffects
+}
diff --git a/internal/lsp/analysis/unusedvariable/unusedvariable_test.go b/internal/lsp/analysis/unusedvariable/unusedvariable_test.go
new file mode 100644
index 0000000..e6d7c02
--- /dev/null
+++ b/internal/lsp/analysis/unusedvariable/unusedvariable_test.go
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unusedvariable_test
+
+import (
+	"testing"
+
+	"golang.org/x/tools/go/analysis/analysistest"
+	"golang.org/x/tools/internal/lsp/analysis/unusedvariable"
+)
+
+func Test(t *testing.T) {
+	testdata := analysistest.TestData()
+
+	t.Run("decl", func(t *testing.T) {
+		analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "decl")
+	})
+
+	t.Run("assign", func(t *testing.T) {
+		analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "assign")
+	})
+}
diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go
index 9f7a19c..ca0e04d 100644
--- a/internal/lsp/cache/analysis.go
+++ b/internal/lsp/cache/analysis.go
@@ -10,7 +10,6 @@
 	"go/ast"
 	"go/types"
 	"reflect"
-	"sort"
 	"sync"
 
 	"golang.org/x/sync/errgroup"
@@ -24,6 +23,11 @@
 )
 
 func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) {
+	// TODO(adonovan): merge these two loops. There's no need to
+	// construct all the root action handles before beginning
+	// analysis. Operations should be concurrent (though that first
+	// requires buildPackageHandle not to be inefficient when
+	// called in parallel.)
 	var roots []*actionHandle
 	for _, a := range analyzers {
 		if !a.IsEnabled(s.view) {
@@ -54,6 +58,11 @@
 	return results, nil
 }
 
+type actionKey struct {
+	pkg      packageKey
+	analyzer *analysis.Analyzer
+}
+
 type actionHandleKey source.Hash
 
 // An action represents one unit of analysis work: the application of
@@ -61,7 +70,7 @@
 // package (as different analyzers are applied, either in sequence or
 // parallel), and across packages (as dependencies are analyzed).
 type actionHandle struct {
-	handle *memoize.Handle
+	promise *memoize.Promise
 
 	analyzer *analysis.Analyzer
 	pkg      *pkg
@@ -86,27 +95,41 @@
 }
 
 func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.Analyzer) (*actionHandle, error) {
-	ph, err := s.buildPackageHandle(ctx, id, source.ParseFull)
-	if err != nil {
-		return nil, err
-	}
-	act := s.getActionHandle(id, ph.mode, a)
-	if act != nil {
-		return act, nil
-	}
-	if len(ph.key) == 0 {
-		return nil, fmt.Errorf("actionHandle: no key for package %s", id)
-	}
-	pkg, err := ph.check(ctx, s)
-	if err != nil {
-		return nil, err
-	}
-	act = &actionHandle{
+	const mode = source.ParseFull
+	key := actionKey{
+		pkg:      packageKey{id: id, mode: mode},
 		analyzer: a,
-		pkg:      pkg,
 	}
+
+	s.mu.Lock()
+	entry, hit := s.actions.Get(key)
+	s.mu.Unlock()
+
+	if hit {
+		return entry.(*actionHandle), nil
+	}
+
+	// TODO(adonovan): opt: this block of code sequentially loads a package
+	// (and all its dependencies), then sequentially creates action handles
+	// for the direct dependencies (whose packages have by then been loaded
+	// as a consequence of ph.check) which does a sequential recursion
+	// down the action graph. Only once all that work is complete do we
+	// put a handle in the cache. As with buildPackageHandle, this does
+	// not exploit the natural parallelism in the problem, and the naive
+	// use of concurrency would lead to an exponential amount of duplicated
+	// work. We should instead use an atomically updated future cache
+	// and a parallel graph traversal.
+	ph, err := s.buildPackageHandle(ctx, id, mode)
+	if err != nil {
+		return nil, err
+	}
+	pkg, err := ph.await(ctx, s)
+	if err != nil {
+		return nil, err
+	}
+
+	// Add a dependency on each required analyzer.
 	var deps []*actionHandle
-	// Add a dependency on each required analyzers.
 	for _, req := range a.Requires {
 		reqActionHandle, err := s.actionHandle(ctx, id, req)
 		if err != nil {
@@ -122,13 +145,8 @@
 		// An analysis that consumes/produces facts
 		// must run on the package's dependencies too.
 		if len(a.FactTypes) > 0 {
-			importIDs := make([]string, 0, len(ph.m.Deps))
 			for _, importID := range ph.m.Deps {
-				importIDs = append(importIDs, string(importID))
-			}
-			sort.Strings(importIDs) // for determinism
-			for _, importID := range importIDs {
-				depActionHandle, err := s.actionHandle(ctx, PackageID(importID), a)
+				depActionHandle, err := s.actionHandle(ctx, importID, a)
 				if err != nil {
 					return nil, err
 				}
@@ -137,7 +155,7 @@
 		}
 	}
 
-	h := s.generation.Bind(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} {
+	promise, release := s.store.Promise(buildActionKey(a, ph), func(ctx context.Context, arg interface{}) interface{} {
 		snapshot := arg.(*snapshot)
 		// Analyze dependencies first.
 		results, err := execAll(ctx, snapshot, deps)
@@ -147,15 +165,30 @@
 			}
 		}
 		return runAnalysis(ctx, snapshot, a, pkg, results)
-	}, nil)
-	act.handle = h
+	})
 
-	act = s.addActionHandle(act)
-	return act, nil
+	ah := &actionHandle{
+		analyzer: a,
+		pkg:      pkg,
+		promise:  promise,
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// Check cache again in case another thread got there first.
+	if result, ok := s.actions.Get(key); ok {
+		release()
+		return result.(*actionHandle), nil
+	}
+
+	s.actions.Set(key, ah, func(_, _ interface{}) { release() })
+
+	return ah, nil
 }
 
 func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Diagnostic, interface{}, error) {
-	d, err := act.handle.Get(ctx, snapshot.generation, snapshot)
+	d, err := snapshot.awaitPromise(ctx, act.promise)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -185,7 +218,7 @@
 	for _, act := range actions {
 		act := act
 		g.Go(func() error {
-			v, err := act.handle.Get(ctx, snapshot.generation, snapshot)
+			v, err := snapshot.awaitPromise(ctx, act.promise)
 			if err != nil {
 				return err
 			}
diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go
index 3640272..c002850 100644
--- a/internal/lsp/cache/cache.go
+++ b/internal/lsp/cache/cache.go
@@ -28,23 +28,46 @@
 	"golang.org/x/tools/internal/span"
 )
 
-func New(options func(*source.Options)) *Cache {
+// New creates a new cache for gopls operation results, using the given file
+// set, shared store, and session options.
+//
+// All of the fset, store and options may be nil, but if store is non-nil so
+// must be fset (and they must always be used together), otherwise it may be
+// possible to get cached data referencing token.Pos values not mapped by the
+// FileSet.
+func New(fset *token.FileSet, store *memoize.Store, options func(*source.Options)) *Cache {
 	index := atomic.AddInt64(&cacheIndex, 1)
+
+	if store != nil && fset == nil {
+		panic("non-nil store with nil fset")
+	}
+	if fset == nil {
+		fset = token.NewFileSet()
+	}
+	if store == nil {
+		store = &memoize.Store{}
+	}
+
 	c := &Cache{
 		id:          strconv.FormatInt(index, 10),
-		fset:        token.NewFileSet(),
+		fset:        fset,
 		options:     options,
+		store:       store,
 		fileContent: map[span.URI]*fileHandle{},
 	}
 	return c
 }
 
 type Cache struct {
-	id      string
-	fset    *token.FileSet
+	id   string
+	fset *token.FileSet
+
+	// TODO(rfindley): it doesn't make sense that cache accepts LSP options, just
+	// so that it can create a session: the cache does not (and should not)
+	// depend on options. Invert this relationship to remove options from Cache.
 	options func(*source.Options)
 
-	store memoize.Store
+	store *memoize.Store
 
 	fileMu      sync.Mutex
 	fileContent map[span.URI]*fileHandle
@@ -101,7 +124,7 @@
 		return fh, nil
 	}
 
-	fh, err := readFile(ctx, uri, fi)
+	fh, err := readFile(ctx, uri, fi) // ~25us
 	if err != nil {
 		return nil, err
 	}
@@ -126,7 +149,7 @@
 	_ = ctx
 	defer done()
 
-	data, err := ioutil.ReadFile(uri.Filename())
+	data, err := ioutil.ReadFile(uri.Filename()) // ~20us
 	if err != nil {
 		return &fileHandle{
 			modTime: fi.ModTime(),
@@ -199,7 +222,7 @@
 	c.store.DebugOnlyIterate(func(k, v interface{}) {
 		switch k.(type) {
 		case packageHandleKey:
-			v := v.(*packageData)
+			v := v.(typeCheckResult)
 			if v.pkg == nil {
 				break
 			}
diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go
index f09fc29..6beee1f 100644
--- a/internal/lsp/cache/check.go
+++ b/internal/lsp/cache/check.go
@@ -14,14 +14,15 @@
 	"path"
 	"path/filepath"
 	"regexp"
-	"sort"
 	"strings"
 	"sync"
 
 	"golang.org/x/mod/module"
+	"golang.org/x/sync/errgroup"
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/lsp/bug"
 	"golang.org/x/tools/internal/lsp/debug/tag"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
@@ -32,145 +33,75 @@
 	"golang.org/x/tools/internal/typesinternal"
 )
 
+// A packageKey identifies a packageHandle in the snapshot.packages map.
+type packageKey struct {
+	mode source.ParseMode
+	id   PackageID
+}
+
 type packageHandleKey source.Hash
 
+// A packageHandle is a handle to the future result of type-checking a package.
+// The resulting package is obtained from the await() method.
 type packageHandle struct {
-	handle *memoize.Handle
-
-	goFiles, compiledGoFiles []*parseGoHandle
-
-	// mode is the mode the files were parsed in.
-	mode source.ParseMode
+	promise *memoize.Promise // [typeCheckResult]
 
 	// m is the metadata associated with the package.
 	m *KnownMetadata
 
 	// key is the hashed key for the package.
+	//
+	// It includes all the bits of the transitive closure of
+	// dependencies' sources. This is more than type checking
+	// really depends on: export data of direct deps should be
+	// enough. (The key for analysis actions could similarly
+	// hash only Facts of direct dependencies.)
 	key packageHandleKey
 }
 
-func (ph *packageHandle) packageKey() packageKey {
-	return packageKey{
-		id:   ph.m.ID,
-		mode: ph.mode,
-	}
-}
-
-func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) {
-	for _, pgh := range ph.goFiles {
-		f, err := s.ParseGo(ctx, pgh.file, source.ParseHeader)
-		if err != nil {
-			continue
-		}
-		seen := map[string]struct{}{}
-		for _, impSpec := range f.File.Imports {
-			imp := strings.Trim(impSpec.Path.Value, `"`)
-			if _, ok := seen[imp]; !ok {
-				seen[imp] = struct{}{}
-				result = append(result, imp)
-			}
-		}
-	}
-
-	sort.Strings(result)
-	return result
-}
-
-// packageData contains the data produced by type-checking a package.
-type packageData struct {
+// typeCheckResult contains the result of a call to
+// typeCheckImpl, which type-checks a package.
+type typeCheckResult struct {
 	pkg *pkg
 	err error
 }
 
-// buildPackageHandle returns a packageHandle for a given package and mode.
+// buildPackageHandle returns a handle for the future results of
+// type-checking the package identified by id in the given mode.
 // It assumes that the given ID already has metadata available, so it does not
 // attempt to reload missing or invalid metadata. The caller must reload
 // metadata if needed.
 func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) {
-	if ph := s.getPackage(id, mode); ph != nil {
-		return ph, nil
-	}
+	packageKey := packageKey{id: id, mode: mode}
 
-	// Build the packageHandle for this ID and its dependencies.
-	ph, deps, err := s.buildKey(ctx, id, mode)
-	if err != nil {
-		return nil, err
-	}
+	s.mu.Lock()
+	entry, hit := s.packages.Get(packageKey)
+	m := s.meta.metadata[id]
+	s.mu.Unlock()
 
-	// Do not close over the packageHandle or the snapshot in the Bind function.
-	// This creates a cycle, which causes the finalizers to never run on the handles.
-	// The possible cycles are:
-	//
-	//     packageHandle.h.function -> packageHandle
-	//     packageHandle.h.function -> snapshot -> packageHandle
-	//
-
-	m := ph.m
-	key := ph.key
-
-	h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
-		snapshot := arg.(*snapshot)
-
-		// Begin loading the direct dependencies, in parallel.
-		var wg sync.WaitGroup
-		for _, dep := range deps {
-			wg.Add(1)
-			go func(dep *packageHandle) {
-				dep.check(ctx, snapshot)
-				wg.Done()
-			}(dep)
-		}
-
-		data := &packageData{}
-		data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
-		// Make sure that the workers above have finished before we return,
-		// especially in case of cancellation.
-		wg.Wait()
-
-		return data
-	}, nil)
-	ph.handle = h
-
-	// Cache the handle in the snapshot. If a package handle has already
-	// been cached, addPackage will return the cached value. This is fine,
-	// since the original package handle above will have no references and be
-	// garbage collected.
-	ph = s.addPackageHandle(ph)
-
-	return ph, nil
-}
-
-// buildKey computes the key for a given packageHandle.
-func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, map[PackagePath]*packageHandle, error) {
-	m := s.getMetadata(id)
 	if m == nil {
-		return nil, nil, fmt.Errorf("no metadata for %s", id)
+		return nil, fmt.Errorf("no metadata for %s", id)
 	}
-	goFiles, err := s.parseGoHandles(ctx, m.GoFiles, mode)
-	if err != nil {
-		return nil, nil, err
-	}
-	compiledGoFiles, err := s.parseGoHandles(ctx, m.CompiledGoFiles, mode)
-	if err != nil {
-		return nil, nil, err
-	}
-	ph := &packageHandle{
-		m:               m,
-		goFiles:         goFiles,
-		compiledGoFiles: compiledGoFiles,
-		mode:            mode,
-	}
-	// Make sure all of the depList are sorted.
-	depList := append([]PackageID{}, m.Deps...)
-	sort.Slice(depList, func(i, j int) bool {
-		return depList[i] < depList[j]
-	})
 
-	deps := make(map[PackagePath]*packageHandle)
+	if hit {
+		return entry.(*packageHandle), nil
+	}
 
 	// Begin computing the key by getting the depKeys for all dependencies.
-	var depKeys []packageHandleKey
-	for _, depID := range depList {
+	// This requires reading the transitive closure of dependencies' source files.
+	//
+	// It is tempting to parallelize the recursion here, but
+	// without de-duplication of subtasks this would lead to an
+	// exponential amount of work, and computing the key is
+	// expensive as it reads all the source files transitively.
+	// Notably, we don't update the s.packages cache until the
+	// entire key has been computed.
+	// TODO(adonovan): use a promise cache to ensure that the key
+	// for each package is computed by at most one thread, then do
+	// the recursive key building of dependencies in parallel.
+	deps := make(map[PackagePath]*packageHandle)
+	depKeys := make([]packageHandleKey, len(m.Deps))
+	for i, depID := range m.Deps {
 		depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
 		// Don't use invalid metadata for dependencies if the top-level
 		// metadata is valid. We only load top-level packages, so if the
@@ -182,20 +113,98 @@
 				event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id))
 			}
 
+			// This check ensures we break out of the slow
+			// buildPackageHandle recursion quickly when
+			// context cancelation is detected within GetFile.
 			if ctx.Err() != nil {
-				return nil, nil, ctx.Err()
+				return nil, ctx.Err() // cancelled
 			}
-			// One bad dependency should not prevent us from checking the entire package.
-			// Add a special key to mark a bad dependency.
-			depKeys = append(depKeys, packageHandleKey(source.Hashf("%s import not found", depID)))
+
+			// One bad dependency should not prevent us from
+			// checking the entire package. Leave depKeys[i] unset.
 			continue
 		}
+
 		deps[depHandle.m.PkgPath] = depHandle
-		depKeys = append(depKeys, depHandle.key)
+		depKeys[i] = depHandle.key
 	}
+
+	// Read both lists of files of this package, in parallel.
+	//
+	// goFiles aren't presented to the type checker--nor
+	// are they included in the key, unsoundly--but their
+	// syntax trees are available from (*pkg).File(URI).
+	// TODO(adonovan): consider parsing them on demand?
+	// The need should be rare.
+	goFiles, compiledGoFiles, err := readGoFiles(ctx, s, m.Metadata)
+	if err != nil {
+		return nil, err
+	}
+
+	// All the file reading has now been done.
+	// Create a handle for the result of type checking.
 	experimentalKey := s.View().Options().ExperimentalPackageCacheKey
-	ph.key = checkPackageKey(ph.m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey)
-	return ph, deps, nil
+	phKey := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey)
+	promise, release := s.store.Promise(phKey, func(ctx context.Context, arg interface{}) interface{} {
+
+		pkg, err := typeCheckImpl(ctx, arg.(*snapshot), goFiles, compiledGoFiles, m.Metadata, mode, deps)
+		return typeCheckResult{pkg, err}
+	})
+
+	ph := &packageHandle{
+		promise: promise,
+		m:       m,
+		key:     phKey,
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// Check that the metadata has not changed
+	// (which should invalidate this handle).
+	//
+	// (In future, handles should form a graph with edges from a
+	// packageHandle to the handles for parsing its files and the
+	// handles for type-checking its immediate deps, at which
+	// point there will be no need to even access s.meta.)
+	if s.meta.metadata[ph.m.ID].Metadata != ph.m.Metadata {
+		return nil, fmt.Errorf("stale metadata for %s", ph.m.ID)
+	}
+
+	// Check cache again in case another thread got there first.
+	if prev, ok := s.packages.Get(packageKey); ok {
+		prevPH := prev.(*packageHandle)
+		release()
+		if prevPH.m.Metadata != ph.m.Metadata {
+			return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID)
+		}
+		return prevPH, nil
+	}
+
+	// Update the map.
+	s.packages.Set(packageKey, ph, func(_, _ interface{}) { release() })
+
+	return ph, nil
+}
+
+// readGoFiles reads the content of Metadata.GoFiles and
+// Metadata.CompiledGoFiles, in parallel.
+func readGoFiles(ctx context.Context, s *snapshot, m *Metadata) (goFiles, compiledGoFiles []source.FileHandle, err error) {
+	var group errgroup.Group
+	getFileHandles := func(files []span.URI) []source.FileHandle {
+		fhs := make([]source.FileHandle, len(files))
+		for i, uri := range files {
+			i, uri := i, uri
+			group.Go(func() (err error) {
+				fhs[i], err = s.GetFile(ctx, uri) // ~25us
+				return
+			})
+		}
+		return fhs
+	}
+	return getFileHandles(m.GoFiles),
+		getFileHandles(m.CompiledGoFiles),
+		group.Wait()
 }
 
 func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode {
@@ -208,13 +217,16 @@
 	if s.view.Options().MemoryMode == source.ModeNormal {
 		return source.ParseFull
 	}
-	if s.isActiveLocked(id, nil) {
+	if s.isActiveLocked(id) {
 		return source.ParseFull
 	}
 	return source.ParseExported
 }
 
-func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
+// computePackageKey returns a key representing the act of type checking
+// a package named id containing the specified files, metadata, and
+// dependency hashes.
+func computePackageKey(id PackageID, files []source.FileHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
 	// TODO(adonovan): opt: no need to materalize the bytes; hash them directly.
 	// Also, use field separators to avoid spurious collisions.
 	b := bytes.NewBuffer(nil)
@@ -234,21 +246,22 @@
 	for _, dep := range deps {
 		b.Write(dep[:])
 	}
-	for _, cgf := range pghs {
-		b.WriteString(cgf.file.FileIdentity().String())
+	for _, file := range files {
+		b.WriteString(file.FileIdentity().String())
+	}
+	// Metadata errors are interpreted and memoized on the computed package, so
+	// we must hash them into the key here.
+	//
+	// TODO(rfindley): handle metadata diagnostics independently from
+	// type-checking diagnostics.
+	for _, err := range m.Errors {
+		b.WriteString(err.Msg)
+		b.WriteString(err.Pos)
+		b.WriteRune(rune(err.Kind))
 	}
 	return packageHandleKey(source.HashOf(b.Bytes()))
 }
 
-// hashEnv returns a hash of the snapshot's configuration.
-func hashEnv(s *snapshot) source.Hash {
-	s.view.optionsMu.Lock()
-	env := s.view.options.EnvSlice()
-	s.view.optionsMu.Unlock()
-
-	return source.Hashf("%s", env)
-}
-
 // hashConfig returns the hash for the *packages.Config.
 func hashConfig(config *packages.Config) source.Hash {
 	// TODO(adonovan): opt: don't materialize the bytes; hash them directly.
@@ -268,16 +281,13 @@
 	return source.HashOf(b.Bytes())
 }
 
-func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) {
-	return ph.check(ctx, s.(*snapshot))
-}
-
-func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) {
-	v, err := ph.handle.Get(ctx, s.generation, s)
+// await waits for typeCheckImpl to complete and returns its result.
+func (ph *packageHandle) await(ctx context.Context, s *snapshot) (*pkg, error) {
+	v, err := s.awaitPromise(ctx, ph.promise)
 	if err != nil {
 		return nil, err
 	}
-	data := v.(*packageData)
+	data := v.(typeCheckResult)
 	return data.pkg, data.err
 }
 
@@ -289,33 +299,44 @@
 	return string(ph.m.ID)
 }
 
-func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) {
-	v := ph.handle.Cached(g)
+func (ph *packageHandle) cached() (*pkg, error) {
+	v := ph.promise.Cached()
 	if v == nil {
 		return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath)
 	}
-	data := v.(*packageData)
+	data := v.(typeCheckResult)
 	return data.pkg, data.err
 }
 
-func (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]*parseGoHandle, error) {
-	pghs := make([]*parseGoHandle, 0, len(files))
-	for _, uri := range files {
-		fh, err := s.GetFile(ctx, uri)
-		if err != nil {
-			return nil, err
-		}
-		pghs = append(pghs, s.parseGoHandle(ctx, fh, mode))
+// typeCheckImpl type checks the parsed source files in compiledGoFiles.
+// (The resulting pkg also holds the parsed but not type-checked goFiles.)
+// deps holds the future results of type-checking the direct dependencies.
+func typeCheckImpl(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) {
+	// Start type checking of direct dependencies,
+	// in parallel and asynchronously.
+	// As the type checker imports each of these
+	// packages, it will wait for its completion.
+	var wg sync.WaitGroup
+	for _, dep := range deps {
+		wg.Add(1)
+		go func(dep *packageHandle) {
+			dep.await(ctx, snapshot) // ignore result
+			wg.Done()
+		}(dep)
 	}
-	return pghs, nil
-}
+	// The 'defer' below is unusual but intentional:
+	// it is not necessary that each call to dep.await
+	// complete before type checking begins, as the type
+	// checker will wait for those it needs. But they do
+	// need to complete before this function returns and
+	// the snapshot is possibly destroyed.
+	defer wg.Wait()
 
-func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) {
 	var filter *unexportedFilter
 	if mode == source.ParseExported {
 		filter = &unexportedFilter{uses: map[string]bool{}}
 	}
-	pkg, err := doTypeCheck(ctx, snapshot, m, mode, deps, filter)
+	pkg, err := doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, filter)
 	if err != nil {
 		return nil, err
 	}
@@ -327,15 +348,16 @@
 		missing, unexpected := filter.ProcessErrors(pkg.typeErrors)
 		if len(unexpected) == 0 && len(missing) != 0 {
 			event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.ID)))
-			pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter)
+			pkg, err = doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, filter)
 			if err != nil {
 				return nil, err
 			}
 			missing, unexpected = filter.ProcessErrors(pkg.typeErrors)
 		}
 		if len(unexpected) != 0 || len(missing) != 0 {
+			// TODO(rfindley): remove this distracting log
 			event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.ID)))
-			pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil)
+			pkg, err = doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, nil)
 			if err != nil {
 				return nil, err
 			}
@@ -427,7 +449,7 @@
 
 var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
 
-func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
+func doTypeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
 	ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID)))
 	defer done()
 
@@ -448,19 +470,19 @@
 	}
 	typeparams.InitInstanceInfo(pkg.typesInfo)
 
-	for _, gf := range pkg.m.GoFiles {
-		// In the presence of line directives, we may need to report errors in
-		// non-compiled Go files, so we need to register them on the package.
-		// However, we only need to really parse them in ParseFull mode, when
-		// the user might actually be looking at the file.
-		fh, err := snapshot.GetFile(ctx, gf)
-		if err != nil {
-			return nil, err
-		}
-		goMode := source.ParseFull
-		if mode != source.ParseFull {
-			goMode = source.ParseHeader
-		}
+	// In the presence of line directives, we may need to report errors in
+	// non-compiled Go files, so we need to register them on the package.
+	// However, we only need to really parse them in ParseFull mode, when
+	// the user might actually be looking at the file.
+	goMode := source.ParseFull
+	if mode != source.ParseFull {
+		goMode = source.ParseHeader
+	}
+
+	// Parse the GoFiles. (These aren't presented to the type
+	// checker but are part of the returned pkg.)
+	// TODO(adonovan): opt: parallelize parsing.
+	for _, fh := range goFiles {
 		pgf, err := snapshot.ParseGo(ctx, fh, goMode)
 		if err != nil {
 			return nil, err
@@ -468,7 +490,8 @@
 		pkg.goFiles = append(pkg.goFiles, pgf)
 	}
 
-	if err := parseCompiledGoFiles(ctx, snapshot, mode, pkg, astFilter); err != nil {
+	// Parse the CompiledGoFiles: those seen by the compiler/typechecker.
+	if err := parseCompiledGoFiles(ctx, compiledGoFiles, snapshot, mode, pkg, astFilter); err != nil {
 		return nil, err
 	}
 
@@ -514,7 +537,7 @@
 			if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) {
 				return nil, fmt.Errorf("invalid use of internal package %s", pkgPath)
 			}
-			depPkg, err := dep.check(ctx, snapshot)
+			depPkg, err := dep.await(ctx, snapshot)
 			if err != nil {
 				return nil, err
 			}
@@ -549,7 +572,7 @@
 	}
 
 	// Type checking errors are handled via the config, so ignore them here.
-	_ = check.Files(files)
+	_ = check.Files(files) // 50us-15ms, depending on size of package
 
 	// If the context was cancelled, we may have returned a ton of transient
 	// errors to the type checker. Swallow them.
@@ -559,22 +582,17 @@
 	return pkg, nil
 }
 
-func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
-	for _, cgf := range pkg.m.CompiledGoFiles {
-		fh, err := snapshot.GetFile(ctx, cgf)
-		if err != nil {
-			return err
-		}
-
+func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHandle, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
+	// TODO(adonovan): opt: parallelize this loop, which takes 1-25ms.
+	for _, fh := range compiledGoFiles {
 		var pgf *source.ParsedGoFile
-		var fixed bool
+		var err error
 		// Only parse Full through the cache -- we need to own Exported ASTs
 		// to prune them.
 		if mode == source.ParseFull {
-			pgf, fixed, err = snapshot.parseGo(ctx, fh, mode)
+			pgf, err = snapshot.ParseGo(ctx, fh, mode)
 		} else {
-			d := parseGo(ctx, snapshot.FileSet(), fh, mode)
-			pgf, fixed, err = d.parsed, d.fixed, d.err
+			pgf, err = parseGoImpl(ctx, snapshot.FileSet(), fh, mode) // ~20us/KB
 		}
 		if err != nil {
 			return err
@@ -585,22 +603,26 @@
 		}
 		// If we have fixed parse errors in any of the files, we should hide type
 		// errors, as they may be completely nonsensical.
-		pkg.hasFixedFiles = pkg.hasFixedFiles || fixed
+		pkg.hasFixedFiles = pkg.hasFixedFiles || pgf.Fixed
 	}
-	if mode != source.ParseExported {
-		return nil
-	}
-	if astFilter != nil {
-		var files []*ast.File
-		for _, cgf := range pkg.compiledGoFiles {
-			files = append(files, cgf.File)
-		}
-		astFilter.Filter(files)
-	} else {
-		for _, cgf := range pkg.compiledGoFiles {
-			trimAST(cgf.File)
+
+	// Optionally remove parts that don't affect the exported API.
+	if mode == source.ParseExported {
+		if astFilter != nil {
+			// aggressive pruning based on reachability
+			var files []*ast.File
+			for _, cgf := range pkg.compiledGoFiles {
+				files = append(files, cgf.File)
+			}
+			astFilter.Filter(files)
+		} else {
+			// simple trimming of function bodies
+			for _, cgf := range pkg.compiledGoFiles {
+				trimAST(cgf.File)
+			}
 		}
 	}
+
 	return nil
 }
 
@@ -660,7 +682,7 @@
 			}
 
 			for _, imp := range allImports[item] {
-				rng, err := source.NewMappedRange(s.FileSet(), imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range()
+				rng, err := source.NewMappedRange(imp.cgf.Tok, imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range()
 				if err != nil {
 					return nil, err
 				}
diff --git a/internal/lsp/cache/errors.go b/internal/lsp/cache/errors.go
index 342f2be..a1aecb3 100644
--- a/internal/lsp/cache/errors.go
+++ b/internal/lsp/cache/errors.go
@@ -16,6 +16,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/lsp/bug"
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
@@ -82,7 +83,7 @@
 		return nil, err
 	}
 	pos := pgf.Tok.Pos(e.Pos.Offset)
-	spn, err := span.NewRange(snapshot.FileSet(), pos, pos).Span()
+	spn, err := span.NewRange(pgf.Tok, pos, pos).Span()
 	if err != nil {
 		return nil, err
 	}
@@ -196,8 +197,15 @@
 			break
 		}
 	}
-
-	spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
+	tokFile := snapshot.FileSet().File(e.Pos)
+	if tokFile == nil {
+		return nil, bug.Errorf("no file for position of %q diagnostic", e.Category)
+	}
+	end := e.End
+	if !end.IsValid() {
+		end = e.Pos
+	}
+	spn, err := span.NewRange(tokFile, e.Pos, end).Span()
 	if err != nil {
 		return nil, err
 	}
@@ -282,7 +290,11 @@
 	for _, fix := range diag.SuggestedFixes {
 		edits := make(map[span.URI][]protocol.TextEdit)
 		for _, e := range fix.TextEdits {
-			spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span()
+			tokFile := snapshot.FileSet().File(e.Pos)
+			if tokFile == nil {
+				return nil, bug.Errorf("no file for edit position")
+			}
+			spn, err := span.NewRange(tokFile, e.Pos, e.End).Span()
 			if err != nil {
 				return nil, err
 			}
@@ -310,7 +322,11 @@
 func relatedInformation(pkg *pkg, fset *token.FileSet, diag *analysis.Diagnostic) ([]source.RelatedInformation, error) {
 	var out []source.RelatedInformation
 	for _, related := range diag.Related {
-		spn, err := span.NewRange(fset, related.Pos, related.End).Span()
+		tokFile := fset.File(related.Pos)
+		if tokFile == nil {
+			return nil, bug.Errorf("no file for %q diagnostic position", diag.Category)
+		}
+		spn, err := span.NewRange(tokFile, related.Pos, related.End).Span()
 		if err != nil {
 			return nil, err
 		}
@@ -397,7 +413,7 @@
 		// Search file imports for the import that is causing the import cycle.
 		for _, imp := range cgf.File.Imports {
 			if imp.Path.Value == circImp {
-				spn, err := span.NewRange(snapshot.FileSet(), imp.Pos(), imp.End()).Span()
+				spn, err := span.NewRange(cgf.Tok, imp.Pos(), imp.End()).Span()
 				if err != nil {
 					return msg, span.Span{}, false
 				}
diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go
index 88c9f14..c1beff8 100644
--- a/internal/lsp/cache/graph.go
+++ b/internal/lsp/cache/graph.go
@@ -24,8 +24,8 @@
 	// importedBy maps package IDs to the list of packages that import them.
 	importedBy map[PackageID][]PackageID
 
-	// ids maps file URIs to package IDs. A single file may belong to multiple
-	// packages due to tests packages.
+	// ids maps file URIs to package IDs, sorted by (!valid, cli, packageID).
+	// A single file may belong to multiple packages due to test packages.
 	ids map[span.URI][]PackageID
 }
 
@@ -89,21 +89,21 @@
 	// 4: an invalid command-line-arguments package
 	for uri, ids := range g.ids {
 		sort.Slice(ids, func(i, j int) bool {
-			// Sort valid packages first.
+			// 1. valid packages appear earlier.
 			validi := g.metadata[ids[i]].Valid
 			validj := g.metadata[ids[j]].Valid
 			if validi != validj {
 				return validi
 			}
 
+			// 2. command-line-args packages appear later.
 			cli := source.IsCommandLineArguments(string(ids[i]))
 			clj := source.IsCommandLineArguments(string(ids[j]))
-			if cli && !clj {
-				return false
+			if cli != clj {
+				return clj
 			}
-			if !cli && clj {
-				return true
-			}
+
+			// 3. packages appear in name order.
 			return ids[i] < ids[j]
 		})
 
diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go
index f333f70..6510bbd 100644
--- a/internal/lsp/cache/imports.go
+++ b/internal/lsp/cache/imports.go
@@ -7,6 +7,7 @@
 import (
 	"context"
 	"fmt"
+	"os"
 	"reflect"
 	"strings"
 	"sync"
@@ -22,13 +23,14 @@
 type importsState struct {
 	ctx context.Context
 
-	mu                   sync.Mutex
-	processEnv           *imports.ProcessEnv
-	cleanupProcessEnv    func()
-	cacheRefreshDuration time.Duration
-	cacheRefreshTimer    *time.Timer
-	cachedModFileHash    source.Hash
-	cachedBuildFlags     []string
+	mu                     sync.Mutex
+	processEnv             *imports.ProcessEnv
+	cleanupProcessEnv      func()
+	cacheRefreshDuration   time.Duration
+	cacheRefreshTimer      *time.Timer
+	cachedModFileHash      source.Hash
+	cachedBuildFlags       []string
+	cachedDirectoryFilters []string
 }
 
 func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot, fn func(*imports.Options) error) error {
@@ -69,9 +71,11 @@
 	snapshot.view.optionsMu.Lock()
 	localPrefix := snapshot.view.options.Local
 	currentBuildFlags := snapshot.view.options.BuildFlags
+	currentDirectoryFilters := snapshot.view.options.DirectoryFilters
 	changed := !reflect.DeepEqual(currentBuildFlags, s.cachedBuildFlags) ||
 		snapshot.view.options.VerboseOutput != (s.processEnv.Logf != nil) ||
-		modFileHash != s.cachedModFileHash
+		modFileHash != s.cachedModFileHash ||
+		!reflect.DeepEqual(snapshot.view.options.DirectoryFilters, s.cachedDirectoryFilters)
 	snapshot.view.optionsMu.Unlock()
 
 	// If anything relevant to imports has changed, clear caches and
@@ -91,6 +95,7 @@
 		}
 		s.cachedModFileHash = modFileHash
 		s.cachedBuildFlags = currentBuildFlags
+		s.cachedDirectoryFilters = currentDirectoryFilters
 		var err error
 		s.cleanupProcessEnv, err = s.populateProcessEnv(ctx, snapshot)
 		if err != nil {
@@ -141,20 +146,21 @@
 		pe.Logf = nil
 	}
 
-	// Take an extra reference to the snapshot so that its workspace directory
-	// (if any) isn't destroyed while we're using it.
-	release := snapshot.generation.Acquire()
+	// Extract invocation details from the snapshot to use with goimports.
+	//
+	// TODO(rfindley): refactor to extract the necessary invocation logic into
+	// separate functions. Using goCommandInvocation is unnecessarily indirect,
+	// and has led to memory leaks in the past, when the snapshot was
+	// unintentionally held past its lifetime.
 	_, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{
 		WorkingDir: snapshot.view.rootURI.Filename(),
 	})
 	if err != nil {
 		return nil, err
 	}
-	pe.WorkingDir = inv.WorkingDir
+
 	pe.BuildFlags = inv.BuildFlags
-	pe.WorkingDir = inv.WorkingDir
-	pe.ModFile = inv.ModFile
-	pe.ModFlag = inv.ModFlag
+	pe.ModFlag = "readonly" // processEnv operations should not mutate the modfile
 	pe.Env = map[string]string{}
 	for _, kv := range inv.Env {
 		split := strings.SplitN(kv, "=", 2)
@@ -163,11 +169,31 @@
 		}
 		pe.Env[split[0]] = split[1]
 	}
+	// We don't actually use the invocation, so clean it up now.
+	cleanupInvocation()
 
-	return func() {
-		cleanupInvocation()
-		release()
-	}, nil
+	// If the snapshot uses a synthetic workspace directory, create a copy for
+	// the lifecycle of the importsState.
+	//
+	// Notably, we cannot use the snapshot invocation working directory, as that
+	// is tied to the lifecycle of the snapshot.
+	//
+	// Otherwise return a no-op cleanup function.
+	cleanup = func() {}
+	if snapshot.usesWorkspaceDir() {
+		tmpDir, err := makeWorkspaceDir(ctx, snapshot.workspace, snapshot)
+		if err != nil {
+			return nil, err
+		}
+		pe.WorkingDir = tmpDir
+		cleanup = func() {
+			os.RemoveAll(tmpDir) // ignore error
+		}
+	} else {
+		pe.WorkingDir = snapshot.view.rootURI.Filename()
+	}
+
+	return cleanup, nil
 }
 
 func (s *importsState) refreshProcessEnv() {
diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go
index da0b246..0952fc6 100644
--- a/internal/lsp/cache/load.go
+++ b/internal/lsp/cache/load.go
@@ -7,8 +7,6 @@
 import (
 	"bytes"
 	"context"
-	"crypto/sha256"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -24,7 +22,6 @@
 	"golang.org/x/tools/internal/lsp/debug/tag"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
-	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/packagesinternal"
 	"golang.org/x/tools/internal/span"
 )
@@ -43,22 +40,10 @@
 	var query []string
 	var containsDir bool // for logging
 
-	// Unless the context was canceled, set "shouldLoad" to false for all
-	// of the metadata we attempted to load.
-	defer func() {
-		if errors.Is(err, context.Canceled) {
-			return
-		}
-		s.clearShouldLoad(scopes...)
-	}()
-
 	// Keep track of module query -> module path so that we can later correlate query
 	// errors with errors.
 	moduleQueries := make(map[string]string)
 	for _, scope := range scopes {
-		if !s.shouldLoad(scope) {
-			continue
-		}
 		switch scope := scope.(type) {
 		case PackagePath:
 			if source.IsCommandLineArguments(string(scope)) {
@@ -156,7 +141,8 @@
 	}
 
 	moduleErrs := make(map[string][]packages.Error) // module path -> errors
-	updates := make(map[PackageID]*KnownMetadata)
+	filterer := buildFilterer(s.view.rootURI.Filename(), s.view.gomodcache, s.view.Options())
+	newMetadata := make(map[PackageID]*KnownMetadata)
 	for _, pkg := range pkgs {
 		// The Go command returns synthetic list results for module queries that
 		// encountered module errors.
@@ -198,39 +184,56 @@
 		}
 		// Skip filtered packages. They may be added anyway if they're
 		// dependencies of non-filtered packages.
-		if s.view.allFilesExcluded(pkg) {
+		//
+		// TODO(rfindley): why exclude metadata arbitrarily here? It should be safe
+		// to capture all metadata.
+		if s.view.allFilesExcluded(pkg, filterer) {
 			continue
 		}
-		// TODO: once metadata is immutable, we shouldn't have to lock here.
-		s.mu.Lock()
-		err := computeMetadataUpdates(ctx, s.meta, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil)
-		s.mu.Unlock()
-		if err != nil {
+		if err := buildMetadata(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, newMetadata, nil); err != nil {
 			return err
 		}
 	}
 
-	var loadedIDs []PackageID
-	for id := range updates {
-		loadedIDs = append(loadedIDs, id)
+	s.mu.Lock()
+
+	// Only update metadata where we don't already have valid metadata.
+	//
+	// We want to preserve an invariant that s.packages.Get(id).m.Metadata
+	// matches s.meta.metadata[id].Metadata. By avoiding overwriting valid
+	// metadata, we minimize the amount of invalidation required to preserve this
+	// invariant.
+	//
+	// TODO(rfindley): perform a sanity check that metadata matches here. If not,
+	// we have an invalidation bug elsewhere.
+	updates := make(map[PackageID]*KnownMetadata)
+	var updatedIDs []PackageID
+	for _, m := range newMetadata {
+		if existing := s.meta.metadata[m.ID]; existing == nil || !existing.Valid {
+			updates[m.ID] = m
+			updatedIDs = append(updatedIDs, m.ID)
+		}
 	}
 
 	event.Log(ctx, fmt.Sprintf("%s: updating metadata for %d packages", eventName, len(updates)))
 
-	s.mu.Lock()
+	// Invalidate the reverse transitive closure of packages that have changed.
+	//
+	// Note that the original metadata is being invalidated here, so we use the
+	// original metadata graph to compute the reverse closure.
+	invalidatedPackages := s.meta.reverseTransitiveClosure(true, updatedIDs...)
 
-	// invalidate the reverse transitive closure of packages that have changed.
-	invalidatedPackages := s.meta.reverseTransitiveClosure(true, loadedIDs...)
 	s.meta = s.meta.Clone(updates)
+	s.resetIsActivePackageLocked()
 
 	// Invalidate any packages we may have associated with this metadata.
 	//
 	// TODO(rfindley): this should not be necessary, as we should have already
 	// invalidated in snapshot.clone.
 	for id := range invalidatedPackages {
-		for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} {
+		for _, mode := range source.AllParseModes {
 			key := packageKey{mode, id}
-			delete(s.packages, key)
+			s.packages.Delete(key)
 		}
 	}
 
@@ -307,8 +310,8 @@
 Improvements to this workflow will be coming soon, and you can learn more here:
 https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`
 		return &source.CriticalError{
-			MainError: fmt.Errorf(msg),
-			DiagList:  s.applyCriticalErrorToFiles(ctx, msg, openFiles),
+			MainError:   fmt.Errorf(msg),
+			Diagnostics: s.applyCriticalErrorToFiles(ctx, msg, openFiles),
 		}
 	}
 
@@ -346,7 +349,7 @@
 				MainError: fmt.Errorf(`You are working in a nested module.
 Please open it as a separate workspace folder. Learn more:
 https://github.com/golang/tools/blob/master/gopls/doc/workspace.md.`),
-				DiagList: srcDiags,
+				Diagnostics: srcDiags,
 			}
 		}
 	}
@@ -361,7 +364,7 @@
 		switch s.view.FileKind(fh) {
 		case source.Go:
 			if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil {
-				pkgDecl := span.NewRange(s.FileSet(), pgf.File.Package, pgf.File.Name.End())
+				pkgDecl := span.NewRange(pgf.Tok, pgf.File.Package, pgf.File.Name.End())
 				if spn, err := pkgDecl.Span(); err == nil {
 					rng, _ = pgf.Mapper.Range(spn)
 				}
@@ -384,83 +387,59 @@
 	return srcDiags
 }
 
-type workspaceDirKey string
-
-type workspaceDirData struct {
-	dir string
-	err error
-}
-
-// getWorkspaceDir gets the URI for the workspace directory associated with
-// this snapshot. The workspace directory is a temp directory containing the
-// go.mod file computed from all active modules.
+// getWorkspaceDir returns the URI for the workspace directory
+// associated with this snapshot. The workspace directory is a
+// temporary directory containing the go.mod file computed from all
+// active modules.
 func (s *snapshot) getWorkspaceDir(ctx context.Context) (span.URI, error) {
 	s.mu.Lock()
-	h := s.workspaceDirHandle
+	dir, err := s.workspaceDir, s.workspaceDirErr
 	s.mu.Unlock()
-	if h != nil {
-		return getWorkspaceDir(ctx, h, s.generation)
+	if dir == "" && err == nil { // cache miss
+		dir, err = makeWorkspaceDir(ctx, s.workspace, s)
+		s.mu.Lock()
+		s.workspaceDir, s.workspaceDirErr = dir, err
+		s.mu.Unlock()
 	}
-	file, err := s.workspace.modFile(ctx, s)
+	return span.URIFromPath(dir), err
+}
+
+// makeWorkspaceDir creates a temporary directory containing a go.mod
+// and go.sum file for each module in the workspace.
+// Note: snapshot's mutex must be unlocked for it to satisfy FileSource.
+func makeWorkspaceDir(ctx context.Context, workspace *workspace, fs source.FileSource) (string, error) {
+	file, err := workspace.modFile(ctx, fs)
 	if err != nil {
 		return "", err
 	}
-	hash := sha256.New()
 	modContent, err := file.Format()
 	if err != nil {
 		return "", err
 	}
-	sumContent, err := s.workspace.sumFile(ctx, s)
+	sumContent, err := workspace.sumFile(ctx, fs)
 	if err != nil {
 		return "", err
 	}
-	hash.Write(modContent)
-	hash.Write(sumContent)
-	key := workspaceDirKey(hash.Sum(nil))
-	s.mu.Lock()
-	h = s.generation.Bind(key, func(context.Context, memoize.Arg) interface{} {
-		tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod")
-		if err != nil {
-			return &workspaceDirData{err: err}
-		}
-
-		for name, content := range map[string][]byte{
-			"go.mod": modContent,
-			"go.sum": sumContent,
-		} {
-			filename := filepath.Join(tmpdir, name)
-			if err := ioutil.WriteFile(filename, content, 0644); err != nil {
-				os.RemoveAll(tmpdir)
-				return &workspaceDirData{err: err}
-			}
-		}
-
-		return &workspaceDirData{dir: tmpdir}
-	}, func(v interface{}) {
-		d := v.(*workspaceDirData)
-		if d.dir != "" {
-			if err := os.RemoveAll(d.dir); err != nil {
-				event.Error(context.Background(), "cleaning workspace dir", err)
-			}
-		}
-	})
-	s.workspaceDirHandle = h
-	s.mu.Unlock()
-	return getWorkspaceDir(ctx, h, s.generation)
-}
-
-func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generation) (span.URI, error) {
-	v, err := h.Get(ctx, g, nil)
+	tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod")
 	if err != nil {
 		return "", err
 	}
-	return span.URIFromPath(v.(*workspaceDirData).dir), nil
+	for name, content := range map[string][]byte{
+		"go.mod": modContent,
+		"go.sum": sumContent,
+	} {
+		if err := ioutil.WriteFile(filepath.Join(tmpdir, name), content, 0644); err != nil {
+			os.RemoveAll(tmpdir) // ignore error
+			return "", err
+		}
+	}
+	return tmpdir, nil
 }
 
-// computeMetadataUpdates populates the updates map with metadata updates to
+// buildMetadata populates the updates map with metadata updates to
 // apply, based on the given pkg. It recurs through pkg.Imports to ensure that
 // metadata exists for all dependencies.
-func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error {
+func buildMetadata(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error {
 	id := PackageID(pkg.ID)
 	if source.IsCommandLineArguments(pkg.ID) {
 		suffix := ":" + strings.Join(query, ",")
@@ -468,21 +447,12 @@
 		pkgPath = PackagePath(string(pkgPath) + suffix)
 	}
 
-	// If we have valid metadata for this package, don't update. This minimizes
-	// the amount of subsequent invalidation.
-	//
-	// TODO(rfindley): perform a sanity check that metadata matches here. If not,
-	// we have an invalidation bug elsewhere.
-	if existing := g.metadata[id]; existing != nil && existing.Valid {
-		return nil
-	}
-
 	if _, ok := updates[id]; ok {
 		// If we've already seen this dependency, there may be an import cycle, or
 		// we may have reached the same package transitively via distinct paths.
 		// Check the path to confirm.
 
-		// TODO(rfindley): this doesn't look right. Any single piece of new
+		// TODO(rfindley): this doesn't look sufficient. Any single piece of new
 		// metadata could theoretically introduce import cycles in the metadata
 		// graph. What's the point of this limited check here (and is it even
 		// possible to get an import cycle in data from go/packages)? Consider
@@ -561,10 +531,11 @@
 			m.MissingDeps[importPkgPath] = struct{}{}
 			continue
 		}
-		if err := computeMetadataUpdates(ctx, g, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil {
+		if err := buildMetadata(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil {
 			event.Error(ctx, "error in dependency", err)
 		}
 	}
+	sort.Slice(m.Deps, func(i, j int) bool { return m.Deps[i] < m.Deps[j] }) // for determinism
 
 	return nil
 }
@@ -596,10 +567,11 @@
 			uris[uri] = struct{}{}
 		}
 
+		filterFunc := s.view.filterFunc()
 		for uri := range uris {
 			// Don't use view.contains here. go.work files may include modules
 			// outside of the workspace folder.
-			if !strings.Contains(string(uri), "/vendor/") && !s.view.filters(uri) {
+			if !strings.Contains(string(uri), "/vendor/") && !filterFunc(uri) {
 				return true
 			}
 		}
@@ -663,17 +635,13 @@
 func computeWorkspacePackagesLocked(s *snapshot, meta *metadataGraph) map[PackageID]PackagePath {
 	workspacePackages := make(map[PackageID]PackagePath)
 	for _, m := range meta.metadata {
-		if !containsPackageLocked(s, m.Metadata) {
+		// Don't consider invalid packages to be workspace packages. Doing so can
+		// result in type-checking and diagnosing packages that no longer exist,
+		// which can lead to memory leaks and confusing errors.
+		if !m.Valid {
 			continue
 		}
-		if m.PkgFilesChanged {
-			// If a package name has changed, it's possible that the package no
-			// longer exists. Leaving it as a workspace package can result in
-			// persistent stale diagnostics.
-			//
-			// If there are still valid files in the package, it will be reloaded.
-			//
-			// There may be more precise heuristics.
+		if !containsPackageLocked(s, m.Metadata) {
 			continue
 		}
 
diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go
index 91b0e77..eef9188 100644
--- a/internal/lsp/cache/maps.go
+++ b/internal/lsp/cache/maps.go
@@ -16,11 +16,14 @@
 	impl *persistent.Map
 }
 
+// uriLessInterface is the < relation for "any" values containing span.URIs.
+func uriLessInterface(a, b interface{}) bool {
+	return a.(span.URI) < b.(span.URI)
+}
+
 func newFilesMap() filesMap {
 	return filesMap{
-		impl: persistent.NewMap(func(a, b interface{}) bool {
-			return a.(span.URI) < b.(span.URI)
-		}),
+		impl: persistent.NewMap(uriLessInterface),
 	}
 }
 
@@ -56,16 +59,8 @@
 	m.impl.Delete(key)
 }
 
-type goFilesMap struct {
-	impl *persistent.Map
-}
-
-func newGoFilesMap() goFilesMap {
-	return goFilesMap{
-		impl: persistent.NewMap(func(a, b interface{}) bool {
-			return parseKeyLess(a.(parseKey), b.(parseKey))
-		}),
-	}
+func parseKeyLessInterface(a, b interface{}) bool {
+	return parseKeyLess(a.(parseKey), b.(parseKey))
 }
 
 func parseKeyLess(a, b parseKey) bool {
@@ -78,38 +73,38 @@
 	return a.file.URI < b.file.URI
 }
 
-func (m goFilesMap) Clone() goFilesMap {
-	return goFilesMap{
+type isActivePackageCacheMap struct {
+	impl *persistent.Map
+}
+
+func newIsActivePackageCacheMap() isActivePackageCacheMap {
+	return isActivePackageCacheMap{
+		impl: persistent.NewMap(func(a, b interface{}) bool {
+			return a.(PackageID) < b.(PackageID)
+		}),
+	}
+}
+
+func (m isActivePackageCacheMap) Clone() isActivePackageCacheMap {
+	return isActivePackageCacheMap{
 		impl: m.impl.Clone(),
 	}
 }
 
-func (m goFilesMap) Destroy() {
+func (m isActivePackageCacheMap) Destroy() {
 	m.impl.Destroy()
 }
 
-func (m goFilesMap) Get(key parseKey) (*parseGoHandle, bool) {
+func (m isActivePackageCacheMap) Get(key PackageID) (bool, bool) {
 	value, ok := m.impl.Get(key)
 	if !ok {
-		return nil, false
+		return false, false
 	}
-	return value.(*parseGoHandle), true
+	return value.(bool), true
 }
 
-func (m goFilesMap) Range(do func(key parseKey, value *parseGoHandle)) {
-	m.impl.Range(func(key, value interface{}) {
-		do(key.(parseKey), value.(*parseGoHandle))
-	})
-}
-
-func (m goFilesMap) Set(key parseKey, value *parseGoHandle, release func()) {
-	m.impl.Set(key, value, func(key, value interface{}) {
-		release()
-	})
-}
-
-func (m goFilesMap) Delete(key parseKey) {
-	m.impl.Delete(key)
+func (m isActivePackageCacheMap) Set(key PackageID, value bool) {
+	m.impl.Set(key, value, nil)
 }
 
 type parseKeysByURIMap struct {
@@ -118,9 +113,7 @@
 
 func newParseKeysByURIMap() parseKeysByURIMap {
 	return parseKeysByURIMap{
-		impl: persistent.NewMap(func(a, b interface{}) bool {
-			return a.(span.URI) < b.(span.URI)
-		}),
+		impl: persistent.NewMap(uriLessInterface),
 	}
 }
 
@@ -155,3 +148,69 @@
 func (m parseKeysByURIMap) Delete(key span.URI) {
 	m.impl.Delete(key)
 }
+
+func packageKeyLessInterface(x, y interface{}) bool {
+	return packageKeyLess(x.(packageKey), y.(packageKey))
+}
+
+func packageKeyLess(x, y packageKey) bool {
+	if x.mode != y.mode {
+		return x.mode < y.mode
+	}
+	return x.id < y.id
+}
+
+type knownDirsSet struct {
+	impl *persistent.Map
+}
+
+func newKnownDirsSet() knownDirsSet {
+	return knownDirsSet{
+		impl: persistent.NewMap(func(a, b interface{}) bool {
+			return a.(span.URI) < b.(span.URI)
+		}),
+	}
+}
+
+func (s knownDirsSet) Clone() knownDirsSet {
+	return knownDirsSet{
+		impl: s.impl.Clone(),
+	}
+}
+
+func (s knownDirsSet) Destroy() {
+	s.impl.Destroy()
+}
+
+func (s knownDirsSet) Contains(key span.URI) bool {
+	_, ok := s.impl.Get(key)
+	return ok
+}
+
+func (s knownDirsSet) Range(do func(key span.URI)) {
+	s.impl.Range(func(key, value interface{}) {
+		do(key.(span.URI))
+	})
+}
+
+func (s knownDirsSet) SetAll(other knownDirsSet) {
+	s.impl.SetAll(other.impl)
+}
+
+func (s knownDirsSet) Insert(key span.URI) {
+	s.impl.Set(key, nil, nil)
+}
+
+func (s knownDirsSet) Remove(key span.URI) {
+	s.impl.Delete(key)
+}
+
+// actionKeyLessInterface is the less-than relation for actionKey
+// values wrapped in an interface.
+func actionKeyLessInterface(a, b interface{}) bool {
+	x, y := a.(actionKey), b.(actionKey)
+	if x.analyzer.Name != y.analyzer.Name {
+		return x.analyzer.Name < y.analyzer.Name
+	}
+	return packageKeyLess(x.pkg, y.pkg)
+}
diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go
index b4da713..668d082 100644
--- a/internal/lsp/cache/metadata.go
+++ b/internal/lsp/cache/metadata.go
@@ -29,10 +29,10 @@
 	Name            PackageName
 	GoFiles         []span.URI
 	CompiledGoFiles []span.URI
-	ForTest         PackagePath
+	ForTest         PackagePath // package path under test, or ""
 	TypesSizes      types.Sizes
 	Errors          []packages.Error
-	Deps            []PackageID
+	Deps            []PackageID // direct dependencies, in string order
 	MissingDeps     map[PackagePath]struct{}
 	Module          *packages.Module
 	depsErrors      []*packagesinternal.PackageError
@@ -91,15 +91,4 @@
 	// Valid is true if the given metadata is Valid.
 	// Invalid metadata can still be used if a metadata reload fails.
 	Valid bool
-
-	// PkgFilesChanged reports whether the file set of this metadata has
-	// potentially changed.
-	PkgFilesChanged bool
-
-	// ShouldLoad is true if the given metadata should be reloaded.
-	//
-	// Note that ShouldLoad is different from !Valid: when we try to load a
-	// package, we mark ShouldLoad = false regardless of whether the load
-	// succeeded, to prevent endless loads.
-	ShouldLoad bool
 }
diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go
index c076f42..57fa1e2 100644
--- a/internal/lsp/cache/mod.go
+++ b/internal/lsp/cache/mod.go
@@ -24,152 +24,156 @@
 	"golang.org/x/tools/internal/span"
 )
 
-type parseModHandle struct {
-	handle *memoize.Handle
-}
+// ParseMod parses a go.mod file, using a cache. It may return partial results and an error.
+func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) {
+	uri := fh.URI()
 
-type parseModData struct {
-	parsed *source.ParsedModule
+	s.mu.Lock()
+	entry, hit := s.parseModHandles.Get(uri)
+	s.mu.Unlock()
 
-	// err is any error encountered while parsing the file.
-	err error
-}
+	type parseModResult struct {
+		parsed *source.ParsedModule
+		err    error
+	}
 
-func (mh *parseModHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedModule, error) {
-	v, err := mh.handle.Get(ctx, snapshot.generation, snapshot)
+	// cache miss?
+	if !hit {
+		promise, release := s.store.Promise(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} {
+			parsed, err := parseModImpl(ctx, fh)
+			return parseModResult{parsed, err}
+		})
+
+		entry = promise
+		s.mu.Lock()
+		s.parseModHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+		s.mu.Unlock()
+	}
+
+	// Await result.
+	v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
 	if err != nil {
 		return nil, err
 	}
-	data := v.(*parseModData)
-	return data.parsed, data.err
+	res := v.(parseModResult)
+	return res.parsed, res.err
 }
 
-func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*source.ParsedModule, error) {
-	if handle := s.getParseModHandle(modFH.URI()); handle != nil {
-		return handle.parse(ctx, s)
-	}
-	h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} {
-		_, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI()))
-		defer done()
+// parseModImpl parses the go.mod file whose name and contents are in fh.
+// It may return partial results and an error.
+func parseModImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) {
+	_, done := event.Start(ctx, "cache.ParseMod", tag.URI.Of(fh.URI()))
+	defer done()
 
-		contents, err := modFH.Read()
-		if err != nil {
-			return &parseModData{err: err}
-		}
-		m := protocol.NewColumnMapper(modFH.URI(), contents)
-		file, parseErr := modfile.Parse(modFH.URI().Filename(), contents, nil)
-		// Attempt to convert the error to a standardized parse error.
-		var parseErrors []*source.Diagnostic
-		if parseErr != nil {
-			mfErrList, ok := parseErr.(modfile.ErrorList)
-			if !ok {
-				return &parseModData{err: fmt.Errorf("unexpected parse error type %v", parseErr)}
-			}
-			for _, mfErr := range mfErrList {
-				rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
-				if err != nil {
-					return &parseModData{err: err}
-				}
-				parseErrors = append(parseErrors, &source.Diagnostic{
-					URI:      modFH.URI(),
-					Range:    rng,
-					Severity: protocol.SeverityError,
-					Source:   source.ParseError,
-					Message:  mfErr.Err.Error(),
-				})
-			}
-		}
-		return &parseModData{
-			parsed: &source.ParsedModule{
-				URI:         modFH.URI(),
-				Mapper:      m,
-				File:        file,
-				ParseErrors: parseErrors,
-			},
-			err: parseErr,
-		}
-	}, nil)
-
-	pmh := &parseModHandle{handle: h}
-	s.mu.Lock()
-	s.parseModHandles[modFH.URI()] = pmh
-	s.mu.Unlock()
-
-	return pmh.parse(ctx, s)
-}
-
-type parseWorkHandle struct {
-	handle *memoize.Handle
-}
-
-type parseWorkData struct {
-	parsed *source.ParsedWorkFile
-
-	// err is any error encountered while parsing the file.
-	err error
-}
-
-func (mh *parseWorkHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedWorkFile, error) {
-	v, err := mh.handle.Get(ctx, snapshot.generation, snapshot)
+	contents, err := fh.Read()
 	if err != nil {
 		return nil, err
 	}
-	data := v.(*parseWorkData)
-	return data.parsed, data.err
+	m := protocol.NewColumnMapper(fh.URI(), contents)
+	file, parseErr := modfile.Parse(fh.URI().Filename(), contents, nil)
+	// Attempt to convert the error to a standardized parse error.
+	var parseErrors []*source.Diagnostic
+	if parseErr != nil {
+		mfErrList, ok := parseErr.(modfile.ErrorList)
+		if !ok {
+			return nil, fmt.Errorf("unexpected parse error type %v", parseErr)
+		}
+		for _, mfErr := range mfErrList {
+			rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
+			if err != nil {
+				return nil, err
+			}
+			parseErrors = append(parseErrors, &source.Diagnostic{
+				URI:      fh.URI(),
+				Range:    rng,
+				Severity: protocol.SeverityError,
+				Source:   source.ParseError,
+				Message:  mfErr.Err.Error(),
+			})
+		}
+	}
+	return &source.ParsedModule{
+		URI:         fh.URI(),
+		Mapper:      m,
+		File:        file,
+		ParseErrors: parseErrors,
+	}, parseErr
 }
 
-func (s *snapshot) ParseWork(ctx context.Context, modFH source.FileHandle) (*source.ParsedWorkFile, error) {
-	if handle := s.getParseWorkHandle(modFH.URI()); handle != nil {
-		return handle.parse(ctx, s)
-	}
-	h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} {
-		_, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI()))
-		defer done()
+// ParseWork parses a go.work file, using a cache. It may return partial results and an error.
+// TODO(adonovan): move to new work.go file.
+func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) {
+	uri := fh.URI()
 
-		contents, err := modFH.Read()
-		if err != nil {
-			return &parseWorkData{err: err}
-		}
-		m := protocol.NewColumnMapper(modFH.URI(), contents)
-		file, parseErr := modfile.ParseWork(modFH.URI().Filename(), contents, nil)
-		// Attempt to convert the error to a standardized parse error.
-		var parseErrors []*source.Diagnostic
-		if parseErr != nil {
-			mfErrList, ok := parseErr.(modfile.ErrorList)
-			if !ok {
-				return &parseWorkData{err: fmt.Errorf("unexpected parse error type %v", parseErr)}
-			}
-			for _, mfErr := range mfErrList {
-				rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
-				if err != nil {
-					return &parseWorkData{err: err}
-				}
-				parseErrors = append(parseErrors, &source.Diagnostic{
-					URI:      modFH.URI(),
-					Range:    rng,
-					Severity: protocol.SeverityError,
-					Source:   source.ParseError,
-					Message:  mfErr.Err.Error(),
-				})
-			}
-		}
-		return &parseWorkData{
-			parsed: &source.ParsedWorkFile{
-				URI:         modFH.URI(),
-				Mapper:      m,
-				File:        file,
-				ParseErrors: parseErrors,
-			},
-			err: parseErr,
-		}
-	}, nil)
-
-	pwh := &parseWorkHandle{handle: h}
 	s.mu.Lock()
-	s.parseWorkHandles[modFH.URI()] = pwh
+	entry, hit := s.parseWorkHandles.Get(uri)
 	s.mu.Unlock()
 
-	return pwh.parse(ctx, s)
+	type parseWorkResult struct {
+		parsed *source.ParsedWorkFile
+		err    error
+	}
+
+	// cache miss?
+	if !hit {
+		handle, release := s.store.Promise(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} {
+			parsed, err := parseWorkImpl(ctx, fh)
+			return parseWorkResult{parsed, err}
+		})
+
+		entry = handle
+		s.mu.Lock()
+		s.parseWorkHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+		s.mu.Unlock()
+	}
+
+	// Await result.
+	v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+	if err != nil {
+		return nil, err
+	}
+	res := v.(parseWorkResult)
+	return res.parsed, res.err
+}
+
+// parseWorkImpl parses a go.work file. It may return partial results and an error.
+func parseWorkImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) {
+	_, done := event.Start(ctx, "cache.ParseWork", tag.URI.Of(fh.URI()))
+	defer done()
+
+	contents, err := fh.Read()
+	if err != nil {
+		return nil, err
+	}
+	m := protocol.NewColumnMapper(fh.URI(), contents)
+	file, parseErr := modfile.ParseWork(fh.URI().Filename(), contents, nil)
+	// Attempt to convert the error to a standardized parse error.
+	var parseErrors []*source.Diagnostic
+	if parseErr != nil {
+		mfErrList, ok := parseErr.(modfile.ErrorList)
+		if !ok {
+			return nil, fmt.Errorf("unexpected parse error type %v", parseErr)
+		}
+		for _, mfErr := range mfErrList {
+			rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos)
+			if err != nil {
+				return nil, err
+			}
+			parseErrors = append(parseErrors, &source.Diagnostic{
+				URI:      fh.URI(),
+				Range:    rng,
+				Severity: protocol.SeverityError,
+				Source:   source.ParseError,
+				Message:  mfErr.Err.Error(),
+			})
+		}
+	}
+	return &source.ParsedWorkFile{
+		URI:         fh.URI(),
+		Mapper:      m,
+		File:        file,
+		ParseErrors: parseErrors,
+	}, parseErr
 }
 
 // goSum reads the go.sum file for the go.mod file at modURI, if it exists. If
@@ -198,104 +202,82 @@
 	return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum"
 }
 
-// modKey is uniquely identifies cached data for `go mod why` or dependencies
-// to upgrade.
-type modKey struct {
-	sessionID string
-	env       source.Hash
-	view      string
-	mod       source.FileIdentity
-	verb      modAction
-}
+// ModWhy returns the "go mod why" result for each module named in a
+// require statement in the go.mod file.
+// TODO(adonovan): move to new mod_why.go file.
+func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) {
+	uri := fh.URI()
 
-type modAction int
+	if s.View().FileKind(fh) != source.Mod {
+		return nil, fmt.Errorf("%s is not a go.mod file", uri)
+	}
 
-const (
-	why modAction = iota
-	upgrade
-)
+	s.mu.Lock()
+	entry, hit := s.modWhyHandles.Get(uri)
+	s.mu.Unlock()
 
-type modWhyHandle struct {
-	handle *memoize.Handle
-}
+	type modWhyResult struct {
+		why map[string]string
+		err error
+	}
 
-type modWhyData struct {
-	// why keeps track of the `go mod why` results for each require statement
-	// in the go.mod file.
-	why map[string]string
+	// cache miss?
+	if !hit {
+		handle := memoize.NewPromise("modWhy", func(ctx context.Context, arg interface{}) interface{} {
+			why, err := modWhyImpl(ctx, arg.(*snapshot), fh)
+			return modWhyResult{why, err}
+		})
 
-	err error
-}
+		entry = handle
+		s.mu.Lock()
+		s.modWhyHandles.Set(uri, entry, nil)
+		s.mu.Unlock()
+	}
 
-func (mwh *modWhyHandle) why(ctx context.Context, snapshot *snapshot) (map[string]string, error) {
-	v, err := mwh.handle.Get(ctx, snapshot.generation, snapshot)
+	// Await result.
+	v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
 	if err != nil {
 		return nil, err
 	}
-	data := v.(*modWhyData)
-	return data.why, data.err
+	res := v.(modWhyResult)
+	return res.why, res.err
 }
 
-func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) {
-	if s.View().FileKind(fh) != source.Mod {
-		return nil, fmt.Errorf("%s is not a go.mod file", fh.URI())
+// modWhyImpl returns the result of "go mod why -m" on the specified go.mod file.
+func modWhyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle) (map[string]string, error) {
+	ctx, done := event.Start(ctx, "cache.ModWhy", tag.URI.Of(fh.URI()))
+	defer done()
+
+	pm, err := snapshot.ParseMod(ctx, fh)
+	if err != nil {
+		return nil, err
 	}
-	if handle := s.getModWhyHandle(fh.URI()); handle != nil {
-		return handle.why(ctx, s)
+	// No requires to explain.
+	if len(pm.File.Require) == 0 {
+		return nil, nil // empty result
 	}
-	key := modKey{
-		sessionID: s.view.session.id,
-		env:       hashEnv(s),
-		mod:       fh.FileIdentity(),
-		view:      s.view.rootURI.Filename(),
-		verb:      why,
+	// Run `go mod why` on all the dependencies.
+	inv := &gocommand.Invocation{
+		Verb:       "mod",
+		Args:       []string{"why", "-m"},
+		WorkingDir: filepath.Dir(fh.URI().Filename()),
 	}
-	h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
-		ctx, done := event.Start(ctx, "cache.ModWhyHandle", tag.URI.Of(fh.URI()))
-		defer done()
-
-		snapshot := arg.(*snapshot)
-
-		pm, err := snapshot.ParseMod(ctx, fh)
-		if err != nil {
-			return &modWhyData{err: err}
-		}
-		// No requires to explain.
-		if len(pm.File.Require) == 0 {
-			return &modWhyData{}
-		}
-		// Run `go mod why` on all the dependencies.
-		inv := &gocommand.Invocation{
-			Verb:       "mod",
-			Args:       []string{"why", "-m"},
-			WorkingDir: filepath.Dir(fh.URI().Filename()),
-		}
-		for _, req := range pm.File.Require {
-			inv.Args = append(inv.Args, req.Mod.Path)
-		}
-		stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
-		if err != nil {
-			return &modWhyData{err: err}
-		}
-		whyList := strings.Split(stdout.String(), "\n\n")
-		if len(whyList) != len(pm.File.Require) {
-			return &modWhyData{
-				err: fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)),
-			}
-		}
-		why := make(map[string]string, len(pm.File.Require))
-		for i, req := range pm.File.Require {
-			why[req.Mod.Path] = whyList[i]
-		}
-		return &modWhyData{why: why}
-	}, nil)
-
-	mwh := &modWhyHandle{handle: h}
-	s.mu.Lock()
-	s.modWhyHandles[fh.URI()] = mwh
-	s.mu.Unlock()
-
-	return mwh.why(ctx, s)
+	for _, req := range pm.File.Require {
+		inv.Args = append(inv.Args, req.Mod.Path)
+	}
+	stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
+	if err != nil {
+		return nil, err
+	}
+	whyList := strings.Split(stdout.String(), "\n\n")
+	if len(whyList) != len(pm.File.Require) {
+		return nil, fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require))
+	}
+	why := make(map[string]string, len(pm.File.Require))
+	for i, req := range pm.File.Require {
+		why[req.Mod.Path] = whyList[i]
+	}
+	return why, nil
 }
 
 // extractGoCommandError tries to parse errors that come from the go command
diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go
index bd2ff0c..704e1a6 100644
--- a/internal/lsp/cache/mod_tidy.go
+++ b/internal/lsp/cache/mod_tidy.go
@@ -8,10 +8,10 @@
 	"context"
 	"fmt"
 	"go/ast"
+	"go/token"
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"sort"
 	"strconv"
 	"strings"
 
@@ -27,146 +27,120 @@
 	"golang.org/x/tools/internal/span"
 )
 
-type modTidyKey struct {
-	sessionID       string
-	env             source.Hash
-	gomod           source.FileIdentity
-	imports         source.Hash
-	unsavedOverlays source.Hash
-	view            string
-}
-
-type modTidyHandle struct {
-	handle *memoize.Handle
-}
-
-type modTidyData struct {
-	tidied *source.TidiedModule
-	err    error
-}
-
-func (mth *modTidyHandle) tidy(ctx context.Context, snapshot *snapshot) (*source.TidiedModule, error) {
-	v, err := mth.handle.Get(ctx, snapshot.generation, snapshot)
-	if err != nil {
-		return nil, err
-	}
-	data := v.(*modTidyData)
-	return data.tidied, data.err
-}
-
+// ModTidy returns the go.mod file that would be obtained by running
+// "go mod tidy". Concurrent requests are combined into a single command.
 func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) {
+	uri := pm.URI
 	if pm.File == nil {
-		return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", pm.URI)
-	}
-	if handle := s.getModTidyHandle(pm.URI); handle != nil {
-		return handle.tidy(ctx, s)
-	}
-	fh, err := s.GetFile(ctx, pm.URI)
-	if err != nil {
-		return nil, err
-	}
-	// If the file handle is an overlay, it may not be written to disk.
-	// The go.mod file has to be on disk for `go mod tidy` to work.
-	if _, ok := fh.(*overlay); ok {
-		if info, _ := os.Stat(fh.URI().Filename()); info == nil {
-			return nil, source.ErrNoModOnDisk
-		}
-	}
-	if criticalErr := s.GetCriticalError(ctx); criticalErr != nil {
-		return &source.TidiedModule{
-			Diagnostics: criticalErr.DiagList,
-		}, nil
-	}
-	workspacePkgs, err := s.workspacePackageHandles(ctx)
-	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", uri)
 	}
 
 	s.mu.Lock()
-	overlayHash := hashUnsavedOverlays(s.files)
+	entry, hit := s.modTidyHandles.Get(uri)
 	s.mu.Unlock()
 
-	key := modTidyKey{
-		sessionID:       s.view.session.id,
-		view:            s.view.folder.Filename(),
-		imports:         s.hashImports(ctx, workspacePkgs),
-		unsavedOverlays: overlayHash,
-		gomod:           fh.FileIdentity(),
-		env:             hashEnv(s),
+	type modTidyResult struct {
+		tidied *source.TidiedModule
+		err    error
 	}
-	h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
-		ctx, done := event.Start(ctx, "cache.ModTidyHandle", tag.URI.Of(fh.URI()))
-		defer done()
 
-		snapshot := arg.(*snapshot)
-		inv := &gocommand.Invocation{
-			Verb:       "mod",
-			Args:       []string{"tidy"},
-			WorkingDir: filepath.Dir(fh.URI().Filename()),
-		}
-		tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
+	// Cache miss?
+	if !hit {
+		// If the file handle is an overlay, it may not be written to disk.
+		// The go.mod file has to be on disk for `go mod tidy` to work.
+		// TODO(rfindley): is this still true with Go 1.16 overlay support?
+		fh, err := s.GetFile(ctx, pm.URI)
 		if err != nil {
-			return &modTidyData{err: err}
+			return nil, err
 		}
-		// Keep the temporary go.mod file around long enough to parse it.
-		defer cleanup()
-
-		if _, err := s.view.session.gocmdRunner.Run(ctx, *inv); err != nil {
-			return &modTidyData{err: err}
-		}
-		// Go directly to disk to get the temporary mod file, since it is
-		// always on disk.
-		tempContents, err := ioutil.ReadFile(tmpURI.Filename())
-		if err != nil {
-			return &modTidyData{err: err}
-		}
-		ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil)
-		if err != nil {
-			// We do not need to worry about the temporary file's parse errors
-			// since it has been "tidied".
-			return &modTidyData{err: err}
-		}
-		// Compare the original and tidied go.mod files to compute errors and
-		// suggested fixes.
-		diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs)
-		if err != nil {
-			return &modTidyData{err: err}
-		}
-		return &modTidyData{
-			tidied: &source.TidiedModule{
-				Diagnostics:   diagnostics,
-				TidiedContent: tempContents,
-			},
-		}
-	}, nil)
-
-	mth := &modTidyHandle{handle: h}
-	s.mu.Lock()
-	s.modTidyHandles[fh.URI()] = mth
-	s.mu.Unlock()
-
-	return mth.tidy(ctx, s)
-}
-
-func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) source.Hash {
-	seen := map[string]struct{}{}
-	var imports []string
-	for _, ph := range wsPackages {
-		for _, imp := range ph.imports(ctx, s) {
-			if _, ok := seen[imp]; !ok {
-				imports = append(imports, imp)
-				seen[imp] = struct{}{}
+		if _, ok := fh.(*overlay); ok {
+			if info, _ := os.Stat(uri.Filename()); info == nil {
+				return nil, source.ErrNoModOnDisk
 			}
 		}
+
+		if criticalErr := s.GetCriticalError(ctx); criticalErr != nil {
+			return &source.TidiedModule{
+				Diagnostics: criticalErr.Diagnostics,
+			}, nil
+		}
+
+		if err := s.awaitLoaded(ctx); err != nil {
+			return nil, err
+		}
+
+		handle := memoize.NewPromise("modTidy", func(ctx context.Context, arg interface{}) interface{} {
+			tidied, err := modTidyImpl(ctx, arg.(*snapshot), uri.Filename(), pm)
+			return modTidyResult{tidied, err}
+		})
+
+		entry = handle
+		s.mu.Lock()
+		s.modTidyHandles.Set(uri, entry, nil)
+		s.mu.Unlock()
 	}
-	sort.Strings(imports)
-	return source.Hashf("%s", imports)
+
+	// Await result.
+	v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+	if err != nil {
+		return nil, err
+	}
+	res := v.(modTidyResult)
+	return res.tidied, res.err
+}
+
+// modTidyImpl runs "go mod tidy" on a go.mod file.
+func modTidyImpl(ctx context.Context, snapshot *snapshot, filename string, pm *source.ParsedModule) (*source.TidiedModule, error) {
+	ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(filename))
+	defer done()
+
+	inv := &gocommand.Invocation{
+		Verb:       "mod",
+		Args:       []string{"tidy"},
+		WorkingDir: filepath.Dir(filename),
+	}
+	// TODO(adonovan): ensure that unsaved overlays are passed through to 'go'.
+	tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
+	if err != nil {
+		return nil, err
+	}
+	// Keep the temporary go.mod file around long enough to parse it.
+	defer cleanup()
+
+	if _, err := snapshot.view.session.gocmdRunner.Run(ctx, *inv); err != nil {
+		return nil, err
+	}
+
+	// Go directly to disk to get the temporary mod file,
+	// since it is always on disk.
+	tempContents, err := ioutil.ReadFile(tmpURI.Filename())
+	if err != nil {
+		return nil, err
+	}
+	ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil)
+	if err != nil {
+		// We do not need to worry about the temporary file's parse errors
+		// since it has been "tidied".
+		return nil, err
+	}
+
+	// Compare the original and tidied go.mod files to compute errors and
+	// suggested fixes.
+	diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal)
+	if err != nil {
+		return nil, err
+	}
+
+	return &source.TidiedModule{
+		Diagnostics:   diagnostics,
+		TidiedContent: tempContents,
+	}, nil
 }
 
 // modTidyDiagnostics computes the differences between the original and tidied
 // go.mod files to produce diagnostic and suggested fixes. Some diagnostics
 // may appear on the Go files that import packages from missing modules.
-func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *source.ParsedModule, ideal *modfile.File, workspacePkgs []*packageHandle) (diagnostics []*source.Diagnostic, err error) {
+func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.ParsedModule, ideal *modfile.File) (diagnostics []*source.Diagnostic, err error) {
 	// First, determine which modules are unused and which are missing from the
 	// original go.mod file.
 	var (
@@ -215,15 +189,25 @@
 	}
 	// Add diagnostics for missing modules anywhere they are imported in the
 	// workspace.
-	for _, ph := range workspacePkgs {
+	// TODO(adonovan): opt: opportunities for parallelism abound.
+	for _, id := range snapshot.workspacePackageIDs() {
+		m := snapshot.getMetadata(id)
+		if m == nil {
+			return nil, fmt.Errorf("no metadata for %s", id)
+		}
+
+		// Read both lists of files of this package, in parallel.
+		goFiles, compiledGoFiles, err := readGoFiles(ctx, snapshot, m.Metadata)
+		if err != nil {
+			return nil, err
+		}
+
 		missingImports := map[string]*modfile.Require{}
 
 		// If -mod=readonly is not set we may have successfully imported
 		// packages from missing modules. Otherwise they'll be in
 		// MissingDependencies. Combine both.
-		importedPkgs := ph.imports(ctx, snapshot)
-
-		for _, imp := range importedPkgs {
+		for imp := range parseImports(ctx, snapshot, goFiles) {
 			if req, ok := missing[imp]; ok {
 				missingImports[imp] = req
 				break
@@ -252,8 +236,8 @@
 		if len(missingImports) == 0 {
 			continue
 		}
-		for _, pgh := range ph.compiledGoFiles {
-			pgf, err := snapshot.ParseGo(ctx, pgh.file, source.ParseHeader)
+		for _, goFile := range compiledGoFiles {
+			pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader)
 			if err != nil {
 				continue
 			}
@@ -282,7 +266,7 @@
 				if !ok {
 					return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path)
 				}
-				srcErr, err := missingModuleForImport(snapshot, m, imp, req, fixes)
+				srcErr, err := missingModuleForImport(pgf.Tok, m, imp, req, fixes)
 				if err != nil {
 					return nil, err
 				}
@@ -445,11 +429,11 @@
 
 // missingModuleForImport creates an error for a given import path that comes
 // from a missing module.
-func missingModuleForImport(snapshot source.Snapshot, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
+func missingModuleForImport(file *token.File, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
 	if req.Syntax == nil {
 		return nil, fmt.Errorf("no syntax for %v", req)
 	}
-	spn, err := span.NewRange(snapshot.FileSet(), imp.Path.Pos(), imp.Path.End()).Span()
+	spn, err := span.NewRange(file, imp.Path.Pos(), imp.Path.End()).Span()
 	if err != nil {
 		return nil, err
 	}
@@ -493,3 +477,27 @@
 	}
 	return span.New(m.URI, start, end), nil
 }
+
+// parseImports parses the headers of the specified files and returns
+// the set of strings that appear in import declarations within
+// GoFiles. Errors are ignored.
+//
+// (We can't simply use ph.m.Metadata.Deps because it contains
+// PackageIDs--not import paths--and is based on CompiledGoFiles,
+// after cgo processing.)
+func parseImports(ctx context.Context, s *snapshot, files []source.FileHandle) map[string]bool {
+	s.mu.Lock() // peekOrParse requires a locked snapshot (!)
+	defer s.mu.Unlock()
+	seen := make(map[string]bool)
+	for _, file := range files {
+		f, err := peekOrParse(ctx, s, file, source.ParseHeader)
+		if err != nil {
+			continue
+		}
+		for _, spec := range f.File.Imports {
+			path, _ := strconv.Unquote(spec.Path.Value)
+			seen[path] = true
+		}
+	}
+	return seen
+}
diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go
index f7b4f9c..77e893a 100644
--- a/internal/lsp/cache/parse.go
+++ b/internal/lsp/cache/parse.go
@@ -26,7 +26,6 @@
 	"golang.org/x/tools/internal/lsp/safetoken"
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/memoize"
-	"golang.org/x/tools/internal/span"
 )
 
 // parseKey uniquely identifies a parsed Go file.
@@ -35,233 +34,90 @@
 	mode source.ParseMode
 }
 
-type parseGoHandle struct {
-	handle *memoize.Handle
-	file   source.FileHandle
-	mode   source.ParseMode
-}
+// ParseGo parses the file whose contents are provided by fh, using a cache.
+// The resulting tree may have been fixed up.
+//
+// The parser mode must not be ParseExported: that mode is used during
+// type checking to destructively trim the tree to reduce work,
+// which is not safe for values from a shared cache.
+// TODO(adonovan): opt: shouldn't parseGoImpl do the trimming?
+// Then we can cache the result since it would never change.
+func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
+	if mode == source.ParseExported {
+		panic("only type checking should use Exported")
+	}
 
-type parseGoData struct {
-	parsed *source.ParsedGoFile
-
-	// If true, we adjusted the AST to make it type check better, and
-	// it may not match the source code.
-	fixed bool
-	err   error // any other errors
-}
-
-func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode source.ParseMode) *parseGoHandle {
 	key := parseKey{
 		file: fh.FileIdentity(),
 		mode: mode,
 	}
-	if pgh := s.getGoFile(key); pgh != nil {
-		return pgh
-	}
-	parseHandle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} {
-		snapshot := arg.(*snapshot)
-		return parseGo(ctx, snapshot.FileSet(), fh, mode)
-	})
 
-	pgh := &parseGoHandle{
-		handle: parseHandle,
-		file:   fh,
-		mode:   mode,
-	}
-	return s.addGoFile(key, pgh, release)
-}
+	s.mu.Lock()
+	entry, hit := s.parsedGoFiles.Get(key)
+	s.mu.Unlock()
 
-func (pgh *parseGoHandle) String() string {
-	return pgh.file.URI().Filename()
-}
+	// cache miss?
+	if !hit {
+		handle, release := s.store.Promise(key, func(ctx context.Context, arg interface{}) interface{} {
+			parsed, err := parseGoImpl(ctx, arg.(*snapshot).FileSet(), fh, mode)
+			return parseGoResult{parsed, err}
+		})
 
-func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
-	pgf, _, err := s.parseGo(ctx, fh, mode)
-	return pgf, err
-}
-
-func (s *snapshot) parseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, bool, error) {
-	if mode == source.ParseExported {
-		panic("only type checking should use Exported")
-	}
-	pgh := s.parseGoHandle(ctx, fh, mode)
-	d, err := pgh.handle.Get(ctx, s.generation, s)
-	if err != nil {
-		return nil, false, err
-	}
-	data := d.(*parseGoData)
-	return data.parsed, data.fixed, data.err
-}
-
-// cachedPGF returns the cached ParsedGoFile for the given ParseMode, if it
-// has already been computed. Otherwise, it returns nil.
-func (s *snapshot) cachedPGF(fh source.FileHandle, mode source.ParseMode) *source.ParsedGoFile {
-	key := parseKey{file: fh.FileIdentity(), mode: mode}
-	if pgh := s.getGoFile(key); pgh != nil {
-		cached := pgh.handle.Cached(s.generation)
-		if cached != nil {
-			cached := cached.(*parseGoData)
-			if cached.parsed != nil {
-				return cached.parsed
-			}
+		s.mu.Lock()
+		// Check cache again in case another thread got there first.
+		if prev, ok := s.parsedGoFiles.Get(key); ok {
+			entry = prev
+			release()
+		} else {
+			entry = handle
+			s.parsedGoFiles.Set(key, entry, func(_, _ interface{}) { release() })
 		}
+		s.mu.Unlock()
 	}
-	return nil
-}
 
-type astCacheKey struct {
-	pkg packageHandleKey
-	uri span.URI
-}
-
-func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos token.Pos) (*astCacheData, error) {
-	pkg := spkg.(*pkg)
-	pkgHandle := s.getPackage(pkg.m.ID, pkg.mode)
-	if pkgHandle == nil {
-		return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.ID)
-	}
-	tok := s.FileSet().File(pos)
-	if tok == nil {
-		return nil, fmt.Errorf("no file for pos %v", pos)
-	}
-	pgf, err := pkg.File(span.URIFromPath(tok.Name()))
+	// Await result.
+	v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
 	if err != nil {
 		return nil, err
 	}
-	astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} {
-		return buildASTCache(pgf)
-	}, nil)
-
-	d, err := astHandle.Get(ctx, s.generation, s)
-	if err != nil {
-		return nil, err
-	}
-	data := d.(*astCacheData)
-	if data.err != nil {
-		return nil, data.err
-	}
-	return data, nil
+	res := v.(parseGoResult)
+	return res.parsed, res.err
 }
 
-func (s *snapshot) PosToDecl(ctx context.Context, spkg source.Package, pos token.Pos) (ast.Decl, error) {
-	data, err := s.astCacheData(ctx, spkg, pos)
-	if err != nil {
-		return nil, err
+// peekParseGoLocked peeks at the cache used by ParseGo but does not
+// populate it or wait for other threads to do so. On cache hit, it returns
+// the cached result of parseGoImpl; otherwise it returns (nil, nil).
+func (s *snapshot) peekParseGoLocked(fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
+	entry, hit := s.parsedGoFiles.Get(parseKey{fh.FileIdentity(), mode})
+	if !hit {
+		return nil, nil // no-one has requested this file
 	}
-	return data.posToDecl[pos], nil
-}
-
-func (s *snapshot) PosToField(ctx context.Context, spkg source.Package, pos token.Pos) (*ast.Field, error) {
-	data, err := s.astCacheData(ctx, spkg, pos)
-	if err != nil {
-		return nil, err
+	v := entry.(*memoize.Promise).Cached()
+	if v == nil {
+		return nil, nil // parsing is still in progress
 	}
-	return data.posToField[pos], nil
+	res := v.(parseGoResult)
+	return res.parsed, res.err
 }
 
-type astCacheData struct {
-	err error
-
-	posToDecl  map[token.Pos]ast.Decl
-	posToField map[token.Pos]*ast.Field
+// parseGoResult holds the result of a call to parseGoImpl.
+type parseGoResult struct {
+	parsed *source.ParsedGoFile
+	err    error
 }
 
-// buildASTCache builds caches to aid in quickly going from the typed
-// world to the syntactic world.
-func buildASTCache(pgf *source.ParsedGoFile) *astCacheData {
-	var (
-		// path contains all ancestors, including n.
-		path []ast.Node
-		// decls contains all ancestors that are decls.
-		decls []ast.Decl
-	)
-
-	data := &astCacheData{
-		posToDecl:  make(map[token.Pos]ast.Decl),
-		posToField: make(map[token.Pos]*ast.Field),
-	}
-
-	ast.Inspect(pgf.File, func(n ast.Node) bool {
-		if n == nil {
-			lastP := path[len(path)-1]
-			path = path[:len(path)-1]
-			if len(decls) > 0 && decls[len(decls)-1] == lastP {
-				decls = decls[:len(decls)-1]
-			}
-			return false
-		}
-
-		path = append(path, n)
-
-		switch n := n.(type) {
-		case *ast.Field:
-			addField := func(f ast.Node) {
-				if f.Pos().IsValid() {
-					data.posToField[f.Pos()] = n
-					if len(decls) > 0 {
-						data.posToDecl[f.Pos()] = decls[len(decls)-1]
-					}
-				}
-			}
-
-			// Add mapping for *ast.Field itself. This handles embedded
-			// fields which have no associated *ast.Ident name.
-			addField(n)
-
-			// Add mapping for each field name since you can have
-			// multiple names for the same type expression.
-			for _, name := range n.Names {
-				addField(name)
-			}
-
-			// Also map "X" in "...X" to the containing *ast.Field. This
-			// makes it easy to format variadic signature params
-			// properly.
-			if elips, ok := n.Type.(*ast.Ellipsis); ok && elips.Elt != nil {
-				addField(elips.Elt)
-			}
-		case *ast.FuncDecl:
-			decls = append(decls, n)
-
-			if n.Name != nil && n.Name.Pos().IsValid() {
-				data.posToDecl[n.Name.Pos()] = n
-			}
-		case *ast.GenDecl:
-			decls = append(decls, n)
-
-			for _, spec := range n.Specs {
-				switch spec := spec.(type) {
-				case *ast.TypeSpec:
-					if spec.Name != nil && spec.Name.Pos().IsValid() {
-						data.posToDecl[spec.Name.Pos()] = n
-					}
-				case *ast.ValueSpec:
-					for _, id := range spec.Names {
-						if id != nil && id.Pos().IsValid() {
-							data.posToDecl[id.Pos()] = n
-						}
-					}
-				}
-			}
-		}
-
-		return true
-	})
-
-	return data
-}
-
-func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) *parseGoData {
+// parseGoImpl parses the Go source file whose content is provided by fh.
+func parseGoImpl(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
 	ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename()))
 	defer done()
 
 	ext := filepath.Ext(fh.URI().Filename())
 	if ext != ".go" && ext != "" { // files generated by cgo have no extension
-		return &parseGoData{err: fmt.Errorf("cannot parse non-Go file %s", fh.URI())}
+		return nil, fmt.Errorf("cannot parse non-Go file %s", fh.URI())
 	}
 	src, err := fh.Read()
 	if err != nil {
-		return &parseGoData{err: err}
+		return nil, err
 	}
 
 	parserMode := parser.AllErrors | parser.ParseComments
@@ -323,22 +179,20 @@
 		}
 	}
 
-	return &parseGoData{
-		parsed: &source.ParsedGoFile{
-			URI:  fh.URI(),
-			Mode: mode,
-			Src:  src,
-			File: file,
-			Tok:  tok,
-			Mapper: &protocol.ColumnMapper{
-				URI:     fh.URI(),
-				TokFile: tok,
-				Content: src,
-			},
-			ParseErr: parseErr,
+	return &source.ParsedGoFile{
+		URI:   fh.URI(),
+		Mode:  mode,
+		Src:   src,
+		Fixed: fixed,
+		File:  file,
+		Tok:   tok,
+		Mapper: &protocol.ColumnMapper{
+			URI:     fh.URI(),
+			TokFile: tok,
+			Content: src,
 		},
-		fixed: fixed,
-	}
+		ParseErr: parseErr,
+	}, nil
 }
 
 // An unexportedFilter removes as much unexported AST from a set of Files as possible.
@@ -425,6 +279,8 @@
 		}
 		switch typ := spec.Type.(type) {
 		case *ast.StructType:
+			// In practice this no longer filters anything;
+			// see comment at StructType case in recordUses.
 			f.filterFieldList(typ.Fields)
 		case *ast.InterfaceType:
 			f.filterFieldList(typ.Methods)
@@ -480,9 +336,19 @@
 				case *ast.TypeSpec:
 					switch typ := spec.Type.(type) {
 					case *ast.StructType:
-						f.recordFieldUses(false, typ.Fields)
+						// We used to trim unexported fields but this
+						// had observable consequences. For example,
+						// the 'fieldalignment' analyzer would compute
+						// incorrect diagnostics from the size and
+						// offsets, and the UI hover information for
+						// types was inaccurate. So now we keep them.
+						if typ.Fields != nil {
+							for _, field := range typ.Fields.List {
+								f.recordIdents(field.Type)
+							}
+						}
 					case *ast.InterfaceType:
-						f.recordFieldUses(false, typ.Methods)
+						f.recordInterfaceMethodUses(typ.Methods)
 					}
 				}
 			}
@@ -531,37 +397,32 @@
 }
 
 // recordFuncType records the types mentioned by a function type.
-func (f *unexportedFilter) recordFuncType(x *ast.FuncType) {
-	f.recordFieldUses(true, x.Params)
-	f.recordFieldUses(true, x.Results)
+func (f *unexportedFilter) recordFuncType(fn *ast.FuncType) {
+	// Parameter and result types of retained functions need to be retained.
+	if fn.Params != nil {
+		for _, field := range fn.Params.List {
+			f.recordIdents(field.Type)
+		}
+	}
+	if fn.Results != nil {
+		for _, field := range fn.Results.List {
+			f.recordIdents(field.Type)
+		}
+	}
 }
 
-// recordFieldUses records unexported identifiers used in fields, which may be
-// struct members, interface members, or function parameter/results.
-func (f *unexportedFilter) recordFieldUses(isParams bool, fields *ast.FieldList) {
-	if fields == nil {
-		return
-	}
-	for _, field := range fields.List {
-		if isParams {
-			// Parameter types of retained functions need to be retained.
-			f.recordIdents(field.Type)
-			continue
-		}
-		if ft, ok := field.Type.(*ast.FuncType); ok {
-			// Function declarations in interfaces need all their types retained.
-			f.recordFuncType(ft)
-			continue
-		}
-		if len(field.Names) == 0 {
-			// Embedded fields might contribute exported names.
-			f.recordIdents(field.Type)
-		}
-		for _, name := range field.Names {
-			// We only need normal fields if they're exported.
-			if ast.IsExported(name.Name) {
-				f.recordIdents(field.Type)
-				break
+// recordInterfaceMethodUses records unexported identifiers used in interface methods.
+func (f *unexportedFilter) recordInterfaceMethodUses(methods *ast.FieldList) {
+	if methods != nil {
+		for _, method := range methods.List {
+			if len(method.Names) == 0 {
+				// I, pkg.I, I[T] -- embedded interface:
+				// may contribute exported names.
+				f.recordIdents(method.Type)
+			} else if ft, ok := method.Type.(*ast.FuncType); ok {
+				// f(T) -- ordinary interface method:
+				// needs all its types retained.
+				f.recordFuncType(ft)
 			}
 		}
 	}
@@ -588,32 +449,35 @@
 }
 
 // trimAST clears any part of the AST not relevant to type checking
-// expressions at pos.
+// the package-level declarations.
 func trimAST(file *ast.File) {
-	ast.Inspect(file, func(n ast.Node) bool {
-		if n == nil {
-			return false
+	// Eliminate bodies of top-level functions, methods, inits.
+	for _, decl := range file.Decls {
+		if fn, ok := decl.(*ast.FuncDecl); ok {
+			fn.Body = nil
 		}
+	}
+
+	// Simplify remaining declarations.
+	ast.Inspect(file, func(n ast.Node) bool {
 		switch n := n.(type) {
-		case *ast.FuncDecl:
-			n.Body = nil
-		case *ast.BlockStmt:
-			n.List = nil
-		case *ast.CaseClause:
-			n.Body = nil
-		case *ast.CommClause:
-			n.Body = nil
+		case *ast.FuncLit:
+			// Eliminate bodies of literal functions.
+			// func() { ... } => func() {}
+			n.Body.List = nil
 		case *ast.CompositeLit:
 			// types.Info.Types for long slice/array literals are particularly
-			// expensive. Try to clear them out.
+			// expensive. Try to clear them out: T{e, ..., e} => T{}
 			at, ok := n.Type.(*ast.ArrayType)
 			if !ok {
-				// Composite literal. No harm removing all its fields.
+				// Map or struct literal: no harm removing all its fields.
 				n.Elts = nil
 				break
 			}
+
 			// Removing the elements from an ellipsis array changes its type.
 			// Try to set the length explicitly so we can continue.
+			//  [...]T{e, ..., e} => [3]T{}
 			if _, ok := at.Len.(*ast.Ellipsis); ok {
 				length, ok := arrayLength(n)
 				if !ok {
diff --git a/internal/lsp/cache/parse_test.go b/internal/lsp/cache/parse_test.go
index cb620f2..e8db645 100644
--- a/internal/lsp/cache/parse_test.go
+++ b/internal/lsp/cache/parse_test.go
@@ -149,7 +149,7 @@
 }
 var Var = Exported{foo:1}
 `,
-			kept: []string{"Exported", "Var"},
+			kept: []string{"Exported", "Var", "x"},
 		},
 		{
 			name: "drop_function_literals",
diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go
index 4a7a5b2..984e8c1 100644
--- a/internal/lsp/cache/session.go
+++ b/internal/lsp/cache/session.go
@@ -8,6 +8,7 @@
 	"context"
 	"fmt"
 	"strconv"
+	"strings"
 	"sync"
 	"sync/atomic"
 
@@ -16,6 +17,7 @@
 	"golang.org/x/tools/internal/imports"
 	"golang.org/x/tools/internal/lsp/progress"
 	"golang.org/x/tools/internal/lsp/source"
+	"golang.org/x/tools/internal/persistent"
 	"golang.org/x/tools/internal/span"
 	"golang.org/x/tools/internal/xcontext"
 )
@@ -164,7 +166,7 @@
 	}
 	view, snapshot, release, err := s.createView(ctx, name, folder, options, 0)
 	if err != nil {
-		return nil, nil, func() {}, err
+		return nil, nil, nil, err
 	}
 	s.views = append(s.views, view)
 	// we always need to drop the view map
@@ -179,21 +181,26 @@
 		s.cache.options(options)
 	}
 
-	// Set the module-specific information.
-	ws, err := s.getWorkspaceInformation(ctx, folder, options)
+	// Get immutable workspace configuration.
+	//
+	// TODO(rfindley): this info isn't actually immutable. For example, GOWORK
+	// could be changed, or a user's environment could be modified.
+	// We need a mechanism to invalidate it.
+	wsInfo, err := s.getWorkspaceInformation(ctx, folder, options)
 	if err != nil {
 		return nil, nil, func() {}, err
 	}
+
 	root := folder
 	if options.ExpandWorkspaceToModule {
-		root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), options.ExperimentalWorkspaceModule)
+		root, err = findWorkspaceRoot(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), wsInfo.gomodcache, options), options.ExperimentalWorkspaceModule)
 		if err != nil {
 			return nil, nil, func() {}, err
 		}
 	}
 
 	// Build the gopls workspace, collecting active modules in the view.
-	workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), ws.gomodcache, options), ws.userGo111Module == off, options.ExperimentalWorkspaceModule)
+	workspace, err := newWorkspace(ctx, root, s, pathExcludedByFilterFunc(root.Filename(), wsInfo.gomodcache, options), wsInfo.userGo111Module == off, options.ExperimentalWorkspaceModule)
 	if err != nil {
 		return nil, nil, func() {}, err
 	}
@@ -216,47 +223,65 @@
 		filesByURI:           map[span.URI]*fileBase{},
 		filesByBase:          map[string][]*fileBase{},
 		rootURI:              root,
-		workspaceInformation: *ws,
+		workspaceInformation: *wsInfo,
 	}
 	v.importsState = &importsState{
 		ctx: backgroundCtx,
 		processEnv: &imports.ProcessEnv{
 			GocmdRunner: s.gocmdRunner,
+			SkipPathInScan: func(dir string) bool {
+				prefix := strings.TrimSuffix(string(v.folder), "/") + "/"
+				uri := strings.TrimSuffix(string(span.URIFromPath(dir)), "/")
+				if !strings.HasPrefix(uri+"/", prefix) {
+					return false
+				}
+				filterer := source.NewFilterer(options.DirectoryFilters)
+				rel := strings.TrimPrefix(uri, prefix)
+				disallow := filterer.Disallow(rel)
+				return disallow
+			},
 		},
 	}
 	v.snapshot = &snapshot{
-		id:                snapshotID,
-		view:              v,
-		backgroundCtx:     backgroundCtx,
-		cancel:            cancel,
-		initializeOnce:    &sync.Once{},
-		generation:        s.cache.store.Generation(generationName(v, 0)),
-		packages:          make(map[packageKey]*packageHandle),
-		meta:              &metadataGraph{},
-		files:             newFilesMap(),
-		goFiles:           newGoFilesMap(),
-		parseKeysByURI:    newParseKeysByURIMap(),
-		symbols:           make(map[span.URI]*symbolHandle),
-		actions:           make(map[actionKey]*actionHandle),
-		workspacePackages: make(map[PackageID]PackagePath),
-		unloadableFiles:   make(map[span.URI]struct{}),
-		parseModHandles:   make(map[span.URI]*parseModHandle),
-		parseWorkHandles:  make(map[span.URI]*parseWorkHandle),
-		modTidyHandles:    make(map[span.URI]*modTidyHandle),
-		modWhyHandles:     make(map[span.URI]*modWhyHandle),
-		workspace:         workspace,
+		id:                   snapshotID,
+		view:                 v,
+		backgroundCtx:        backgroundCtx,
+		cancel:               cancel,
+		store:                s.cache.store,
+		packages:             persistent.NewMap(packageKeyLessInterface),
+		meta:                 &metadataGraph{},
+		files:                newFilesMap(),
+		isActivePackageCache: newIsActivePackageCacheMap(),
+		parsedGoFiles:        persistent.NewMap(parseKeyLessInterface),
+		parseKeysByURI:       newParseKeysByURIMap(),
+		symbolizeHandles:     persistent.NewMap(uriLessInterface),
+		actions:              persistent.NewMap(actionKeyLessInterface),
+		workspacePackages:    make(map[PackageID]PackagePath),
+		unloadableFiles:      make(map[span.URI]struct{}),
+		parseModHandles:      persistent.NewMap(uriLessInterface),
+		parseWorkHandles:     persistent.NewMap(uriLessInterface),
+		modTidyHandles:       persistent.NewMap(uriLessInterface),
+		modWhyHandles:        persistent.NewMap(uriLessInterface),
+		knownSubdirs:         newKnownDirsSet(),
+		workspace:            workspace,
 	}
+	// Save one reference in the view.
+	v.releaseSnapshot = v.snapshot.Acquire()
 
 	// Initialize the view without blocking.
 	initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx))
 	v.initCancelFirstAttempt = initCancel
 	snapshot := v.snapshot
-	release := snapshot.generation.Acquire()
+
+	// Pass a second reference to the background goroutine.
+	bgRelease := snapshot.Acquire()
 	go func() {
-		defer release()
+		defer bgRelease()
 		snapshot.initialize(initCtx, true)
 	}()
-	return v, snapshot, snapshot.generation.Acquire(), nil
+
+	// Return a third reference to the caller.
+	return v, snapshot, snapshot.Acquire(), nil
 }
 
 // View returns the view by name.
@@ -292,19 +317,6 @@
 	return s.viewMap[uri], nil
 }
 
-func (s *Session) viewsOf(uri span.URI) []*View {
-	s.viewMu.RLock()
-	defer s.viewMu.RUnlock()
-
-	var views []*View
-	for _, view := range s.views {
-		if source.InDir(view.folder.Filename(), uri.Filename()) {
-			views = append(views, view)
-		}
-	}
-	return views
-}
-
 func (s *Session) Views() []source.View {
 	s.viewMu.RLock()
 	defer s.viewMu.RUnlock()
@@ -408,13 +420,13 @@
 }
 
 func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error {
-	_, releases, err := s.DidModifyFiles(ctx, changes)
-	for _, release := range releases {
-		release()
-	}
+	_, release, err := s.DidModifyFiles(ctx, changes)
+	release()
 	return err
 }
 
+// TODO(rfindley): fileChange seems redundant with source.FileModification.
+// De-dupe into a common representation for changes.
 type fileChange struct {
 	content    []byte
 	exists     bool
@@ -426,7 +438,7 @@
 	isUnchanged bool
 }
 
-func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, []func(), error) {
+func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, func(), error) {
 	s.viewMu.RLock()
 	defer s.viewMu.RUnlock()
 	views := make(map[*View]map[span.URI]*fileChange)
@@ -507,6 +519,14 @@
 		viewToSnapshot[view] = snapshot
 	}
 
+	// The release function is called when the
+	// returned URIs no longer need to be valid.
+	release := func() {
+		for _, release := range releases {
+			release()
+		}
+	}
+
 	// We only want to diagnose each changed file once, in the view to which
 	// it "most" belongs. We do this by picking the best view for each URI,
 	// and then aggregating the set of snapshots and their URIs (to avoid
@@ -524,7 +544,8 @@
 		}
 		snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI)
 	}
-	return snapshotURIs, releases, nil
+
+	return snapshotURIs, release, nil
 }
 
 func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification {
@@ -536,10 +557,14 @@
 		defer release()
 		snapshots = append(snapshots, snapshot)
 	}
+	// TODO(adonovan): opt: release lock here.
+
 	knownDirs := knownDirectories(ctx, snapshots)
+	defer knownDirs.Destroy()
+
 	var result []source.FileModification
 	for _, c := range changes {
-		if _, ok := knownDirs[c.URI]; !ok {
+		if !knownDirs.Contains(c.URI) {
 			result = append(result, c)
 			continue
 		}
@@ -561,16 +586,17 @@
 
 // knownDirectories returns all of the directories known to the given
 // snapshots, including workspace directories and their subdirectories.
-func knownDirectories(ctx context.Context, snapshots []*snapshot) map[span.URI]struct{} {
-	result := map[span.URI]struct{}{}
+// It is the responsibility of the caller to destroy the returned set.
+func knownDirectories(ctx context.Context, snapshots []*snapshot) knownDirsSet {
+	result := newKnownDirsSet()
 	for _, snapshot := range snapshots {
 		dirs := snapshot.workspace.dirs(ctx, snapshot)
 		for _, dir := range dirs {
-			result[dir] = struct{}{}
+			result.Insert(dir)
 		}
-		for _, dir := range snapshot.getKnownSubdirs(dirs) {
-			result[dir] = struct{}{}
-		}
+		knownSubdirs := snapshot.getKnownSubdirs(dirs)
+		result.SetAll(knownSubdirs)
+		knownSubdirs.Destroy()
 	}
 	return result
 }
diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go
index e94ad7b..da2d7b5 100644
--- a/internal/lsp/cache/snapshot.go
+++ b/internal/lsp/cache/snapshot.go
@@ -14,6 +14,7 @@
 	"go/types"
 	"io"
 	"io/ioutil"
+	"log"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -22,12 +23,13 @@
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
+	"unsafe"
 
 	"golang.org/x/mod/modfile"
 	"golang.org/x/mod/module"
 	"golang.org/x/mod/semver"
 	"golang.org/x/sync/errgroup"
-	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
@@ -36,28 +38,28 @@
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/packagesinternal"
+	"golang.org/x/tools/internal/persistent"
 	"golang.org/x/tools/internal/span"
 	"golang.org/x/tools/internal/typesinternal"
 )
 
 type snapshot struct {
-	memoize.Arg // allow as a memoize.Function arg
-
 	id   uint64
 	view *View
 
 	cancel        func()
 	backgroundCtx context.Context
 
-	// the cache generation that contains the data for this snapshot.
-	generation *memoize.Generation
+	store *memoize.Store // cache of handles shared by all snapshots
 
-	// The snapshot's initialization state is controlled by the fields below.
-	//
-	// initializeOnce guards snapshot initialization. Each snapshot is
-	// initialized at most once: reinitialization is triggered on later snapshots
-	// by invalidating this field.
-	initializeOnce *sync.Once
+	refcount    sync.WaitGroup // number of references
+	destroyedBy *string        // atomically set to non-nil in Destroy once refcount = 0
+
+	// initialized reports whether the snapshot has been initialized.
+	// Each snapshot is initialized at most once:
+	// concurrent initialization is guarded by
+	// view.initializationSema.
+	initialized bool
 	// initializedErr holds the last error resulting from initialization. If
 	// initialization fails, we only retry when the the workspace modules change,
 	// to avoid too many go/packages calls.
@@ -80,47 +82,73 @@
 	// It may invalidated when a file's content changes.
 	files filesMap
 
-	// goFiles maps a parseKey to its parseGoHandle.
-	goFiles        goFilesMap
+	// parsedGoFiles maps a parseKey to the handle of the future result of parsing it.
+	parsedGoFiles *persistent.Map // from parseKey to *memoize.Promise[parseGoResult]
+
+	// parseKeysByURI records the set of keys of parsedGoFiles that
+	// need to be invalidated for each URI.
+	// TODO(adonovan): opt: parseKey = ParseMode + URI, so this could
+	// be just a set of ParseModes, or we could loop over AllParseModes.
 	parseKeysByURI parseKeysByURIMap
 
-	// TODO(rfindley): consider merging this with files to reduce burden on clone.
-	symbols map[span.URI]*symbolHandle
+	// symbolizeHandles maps each file URI to a handle for the future
+	// result of computing the symbols declared in that file.
+	symbolizeHandles *persistent.Map // from span.URI to *memoize.Promise[symbolizeResult]
 
-	// packages maps a packageKey to a set of packageHandles to which that file belongs.
+	// packages maps a packageKey to a *packageHandle.
 	// It may be invalidated when a file's content changes.
-	packages map[packageKey]*packageHandle
+	//
+	// Invariants to preserve:
+	//  - packages.Get(id).m.Metadata == meta.metadata[id].Metadata for all ids
+	//  - if a package is in packages, then all of its dependencies should also
+	//    be in packages, unless there is a missing import
+	packages *persistent.Map // from packageKey to *memoize.Promise[*packageHandle]
 
-	// actions maps an actionkey to its actionHandle.
-	actions map[actionKey]*actionHandle
+	// isActivePackageCache maps package ID to the cached value if it is active or not.
+	// It may be invalidated when metadata changes or a new file is opened or closed.
+	isActivePackageCache isActivePackageCacheMap
+
+	// actions maps an actionKey to the handle for the future
+	// result of execution an analysis pass on a package.
+	actions *persistent.Map // from actionKey to *actionHandle
 
 	// workspacePackages contains the workspace's packages, which are loaded
 	// when the view is created.
 	workspacePackages map[PackageID]PackagePath
 
+	// shouldLoad tracks packages that need to be reloaded, mapping a PackageID
+	// to the package paths that should be used to reload it
+	//
+	// When we try to load a package, we clear it from the shouldLoad map
+	// regardless of whether the load succeeded, to prevent endless loads.
+	shouldLoad map[PackageID][]PackagePath
+
 	// unloadableFiles keeps track of files that we've failed to load.
 	unloadableFiles map[span.URI]struct{}
 
 	// parseModHandles keeps track of any parseModHandles for the snapshot.
 	// The handles need not refer to only the view's go.mod file.
-	parseModHandles map[span.URI]*parseModHandle
+	parseModHandles *persistent.Map // from span.URI to *memoize.Promise[parseModResult]
 
 	// parseWorkHandles keeps track of any parseWorkHandles for the snapshot.
 	// The handles need not refer to only the view's go.work file.
-	parseWorkHandles map[span.URI]*parseWorkHandle
+	parseWorkHandles *persistent.Map // from span.URI to *memoize.Promise[parseWorkResult]
 
 	// Preserve go.mod-related handles to avoid garbage-collecting the results
 	// of various calls to the go command. The handles need not refer to only
 	// the view's go.mod file.
-	modTidyHandles map[span.URI]*modTidyHandle
-	modWhyHandles  map[span.URI]*modWhyHandle
+	modTidyHandles *persistent.Map // from span.URI to *memoize.Promise[modTidyResult]
+	modWhyHandles  *persistent.Map // from span.URI to *memoize.Promise[modWhyResult]
 
-	workspace          *workspace
-	workspaceDirHandle *memoize.Handle
+	workspace *workspace // (not guarded by mu)
+
+	// The cached result of makeWorkspaceDir, created on demand and deleted by Snapshot.Destroy.
+	workspaceDir    string
+	workspaceDirErr error
 
 	// knownSubdirs is the set of subdirectories in the workspace, used to
 	// create glob patterns for file watching.
-	knownSubdirs             map[span.URI]struct{}
+	knownSubdirs             knownDirsSet
 	knownSubdirsPatternCache string
 	// unprocessedSubdirChanges are any changes that might affect the set of
 	// subdirectories in the workspace. They are not reflected to knownSubdirs
@@ -128,21 +156,80 @@
 	unprocessedSubdirChanges []*fileChange
 }
 
-type packageKey struct {
-	mode source.ParseMode
-	id   PackageID
+var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted
+
+// Acquire prevents the snapshot from being destroyed until the returned function is called.
+//
+// (s.Acquire().release() could instead be expressed as a pair of
+// method calls s.IncRef(); s.DecRef(). The latter has the advantage
+// that the DecRefs are fungible and don't require holding anything in
+// addition to the refcounted object s, but paradoxically that is also
+// an advantage of the current approach, which forces the caller to
+// consider the release function at every stage, making a reference
+// leak more obvious.)
+func (s *snapshot) Acquire() func() {
+	type uP = unsafe.Pointer
+	if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil {
+		log.Panicf("%d: acquire() after Destroy(%q)", s.id, *(*string)(destroyedBy))
+	}
+	s.refcount.Add(1)
+	return s.refcount.Done
 }
 
-type actionKey struct {
-	pkg      packageKey
-	analyzer *analysis.Analyzer
+func (s *snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) {
+	return p.Get(ctx, s)
 }
 
-func (s *snapshot) Destroy(destroyedBy string) {
-	s.generation.Destroy(destroyedBy)
+// destroy waits for all leases on the snapshot to expire, then releases
+// any resources (reference counts and files) associated with it.
+// Snapshots being destroyed can be awaited using v.snapshotWG.
+//
+// TODO(adonovan): move this logic into the release function returned
+// by Acquire when the reference count becomes zero. (This would cost
+// us the destroyedBy debug info, unless we add it to the signature of
+// memoize.RefCounted.Acquire.)
+//
+// The destroyedBy argument is used for debugging.
+//
+// v.snapshotMu must be held while calling this function, in order to preserve
+// the invariants described by the docstring for v.snapshot.
+func (v *View) destroy(s *snapshot, destroyedBy string) {
+	v.snapshotWG.Add(1)
+	go func() {
+		defer v.snapshotWG.Done()
+		s.destroy(destroyedBy)
+	}()
+}
+
+func (s *snapshot) destroy(destroyedBy string) {
+	// Wait for all leases to end before commencing destruction.
+	s.refcount.Wait()
+
+	// Report bad state as a debugging aid.
+	// Not foolproof: another thread could acquire() at this moment.
+	type uP = unsafe.Pointer // looking forward to generics...
+	if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil {
+		log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.id, destroyedBy, *(*string)(old))
+	}
+
+	s.packages.Destroy()
+	s.isActivePackageCache.Destroy()
+	s.actions.Destroy()
 	s.files.Destroy()
-	s.goFiles.Destroy()
+	s.parsedGoFiles.Destroy()
 	s.parseKeysByURI.Destroy()
+	s.knownSubdirs.Destroy()
+	s.symbolizeHandles.Destroy()
+	s.parseModHandles.Destroy()
+	s.parseWorkHandles.Destroy()
+	s.modTidyHandles.Destroy()
+	s.modWhyHandles.Destroy()
+
+	if s.workspaceDir != "" {
+		if err := os.RemoveAll(s.workspaceDir); err != nil {
+			event.Error(context.Background(), "cleaning workspace dir", err)
+		}
+	}
 }
 
 func (s *snapshot) ID() uint64 {
@@ -320,6 +407,14 @@
 	return true, modBytes, sumBytes, nil
 }
 
+// goCommandInvocation populates inv with configuration for running go commands on the snapshot.
+//
+// TODO(rfindley): refactor this function to compose the required configuration
+// explicitly, rather than implicitly deriving it from flags and inv.
+//
+// TODO(adonovan): simplify cleanup mechanism. It's hard to see, but
+// it is used only after the call to tempModFile. Clarify that it is only
+// non-nil on success.
 func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
 	s.view.optionsMu.Lock()
 	allowModfileModificationOption := s.view.options.AllowModfileModifications
@@ -348,7 +443,6 @@
 	//  - the working directory.
 	//  - the -mod flag
 	//  - the -modfile flag
-	//  - the -workfile flag
 	//
 	// These are dependent on a number of factors: whether we need to run in a
 	// synthetic workspace, whether flags are supported at the current go
@@ -399,6 +493,9 @@
 		}
 	}
 
+	// TODO(rfindley): in the case of go.work mode, modURI is empty and we fall
+	// back on the default behavior of vendorEnabled with an empty modURI. Figure
+	// out what is correct here and implement it explicitly.
 	vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent)
 	if err != nil {
 		return "", nil, cleanup, err
@@ -434,13 +531,15 @@
 		return "", nil, cleanup, source.ErrTmpModfileUnsupported
 	}
 
-	// We should use -workfile if:
-	//  1. We're not actively trying to mutate a modfile.
-	//  2. We have an active go.work file.
-	//  3. We're using at least Go 1.18.
+	// We should use -modfile if:
+	//  - the workspace mode supports it
+	//  - we're using a go.work file on go1.18+, or we need a temp mod file (for
+	//    example, if running go mod tidy in a go.work workspace)
+	//
+	// TODO(rfindley): this is very hard to follow. Refactor.
 	useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18
 	if useWorkFile {
-		// TODO(#51215): build a temp workfile and set GOWORK in the environment.
+		// Since we're running in the workspace root, the go command will resolve GOWORK automatically.
 	} else if useTempMod {
 		if modURI == "" {
 			return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir)
@@ -461,6 +560,25 @@
 	return tmpURI, inv, cleanup, nil
 }
 
+// usesWorkspaceDir reports whether the snapshot should use a synthetic
+// workspace directory for running workspace go commands such as go list.
+//
+// TODO(rfindley): this logic is duplicated with goCommandInvocation. Clean up
+// the latter, and deduplicate.
+func (s *snapshot) usesWorkspaceDir() bool {
+	switch s.workspace.moduleSource {
+	case legacyWorkspace:
+		return false
+	case goWorkWorkspace:
+		if s.view.goversion >= 18 {
+			return false
+		}
+		// Before go 1.18, the Go command did not natively support go.work files,
+		// so we 'fake' them with a workspace module.
+	}
+	return true
+}
+
 func (s *snapshot) buildOverlay() map[string][]byte {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -480,17 +598,6 @@
 	return overlays
 }
 
-func hashUnsavedOverlays(files filesMap) source.Hash {
-	var unsaved []string
-	files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
-		if overlay, ok := fh.(*overlay); ok && !overlay.saved {
-			unsaved = append(unsaved, uri.Filename())
-		}
-	})
-	sort.Strings(unsaved)
-	return source.Hashf("%s", unsaved)
-}
-
 func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) {
 	ctx = event.Label(ctx, tag.URI.Of(uri))
 
@@ -500,7 +607,7 @@
 	}
 	var pkgs []source.Package
 	for _, ph := range phs {
-		pkg, err := ph.check(ctx, s)
+		pkg, err := ph.await(ctx, s)
 		if err != nil {
 			return nil, err
 		}
@@ -538,10 +645,14 @@
 		return nil, fmt.Errorf("no packages in input")
 	}
 
-	return ph.check(ctx, s)
+	return ph.await(ctx, s)
 }
 
 func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]*packageHandle, error) {
+	// TODO(rfindley): why can't/shouldn't we awaitLoaded here? It seems that if
+	// we ask for package handles for a file, we should wait for pending loads.
+	// Else we will reload orphaned files before the initial load completes.
+
 	// Check if we should reload metadata for the file. We don't invalidate IDs
 	// (though we should), so the IDs will be a better source of truth than the
 	// metadata. If there are no IDs for the file, then we should also reload.
@@ -590,13 +701,13 @@
 }
 
 func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) {
-	knownIDs := s.getIDsForURI(uri)
-	reload := len(knownIDs) == 0
-	for _, id := range knownIDs {
-		// Reload package metadata if any of the metadata has missing
-		// dependencies, in case something has changed since the last time we
-		// reloaded it.
-		if s.noValidMetadataForID(id) {
+	s.mu.Lock()
+	ids := s.meta.ids[uri]
+	reload := len(ids) == 0
+	for _, id := range ids {
+		// If the file is part of a package that needs reloading, reload it now to
+		// improve our responsiveness.
+		if len(s.shouldLoad[id]) > 0 {
 			reload = true
 			break
 		}
@@ -604,20 +715,38 @@
 		// missing dependencies. This is expensive and results in too many
 		// calls to packages.Load. Determine what we should do instead.
 	}
-	if reload {
-		err := s.load(ctx, false, fileURI(uri))
+	s.mu.Unlock()
 
+	if reload {
+		scope := fileURI(uri)
+		err := s.load(ctx, false, scope)
+
+		// As in reloadWorkspace, we must clear scopes after loading.
+		//
+		// TODO(rfindley): simply call reloadWorkspace here, first, to avoid this
+		// duplication.
+		if !errors.Is(err, context.Canceled) {
+			s.clearShouldLoad(scope)
+		}
+
+		// TODO(rfindley): this doesn't look right. If we don't reload, we use
+		// invalid metadata anyway, but if we DO reload and it fails, we don't?
 		if !s.useInvalidMetadata() && err != nil {
 			return nil, err
 		}
+
+		s.mu.Lock()
+		ids = s.meta.ids[uri]
+		s.mu.Unlock()
+
 		// We've tried to reload and there are still no known IDs for the URI.
 		// Return the load error, if there was one.
-		knownIDs = s.getIDsForURI(uri)
-		if len(knownIDs) == 0 {
+		if len(ids) == 0 {
 			return nil, err
 		}
 	}
-	return knownIDs, nil
+
+	return ids, nil
 }
 
 // Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has
@@ -655,54 +784,7 @@
 	if err != nil {
 		return nil, err
 	}
-	return ph.check(ctx, s)
-}
-
-func (s *snapshot) getGoFile(key parseKey) *parseGoHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if result, ok := s.goFiles.Get(key); ok {
-		return result
-	}
-	return nil
-}
-
-func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle, release func()) *parseGoHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if result, ok := s.goFiles.Get(key); ok {
-		release()
-		return result
-	}
-	s.goFiles.Set(key, pgh, release)
-	keys, _ := s.parseKeysByURI.Get(key.file.URI)
-	keys = append([]parseKey{key}, keys...)
-	s.parseKeysByURI.Set(key.file.URI, keys)
-	return pgh
-}
-
-func (s *snapshot) getParseModHandle(uri span.URI) *parseModHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.parseModHandles[uri]
-}
-
-func (s *snapshot) getParseWorkHandle(uri span.URI) *parseWorkHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.parseWorkHandles[uri]
-}
-
-func (s *snapshot) getModWhyHandle(uri span.URI) *modWhyHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.modWhyHandles[uri]
-}
-
-func (s *snapshot) getModTidyHandle(uri span.URI) *modTidyHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.modTidyHandles[uri]
+	return ph.await(ctx, s)
 }
 
 func (s *snapshot) getImportedBy(id PackageID) []PackageID {
@@ -711,19 +793,6 @@
 	return s.meta.importedBy[id]
 }
 
-func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	// If the package handle has already been cached,
-	// return the cached handle instead of overriding it.
-	if ph, ok := s.packages[ph.packageKey()]; ok {
-		return ph
-	}
-	s.packages[ph.packageKey()] = ph
-	return ph
-}
-
 func (s *snapshot) workspacePackageIDs() (ids []PackageID) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -742,24 +811,20 @@
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	seen := make(map[PackageID]bool)
 	for id := range s.workspacePackages {
-		if s.isActiveLocked(id, seen) {
+		if s.isActiveLocked(id) {
 			ids = append(ids, id)
 		}
 	}
 	return ids
 }
 
-func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active bool) {
-	if seen == nil {
-		seen = make(map[PackageID]bool)
-	}
-	if seen, ok := seen[id]; ok {
+func (s *snapshot) isActiveLocked(id PackageID) (active bool) {
+	if seen, ok := s.isActivePackageCache.Get(id); ok {
 		return seen
 	}
 	defer func() {
-		seen[id] = active
+		s.isActivePackageCache.Set(id, active)
 	}()
 	m, ok := s.meta.metadata[id]
 	if !ok {
@@ -773,13 +838,18 @@
 	// TODO(rfindley): it looks incorrect that we don't also check GoFiles here.
 	// If a CGo file is open, we want to consider the package active.
 	for _, dep := range m.Deps {
-		if s.isActiveLocked(dep, seen) {
+		if s.isActiveLocked(dep) {
 			return true
 		}
 	}
 	return false
 }
 
+func (s *snapshot) resetIsActivePackageLocked() {
+	s.isActivePackageCache.Destroy()
+	s.isActivePackageCache = newIsActivePackageCacheMap()
+}
+
 const fileExtensions = "go,mod,sum,work"
 
 func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
@@ -831,17 +901,20 @@
 	// It may change list of known subdirs and therefore invalidate the cache.
 	s.applyKnownSubdirsChangesLocked(wsDirs)
 
-	if len(s.knownSubdirs) == 0 {
-		return ""
-	}
-
 	if s.knownSubdirsPatternCache == "" {
-		dirNames := make([]string, 0, len(s.knownSubdirs))
-		for uri := range s.knownSubdirs {
-			dirNames = append(dirNames, uri.Filename())
+		var builder strings.Builder
+		s.knownSubdirs.Range(func(uri span.URI) {
+			if builder.Len() == 0 {
+				builder.WriteString("{")
+			} else {
+				builder.WriteString(",")
+			}
+			builder.WriteString(uri.Filename())
+		})
+		if builder.Len() > 0 {
+			builder.WriteString("}")
+			s.knownSubdirsPatternCache = builder.String()
 		}
-		sort.Strings(dirNames)
-		s.knownSubdirsPatternCache = fmt.Sprintf("{%s}", strings.Join(dirNames, ","))
 	}
 
 	return s.knownSubdirsPatternCache
@@ -856,14 +929,15 @@
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	s.knownSubdirs = map[span.URI]struct{}{}
+	s.knownSubdirs.Destroy()
+	s.knownSubdirs = newKnownDirsSet()
 	s.knownSubdirsPatternCache = ""
 	s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
 		s.addKnownSubdirLocked(uri, dirs)
 	})
 }
 
-func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI {
+func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -871,11 +945,7 @@
 	// subdirectories.
 	s.applyKnownSubdirsChangesLocked(wsDirs)
 
-	result := make([]span.URI, 0, len(s.knownSubdirs))
-	for uri := range s.knownSubdirs {
-		result = append(result, uri)
-	}
-	return result
+	return s.knownSubdirs.Clone()
 }
 
 func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) {
@@ -896,7 +966,7 @@
 	dir := filepath.Dir(uri.Filename())
 	// First check if the directory is already known, because then we can
 	// return early.
-	if _, ok := s.knownSubdirs[span.URIFromPath(dir)]; ok {
+	if s.knownSubdirs.Contains(span.URIFromPath(dir)) {
 		return
 	}
 	var matched span.URI
@@ -915,10 +985,10 @@
 			break
 		}
 		uri := span.URIFromPath(dir)
-		if _, ok := s.knownSubdirs[uri]; ok {
+		if s.knownSubdirs.Contains(uri) {
 			break
 		}
-		s.knownSubdirs[uri] = struct{}{}
+		s.knownSubdirs.Insert(uri)
 		dir = filepath.Dir(dir)
 		s.knownSubdirsPatternCache = ""
 	}
@@ -928,11 +998,11 @@
 	dir := filepath.Dir(uri.Filename())
 	for dir != "" {
 		uri := span.URIFromPath(dir)
-		if _, ok := s.knownSubdirs[uri]; !ok {
+		if !s.knownSubdirs.Contains(uri) {
 			break
 		}
 		if info, _ := os.Stat(dir); info == nil {
-			delete(s.knownSubdirs, uri)
+			s.knownSubdirs.Remove(uri)
 			s.knownSubdirsPatternCache = ""
 		}
 		dir = filepath.Dir(dir)
@@ -954,21 +1024,6 @@
 	return files
 }
 
-func (s *snapshot) workspacePackageHandles(ctx context.Context) ([]*packageHandle, error) {
-	if err := s.awaitLoaded(ctx); err != nil {
-		return nil, err
-	}
-	var phs []*packageHandle
-	for _, pkgID := range s.workspacePackageIDs() {
-		ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID))
-		if err != nil {
-			return nil, err
-		}
-		phs = append(phs, ph)
-	}
-	return phs, nil
-}
-
 func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) {
 	phs, err := s.activePackageHandles(ctx)
 	if err != nil {
@@ -976,7 +1031,7 @@
 	}
 	var pkgs []source.Package
 	for _, ph := range phs {
-		pkg, err := ph.check(ctx, s)
+		pkg, err := ph.await(ctx, s)
 		if err != nil {
 			return nil, err
 		}
@@ -1018,12 +1073,12 @@
 		iolimit <- struct{}{} // acquire token
 		group.Go(func() error {
 			defer func() { <-iolimit }() // release token
-			v, err := s.buildSymbolHandle(ctx, f).handle.Get(ctx, s.generation, s)
+			symbols, err := s.symbolize(ctx, f)
 			if err != nil {
 				return err
 			}
 			resultMu.Lock()
-			result[uri] = v.(*symbolData).symbols
+			result[uri] = symbols
 			resultMu.Unlock()
 			return nil
 		})
@@ -1090,10 +1145,10 @@
 	defer s.mu.Unlock()
 
 	results := map[string]source.Package{}
-	for _, ph := range s.packages {
-		cachedPkg, err := ph.cached(s.generation)
+	s.packages.Range(func(_, v interface{}) {
+		cachedPkg, err := v.(*packageHandle).cached()
 		if err != nil {
-			continue
+			return
 		}
 		for importPath, newPkg := range cachedPkg.imports {
 			if oldPkg, ok := results[string(importPath)]; ok {
@@ -1105,7 +1160,7 @@
 				results[string(importPath)] = newPkg
 			}
 		}
-	}
+	})
 	return results, nil
 }
 
@@ -1126,76 +1181,6 @@
 	return match
 }
 
-func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := packageKey{
-		id:   id,
-		mode: mode,
-	}
-	return s.packages[key]
-}
-
-func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	return s.symbols[uri]
-}
-
-func (s *snapshot) addSymbolHandle(uri span.URI, sh *symbolHandle) *symbolHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	// If the package handle has already been cached,
-	// return the cached handle instead of overriding it.
-	if sh, ok := s.symbols[uri]; ok {
-		return sh
-	}
-	s.symbols[uri] = sh
-	return sh
-}
-
-func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := actionKey{
-		pkg: packageKey{
-			id:   id,
-			mode: m,
-		},
-		analyzer: a,
-	}
-	return s.actions[key]
-}
-
-func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := actionKey{
-		analyzer: ah.analyzer,
-		pkg: packageKey{
-			id:   ah.pkg.m.ID,
-			mode: ah.pkg.mode,
-		},
-	}
-	if ah, ok := s.actions[key]; ok {
-		return ah
-	}
-	s.actions[key] = ah
-	return ah
-}
-
-func (s *snapshot) getIDsForURI(uri span.URI) []PackageID {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	return s.meta.ids[uri]
-}
-
 func (s *snapshot) getMetadata(id PackageID) *KnownMetadata {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -1203,77 +1188,34 @@
 	return s.meta.metadata[id]
 }
 
-func (s *snapshot) shouldLoad(scope interface{}) bool {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	g := s.meta
-
-	switch scope := scope.(type) {
-	case PackagePath:
-		var meta *KnownMetadata
-		for _, m := range g.metadata {
-			if m.PkgPath != scope {
-				continue
-			}
-			meta = m
-		}
-		if meta == nil || meta.ShouldLoad {
-			return true
-		}
-		return false
-	case fileURI:
-		uri := span.URI(scope)
-		ids := g.ids[uri]
-		if len(ids) == 0 {
-			return true
-		}
-		for _, id := range ids {
-			m, ok := g.metadata[id]
-			if !ok || m.ShouldLoad {
-				return true
-			}
-		}
-		return false
-	default:
-		return true
-	}
-}
-
+// clearShouldLoad clears package IDs that no longer need to be reloaded after
+// scopes has been loaded.
 func (s *snapshot) clearShouldLoad(scopes ...interface{}) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	g := s.meta
-
-	var updates map[PackageID]*KnownMetadata
-	markLoaded := func(m *KnownMetadata) {
-		if updates == nil {
-			updates = make(map[PackageID]*KnownMetadata)
-		}
-		next := *m
-		next.ShouldLoad = false
-		updates[next.ID] = &next
-	}
 	for _, scope := range scopes {
 		switch scope := scope.(type) {
 		case PackagePath:
-			for _, m := range g.metadata {
-				if m.PkgPath == scope {
-					markLoaded(m)
+			var toDelete []PackageID
+			for id, pkgPaths := range s.shouldLoad {
+				for _, pkgPath := range pkgPaths {
+					if pkgPath == scope {
+						toDelete = append(toDelete, id)
+					}
 				}
 			}
+			for _, id := range toDelete {
+				delete(s.shouldLoad, id)
+			}
 		case fileURI:
 			uri := span.URI(scope)
-			ids := g.ids[uri]
+			ids := s.meta.ids[uri]
 			for _, id := range ids {
-				if m, ok := g.metadata[id]; ok {
-					markLoaded(m)
-				}
+				delete(s.shouldLoad, id)
 			}
 		}
 	}
-	s.meta = g.Clone(updates)
 }
 
 // noValidMetadataForURILocked reports whether there is any valid metadata for
@@ -1291,16 +1233,6 @@
 	return true
 }
 
-// noValidMetadataForID reports whether there is no valid metadata for the
-// given ID.
-func (s *snapshot) noValidMetadataForID(id PackageID) bool {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	m := s.meta.metadata[id]
-	return m == nil || !m.Valid
-}
-
 func (s *snapshot) isWorkspacePackage(id PackageID) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -1364,7 +1296,7 @@
 
 	var open []source.VersionedFileHandle
 	s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
-		if s.isOpenLocked(fh.URI()) {
+		if isFileOpen(fh) {
 			open = append(open, fh)
 		}
 	})
@@ -1373,6 +1305,10 @@
 
 func (s *snapshot) isOpenLocked(uri span.URI) bool {
 	fh, _ := s.files.Get(uri)
+	return isFileOpen(fh)
+}
+
+func isFileOpen(fh source.VersionedFileHandle) bool {
 	_, open := fh.(*overlay)
 	return open
 }
@@ -1400,6 +1336,10 @@
 }
 
 func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
+	if wsErr := s.workspace.criticalError(ctx, s); wsErr != nil {
+		return wsErr
+	}
+
 	loadErr := s.awaitLoadedAllErrors(ctx)
 	if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) {
 		return nil
@@ -1459,32 +1399,45 @@
 	// Do not return results until the snapshot's view has been initialized.
 	s.AwaitInitialized(ctx)
 
-	// TODO(rstambler): Should we be more careful about returning the
+	// TODO(rfindley): Should we be more careful about returning the
 	// initialization error? Is it possible for the initialization error to be
 	// corrected without a successful reinitialization?
 	s.mu.Lock()
 	initializedErr := s.initializedErr
 	s.mu.Unlock()
+
 	if initializedErr != nil {
 		return initializedErr
 	}
 
+	// TODO(rfindley): revisit this handling. Calling reloadWorkspace with a
+	// cancelled context should have the same effect, so this preemptive handling
+	// should not be necessary.
+	//
+	// Also: GetCriticalError ignores context cancellation errors. Should we be
+	// returning nil here?
 	if ctx.Err() != nil {
 		return &source.CriticalError{MainError: ctx.Err()}
 	}
 
+	// TODO(rfindley): reloading is not idempotent: if we try to reload or load
+	// orphaned files below and fail, we won't try again. For that reason, we
+	// could get different results from subsequent calls to this function, which
+	// may cause critical errors to be suppressed.
+
 	if err := s.reloadWorkspace(ctx); err != nil {
 		diags := s.extractGoCommandErrors(ctx, err)
 		return &source.CriticalError{
-			MainError: err,
-			DiagList:  diags,
+			MainError:   err,
+			Diagnostics: diags,
 		}
 	}
+
 	if err := s.reloadOrphanedFiles(ctx); err != nil {
 		diags := s.extractGoCommandErrors(ctx, err)
 		return &source.CriticalError{
-			MainError: err,
-			DiagList:  diags,
+			MainError:   err,
+			Diagnostics: diags,
 		}
 	}
 	return nil
@@ -1510,39 +1463,42 @@
 
 // reloadWorkspace reloads the metadata for all invalidated workspace packages.
 func (s *snapshot) reloadWorkspace(ctx context.Context) error {
-	// See which of the workspace packages are missing metadata.
+	var scopes []interface{}
+	var seen map[PackagePath]bool
 	s.mu.Lock()
-	missingMetadata := len(s.workspacePackages) == 0 || len(s.meta.metadata) == 0
-	pkgPathSet := map[PackagePath]struct{}{}
-	for id, pkgPath := range s.workspacePackages {
-		if m, ok := s.meta.metadata[id]; ok && m.Valid {
-			continue
+	for _, pkgPaths := range s.shouldLoad {
+		for _, pkgPath := range pkgPaths {
+			if seen == nil {
+				seen = make(map[PackagePath]bool)
+			}
+			if seen[pkgPath] {
+				continue
+			}
+			seen[pkgPath] = true
+			scopes = append(scopes, pkgPath)
 		}
-		missingMetadata = true
-
-		// Don't try to reload "command-line-arguments" directly.
-		if source.IsCommandLineArguments(string(pkgPath)) {
-			continue
-		}
-		pkgPathSet[pkgPath] = struct{}{}
 	}
 	s.mu.Unlock()
 
-	// If the view's build configuration is invalid, we cannot reload by
-	// package path. Just reload the directory instead.
-	if missingMetadata && !s.ValidBuildConfiguration() {
-		return s.load(ctx, false, viewLoadScope("LOAD_INVALID_VIEW"))
-	}
-
-	if len(pkgPathSet) == 0 {
+	if len(scopes) == 0 {
 		return nil
 	}
 
-	var pkgPaths []interface{}
-	for pkgPath := range pkgPathSet {
-		pkgPaths = append(pkgPaths, pkgPath)
+	// If the view's build configuration is invalid, we cannot reload by
+	// package path. Just reload the directory instead.
+	if !s.ValidBuildConfiguration() {
+		scopes = []interface{}{viewLoadScope("LOAD_INVALID_VIEW")}
 	}
-	return s.load(ctx, false, pkgPaths...)
+
+	err := s.load(ctx, false, scopes...)
+
+	// Unless the context was canceled, set "shouldLoad" to false for all
+	// of the metadata we attempted to load.
+	if !errors.Is(err, context.Canceled) {
+		s.clearShouldLoad(scopes...)
+	}
+
+	return err
 }
 
 func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
@@ -1608,7 +1564,7 @@
 		}
 		// If the URI doesn't belong to this view, then it's not in a workspace
 		// package and should not be reloaded directly.
-		if !contains(s.view.session.viewsOf(uri), s.view) {
+		if !source.InDir(s.view.folder.Filename(), uri.Filename()) {
 			return
 		}
 		// If the file is not open and is in a vendor directory, don't treat it
@@ -1636,6 +1592,13 @@
 	return false
 }
 
+// TODO(golang/go#53756): this function needs to consider more than just the
+// absolute URI, for example:
+//   - the position of /vendor/ with respect to the relevant module root
+//   - whether or not go.work is in use (as vendoring isn't supported in workspace mode)
+//
+// Most likely, each call site of inVendor needs to be reconsidered to
+// understand and correctly implement the desired behavior.
 func inVendor(uri span.URI) bool {
 	if !strings.Contains(string(uri), "/vendor/") {
 		return false
@@ -1649,10 +1612,6 @@
 	return strings.Contains(split[1], "/")
 }
 
-func generationName(v *View, snapshotID uint64) string {
-	return fmt.Sprintf("v%v/%v", v.id, snapshotID)
-}
-
 // unappliedChanges is a file source that handles an uncloned snapshot.
 type unappliedChanges struct {
 	originalSnapshot *snapshot
@@ -1666,12 +1625,11 @@
 	return ac.originalSnapshot.GetFile(ctx, uri)
 }
 
-func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) *snapshot {
+func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) {
 	ctx, done := event.Start(ctx, "snapshot.clone")
 	defer done()
 
-	var vendorChanged bool
-	newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes, &unappliedChanges{
+	newWorkspace, reinit := s.workspace.Clone(ctx, changes, &unappliedChanges{
 		originalSnapshot: s,
 		changes:          changes,
 	})
@@ -1679,94 +1637,86 @@
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1))
+	// If there is an initialization error and a vendor directory changed, try to
+	// reinit.
+	if s.initializedErr != nil {
+		for uri := range changes {
+			if inVendor(uri) {
+				reinit = true
+				break
+			}
+		}
+	}
+
 	bgCtx, cancel := context.WithCancel(bgCtx)
 	result := &snapshot{
-		id:                s.id + 1,
-		generation:        newGen,
-		view:              s.view,
-		backgroundCtx:     bgCtx,
-		cancel:            cancel,
-		builtin:           s.builtin,
-		initializeOnce:    s.initializeOnce,
-		initializedErr:    s.initializedErr,
-		packages:          make(map[packageKey]*packageHandle, len(s.packages)),
-		actions:           make(map[actionKey]*actionHandle, len(s.actions)),
-		files:             s.files.Clone(),
-		goFiles:           s.goFiles.Clone(),
-		parseKeysByURI:    s.parseKeysByURI.Clone(),
-		symbols:           make(map[span.URI]*symbolHandle, len(s.symbols)),
-		workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
-		unloadableFiles:   make(map[span.URI]struct{}, len(s.unloadableFiles)),
-		parseModHandles:   make(map[span.URI]*parseModHandle, len(s.parseModHandles)),
-		parseWorkHandles:  make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)),
-		modTidyHandles:    make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)),
-		modWhyHandles:     make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)),
-		knownSubdirs:      make(map[span.URI]struct{}, len(s.knownSubdirs)),
-		workspace:         newWorkspace,
+		id:                   s.id + 1,
+		store:                s.store,
+		view:                 s.view,
+		backgroundCtx:        bgCtx,
+		cancel:               cancel,
+		builtin:              s.builtin,
+		initialized:          s.initialized,
+		initializedErr:       s.initializedErr,
+		packages:             s.packages.Clone(),
+		isActivePackageCache: s.isActivePackageCache.Clone(),
+		actions:              s.actions.Clone(),
+		files:                s.files.Clone(),
+		parsedGoFiles:        s.parsedGoFiles.Clone(),
+		parseKeysByURI:       s.parseKeysByURI.Clone(),
+		symbolizeHandles:     s.symbolizeHandles.Clone(),
+		workspacePackages:    make(map[PackageID]PackagePath, len(s.workspacePackages)),
+		unloadableFiles:      make(map[span.URI]struct{}, len(s.unloadableFiles)),
+		parseModHandles:      s.parseModHandles.Clone(),
+		parseWorkHandles:     s.parseWorkHandles.Clone(),
+		modTidyHandles:       s.modTidyHandles.Clone(),
+		modWhyHandles:        s.modWhyHandles.Clone(),
+		knownSubdirs:         s.knownSubdirs.Clone(),
+		workspace:            newWorkspace,
 	}
 
-	if !workspaceChanged && s.workspaceDirHandle != nil {
-		result.workspaceDirHandle = s.workspaceDirHandle
-		newGen.Inherit(s.workspaceDirHandle)
+	// The snapshot should be initialized if either s was uninitialized, or we've
+	// detected a change that triggers reinitialization.
+	if reinit {
+		result.initialized = false
 	}
 
-	// Copy all of the FileHandles.
-	for k, v := range s.symbols {
-		if change, ok := changes[k]; ok {
-			if change.exists {
-				result.symbols[k] = result.buildSymbolHandle(ctx, change.fileHandle)
-			}
-			continue
-		}
-		newGen.Inherit(v.handle)
-		result.symbols[k] = v
-	}
+	// Create a lease on the new snapshot.
+	// (Best to do this early in case the code below hides an
+	// incref/decref operation that might destroy it prematurely.)
+	release := result.Acquire()
 
 	// Copy the set of unloadable files.
+	//
+	// TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on
+	// changes to environment or workspace layout, or more generally on any
+	// metadata change?
 	for k, v := range s.unloadableFiles {
 		result.unloadableFiles[k] = v
 	}
-	// Copy all of the modHandles.
-	for k, v := range s.parseModHandles {
-		result.parseModHandles[k] = v
-	}
-	// Copy all of the parseWorkHandles.
-	for k, v := range s.parseWorkHandles {
-		result.parseWorkHandles[k] = v
-	}
 
+	// TODO(adonovan): merge loops over "changes".
 	for uri := range changes {
 		keys, ok := result.parseKeysByURI.Get(uri)
 		if ok {
 			for _, key := range keys {
-				result.goFiles.Delete(key)
+				result.parsedGoFiles.Delete(key)
 			}
 			result.parseKeysByURI.Delete(uri)
 		}
-	}
 
-	// Copy all of the go.mod-related handles. They may be invalidated later,
-	// so we inherit them at the end of the function.
-	for k, v := range s.modTidyHandles {
-		if _, ok := changes[k]; ok {
-			continue
-		}
-		result.modTidyHandles[k] = v
-	}
-	for k, v := range s.modWhyHandles {
-		if _, ok := changes[k]; ok {
-			continue
-		}
-		result.modWhyHandles[k] = v
+		// Invalidate go.mod-related handles.
+		result.modTidyHandles.Delete(uri)
+		result.modWhyHandles.Delete(uri)
+
+		// Invalidate handles for cached symbols.
+		result.symbolizeHandles.Delete(uri)
 	}
 
 	// Add all of the known subdirectories, but don't update them for the
 	// changed files. We need to rebuild the workspace module to know the
 	// true set of known subdirectories, but we don't want to do that in clone.
-	for k, v := range s.knownSubdirs {
-		result.knownSubdirs[k] = v
-	}
+	result.knownSubdirs = s.knownSubdirs.Clone()
 	result.knownSubdirsPatternCache = s.knownSubdirsPatternCache
 	for _, c := range changes {
 		result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c)
@@ -1777,29 +1727,25 @@
 	directIDs := map[PackageID]bool{}
 
 	// Invalidate all package metadata if the workspace module has changed.
-	if workspaceReload {
+	if reinit {
 		for k := range s.meta.metadata {
 			directIDs[k] = true
 		}
 	}
 
 	// Compute invalidations based on file changes.
-	changedPkgFiles := map[PackageID]bool{} // packages whose file set may have changed
-	anyImportDeleted := false
-	anyFileOpenedOrClosed := false
-	for uri, change := range changes {
-		// Maybe reinitialize the view if we see a change in the vendor
-		// directory.
-		if inVendor(uri) {
-			vendorChanged = true
-		}
+	anyImportDeleted := false      // import deletions can resolve cycles
+	anyFileOpenedOrClosed := false // opened files affect workspace packages
+	anyFileAdded := false          // adding a file can resolve missing dependencies
 
+	for uri, change := range changes {
 		// The original FileHandle for this URI is cached on the snapshot.
 		originalFH, _ := s.files.Get(uri)
 		var originalOpen, newOpen bool
 		_, originalOpen = originalFH.(*overlay)
 		_, newOpen = change.fileHandle.(*overlay)
-		anyFileOpenedOrClosed = originalOpen != newOpen
+		anyFileOpenedOrClosed = anyFileOpenedOrClosed || (originalOpen != newOpen)
+		anyFileAdded = anyFileAdded || (originalFH == nil && change.fileHandle != nil)
 
 		// If uri is a Go file, check if it has changed in a way that would
 		// invalidate metadata. Note that we can't use s.view.FileKind here,
@@ -1810,16 +1756,11 @@
 			invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, originalFH, change.fileHandle)
 		}
 
-		invalidateMetadata = invalidateMetadata || forceReloadMetadata || workspaceReload
+		invalidateMetadata = invalidateMetadata || forceReloadMetadata || reinit
 		anyImportDeleted = anyImportDeleted || importDeleted
 
 		// Mark all of the package IDs containing the given file.
 		filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged)
-		if pkgFileChanged {
-			for id := range filePackageIDs {
-				changedPkgFiles[id] = true
-			}
-		}
 		for id := range filePackageIDs {
 			directIDs[id] = directIDs[id] || invalidateMetadata
 		}
@@ -1827,17 +1768,16 @@
 		// Invalidate the previous modTidyHandle if any of the files have been
 		// saved or if any of the metadata has been invalidated.
 		if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) {
-			// TODO(rstambler): Only delete mod handles for which the
-			// withoutURI is relevant.
-			for k := range s.modTidyHandles {
-				delete(result.modTidyHandles, k)
-			}
-			for k := range s.modWhyHandles {
-				delete(result.modWhyHandles, k)
-			}
+			// TODO(maybe): Only delete mod handles for
+			// which the withoutURI is relevant.
+			// Requires reverse-engineering the go command. (!)
+
+			result.modTidyHandles.Clear()
+			result.modWhyHandles.Clear()
 		}
-		delete(result.parseModHandles, uri)
-		delete(result.parseWorkHandles, uri)
+
+		result.parseModHandles.Delete(uri)
+		result.parseWorkHandles.Delete(uri)
 		// Handle the invalidated file; it may have new contents or not exist.
 		if !change.exists {
 			result.files.Delete(uri)
@@ -1870,6 +1810,19 @@
 		}
 	}
 
+	// Adding a file can resolve missing dependencies from existing packages.
+	//
+	// We could be smart here and try to guess which packages may have been
+	// fixed, but until that proves necessary, just invalidate metadata for any
+	// package with missing dependencies.
+	if anyFileAdded {
+		for id, metadata := range s.meta.metadata {
+			if len(metadata.MissingDeps) > 0 {
+				directIDs[id] = true
+			}
+		}
+	}
+
 	// Invalidate reverse dependencies too.
 	// idsToInvalidate keeps track of transitive reverse dependencies.
 	// If an ID is present in the map, invalidate its types.
@@ -1894,31 +1847,29 @@
 		addRevDeps(id, invalidateMetadata)
 	}
 
-	// Copy the package type information.
-	for k, v := range s.packages {
-		if _, ok := idsToInvalidate[k.id]; ok {
-			continue
+	// Delete invalidated package type information.
+	for id := range idsToInvalidate {
+		for _, mode := range source.AllParseModes {
+			key := packageKey{mode, id}
+			result.packages.Delete(key)
 		}
-		newGen.Inherit(v.handle)
-		result.packages[k] = v
 	}
 
-	// Copy the package analysis information.
-	for k, v := range s.actions {
-		if _, ok := idsToInvalidate[k.pkg.id]; ok {
-			continue
+	// Copy actions.
+	// TODO(adonovan): opt: avoid iteration over s.actions.
+	var actionsToDelete []actionKey
+	s.actions.Range(func(k, _ interface{}) {
+		key := k.(actionKey)
+		if _, ok := idsToInvalidate[key.pkg.id]; ok {
+			actionsToDelete = append(actionsToDelete, key)
 		}
-		newGen.Inherit(v.handle)
-		result.actions[k] = v
+	})
+	for _, key := range actionsToDelete {
+		result.actions.Delete(key)
 	}
 
-	// If the workspace mode has changed, we must delete all metadata, as it
-	// is unusable and may produce confusing or incorrect diagnostics.
 	// If a file has been deleted, we must delete metadata for all packages
 	// containing that file.
-	workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
-
-	// Don't keep package metadata for packages that have lost files.
 	//
 	// TODO(rfindley): why not keep invalid metadata in this case? If we
 	// otherwise allow operate on invalid metadata, why not continue to do so,
@@ -1936,27 +1887,71 @@
 		}
 	}
 
+	// Any packages that need loading in s still need loading in the new
+	// snapshot.
+	for k, v := range s.shouldLoad {
+		if result.shouldLoad == nil {
+			result.shouldLoad = make(map[PackageID][]PackagePath)
+		}
+		result.shouldLoad[k] = v
+	}
+
+	// TODO(rfindley): consolidate the this workspace mode detection with
+	// workspace invalidation.
+	workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
+
+	// We delete invalid metadata in the following cases:
+	// - If we are forcing a reload of metadata.
+	// - If the workspace mode has changed, as stale metadata may produce
+	//   confusing or incorrect diagnostics.
+	//
+	// TODO(rfindley): we should probably also clear metadata if we are
+	// reinitializing the workspace, as otherwise we could leave around a bunch
+	// of irrelevant and duplicate metadata (for example, if the module path
+	// changed). However, this breaks the "experimentalUseInvalidMetadata"
+	// feature, which relies on stale metadata when, for example, a go.mod file
+	// is broken via invalid syntax.
+	deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
+
 	// Compute which metadata updates are required. We only need to invalidate
 	// packages directly containing the affected file, and only if it changed in
 	// a relevant way.
 	metadataUpdates := make(map[PackageID]*KnownMetadata)
-	deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
 	for k, v := range s.meta.metadata {
 		invalidateMetadata := idsToInvalidate[k]
+
+		// For metadata that has been newly invalidated, capture package paths
+		// requiring reloading in the shouldLoad map.
+		if invalidateMetadata && !source.IsCommandLineArguments(string(v.ID)) {
+			if result.shouldLoad == nil {
+				result.shouldLoad = make(map[PackageID][]PackagePath)
+			}
+			needsReload := []PackagePath{v.PkgPath}
+			if v.ForTest != "" && v.ForTest != v.PkgPath {
+				// When reloading test variants, always reload their ForTest package as
+				// well. Otherwise, we may miss test variants in the resulting load.
+				//
+				// TODO(rfindley): is this actually sufficient? Is it possible that
+				// other test variants may be invalidated? Either way, we should
+				// determine exactly what needs to be reloaded here.
+				needsReload = append(needsReload, v.ForTest)
+			}
+			result.shouldLoad[k] = needsReload
+		}
+
+		// Check whether the metadata should be deleted.
 		if skipID[k] || (invalidateMetadata && deleteInvalidMetadata) {
 			metadataUpdates[k] = nil
 			continue
 		}
+
+		// Check if the metadata has changed.
 		valid := v.Valid && !invalidateMetadata
-		pkgFilesChanged := v.PkgFilesChanged || changedPkgFiles[k]
-		shouldLoad := v.ShouldLoad || invalidateMetadata
-		if valid != v.Valid || pkgFilesChanged != v.PkgFilesChanged || shouldLoad != v.ShouldLoad {
+		if valid != v.Valid {
 			// Mark invalidated metadata rather than deleting it outright.
 			metadataUpdates[k] = &KnownMetadata{
-				Metadata:        v.Metadata,
-				Valid:           valid,
-				PkgFilesChanged: pkgFilesChanged,
-				ShouldLoad:      shouldLoad,
+				Metadata: v.Metadata,
+				Valid:    valid,
 			}
 		}
 	}
@@ -1970,26 +1965,14 @@
 		result.meta = s.meta
 	}
 
-	// Update workspace packages, if necessary.
+	// Update workspace and active packages, if necessary.
 	if result.meta != s.meta || anyFileOpenedOrClosed {
 		result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta)
+		result.resetIsActivePackageLocked()
 	} else {
 		result.workspacePackages = s.workspacePackages
 	}
 
-	// Inherit all of the go.mod-related handles.
-	for _, v := range result.modTidyHandles {
-		newGen.Inherit(v.handle)
-	}
-	for _, v := range result.modWhyHandles {
-		newGen.Inherit(v.handle)
-	}
-	for _, v := range result.parseModHandles {
-		newGen.Inherit(v.handle)
-	}
-	for _, v := range result.parseWorkHandles {
-		newGen.Inherit(v.handle)
-	}
 	// Don't bother copying the importedBy graph,
 	// as it changes each time we update metadata.
 
@@ -1998,15 +1981,8 @@
 	if workspaceModeChanged {
 		result.workspacePackages = map[PackageID]PackagePath{}
 	}
-
-	// The snapshot may need to be reinitialized.
-	if workspaceReload || vendorChanged {
-		if workspaceChanged || result.initializedErr != nil {
-			result.initializeOnce = &sync.Once{}
-		}
-	}
 	result.dumpWorkspace("clone")
-	return result
+	return result, release
 }
 
 // invalidatedPackageIDs returns all packages invalidated by a change to uri.
@@ -2176,28 +2152,20 @@
 	return invalidate, pkgFileChanged, importDeleted
 }
 
-// peekOrParse returns the cached ParsedGoFile if it exists, otherwise parses
-// without caching.
+// peekOrParse returns the cached ParsedGoFile if it exists,
+// otherwise parses without populating the cache.
 //
 // It returns an error if the file could not be read (note that parsing errors
 // are stored in ParsedGoFile.ParseErr).
 //
 // lockedSnapshot must be locked.
 func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
-	key := parseKey{file: fh.FileIdentity(), mode: mode}
-	if pgh, ok := lockedSnapshot.goFiles.Get(key); ok {
-		cached := pgh.handle.Cached(lockedSnapshot.generation)
-		if cached != nil {
-			cached := cached.(*parseGoData)
-			if cached.parsed != nil {
-				return cached.parsed, nil
-			}
-		}
+	// Peek in the cache without populating it.
+	// We do this to reduce retained heap, not work.
+	if parsed, _ := lockedSnapshot.peekParseGoLocked(fh, mode); parsed != nil {
+		return parsed, nil // cache hit
 	}
-
-	fset := token.NewFileSet()
-	data := parseGo(ctx, fset, fh, mode)
-	return data.parsed, data.err
+	return parseGoImpl(ctx, token.NewFileSet(), fh, mode)
 }
 
 func magicCommentsChanged(original *ast.File, current *ast.File) bool {
@@ -2297,7 +2265,7 @@
 	return buildWorkspaceModFile(ctx, allModules, s)
 }
 
-// TODO(rfindley): move this to workspacemodule.go
+// TODO(rfindley): move this to workspace.go
 func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) (*modfile.File, error) {
 	file := &modfile.File{}
 	file.AddModuleStmt("gopls-workspace")
@@ -2335,8 +2303,8 @@
 			goVersion = parsed.Go.Version
 		}
 		path := parsed.Module.Mod.Path
-		if _, ok := paths[path]; ok {
-			return nil, fmt.Errorf("found module %q twice in the workspace", path)
+		if seen, ok := paths[path]; ok {
+			return nil, fmt.Errorf("found module %q multiple times in the workspace, at:\n\t%q\n\t%q", path, seen, modURI)
 		}
 		paths[path] = modURI
 		// If the module's path includes a major version, we expect it to have
diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go
index d56a036..e98f554 100644
--- a/internal/lsp/cache/symbols.go
+++ b/internal/lsp/cache/symbols.go
@@ -18,42 +18,48 @@
 	"golang.org/x/tools/internal/memoize"
 )
 
-// A symbolHandle contains a handle to the result of symbolizing a file.
-type symbolHandle struct {
-	handle *memoize.Handle
-}
+// symbolize returns the result of symbolizing the file identified by fh, using a cache.
+func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]source.Symbol, error) {
+	uri := fh.URI()
 
-// symbolData contains the data produced by extracting symbols from a file.
-type symbolData struct {
-	symbols []source.Symbol
-	err     error
-}
+	s.mu.Lock()
+	entry, hit := s.symbolizeHandles.Get(uri)
+	s.mu.Unlock()
 
-// buildSymbolHandle returns a handle to the result of symbolizing a file,
-// if necessary creating it and saving it in the snapshot.
-func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle {
-	if h := s.getSymbolHandle(fh.URI()); h != nil {
-		return h
-	}
-	type symbolHandleKey source.Hash
-	key := symbolHandleKey(fh.FileIdentity().Hash)
-	handle := s.generation.Bind(key, func(_ context.Context, arg memoize.Arg) interface{} {
-		snapshot := arg.(*snapshot)
-		symbols, err := symbolize(snapshot, fh)
-		return &symbolData{symbols, err}
-	}, nil)
-
-	sh := &symbolHandle{
-		handle: handle,
+	type symbolizeResult struct {
+		symbols []source.Symbol
+		err     error
 	}
 
-	return s.addSymbolHandle(fh.URI(), sh)
+	// Cache miss?
+	if !hit {
+		type symbolHandleKey source.Hash
+		key := symbolHandleKey(fh.FileIdentity().Hash)
+		promise, release := s.store.Promise(key, func(_ context.Context, arg interface{}) interface{} {
+			symbols, err := symbolizeImpl(arg.(*snapshot), fh)
+			return symbolizeResult{symbols, err}
+		})
+
+		entry = promise
+
+		s.mu.Lock()
+		s.symbolizeHandles.Set(uri, entry, func(_, _ interface{}) { release() })
+		s.mu.Unlock()
+	}
+
+	// Await result.
+	v, err := s.awaitPromise(ctx, entry.(*memoize.Promise))
+	if err != nil {
+		return nil, err
+	}
+	res := v.(symbolizeResult)
+	return res.symbols, res.err
 }
 
-// symbolize reads and parses a file and extracts symbols from it.
+// symbolizeImpl reads and parses a file and extracts symbols from it.
 // It may use a parsed file already present in the cache but
 // otherwise does not populate the cache.
-func symbolize(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) {
+func symbolizeImpl(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) {
 	src, err := fh.Read()
 	if err != nil {
 		return nil, err
@@ -64,9 +70,13 @@
 		fileDesc *token.File
 	)
 
-	// If the file has already been fully parsed through the cache, we can just
-	// use the result.
-	if pgf := snapshot.cachedPGF(fh, source.ParseFull); pgf != nil {
+	// If the file has already been fully parsed through the
+	// cache, we can just use the result. But we don't want to
+	// populate the cache after a miss.
+	snapshot.mu.Lock()
+	pgf, _ := snapshot.peekParseGoLocked(fh, source.ParseFull)
+	snapshot.mu.Unlock()
+	if pgf != nil {
 		file = pgf.File
 		fileDesc = pgf.Tok
 	}
diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go
index 1810f6e..6150109 100644
--- a/internal/lsp/cache/view.go
+++ b/internal/lsp/cache/view.go
@@ -47,10 +47,6 @@
 	// background contexts created for this view.
 	baseCtx context.Context
 
-	// cancel is called when all action being performed by the current view
-	// should be stopped.
-	cancel context.CancelFunc
-
 	// name is the user visible name of this view.
 	name string
 
@@ -71,8 +67,18 @@
 	// attempt at initialization.
 	initCancelFirstAttempt context.CancelFunc
 
-	snapshotMu sync.Mutex
-	snapshot   *snapshot // nil after shutdown has been called
+	// Track the latest snapshot via the snapshot field, guarded by snapshotMu.
+	//
+	// Invariant: whenever the snapshot field is overwritten, destroy(snapshot)
+	// is called on the previous (overwritten) snapshot while snapshotMu is held,
+	// incrementing snapshotWG. During shutdown the final snapshot is
+	// overwritten with nil and destroyed, guaranteeing that all observed
+	// snapshots have been destroyed via the destroy method, and snapshotWG may
+	// be waited upon to let these destroy operations complete.
+	snapshotMu      sync.Mutex
+	snapshot        *snapshot      // latest snapshot; nil after shutdown has been called
+	releaseSnapshot func()         // called when snapshot is no longer needed
+	snapshotWG      sync.WaitGroup // refcount for pending destroy operations
 
 	// initialWorkspaceLoad is closed when the first workspace initialization has
 	// completed. If we failed to load, we only retry if the go.mod file changes,
@@ -82,6 +88,10 @@
 	// initializationSema is used limit concurrent initialization of snapshots in
 	// the view. We use a channel instead of a mutex to avoid blocking when a
 	// context is canceled.
+	//
+	// This field (along with snapshot.initialized) guards against duplicate
+	// initialization of snapshots. Do not change it without adjusting snapshot
+	// accordingly.
 	initializationSema chan struct{}
 
 	// rootURI is the rootURI directory of this view. If we are in GOPATH mode, this
@@ -106,6 +116,9 @@
 	environmentVariables
 
 	// userGo111Module is the user's value of GO111MODULE.
+	//
+	// TODO(rfindley): is there really value in memoizing this variable? It seems
+	// simpler to make this a function/method.
 	userGo111Module go111module
 
 	// The value of GO111MODULE we want to run with.
@@ -128,6 +141,11 @@
 	gocache, gopath, goroot, goprivate, gomodcache, go111module string
 }
 
+// workspaceMode holds various flags defining how the gopls workspace should
+// behave. They may be derived from the environment, user configuration, or
+// depend on the Go version.
+//
+// TODO(rfindley): remove workspace mode, in favor of explicit checks.
 type workspaceMode int
 
 const (
@@ -161,9 +179,10 @@
 
 func (v *View) ID() string { return v.id }
 
-// tempModFile creates a temporary go.mod file based on the contents of the
-// given go.mod file. It is the caller's responsibility to clean up the files
-// when they are done using them.
+// tempModFile creates a temporary go.mod file based on the contents
+// of the given go.mod file. On success, it is the caller's
+// responsibility to call the cleanup function when the file is no
+// longer needed.
 func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) {
 	filenameHash := source.Hashf("%s", modFh.URI().Filename())
 	tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash))
@@ -184,7 +203,9 @@
 		return "", nil, err
 	}
 
-	cleanup = func() {
+	// We use a distinct name here to avoid subtlety around the fact
+	// that both 'return' and 'defer' update the "cleanup" variable.
+	doCleanup := func() {
 		_ = os.Remove(tmpSumName)
 		_ = os.Remove(tmpURI.Filename())
 	}
@@ -192,7 +213,7 @@
 	// Be careful to clean up if we return an error from this function.
 	defer func() {
 		if err != nil {
-			cleanup()
+			doCleanup()
 			cleanup = nil
 		}
 	}()
@@ -200,11 +221,11 @@
 	// Create an analogous go.sum, if one exists.
 	if gosum != nil {
 		if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil {
-			return "", cleanup, err
+			return "", nil, err
 		}
 	}
 
-	return tmpURI, cleanup, nil
+	return tmpURI, doCleanup, nil
 }
 
 // Name returns the user visible name of this view.
@@ -371,13 +392,14 @@
 	relativeTo := s.view.folder.Filename()
 
 	searched := 0
+	filterer := buildFilterer(dir, s.view.gomodcache, s.view.Options())
 	// Change to WalkDir when we move up to 1.16
 	err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
 		relpath := strings.TrimPrefix(path, relativeTo)
-		excluded := pathExcludedByFilter(relpath, dir, s.view.gomodcache, s.view.options)
+		excluded := pathExcludedByFilter(relpath, filterer)
 		if fileHasExtension(path, suffixes) && !excluded && !fi.IsDir() {
 			k := span.URIFromPath(path)
 			_, err := s.GetVersionedFile(ctx, k)
@@ -407,17 +429,20 @@
 		return false
 	}
 
-	return !v.filters(uri)
+	return !v.filterFunc()(uri)
 }
 
-// filters reports whether uri is filtered by the currently configured
+// filterFunc returns a func that reports whether uri is filtered by the currently configured
 // directoryFilters.
-func (v *View) filters(uri span.URI) bool {
-	// Only filter relative to the configured root directory.
-	if source.InDirLex(v.folder.Filename(), uri.Filename()) {
-		return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options())
+func (v *View) filterFunc() func(span.URI) bool {
+	filterer := buildFilterer(v.rootURI.Filename(), v.gomodcache, v.Options())
+	return func(uri span.URI) bool {
+		// Only filter relative to the configured root directory.
+		if source.InDirLex(v.folder.Filename(), uri.Filename()) {
+			return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), filterer)
+		}
+		return false
 	}
-	return false
 }
 
 func (v *View) mapFile(uri span.URI, f *fileBase) {
@@ -521,25 +546,26 @@
 	v.session.removeView(ctx, v)
 }
 
+// shutdown releases resources associated with the view, and waits for ongoing
+// work to complete.
+//
 // TODO(rFindley): probably some of this should also be one in View.Shutdown
 // above?
 func (v *View) shutdown(ctx context.Context) {
 	// Cancel the initial workspace load if it is still running.
 	v.initCancelFirstAttempt()
 
-	v.mu.Lock()
-	if v.cancel != nil {
-		v.cancel()
-		v.cancel = nil
-	}
-	v.mu.Unlock()
 	v.snapshotMu.Lock()
 	if v.snapshot != nil {
-		go v.snapshot.Destroy("View.shutdown")
+		v.releaseSnapshot()
+		v.destroy(v.snapshot, "View.shutdown")
 		v.snapshot = nil
+		v.releaseSnapshot = nil
 	}
 	v.snapshotMu.Unlock()
+
 	v.importsState.destroy()
+	v.snapshotWG.Wait()
 }
 
 func (v *View) Session() *Session {
@@ -594,7 +620,7 @@
 	if v.snapshot == nil {
 		panic("getSnapshot called after shutdown")
 	}
-	return v.snapshot, v.snapshot.generation.Acquire()
+	return v.snapshot, v.snapshot.Acquire()
 }
 
 func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) {
@@ -608,26 +634,36 @@
 		<-s.view.initializationSema
 	}()
 
-	if s.initializeOnce == nil {
+	s.mu.Lock()
+	initialized := s.initialized
+	s.mu.Unlock()
+
+	if initialized {
 		return
 	}
-	s.initializeOnce.Do(func() {
-		s.loadWorkspace(ctx, firstAttempt)
-		s.collectAllKnownSubdirs(ctx)
-	})
+
+	s.loadWorkspace(ctx, firstAttempt)
+	s.collectAllKnownSubdirs(ctx)
 }
 
 func (s *snapshot) loadWorkspace(ctx context.Context, firstAttempt bool) {
 	defer func() {
-		s.initializeOnce = nil
+		s.mu.Lock()
+		s.initialized = true
+		s.mu.Unlock()
 		if firstAttempt {
 			close(s.view.initialWorkspaceLoad)
 		}
 	}()
 
-	// If we have multiple modules, we need to load them by paths.
-	var scopes []interface{}
-	var modDiagnostics []*source.Diagnostic
+	// TODO(rFindley): we should only locate template files on the first attempt,
+	// or guard it via a different mechanism.
+	s.locateTemplateFiles(ctx)
+
+	// Collect module paths to load by parsing go.mod files. If a module fails to
+	// parse, capture the parsing failure as a critical diagnostic.
+	var scopes []interface{}                // scopes to load
+	var modDiagnostics []*source.Diagnostic // diagnostics for broken go.mod files
 	addError := func(uri span.URI, err error) {
 		modDiagnostics = append(modDiagnostics, &source.Diagnostic{
 			URI:      uri,
@@ -636,17 +672,23 @@
 			Message:  err.Error(),
 		})
 	}
-	s.locateTemplateFiles(ctx)
+
 	if len(s.workspace.getActiveModFiles()) > 0 {
 		for modURI := range s.workspace.getActiveModFiles() {
+			// Be careful not to add context cancellation errors as critical module
+			// errors.
 			fh, err := s.GetFile(ctx, modURI)
 			if err != nil {
-				addError(modURI, err)
+				if ctx.Err() == nil {
+					addError(modURI, err)
+				}
 				continue
 			}
 			parsed, err := s.ParseMod(ctx, fh)
 			if err != nil {
-				addError(modURI, err)
+				if ctx.Err() == nil {
+					addError(modURI, err)
+				}
 				continue
 			}
 			if parsed.File == nil || parsed.File.Module == nil {
@@ -665,7 +707,7 @@
 	if len(scopes) > 0 {
 		scopes = append(scopes, PackagePath("builtin"))
 	}
-	err := s.load(ctx, firstAttempt, scopes...)
+	err := s.load(ctx, true, scopes...)
 
 	// If the context is canceled on the first attempt, loading has failed
 	// because the go command has timed out--that should be a critical error.
@@ -684,18 +726,18 @@
 		event.Error(ctx, "initial workspace load failed", err)
 		extractedDiags := s.extractGoCommandErrors(ctx, err)
 		criticalErr = &source.CriticalError{
-			MainError: err,
-			DiagList:  append(modDiagnostics, extractedDiags...),
+			MainError:   err,
+			Diagnostics: append(modDiagnostics, extractedDiags...),
 		}
 	case len(modDiagnostics) == 1:
 		criticalErr = &source.CriticalError{
-			MainError: fmt.Errorf(modDiagnostics[0].Message),
-			DiagList:  modDiagnostics,
+			MainError:   fmt.Errorf(modDiagnostics[0].Message),
+			Diagnostics: modDiagnostics,
 		}
 	case len(modDiagnostics) > 1:
 		criticalErr = &source.CriticalError{
-			MainError: fmt.Errorf("error loading module names"),
-			DiagList:  modDiagnostics,
+			MainError:   fmt.Errorf("error loading module names"),
+			Diagnostics: modDiagnostics,
 		}
 	}
 
@@ -718,23 +760,27 @@
 	v.snapshotMu.Lock()
 	defer v.snapshotMu.Unlock()
 
-	if v.snapshot == nil {
+	prevSnapshot, prevReleaseSnapshot := v.snapshot, v.releaseSnapshot
+
+	if prevSnapshot == nil {
 		panic("invalidateContent called after shutdown")
 	}
 
 	// Cancel all still-running previous requests, since they would be
 	// operating on stale data.
-	v.snapshot.cancel()
+	prevSnapshot.cancel()
 
 	// Do not clone a snapshot until its view has finished initializing.
-	v.snapshot.AwaitInitialized(ctx)
+	prevSnapshot.AwaitInitialized(ctx)
 
-	oldSnapshot := v.snapshot
+	// Save one lease of the cloned snapshot in the view.
+	v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
 
-	v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
-	go oldSnapshot.Destroy("View.invalidateContent")
+	prevReleaseSnapshot()
+	v.destroy(prevSnapshot, "View.invalidateContent")
 
-	return v.snapshot, v.snapshot.generation.Acquire()
+	// Return a second lease to the caller.
+	return v.snapshot, v.snapshot.Acquire()
 }
 
 func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (*workspaceInformation, error) {
@@ -918,6 +964,12 @@
 	for k := range vars {
 		args = append(args, k)
 	}
+	// TODO(rfindley): GOWORK is not a property of the session. It may change
+	// when a workfile is added or removed.
+	//
+	// We need to distinguish between GOWORK values that are set by the GOWORK
+	// environment variable, and GOWORK values that are computed based on the
+	// location of a go.work file in the directory hierarchy.
 	args = append(args, "GOWORK")
 
 	inv := gocommand.Invocation{
@@ -1051,15 +1103,14 @@
 	return vendorEnabled, nil
 }
 
-func (v *View) allFilesExcluded(pkg *packages.Package) bool {
-	opts := v.Options()
+func (v *View) allFilesExcluded(pkg *packages.Package, filterer *source.Filterer) bool {
 	folder := filepath.ToSlash(v.folder.Filename())
 	for _, f := range pkg.GoFiles {
 		f = filepath.ToSlash(f)
 		if !strings.HasPrefix(f, folder) {
 			return false
 		}
-		if !pathExcludedByFilter(strings.TrimPrefix(f, folder), v.rootURI.Filename(), v.gomodcache, opts) {
+		if !pathExcludedByFilter(strings.TrimPrefix(f, folder), filterer) {
 			return false
 		}
 	}
@@ -1067,8 +1118,9 @@
 }
 
 func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) func(string) bool {
+	filterer := buildFilterer(root, gomodcache, opts)
 	return func(path string) bool {
-		return pathExcludedByFilter(path, root, gomodcache, opts)
+		return pathExcludedByFilter(path, filterer)
 	}
 }
 
@@ -1078,12 +1130,18 @@
 // TODO(rfindley): passing root and gomodcache here makes it confusing whether
 // path should be absolute or relative, and has already caused at least one
 // bug.
-func pathExcludedByFilter(path, root, gomodcache string, opts *source.Options) bool {
+func pathExcludedByFilter(path string, filterer *source.Filterer) bool {
 	path = strings.TrimPrefix(filepath.ToSlash(path), "/")
+	return filterer.Disallow(path)
+}
+
+func buildFilterer(root, gomodcache string, opts *source.Options) *source.Filterer {
+	// TODO(rfindley): this looks wrong. If gomodcache isn't actually nested
+	// under root, this will do the wrong thing.
 	gomodcache = strings.TrimPrefix(filepath.ToSlash(strings.TrimPrefix(gomodcache, root)), "/")
 	filters := opts.DirectoryFilters
 	if gomodcache != "" {
 		filters = append(filters, "-"+gomodcache)
 	}
-	return source.FiltersDisallow(path, filters)
+	return source.NewFilterer(filters)
 }
diff --git a/internal/lsp/cache/view_test.go b/internal/lsp/cache/view_test.go
index d76dcda..59684ea 100644
--- a/internal/lsp/cache/view_test.go
+++ b/internal/lsp/cache/view_test.go
@@ -161,15 +161,14 @@
 	}
 
 	for _, tt := range tests {
-		opts := &source.Options{}
-		opts.DirectoryFilters = tt.filters
+		filterer := source.NewFilterer(tt.filters)
 		for _, inc := range tt.included {
-			if pathExcludedByFilter(inc, "root", "root/gopath/pkg/mod", opts) {
+			if pathExcludedByFilter(inc, filterer) {
 				t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc)
 			}
 		}
 		for _, exc := range tt.excluded {
-			if !pathExcludedByFilter(exc, "root", "root/gopath/pkg/mod", opts) {
+			if !pathExcludedByFilter(exc, filterer) {
 				t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc)
 			}
 		}
diff --git a/internal/lsp/cache/workspace.go b/internal/lsp/cache/workspace.go
index 669ce92..9182cb9 100644
--- a/internal/lsp/cache/workspace.go
+++ b/internal/lsp/cache/workspace.go
@@ -28,7 +28,7 @@
 	legacyWorkspace     = iota // non-module or single module mode
 	goplsModWorkspace          // modules provided by a gopls.mod file
 	goWorkWorkspace            // modules provided by a go.work file
-	fileSystemWorkspace        // modules scanned from the filesystem
+	fileSystemWorkspace        // modules found by walking the filesystem
 )
 
 func (s workspaceSource) String() string {
@@ -95,7 +95,12 @@
 //
 // If there is no active workspace file (a gopls.mod or go.work), newWorkspace
 // scans the filesystem to find modules.
-func newWorkspace(ctx context.Context, root span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff bool, useWsModule bool) (*workspace, error) {
+//
+// TODO(rfindley): newWorkspace should perhaps never fail, relying instead on
+// the criticalError method to surface problems in the workspace.
+// TODO(rfindley): this function should accept the GOWORK value, if specified
+// by the user.
+func newWorkspace(ctx context.Context, root span.URI, fs source.FileSource, excludePath func(string) bool, go111moduleOff, useWsModule bool) (*workspace, error) {
 	ws := &workspace{
 		root:        root,
 		excludePath: excludePath,
@@ -178,6 +183,28 @@
 	return w.activeModFiles
 }
 
+// criticalError returns a critical error related to the workspace setup.
+func (w *workspace) criticalError(ctx context.Context, fs source.FileSource) (res *source.CriticalError) {
+	// For now, we narrowly report errors related to `go.work` files.
+	//
+	// TODO(rfindley): investigate whether other workspace validation errors
+	// can be consolidated here.
+	if w.moduleSource == goWorkWorkspace {
+		// We should have already built the modfile, but build here to be
+		// consistent about accessing w.mod after w.build.
+		//
+		// TODO(rfindley): build eagerly. Building lazily is a premature
+		// optimization that poses a significant burden on the code.
+		w.build(ctx, fs)
+		if w.buildErr != nil {
+			return &source.CriticalError{
+				MainError: w.buildErr,
+			}
+		}
+	}
+	return nil
+}
+
 // modFile gets the workspace modfile associated with this workspace,
 // computing it if it doesn't exist.
 //
@@ -207,9 +234,10 @@
 	// would not be obvious to the user how to recover.
 	ctx = xcontext.Detach(ctx)
 
-	// If our module source is not gopls.mod, try to build the workspace module
-	// from modules. Fall back on the pre-existing mod file if parsing fails.
-	if w.moduleSource != goplsModWorkspace {
+	// If the module source is from the filesystem, try to build the workspace
+	// module from active modules discovered by scanning the filesystem. Fall
+	// back on the pre-existing mod file if parsing fails.
+	if w.moduleSource == fileSystemWorkspace {
 		file, err := buildWorkspaceModFile(ctx, w.activeModFiles, fs)
 		switch {
 		case err == nil:
@@ -222,6 +250,7 @@
 			w.buildErr = err
 		}
 	}
+
 	if w.mod != nil {
 		w.wsDirs = map[span.URI]struct{}{
 			w.root: {},
@@ -235,18 +264,21 @@
 			w.wsDirs[span.URIFromPath(r.New.Path)] = struct{}{}
 		}
 	}
+
 	// Ensure that there is always at least the root dir.
 	if len(w.wsDirs) == 0 {
 		w.wsDirs = map[span.URI]struct{}{
 			w.root: {},
 		}
 	}
+
 	sum, err := buildWorkspaceSumFile(ctx, w.activeModFiles, fs)
 	if err == nil {
 		w.sum = sum
 	} else {
 		event.Error(ctx, "building workspace sum file", err)
 	}
+
 	w.built = true
 }
 
@@ -263,20 +295,23 @@
 	return dirs
 }
 
-// invalidate returns a (possibly) new workspace after invalidating the changed
+// Clone returns a (possibly) new workspace after invalidating the changed
 // files. If w is still valid in the presence of changedURIs, it returns itself
 // unmodified.
 //
-// The returned changed and reload flags control the level of invalidation.
-// Some workspace changes may affect workspace contents without requiring a
-// reload of metadata (for example, unsaved changes to a go.mod or go.sum
-// file).
-func (w *workspace) invalidate(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, changed, reload bool) {
+// The returned needReinit flag indicates to the caller that the workspace
+// needs to be reinitialized (because a relevant go.mod or go.work file has
+// been changed).
+//
+// TODO(rfindley): it looks wrong that we return 'needReinit' here. The caller
+// should determine whether to re-initialize.
+func (w *workspace) Clone(ctx context.Context, changes map[span.URI]*fileChange, fs source.FileSource) (_ *workspace, needReinit bool) {
 	// Prevent races to w.modFile or w.wsDirs below, if w has not yet been built.
 	w.buildMu.Lock()
 	defer w.buildMu.Unlock()
 
 	// Clone the workspace. This may be discarded if nothing changed.
+	changed := false
 	result := &workspace{
 		root:           w.root,
 		moduleSource:   w.moduleSource,
@@ -300,7 +335,7 @@
 	// determine which modules we care about. If go.work/gopls.mod has changed
 	// we need to either re-read it if it exists or walk the filesystem if it
 	// has been deleted. go.work should override the gopls.mod if both exist.
-	changed, reload = handleWorkspaceFileChanges(ctx, result, changes, fs)
+	changed, needReinit = handleWorkspaceFileChanges(ctx, result, changes, fs)
 	// Next, handle go.mod changes that could affect our workspace.
 	for uri, change := range changes {
 		// Otherwise, we only care about go.mod files in the workspace directory.
@@ -309,7 +344,7 @@
 		}
 		changed = true
 		active := result.moduleSource != legacyWorkspace || source.CompareURI(modURI(w.root), uri) == 0
-		reload = reload || (active && change.fileHandle.Saved())
+		needReinit = needReinit || (active && change.fileHandle.Saved())
 		// Don't mess with the list of mod files if using go.work or gopls.mod.
 		if result.moduleSource == goplsModWorkspace || result.moduleSource == goWorkWorkspace {
 			continue
@@ -339,14 +374,14 @@
 		// Only changes to active go.sum files actually cause the workspace to
 		// change.
 		changed = true
-		reload = reload || change.fileHandle.Saved()
+		needReinit = needReinit || change.fileHandle.Saved()
 	}
 
 	if !changed {
-		return w, false, false
+		return w, false
 	}
 
-	return result, changed, reload
+	return result, needReinit
 }
 
 // handleWorkspaceFileChanges handles changes related to a go.work or gopls.mod
@@ -381,6 +416,11 @@
 				// An unparseable file should not invalidate the workspace:
 				// nothing good could come from changing the workspace in
 				// this case.
+				//
+				// TODO(rfindley): well actually, it could potentially lead to a better
+				// critical error. Evaluate whether we can unify this case with the
+				// error returned by newWorkspace, without needlessly invalidating
+				// metadata.
 				event.Error(ctx, fmt.Sprintf("parsing %s", filepath.Base(uri.Filename())), err)
 			} else {
 				// only update the modfile if it parsed.
@@ -501,6 +541,10 @@
 		modURI := span.URIFromPath(filepath.Join(dir.Path, "go.mod"))
 		modFiles[modURI] = struct{}{}
 	}
+
+	// TODO(rfindley): we should either not build the workspace modfile here, or
+	// not fail so hard. A failure in building the workspace modfile should not
+	// invalidate the active module paths extracted above.
 	modFile, err := buildWorkspaceModFile(ctx, modFiles, fs)
 	if err != nil {
 		return nil, nil, err
diff --git a/internal/lsp/cache/workspace_test.go b/internal/lsp/cache/workspace_test.go
index b809ad1..871e4bb 100644
--- a/internal/lsp/cache/workspace_test.go
+++ b/internal/lsp/cache/workspace_test.go
@@ -298,12 +298,13 @@
 						t.Fatal(err)
 					}
 				}
-				got, gotChanged, gotReload := w.invalidate(ctx, changes, fs)
+				got, gotReinit := w.Clone(ctx, changes, fs)
+				gotChanged := got != w
 				if gotChanged != test.wantChanged {
 					t.Errorf("w.invalidate(): got changed %t, want %t", gotChanged, test.wantChanged)
 				}
-				if gotReload != test.wantReload {
-					t.Errorf("w.invalidate(): got reload %t, want %t", gotReload, test.wantReload)
+				if gotReinit != test.wantReload {
+					t.Errorf("w.invalidate(): got reload %t, want %t", gotReinit, test.wantReload)
 				}
 				checkState(ctx, t, fs, rel, got, test.finalState)
 			}
diff --git a/internal/lsp/cmd/capabilities_test.go b/internal/lsp/cmd/capabilities_test.go
index 1d01b4b..930621b 100644
--- a/internal/lsp/cmd/capabilities_test.go
+++ b/internal/lsp/cmd/capabilities_test.go
@@ -43,7 +43,7 @@
 	params.Capabilities.Workspace.Configuration = true
 
 	// Send an initialize request to the server.
-	c.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), c.Client)
+	c.Server = lsp.NewServer(cache.New(nil, nil, app.options).NewSession(ctx), c.Client)
 	result, err := c.Server.Initialize(ctx, params)
 	if err != nil {
 		t.Fatal(err)
diff --git a/internal/lsp/cmd/cmd.go b/internal/lsp/cmd/cmd.go
index a81eb83..5911f97 100644
--- a/internal/lsp/cmd/cmd.go
+++ b/internal/lsp/cmd/cmd.go
@@ -286,7 +286,7 @@
 	switch {
 	case app.Remote == "":
 		connection := newConnection(app)
-		connection.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), connection.Client)
+		connection.Server = lsp.NewServer(cache.New(nil, nil, app.options).NewSession(ctx), connection.Client)
 		ctx = protocol.WithClient(ctx, connection.Client)
 		return connection, connection.initialize(ctx, app.options)
 	case strings.HasPrefix(app.Remote, "internal@"):
diff --git a/internal/lsp/cmd/serve.go b/internal/lsp/cmd/serve.go
index 1c229a4..10730fd 100644
--- a/internal/lsp/cmd/serve.go
+++ b/internal/lsp/cmd/serve.go
@@ -101,7 +101,7 @@
 			return fmt.Errorf("creating forwarder: %w", err)
 		}
 	} else {
-		ss = lsprpc.NewStreamServer(cache.New(s.app.options), isDaemon)
+		ss = lsprpc.NewStreamServer(cache.New(nil, nil, s.app.options), isDaemon)
 	}
 
 	var network, addr string
diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go
index ff0461b..5342e9b 100644
--- a/internal/lsp/cmd/test/cmdtest.go
+++ b/internal/lsp/cmd/test/cmdtest.go
@@ -50,7 +50,7 @@
 
 func NewTestServer(ctx context.Context, options func(*source.Options)) *servertest.TCPServer {
 	ctx = debug.WithInstance(ctx, "", "")
-	cache := cache.New(options)
+	cache := cache.New(nil, nil, options)
 	ss := lsprpc.NewStreamServer(cache, false)
 	return servertest.NewTCPServer(ctx, ss, nil)
 }
diff --git a/internal/lsp/cmd/test/suggested_fix.go b/internal/lsp/cmd/test/suggested_fix.go
index c819e05..db40135 100644
--- a/internal/lsp/cmd/test/suggested_fix.go
+++ b/internal/lsp/cmd/test/suggested_fix.go
@@ -12,14 +12,16 @@
 	"golang.org/x/tools/internal/span"
 )
 
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
+func (r *runner) SuggestedFix(t *testing.T, spn span.Span, suggestedFixes []tests.SuggestedFix, expectedActions int) {
 	uri := spn.URI()
 	filename := uri.Filename()
 	args := []string{"fix", "-a", fmt.Sprintf("%s", spn)}
-	for _, kind := range actionKinds {
-		if kind == "refactor.rewrite" {
+	var actionKinds []string
+	for _, sf := range suggestedFixes {
+		if sf.ActionKind == "refactor.rewrite" {
 			t.Skip("refactor.rewrite is not yet supported on the command line")
 		}
+		actionKinds = append(actionKinds, sf.ActionKind)
 	}
 	args = append(args, actionKinds...)
 	got, stderr := r.NormalizeGoplsCmd(t, args...)
diff --git a/internal/lsp/cmd/vulncheck.go b/internal/lsp/cmd/vulncheck.go
index 4d245ce..d5b05a9 100644
--- a/internal/lsp/cmd/vulncheck.go
+++ b/internal/lsp/cmd/vulncheck.go
@@ -12,8 +12,6 @@
 	"os"
 
 	"golang.org/x/tools/go/packages"
-	"golang.org/x/tools/internal/lsp/command"
-	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
 	"golang.org/x/tools/internal/tool"
 )
@@ -29,10 +27,6 @@
 	// the build system's query tool.
 	BuildFlags []string
 
-	// Env is the environment to use when invoking the build system's query tool.
-	// If Env is nil, the current environment is used.
-	Env []string
-
 	// If Tests is set, the loader includes related test packages.
 	Tests bool
 }
@@ -67,11 +61,6 @@
 	if len(args) == 1 {
 		pattern = args[0]
 	}
-
-	cwd, err := os.Getwd()
-	if err != nil {
-		return tool.CommandLineErrorf("failed to get current directory: %v", err)
-	}
 	var cfg pkgLoadConfig
 	if v.Config {
 		if err := json.NewDecoder(os.Stdin).Decode(&cfg); err != nil {
@@ -89,13 +78,10 @@
 		Context:    ctx,
 		Tests:      cfg.Tests,
 		BuildFlags: cfg.BuildFlags,
-		Env:        cfg.Env,
+		// inherit the current process's cwd and env.
 	}
 
-	res, err := opts.Hooks.Govulncheck(ctx, loadCfg, command.VulncheckArgs{
-		Dir:     protocol.URIFromPath(cwd),
-		Pattern: pattern,
-	})
+	res, err := opts.Hooks.Govulncheck(ctx, loadCfg, pattern)
 	if err != nil {
 		return tool.CommandLineErrorf("govulncheck failed: %v", err)
 	}
diff --git a/internal/lsp/code_action.go b/internal/lsp/code_action.go
index 9d78e3c..4147e17 100644
--- a/internal/lsp/code_action.go
+++ b/internal/lsp/code_action.go
@@ -294,7 +294,7 @@
 	}
 	puri := protocol.URIFromSpanURI(uri)
 	var commands []protocol.Command
-	if _, ok, methodOk, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok {
+	if _, ok, methodOk, _ := source.CanExtractFunction(pgf.Tok, srng, pgf.Src, pgf.File); ok {
 		cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{
 			URI:   puri,
 			Fix:   source.ExtractFunction,
diff --git a/internal/lsp/command.go b/internal/lsp/command.go
index 7afafc9..35bc0e43 100644
--- a/internal/lsp/command.go
+++ b/internal/lsp/command.go
@@ -13,13 +13,13 @@
 	"io"
 	"io/ioutil"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"sort"
 	"strings"
 
 	"golang.org/x/mod/modfile"
 	"golang.org/x/tools/go/ast/astutil"
-	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/lsp/command"
@@ -691,7 +691,7 @@
 		if err != nil {
 			return fmt.Errorf("formatting mod file: %w", err)
 		}
-		filename := filepath.Join(snapshot.View().Folder().Filename(), "gopls.mod")
+		filename := filepath.Join(v.Folder().Filename(), "gopls.mod")
 		if err := ioutil.WriteFile(filename, content, 0644); err != nil {
 			return fmt.Errorf("writing mod file: %w", err)
 		}
@@ -790,11 +790,26 @@
 	return result, nil
 }
 
-func (c *commandHandler) RunVulncheckExp(ctx context.Context, args command.VulncheckArgs) (result command.VulncheckResult, _ error) {
+// Copy of pkgLoadConfig defined in internal/lsp/cmd/vulncheck.go
+// TODO(hyangah): decide where to define this.
+type pkgLoadConfig struct {
+	// BuildFlags is a list of command-line flags to be passed through to
+	// the build system's query tool.
+	BuildFlags []string
+
+	// If Tests is set, the loader includes related test packages.
+	Tests bool
+}
+
+func (c *commandHandler) RunVulncheckExp(ctx context.Context, args command.VulncheckArgs) error {
+	if args.URI == "" {
+		return errors.New("VulncheckArgs is missing URI field")
+	}
 	err := c.run(ctx, commandConfig{
-		progress:    "Running vulncheck",
+		async:       true, // need to be async to be cancellable
+		progress:    "govulncheck",
 		requireSave: true,
-		forURI:      args.Dir, // Will dir work?
+		forURI:      args.URI,
 	}, func(ctx context.Context, deps commandDeps) error {
 		view := deps.snapshot.View()
 		opts := view.Options()
@@ -802,22 +817,68 @@
 			return errors.New("vulncheck feature is not available")
 		}
 
-		buildFlags := opts.BuildFlags // XXX: is session.Options equivalent to view.Options?
+		cmd := exec.CommandContext(ctx, os.Args[0], "vulncheck", "-config", args.Pattern)
+		cmd.Dir = filepath.Dir(args.URI.SpanURI().Filename())
+
 		var viewEnv []string
 		if e := opts.EnvSlice(); e != nil {
 			viewEnv = append(os.Environ(), e...)
 		}
-		cfg := &packages.Config{
-			Context:    ctx,
-			Tests:      true, // TODO(hyangah): add a field in args.
-			BuildFlags: buildFlags,
-			Env:        viewEnv,
-			Dir:        args.Dir.SpanURI().Filename(),
-			// TODO(hyangah): configure overlay
+		cmd.Env = viewEnv
+
+		// stdin: gopls vulncheck expects JSON-encoded configuration from STDIN when -config flag is set.
+		var stdin bytes.Buffer
+		cmd.Stdin = &stdin
+
+		if err := json.NewEncoder(&stdin).Encode(pkgLoadConfig{
+			BuildFlags: opts.BuildFlags,
+			// TODO(hyangah): add `tests` flag in command.VulncheckArgs
+		}); err != nil {
+			return fmt.Errorf("failed to pass package load config: %v", err)
 		}
-		var err error
-		result, err = opts.Hooks.Govulncheck(ctx, cfg, args)
-		return err
+
+		// stderr: stream gopls vulncheck's STDERR as progress reports
+		er := progress.NewEventWriter(ctx, "vulncheck")
+		stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work))
+		cmd.Stderr = stderr
+		// TODO: can we stream stdout?
+		stdout, err := cmd.Output()
+		if err != nil {
+			return fmt.Errorf("failed to run govulncheck: %v", err)
+		}
+
+		var vulns command.VulncheckResult
+		if err := json.Unmarshal(stdout, &vulns); err != nil {
+			// TODO: for easy debugging, log the failed stdout somewhere?
+			return fmt.Errorf("failed to parse govulncheck output: %v", err)
+		}
+
+		// TODO(jamalc,suzmue): convert the results to diagnostics & code actions.
+		// Or should we just write to a file (*.vulncheck.json) or text format
+		// and send "Show Document" request? If *.vulncheck.json is open,
+		// VSCode Go extension will open its custom editor.
+		set := make(map[string]bool)
+		for _, v := range vulns.Vuln {
+			if len(v.CallStackSummaries) > 0 {
+				set[v.ID] = true
+			}
+		}
+		if len(set) == 0 {
+			return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+				Type:    protocol.Info,
+				Message: "No vulnerabilities found",
+			})
+		}
+
+		list := make([]string, 0, len(set))
+		for k := range set {
+			list = append(list, k)
+		}
+		sort.Strings(list)
+		return c.s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
+			Type:    protocol.Warning,
+			Message: fmt.Sprintf("Found %v", strings.Join(list, ", ")),
+		})
 	})
-	return result, err
+	return err
 }
diff --git a/internal/lsp/command/command_gen.go b/internal/lsp/command/command_gen.go
index 22cfeff..207def4 100644
--- a/internal/lsp/command/command_gen.go
+++ b/internal/lsp/command/command_gen.go
@@ -159,7 +159,7 @@
 		if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
 			return nil, err
 		}
-		return s.RunVulncheckExp(ctx, a0)
+		return nil, s.RunVulncheckExp(ctx, a0)
 	case "gopls.start_debugging":
 		var a0 DebuggingArgs
 		if err := UnmarshalArgs(params.Arguments, &a0); err != nil {
diff --git a/internal/lsp/command/interface.go b/internal/lsp/command/interface.go
index 1f3b092..4a4498a 100644
--- a/internal/lsp/command/interface.go
+++ b/internal/lsp/command/interface.go
@@ -147,7 +147,7 @@
 	// RunVulncheckExp: Run vulncheck (experimental)
 	//
 	// Run vulnerability check (`govulncheck`).
-	RunVulncheckExp(context.Context, VulncheckArgs) (VulncheckResult, error)
+	RunVulncheckExp(context.Context, VulncheckArgs) error
 }
 
 type RunTestsArgs struct {
@@ -314,14 +314,13 @@
 }
 
 type VulncheckArgs struct {
-	// Dir is the directory from which vulncheck will run from.
-	Dir protocol.DocumentURI
+	// Any document in the directory from which govulncheck will run.
+	URI protocol.DocumentURI
 
 	// Package pattern. E.g. "", ".", "./...".
 	Pattern string
 
-	// TODO: Flag []string (flags accepted by govulncheck, e.g., -tests)
-	// TODO: Format string (json, text)
+	// TODO: -tests
 }
 
 type VulncheckResult struct {
diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go
index 9648921..1977614 100644
--- a/internal/lsp/diagnostics.go
+++ b/internal/lsp/diagnostics.go
@@ -41,17 +41,42 @@
 
 // A diagnosticReport holds results for a single diagnostic source.
 type diagnosticReport struct {
-	snapshotID    uint64
-	publishedHash string
+	snapshotID    uint64 // snapshot ID on which the report was computed
+	publishedHash string // last published hash for this (URI, source)
 	diags         map[string]*source.Diagnostic
 }
 
 // fileReports holds a collection of diagnostic reports for a single file, as
 // well as the hash of the last published set of diagnostics.
 type fileReports struct {
-	snapshotID    uint64
+	// publishedSnapshotID is the last snapshot ID for which we have "published"
+	// diagnostics (though the publishDiagnostics notification may not have
+	// actually been sent, if nothing changed).
+	//
+	// Specifically, publishedSnapshotID is updated to a later snapshot ID when
+	// we either:
+	//  (1) publish diagnostics for the file for a snapshot, or
+	//  (2) determine that published diagnostics are valid for a new snapshot.
+	//
+	// Notably publishedSnapshotID may not match the snapshot id on individual reports in
+	// the reports map:
+	// - we may have published partial diagnostics from only a subset of
+	//   diagnostic sources for which new results have been computed, or
+	// - we may have started computing reports for an even newer snapshot, but not
+	//   yet published.
+	//
+	// This prevents gopls from publishing stale diagnostics.
+	publishedSnapshotID uint64
+
+	// publishedHash is a hash of the latest diagnostics published for the file.
 	publishedHash string
-	reports       map[diagnosticSource]diagnosticReport
+
+	// If set, mustPublish marks diagnostics as needing publication, independent
+	// of whether their publishedHash has changed.
+	mustPublish bool
+
+	// The last stored diagnostics for each diagnostic source.
+	reports map[diagnosticSource]diagnosticReport
 }
 
 func (d diagnosticSource) String() string {
@@ -358,6 +383,24 @@
 	}
 }
 
+// mustPublishDiagnostics marks the uri as needing publication, independent of
+// whether the published contents have changed.
+//
+// This can be used for ensuring gopls publishes diagnostics after certain file
+// events.
+func (s *Server) mustPublishDiagnostics(uri span.URI) {
+	s.diagnosticsMu.Lock()
+	defer s.diagnosticsMu.Unlock()
+
+	if s.diagnostics[uri] == nil {
+		s.diagnostics[uri] = &fileReports{
+			publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
+			reports:       map[diagnosticSource]diagnosticReport{},
+		}
+	}
+	s.diagnostics[uri].mustPublish = true
+}
+
 // storeDiagnostics stores results from a single diagnostic source. If merge is
 // true, it merges results into any existing results for this snapshot.
 func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic) {
@@ -367,6 +410,7 @@
 	if fh == nil {
 		return
 	}
+
 	s.diagnosticsMu.Lock()
 	defer s.diagnosticsMu.Unlock()
 	if s.diagnostics[uri] == nil {
@@ -414,7 +458,7 @@
 	var errMsg string
 	if err != nil {
 		event.Error(ctx, "errors loading workspace", err.MainError, tag.Snapshot.Of(snapshot.ID()), tag.Directory.Of(snapshot.View().Folder()))
-		for _, d := range err.DiagList {
+		for _, d := range err.Diagnostics {
 			s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d})
 		}
 		errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ")
@@ -467,7 +511,7 @@
 	if !pgf.File.Name.Pos().IsValid() {
 		return nil
 	}
-	spn, err := span.NewRange(snapshot.FileSet(), pgf.File.Name.Pos(), pgf.File.Name.End()).Span()
+	spn, err := span.NewRange(pgf.Tok, pgf.File.Name.Pos(), pgf.File.Name.End()).Span()
 	if err != nil {
 		return nil
 	}
@@ -501,6 +545,7 @@
 	s.diagnosticsMu.Lock()
 	defer s.diagnosticsMu.Unlock()
 
+	// TODO(rfindley): remove this noisy (and not useful) logging.
 	published := 0
 	defer func() {
 		log.Trace.Logf(ctx, "published %d diagnostics", published)
@@ -512,7 +557,7 @@
 
 		// If we've already delivered diagnostics for a future snapshot for this
 		// file, do not deliver them.
-		if r.snapshotID > snapshot.ID() {
+		if r.publishedSnapshotID > snapshot.ID() {
 			continue
 		}
 		anyReportsChanged := false
@@ -541,10 +586,10 @@
 		}
 		source.SortDiagnostics(diags)
 		hash := hashDiagnostics(diags...)
-		if hash == r.publishedHash {
+		if hash == r.publishedHash && !r.mustPublish {
 			// Update snapshotID to be the latest snapshot for which this diagnostic
 			// hash is valid.
-			r.snapshotID = snapshot.ID()
+			r.publishedSnapshotID = snapshot.ID()
 			continue
 		}
 		var version int32
@@ -558,7 +603,8 @@
 		}); err == nil {
 			published++
 			r.publishedHash = hash
-			r.snapshotID = snapshot.ID()
+			r.mustPublish = false // diagnostics have been successfully published
+			r.publishedSnapshotID = snapshot.ID()
 			for dsource, hash := range reportHashes {
 				report := r.reports[dsource]
 				report.publishedHash = hash
diff --git a/internal/lsp/diff/diff_test.go b/internal/lsp/diff/diff_test.go
index dd9414e..a369949 100644
--- a/internal/lsp/diff/diff_test.go
+++ b/internal/lsp/diff/diff_test.go
@@ -6,6 +6,8 @@
 
 import (
 	"fmt"
+	"math/rand"
+	"strings"
 	"testing"
 
 	"golang.org/x/tools/internal/lsp/diff"
@@ -29,6 +31,71 @@
 	}
 }
 
+func TestNEdits(t *testing.T) {
+	for i, tc := range difftest.TestCases {
+		sp := fmt.Sprintf("file://%s.%d", tc.Name, i)
+		edits, err := diff.NComputeEdits(span.URI(sp), tc.In, tc.Out)
+		if err != nil {
+			t.Fatal(err)
+		}
+		got := diff.ApplyEdits(tc.In, edits)
+		if got != tc.Out {
+			t.Fatalf("%s: got %q wanted %q", tc.Name, got, tc.Out)
+		}
+		if len(edits) < len(tc.Edits) { // should find subline edits
+			t.Errorf("got %v, expected %v for %#v", edits, tc.Edits, tc)
+		}
+	}
+}
+
+func TestNRandom(t *testing.T) {
+	rand.Seed(1)
+	for i := 0; i < 1000; i++ {
+		fname := fmt.Sprintf("file://%x", i)
+		a := randstr("abω", 16)
+		b := randstr("abωc", 16)
+		edits, err := diff.NComputeEdits(span.URI(fname), a, b)
+		if err != nil {
+			t.Fatalf("%q,%q %v", a, b, err)
+		}
+		got := diff.ApplyEdits(a, edits)
+		if got != b {
+			t.Fatalf("%d: got %q, wanted %q, starting with %q", i, got, b, a)
+		}
+	}
+}
+
+func TestNLinesRandom(t *testing.T) {
+	rand.Seed(2)
+	for i := 0; i < 1000; i++ {
+		fname := fmt.Sprintf("file://%x", i)
+		x := randlines("abω", 4) // avg line length is 6, want a change every 3rd line or so
+		v := []rune(x)
+		for i := 0; i < len(v); i++ {
+			if rand.Float64() < .05 {
+				v[i] = 'N'
+			}
+		}
+		y := string(v)
+		// occasionally remove the trailing \n
+		if rand.Float64() < .1 {
+			x = x[:len(x)-1]
+		}
+		if rand.Float64() < .1 {
+			y = y[:len(y)-1]
+		}
+		a, b := strings.SplitAfter(x, "\n"), strings.SplitAfter(y, "\n")
+		edits, err := diff.NComputeLineEdits(span.URI(fname), a, b)
+		if err != nil {
+			t.Fatalf("%q,%q %v", a, b, err)
+		}
+		got := diff.ApplyEdits(x, edits)
+		if got != y {
+			t.Fatalf("%d: got\n%q, wanted\n%q, starting with %q", i, got, y, a)
+		}
+	}
+}
+
 func TestLineEdits(t *testing.T) {
 	for _, tc := range difftest.TestCases {
 		t.Run(tc.Name, func(t *testing.T) {
@@ -63,6 +130,41 @@
 	}
 }
 
+func TestRegressionOld001(t *testing.T) {
+	a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+
+	b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+	diffs, err := diff.NComputeEdits(span.URI("file://one"), a, b)
+	if err != nil {
+		t.Error(err)
+	}
+	got := diff.ApplyEdits(a, diffs)
+	if got != b {
+		i := 0
+		for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ {
+		}
+		t.Errorf("oops %vd\n%q\n%q", diffs, got, b)
+		t.Errorf("\n%q\n%q", got[i:], b[i:])
+	}
+}
+
+func TestRegressionOld002(t *testing.T) {
+	a := "n\"\n)\n"
+	b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n"
+	diffs, err := diff.NComputeEdits(span.URI("file://two"), a, b)
+	if err != nil {
+		t.Error(err)
+	}
+	got := diff.ApplyEdits(a, diffs)
+	if got != b {
+		i := 0
+		for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ {
+		}
+		t.Errorf("oops %vd\n%q\n%q", diffs, got, b)
+		t.Errorf("\n%q\n%q", got[i:], b[i:])
+	}
+}
+
 func diffEdits(got, want []diff.TextEdit) bool {
 	if len(got) != len(want) {
 		return true
@@ -78,3 +180,26 @@
 	}
 	return false
 }
+
+// return a random string of length n made of characters from s
+func randstr(s string, n int) string {
+	src := []rune(s)
+	x := make([]rune, n)
+	for i := 0; i < n; i++ {
+		x[i] = src[rand.Intn(len(src))]
+	}
+	return string(x)
+}
+
+// return some random lines, all ending with \n
+func randlines(s string, n int) string {
+	src := []rune(s)
+	var b strings.Builder
+	for i := 0; i < n; i++ {
+		for j := 0; j < 4+rand.Intn(4); j++ {
+			b.WriteRune(src[rand.Intn(len(src))])
+		}
+		b.WriteByte('\n')
+	}
+	return b.String()
+}
diff --git a/internal/lsp/diff/lcs/common.go b/internal/lsp/diff/lcs/common.go
new file mode 100644
index 0000000..e5d0801
--- /dev/null
+++ b/internal/lsp/diff/lcs/common.go
@@ -0,0 +1,184 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"log"
+	"sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+	X, Y int
+	Len  int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+	sort.Slice(l, func(i, j int) bool {
+		if l[i].X != l[j].X {
+			return l[i].X < l[j].X
+		}
+		return l[i].Len > l[j].Len
+	})
+	return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+	for i := 1; i < len(l); i++ {
+		if l[i-1].X+l[i-1].Len > l[i].X {
+			return false
+		}
+		if l[i-1].Y+l[i-1].Len > l[i].Y {
+			return false
+		}
+	}
+	return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+	// from the set of diagonals in l, find a maximal non-conflicting set
+	// this problem may be NP-complete, but we use a greedy heuristic,
+	// which is quadratic, but with a better data structure, could be D log D.
+	// independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+	// which has to have monotone x and y
+	if len(l) == 0 {
+		return nil
+	}
+	sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+	tmp := make(lcs, 0, len(l))
+	tmp = append(tmp, l[0])
+	for i := 1; i < len(l); i++ {
+		var dir direction
+		nxt := l[i]
+		for _, in := range tmp {
+			if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+				break
+			}
+		}
+		if nxt.Len > 0 && dir != bad {
+			tmp = append(tmp, nxt)
+		}
+	}
+	tmp.sort()
+	if false && !tmp.valid() { // debug checking
+		log.Fatalf("here %d", len(tmp))
+	}
+	return tmp
+}
+
+type direction int
+
+const (
+	empty    direction = iota // diag is empty (so not in lcs)
+	leftdown                  // proposed acceptably to the left and below
+	rightup                   // proposed diag is acceptably to the right and above
+	bad                       // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
+func overlap(exist, prop diag) (direction, diag) {
+	if prop.X <= exist.X && exist.X < prop.X+prop.Len {
+		// remove the end of prop where it overlaps with the X end of exist
+		delta := prop.X + prop.Len - exist.X
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+	}
+	if exist.X <= prop.X && prop.X < exist.X+exist.Len {
+		// remove the beginning of prop where it overlaps with exist
+		delta := exist.X + exist.Len - prop.X
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+		prop.X += delta
+		prop.Y += delta
+	}
+	if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
+		// remove the end of prop that overlaps (in Y) with exist
+		delta := prop.Y + prop.Len - exist.Y
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+	}
+	if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
+		// remove the beginning of prop that overlaps with exist
+		delta := exist.Y + exist.Len - prop.Y
+		prop.Len -= delta
+		if prop.Len <= 0 {
+			return empty, prop
+		}
+		prop.X += delta // no test reaches this code
+		prop.Y += delta
+	}
+	if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
+		return leftdown, prop
+	}
+	if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
+		return rightup, prop
+	}
+	// prop can't be in an lcs that contains exist
+	return bad, prop
+}
+
+// manipulating Diag and lcs
+
+// prependlcs prepends a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
+// or to its first Diag. prependlcs is only called when extending diagonals
+// in the backward direction.
+func prependlcs(lcs lcs, x, y int) lcs {
+	if len(lcs) > 0 {
+		d := &lcs[0]
+		if int(d.X) == x+1 && int(d.Y) == y+1 {
+			// extend the diagonal down and to the left
+			d.X, d.Y = int(x), int(y)
+			d.Len++
+			return lcs
+		}
+	}
+
+	r := diag{X: int(x), Y: int(y), Len: 1}
+	lcs = append([]diag{r}, lcs...)
+	return lcs
+}
+
+// appendlcs appends a diagonal, or extends the existing one.
+// by adding the edge (x,y)-(x+1.y+1). appendlcs is only called
+// while extending diagonals in the forward direction.
+func appendlcs(lcs lcs, x, y int) lcs {
+	if len(lcs) > 0 {
+		last := &lcs[len(lcs)-1]
+		// Expand last element if adjoining.
+		if last.X+last.Len == x && last.Y+last.Len == y {
+			last.Len++
+			return lcs
+		}
+	}
+
+	return append(lcs, diag{X: x, Y: y, Len: 1})
+}
+
+// enforce constraint on d, k
+func ok(d, k int) bool {
+	return d >= 0 && -d <= k && k <= d
+}
+
+type Diff struct {
+	Start, End int    // offsets in A
+	Text       string // replacement text
+}
diff --git a/internal/lsp/diff/lcs/common_test.go b/internal/lsp/diff/lcs/common_test.go
new file mode 100644
index 0000000..4aa36ab
--- /dev/null
+++ b/internal/lsp/diff/lcs/common_test.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"log"
+	"math/rand"
+	"strings"
+	"testing"
+)
+
+type Btest struct {
+	a, b string
+	lcs  []string
+}
+
+var Btests = []Btest{
+	{"aaabab", "abaab", []string{"abab", "aaab"}},
+	{"aabbba", "baaba", []string{"aaba"}},
+	{"cabbx", "cbabx", []string{"cabx", "cbbx"}},
+	{"c", "cb", []string{"c"}},
+	{"aaba", "bbb", []string{"b"}},
+	{"bbaabb", "b", []string{"b"}},
+	{"baaabb", "bbaba", []string{"bbb", "baa", "bab"}},
+	{"baaabb", "abbab", []string{"abb", "bab", "aab"}},
+	{"baaba", "aaabba", []string{"aaba"}},
+	{"ca", "cba", []string{"ca"}},
+	{"ccbcbc", "abba", []string{"bb"}},
+	{"ccbcbc", "aabba", []string{"bb"}},
+	{"ccb", "cba", []string{"cb"}},
+	{"caef", "axe", []string{"ae"}},
+	{"bbaabb", "baabb", []string{"baabb"}},
+	// Example from Myers:
+	{"abcabba", "cbabac", []string{"caba", "baba", "cbba"}},
+	{"3456aaa", "aaa", []string{"aaa"}},
+	{"aaa", "aaa123", []string{"aaa"}},
+	{"aabaa", "aacaa", []string{"aaaa"}},
+	{"1a", "a", []string{"a"}},
+	{"abab", "bb", []string{"bb"}},
+	{"123", "ab", []string{""}},
+	{"a", "b", []string{""}},
+	{"abc", "123", []string{""}},
+	{"aa", "aa", []string{"aa"}},
+	{"abcde", "12345", []string{""}},
+	{"aaa3456", "aaa", []string{"aaa"}},
+	{"abcde", "12345a", []string{"a"}},
+	{"ab", "123", []string{""}},
+	{"1a2", "a", []string{"a"}},
+	// for two-sided
+	{"babaab", "cccaba", []string{"aba"}},
+	{"aabbab", "cbcabc", []string{"bab"}},
+	{"abaabb", "bcacab", []string{"baab"}},
+	{"abaabb", "abaaaa", []string{"abaa"}},
+	{"bababb", "baaabb", []string{"baabb"}},
+	{"abbbaa", "cabacc", []string{"aba"}},
+	{"aabbaa", "aacaba", []string{"aaaa", "aaba"}},
+}
+
+func init() {
+	log.SetFlags(log.Lshortfile)
+}
+
+func check(t *testing.T, str string, lcs lcs, want []string) {
+	t.Helper()
+	if !lcs.valid() {
+		t.Errorf("bad lcs %v", lcs)
+	}
+	var got strings.Builder
+	for _, dd := range lcs {
+		got.WriteString(str[dd.X : dd.X+dd.Len])
+	}
+	ans := got.String()
+	for _, w := range want {
+		if ans == w {
+			return
+		}
+	}
+	t.Fatalf("str=%q lcs=%v want=%q got=%q", str, lcs, want, ans)
+}
+
+func checkDiffs(t *testing.T, before string, diffs []Diff, after string) {
+	t.Helper()
+	var ans strings.Builder
+	sofar := 0 // index of position in before
+	for _, d := range diffs {
+		if sofar < d.Start {
+			ans.WriteString(before[sofar:d.Start])
+		}
+		ans.WriteString(d.Text)
+		sofar = d.End
+	}
+	ans.WriteString(before[sofar:])
+	if ans.String() != after {
+		t.Fatalf("diff %v took %q to %q, not to %q", diffs, before, ans.String(), after)
+	}
+}
+
+func lcslen(l lcs) int {
+	ans := 0
+	for _, d := range l {
+		ans += int(d.Len)
+	}
+	return ans
+}
+
+// return a random string of length n made of characters from s
+func randstr(s string, n int) string {
+	src := []rune(s)
+	x := make([]rune, n)
+	for i := 0; i < n; i++ {
+		x[i] = src[rand.Intn(len(src))]
+	}
+	return string(x)
+}
+
+func TestLcsFix(t *testing.T) {
+	tests := []struct{ before, after lcs }{
+		{lcs{diag{0, 0, 3}, diag{2, 2, 5}, diag{3, 4, 5}, diag{8, 9, 4}}, lcs{diag{0, 0, 2}, diag{2, 2, 1}, diag{3, 4, 5}, diag{8, 9, 4}}},
+		{lcs{diag{1, 1, 6}, diag{6, 12, 3}}, lcs{diag{1, 1, 5}, diag{6, 12, 3}}},
+		{lcs{diag{0, 0, 4}, diag{3, 5, 4}}, lcs{diag{0, 0, 3}, diag{3, 5, 4}}},
+		{lcs{diag{0, 20, 1}, diag{0, 0, 3}, diag{1, 20, 4}}, lcs{diag{0, 0, 3}, diag{3, 22, 2}}},
+		{lcs{diag{0, 0, 4}, diag{1, 1, 2}}, lcs{diag{0, 0, 4}}},
+		{lcs{diag{0, 0, 4}}, lcs{diag{0, 0, 4}}},
+		{lcs{}, lcs{}},
+		{lcs{diag{0, 0, 4}, diag{1, 1, 6}, diag{3, 3, 2}}, lcs{diag{0, 0, 1}, diag{1, 1, 6}}},
+	}
+	for n, x := range tests {
+		got := x.before.fix()
+		if len(got) != len(x.after) {
+			t.Errorf("got %v, expected %v, for %v", got, x.after, x.before)
+		}
+		olen := lcslen(x.after)
+		glen := lcslen(got)
+		if olen != glen {
+			t.Errorf("%d: lens(%d,%d) differ, %v, %v, %v", n, glen, olen, got, x.after, x.before)
+		}
+	}
+}
diff --git a/internal/lsp/diff/lcs/doc.go b/internal/lsp/diff/lcs/doc.go
new file mode 100644
index 0000000..dc779f3
--- /dev/null
+++ b/internal/lsp/diff/lcs/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lcs contains code to find longest-common-subsequences
+// (and diffs)
+package lcs
+
+/*
+Compute longest-common-subsequences of two slices A, B using
+algorithms from Myers' paper. A longest-common-subsequence
+(LCS from now on) of A and B is a maximal set of lexically increasing
+pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but
+they all have the same length. An LCS determines a sequence of edits
+that changes A into B.
+
+The key concept is the edit graph of A and B.
+If A has length N and B has length M, then the edit graph has
+vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   b      |             |             |   ___/‾‾‾   |   ___/‾‾‾   |             |             |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   c      |             |             |             |             |             |             |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+                 a             a             b             b             a             a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba"
+          ⊙   -------   ⊙   -------   ⊙   -------(3/3,6)-------   ⊙   -------(3/5,6)-------(4/6,6)
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------(2/3,5)-------   ⊙   -------   ⊙   -------   ⊙
+   b      |             |             |   ___/‾‾‾   |   ___/‾‾‾   |             |             |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------(3/5,4)-------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------(1/2,3)-------(2/3,3)-------   ⊙   -------   ⊙   -------   ⊙
+   c      |             |             |             |             |             |             |
+          ⊙   -------   ⊙   -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2)
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+   a      |   ___/‾‾‾   |   ___/‾‾‾   |             |             |   ___/‾‾‾   |   ___/‾‾‾   |
+          ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙   -------   ⊙
+                 a             a             b             b             a             a
+
+The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical
+to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected,
+there are 4 non-diagonal steps, and the diagonals form an LCS.
+
+There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon):
+A:"aabbaa", B:"aacaba"
+            ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+            ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------    ⊙   --------(:0/5,5)--------    ⊙
+    b       |               |               |   ____/‾‾‾    |   ____/‾‾‾    |               |               |
+            ⊙   --------    ⊙   --------    ⊙   --------(:1/3,4)--------    ⊙   --------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+        (:3/0,3)--------(:2/1,3)--------    ⊙   --------(:2/3,3)--------(:1/4,3)--------    ⊙   --------    ⊙
+    c       |               |               |               |               |               |               |
+            ⊙   --------    ⊙   --------    ⊙   --------(:3/3,2)--------(:2/4,2)--------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+        (:3/0,1)--------    ⊙   --------    ⊙   --------    ⊙   --------(:3/4,1)--------    ⊙   --------    ⊙
+    a       |   ____/‾‾‾    |   ____/‾‾‾    |               |               |   ____/‾‾‾    |   ____/‾‾‾    |
+        (:4/0,0)--------    ⊙   --------    ⊙   --------    ⊙   --------(:4/4,0)--------    ⊙   --------    ⊙
+                    a               a               b               b               a               a
+
+Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the
+front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short.
+We want to control how big D can be, by stopping when it gets too large. The forward algorithm then
+privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable
+asymmetry.
+
+Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in
+the edit graph look like.
+A:"aabbaa", B:"aacaba"
+             ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    ---------    ⊙    ---------    ⊙    --------- (2/3,5) ---------    ⊙    --------- (:0/5,5)---------    ⊙
+    b        |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |
+             ⊙    ---------    ⊙    ---------    ⊙    --------- (:1/3,4)---------    ⊙    ---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)---------    ⊙    ---------    ⊙
+    c        |                 |                 |                 |                 |                 |                 |
+             ⊙    ---------    ⊙    --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙
+    a        |    ____/‾‾‾‾    |    ____/‾‾‾‾    |                 |                 |    ____/‾‾‾‾    |    ____/‾‾‾‾    |
+             ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙    ---------    ⊙
+                      a                 a                 b                 b                 a                 a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has to stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm in the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
diff --git a/internal/lsp/diff/lcs/git.sh b/internal/lsp/diff/lcs/git.sh
new file mode 100644
index 0000000..caa4c42
--- /dev/null
+++ b/internal/lsp/diff/lcs/git.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.
+
+set -eu
+
+# WARNING: This script will install the latest version of $file
+# The largest real source file in the x/tools repo.
+# file=internal/lsp/source/completion/completion.go
+# file=internal/lsp/source/diagnostics.go
+file=internal/lsp/protocol/tsprotocol.go
+
+tmp=$(mktemp -d)
+git log $file |
+  awk '/^commit / {print $2}' |
+  nl -ba -nrz |
+  while read n hash; do
+    git checkout --quiet $hash $file
+    cp -f $file $tmp/$n
+  done
+(cd $tmp && zip -q - *) > testdata.zip
+rm -fr $tmp
+git restore --staged $file
+git restore $file
+echo "Created testdata.zip"
diff --git a/internal/lsp/diff/lcs/labels.go b/internal/lsp/diff/lcs/labels.go
new file mode 100644
index 0000000..39fe9f2
--- /dev/null
+++ b/internal/lsp/diff/lcs/labels.go
@@ -0,0 +1,56 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"fmt"
+)
+
+// label stores the D-path labels.
+// For each D, vec[D] has length D+1,
+// and the label for (D, k) is stored in vec[D][(D+k)/2].
+type label struct {
+	vec [][]int
+}
+
+// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE
+const debug = false
+
+// debugging. check that the (d,k) pair is valid
+// (that is, -d<=k<=d and d+k even)
+func checkDK(D, k int) {
+	if k >= -D && k <= D && (D+k)%2 == 0 {
+		return
+	}
+	panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k))
+}
+
+func (t *label) set(D, k, x int) {
+	if debug {
+		checkDK(D, k)
+	}
+	for len(t.vec) <= D {
+		t.vec = append(t.vec, nil)
+	}
+	if t.vec[D] == nil {
+		t.vec[D] = make([]int, D+1)
+	}
+	t.vec[D][(D+k)/2] = x // known that D+k is even
+}
+
+func (t *label) get(d, k int) int {
+	if debug {
+		checkDK(d, k)
+	}
+	return int(t.vec[d][(d+k)/2])
+}
+
+func newtriang(limit int) label {
+	if limit < 100 {
+		// Preallocate if limit is not large.
+		return label{vec: make([][]int, limit)}
+	}
+	return label{}
+}
diff --git a/internal/lsp/diff/lcs/old.go b/internal/lsp/diff/lcs/old.go
new file mode 100644
index 0000000..a091edd
--- /dev/null
+++ b/internal/lsp/diff/lcs/old.go
@@ -0,0 +1,530 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"fmt"
+	"strings"
+)
+
+// non generic code. The names have Old at the end to indicate they are
+// the implementation that doesn't use generics.
+
+// Compute the Diffs and the lcs.
+func Compute(a, b interface{}, limit int) ([]Diff, lcs) {
+	var ans lcs
+	g := newegraph(a, b, limit)
+	ans = g.twosided()
+	diffs := g.fromlcs(ans)
+	return diffs, ans
+}
+
+// editGraph carries the information for computing the lcs for []byte, []rune, or []string.
+type editGraph struct {
+	eq     eq    // how to compare elements of A, B, and convert slices to strings
+	vf, vb label // forward and backward labels
+
+	limit int // maximal value of D
+	// the bounding rectangle of the current edit graph
+	lx, ly, ux, uy int
+	delta          int // common subexpression: (ux-lx)-(uy-ly)
+}
+
+// abstraction in place of generic
+type eq interface {
+	eq(i, j int) bool
+	substr(i, j int) string // string from b[i:j]
+	lena() int
+	lenb() int
+}
+
+type byteeq struct {
+	a, b []byte // the input was ascii. perhaps these could be strings
+}
+
+func (x *byteeq) eq(i, j int) bool       { return x.a[i] == x.b[j] }
+func (x *byteeq) substr(i, j int) string { return string(x.b[i:j]) }
+func (x *byteeq) lena() int              { return int(len(x.a)) }
+func (x *byteeq) lenb() int              { return int(len(x.b)) }
+
+type runeeq struct {
+	a, b []rune
+}
+
+func (x *runeeq) eq(i, j int) bool       { return x.a[i] == x.b[j] }
+func (x *runeeq) substr(i, j int) string { return string(x.b[i:j]) }
+func (x *runeeq) lena() int              { return int(len(x.a)) }
+func (x *runeeq) lenb() int              { return int(len(x.b)) }
+
+type lineeq struct {
+	a, b []string
+}
+
+func (x *lineeq) eq(i, j int) bool       { return x.a[i] == x.b[j] }
+func (x *lineeq) substr(i, j int) string { return strings.Join(x.b[i:j], "") }
+func (x *lineeq) lena() int              { return int(len(x.a)) }
+func (x *lineeq) lenb() int              { return int(len(x.b)) }
+
+func neweq(a, b interface{}) eq {
+	switch x := a.(type) {
+	case []byte:
+		return &byteeq{a: x, b: b.([]byte)}
+	case []rune:
+		return &runeeq{a: x, b: b.([]rune)}
+	case []string:
+		return &lineeq{a: x, b: b.([]string)}
+	default:
+		panic(fmt.Sprintf("unexpected type %T in neweq", x))
+	}
+}
+
+func (g *editGraph) fromlcs(lcs lcs) []Diff {
+	var ans []Diff
+	var pa, pb int // offsets in a, b
+	for _, l := range lcs {
+		if pa < l.X && pb < l.Y {
+			ans = append(ans, Diff{pa, l.X, g.eq.substr(pb, l.Y)})
+		} else if pa < l.X {
+			ans = append(ans, Diff{pa, l.X, ""})
+		} else if pb < l.Y {
+			ans = append(ans, Diff{pa, l.X, g.eq.substr(pb, l.Y)})
+		}
+		pa = l.X + l.Len
+		pb = l.Y + l.Len
+	}
+	if pa < g.eq.lena() && pb < g.eq.lenb() {
+		ans = append(ans, Diff{pa, g.eq.lena(), g.eq.substr(pb, g.eq.lenb())})
+	} else if pa < g.eq.lena() {
+		ans = append(ans, Diff{pa, g.eq.lena(), ""})
+	} else if pb < g.eq.lenb() {
+		ans = append(ans, Diff{pa, g.eq.lena(), g.eq.substr(pb, g.eq.lenb())})
+	}
+	return ans
+}
+
+func newegraph(a, b interface{}, limit int) *editGraph {
+	if limit <= 0 {
+		limit = 1 << 25 // effectively infinity
+	}
+	var alen, blen int
+	switch a := a.(type) {
+	case []byte:
+		alen, blen = len(a), len(b.([]byte))
+	case []rune:
+		alen, blen = len(a), len(b.([]rune))
+	case []string:
+		alen, blen = len(a), len(b.([]string))
+	default:
+		panic(fmt.Sprintf("unexpected type %T in newegraph", a))
+	}
+	ans := &editGraph{eq: neweq(a, b), vf: newtriang(limit), vb: newtriang(limit), limit: int(limit),
+		ux: alen, uy: blen, delta: alen - blen}
+	return ans
+}
+
+// --- FORWARD ---
+// fdone decides if the forward path has reached the upper right
+// corner of the rectangle. If so, it also returns the computed lcs.
+func (e *editGraph) fdone(D, k int) (bool, lcs) {
+	// x, y, k are relative to the rectangle
+	x := e.vf.get(D, k)
+	y := x - k
+	if x == e.ux && y == e.uy {
+		return true, e.forwardlcs(D, k)
+	}
+	return false, nil
+}
+
+// run the forward algorithm, until success or up to the limit on D.
+func (e *editGraph) forward() lcs {
+	e.setForward(0, 0, e.lx)
+	if ok, ans := e.fdone(0, 0); ok {
+		return ans
+	}
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+		if ok, ans := e.fdone(D+1, -(D + 1)); ok {
+			return ans
+		}
+		e.setForward(D+1, D+1, e.getForward(D, D)+1)
+		if ok, ans := e.fdone(D+1, D+1); ok {
+			return ans
+		}
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get backwards
+			lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+			lookh := e.lookForward(k, e.getForward(D, k+1))
+			if lookv > lookh {
+				e.setForward(D+1, k, lookv)
+			} else {
+				e.setForward(D+1, k, lookh)
+			}
+			if ok, ans := e.fdone(D+1, k); ok {
+				return ans
+			}
+		}
+	}
+	// D is too large
+	// find the D path with maximal x+y inside the rectangle and
+	// use that to compute the found part of the lcs
+	kmax := -e.limit - 1
+	diagmax := -1
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getForward(e.limit, k)
+		y := x - k
+		if x+y > diagmax && x <= e.ux && y <= e.uy {
+			diagmax, kmax = x+y, k
+		}
+	}
+	return e.forwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking from the farthest point reached
+func (e *editGraph) forwardlcs(D, k int) lcs {
+	var ans lcs
+	for x := e.getForward(D, k); x != 0 || x-k != 0; {
+		if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) {
+			// if (x-1,y) is labelled D-1, x--,D--,k--,continue
+			D, k, x = D-1, k-1, x-1
+			continue
+		} else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) {
+			// if (x,y-1) is labelled D-1, x, D--,k++, continue
+			D, k = D-1, k+1
+			continue
+		}
+		// if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue
+		y := x - k
+		realx, realy := x+e.lx, y+e.ly
+		if e.eq.eq(realx-1, realy-1) {
+			ans = prependlcs(ans, realx-1, realy-1)
+			x--
+		} else {
+			panic("broken path")
+		}
+	}
+	return ans
+}
+
+// start at (x,y), go up the diagonal as far as possible,
+// and label the result with d
+func (e *editGraph) lookForward(k, relx int) int {
+	rely := relx - k
+	x, y := relx+e.lx, rely+e.ly
+	for x < e.ux && y < e.uy && e.eq.eq(x, y) {
+		x++
+		y++
+	}
+	return x
+}
+
+func (e *editGraph) setForward(d, k, relx int) {
+	x := e.lookForward(k, relx)
+	e.vf.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getForward(d, k int) int {
+	x := e.vf.get(d, k)
+	return x
+}
+
+// --- BACKWARD ---
+// bdone decides if the backward path has reached the lower left corner
+func (e *editGraph) bdone(D, k int) (bool, lcs) {
+	// x, y, k are relative to the rectangle
+	x := e.vb.get(D, k)
+	y := x - (k + e.delta)
+	if x == 0 && y == 0 {
+		return true, e.backwardlcs(D, k)
+	}
+	return false, nil
+}
+
+// run the backward algorithm, until success or up to the limit on D.
+func (e *editGraph) backward() lcs {
+	e.setBackward(0, 0, e.ux)
+	if ok, ans := e.bdone(0, 0); ok {
+		return ans
+	}
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+		if ok, ans := e.bdone(D+1, -(D + 1)); ok {
+			return ans
+		}
+		e.setBackward(D+1, D+1, e.getBackward(D, D))
+		if ok, ans := e.bdone(D+1, D+1); ok {
+			return ans
+		}
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get wrong
+			lookv := e.lookBackward(k, e.getBackward(D, k-1))
+			lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+			if lookv < lookh {
+				e.setBackward(D+1, k, lookv)
+			} else {
+				e.setBackward(D+1, k, lookh)
+			}
+			if ok, ans := e.bdone(D+1, k); ok {
+				return ans
+			}
+		}
+	}
+
+	// D is too large
+	// find the D path with minimal x+y inside the rectangle and
+	// use that to compute the part of the lcs found
+	kmax := -e.limit - 1
+	diagmin := 1 << 25
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getBackward(e.limit, k)
+		y := x - (k + e.delta)
+		if x+y < diagmin && x >= 0 && y >= 0 {
+			diagmin, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no paths when limit=%d?", e.limit))
+	}
+	return e.backwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking
+func (e *editGraph) backwardlcs(D, k int) lcs {
+	var ans lcs
+	for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; {
+		if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) {
+			// D--, k--, x unchanged
+			D, k = D-1, k-1
+			continue
+		} else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) {
+			// D--, k++, x++
+			D, k, x = D-1, k+1, x+1
+			continue
+		}
+		y := x - (k + e.delta)
+		realx, realy := x+e.lx, y+e.ly
+		if e.eq.eq(realx, realy) {
+			ans = appendlcs(ans, realx, realy)
+			x++
+		} else {
+			panic("broken path")
+		}
+	}
+	return ans
+}
+
+// start at (x,y), go down the diagonal as far as possible,
+func (e *editGraph) lookBackward(k, relx int) int {
+	rely := relx - (k + e.delta) // forward k = k + e.delta
+	x, y := relx+e.lx, rely+e.ly
+	for x > 0 && y > 0 && e.eq.eq(x-1, y-1) {
+		x--
+		y--
+	}
+	return x
+}
+
+// convert to rectangle, and label the result with d
+func (e *editGraph) setBackward(d, k, relx int) {
+	x := e.lookBackward(k, relx)
+	e.vb.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getBackward(d, k int) int {
+	x := e.vb.get(d, k)
+	return x
+}
+
+// -- TWOSIDED ---
+
+func (e *editGraph) twosided() lcs {
+	// The termination condition could be improved, as either the forward
+	// or backward pass could succeed before Myers' Lemma applies.
+	// Aside from questions of efficiency (is the extra testing cost-effective)
+	// this is more likely to matter when e.limit is reached.
+	e.setForward(0, 0, e.lx)
+	e.setBackward(0, 0, e.ux)
+
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		// just finished a backwards pass, so check
+		if got, ok := e.twoDone(D, D); ok {
+			return e.twolcs(D, D, got)
+		}
+		// do a forwards pass (D to D+1)
+		e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+		e.setForward(D+1, D+1, e.getForward(D, D)+1)
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get backwards
+			lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+			lookh := e.lookForward(k, e.getForward(D, k+1))
+			if lookv > lookh {
+				e.setForward(D+1, k, lookv)
+			} else {
+				e.setForward(D+1, k, lookh)
+			}
+		}
+		// just did a forward pass, so check
+		if got, ok := e.twoDone(D+1, D); ok {
+			return e.twolcs(D+1, D, got)
+		}
+		// do a backward pass, D to D+1
+		e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+		e.setBackward(D+1, D+1, e.getBackward(D, D))
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get wrong
+			lookv := e.lookBackward(k, e.getBackward(D, k-1))
+			lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+			if lookv < lookh {
+				e.setBackward(D+1, k, lookv)
+			} else {
+				e.setBackward(D+1, k, lookh)
+			}
+		}
+	}
+
+	// D too large. combine a forward and backward partial lcs
+	// first, a forward one
+	kmax := -e.limit - 1
+	diagmax := -1
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getForward(e.limit, k)
+		y := x - k
+		if x+y > diagmax && x <= e.ux && y <= e.uy {
+			diagmax, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit))
+	}
+	lcs := e.forwardlcs(e.limit, kmax)
+	// now a backward one
+	// find the D path with minimal x+y inside the rectangle and
+	// use that to compute the lcs
+	diagmin := 1 << 25 // infinity
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getBackward(e.limit, k)
+		y := x - (k + e.delta)
+		if x+y < diagmin && x >= 0 && y >= 0 {
+			diagmin, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit))
+	}
+	lcs = append(lcs, e.backwardlcs(e.limit, kmax)...)
+	// These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs)
+	ans := lcs.fix()
+	return ans
+}
+
+// Does Myers' Lemma apply?
+func (e *editGraph) twoDone(df, db int) (int, bool) {
+	if (df+db+e.delta)%2 != 0 {
+		return 0, false // diagonals cannot overlap
+	}
+	kmin := -db + e.delta
+	if -df > kmin {
+		kmin = -df
+	}
+	kmax := db + e.delta
+	if df < kmax {
+		kmax = df
+	}
+	for k := kmin; k <= kmax; k += 2 {
+		x := e.vf.get(df, k)
+		u := e.vb.get(db, k-e.delta)
+		if u <= x {
+			// is it worth looking at all the other k?
+			for l := k; l <= kmax; l += 2 {
+				x := e.vf.get(df, l)
+				y := x - l
+				u := e.vb.get(db, l-e.delta)
+				v := u - l
+				if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux {
+					return l, true
+				}
+			}
+			return k, true
+		}
+	}
+	return 0, false
+}
+
+func (e *editGraph) twolcs(df, db, kf int) lcs {
+	// db==df || db+1==df
+	x := e.vf.get(df, kf)
+	y := x - kf
+	kb := kf - e.delta
+	u := e.vb.get(db, kb)
+	v := u - kf
+
+	// Myers proved there is a df-path from (0,0) to (u,v)
+	// and a db-path from (x,y) to (N,M).
+	// In the first case the overall path is the forward path
+	// to (u,v) followed by the backward path to (N,M).
+	// In the second case the path is the backward path to (x,y)
+	// followed by the forward path to (x,y) from (0,0).
+
+	// Look for some special cases to avoid computing either of these paths.
+	if x == u {
+		// "babaab" "cccaba"
+		// already patched together
+		lcs := e.forwardlcs(df, kf)
+		lcs = append(lcs, e.backwardlcs(db, kb)...)
+		return lcs.sort()
+	}
+
+	// is (u-1,v) or (u,v-1) labelled df-1?
+	// if so, that forward df-1-path plus a horizontal or vertical edge
+	// is the df-path to (u,v), then plus the db-path to (N,M)
+	if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 {
+		//  "aabbab" "cbcabc"
+		lcs := e.forwardlcs(df-1, u-1-v)
+		lcs = append(lcs, e.backwardlcs(db, kb)...)
+		return lcs.sort()
+	}
+	if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u {
+		//  "abaabb" "bcacab"
+		lcs := e.forwardlcs(df-1, u-(v-1))
+		lcs = append(lcs, e.backwardlcs(db, kb)...)
+		return lcs.sort()
+	}
+
+	// The path can't possibly contribute to the lcs because it
+	// is all horizontal or vertical edges
+	if u == 0 || v == 0 || x == e.ux || y == e.uy {
+		// "abaabb" "abaaaa"
+		if u == 0 || v == 0 {
+			return e.backwardlcs(db, kb)
+		}
+		return e.forwardlcs(df, kf)
+	}
+
+	// is (x+1,y) or (x,y+1) labelled db-1?
+	if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 {
+		// "bababb" "baaabb"
+		lcs := e.backwardlcs(db-1, kb+1)
+		lcs = append(lcs, e.forwardlcs(df, kf)...)
+		return lcs.sort()
+	}
+	if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x {
+		// "abbbaa" "cabacc"
+		lcs := e.backwardlcs(db-1, kb-1)
+		lcs = append(lcs, e.forwardlcs(df, kf)...)
+		return lcs.sort()
+	}
+
+	// need to compute another path
+	// "aabbaa" "aacaba"
+	lcs := e.backwardlcs(db, kb)
+	oldx, oldy := e.ux, e.uy
+	e.ux = u
+	e.uy = v
+	lcs = append(lcs, e.forward()...)
+	e.ux, e.uy = oldx, oldy
+	return lcs.sort()
+}
diff --git a/internal/lsp/diff/lcs/old_test.go b/internal/lsp/diff/lcs/old_test.go
new file mode 100644
index 0000000..ba22fe6
--- /dev/null
+++ b/internal/lsp/diff/lcs/old_test.go
@@ -0,0 +1,203 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"math/rand"
+	"testing"
+)
+
+func TestForwardOld(t *testing.T) {
+	for _, tx := range Btests {
+		lim := len(tx.a) + len(tx.b)
+		left, right := []byte(tx.a), []byte(tx.b)
+		g := newegraph(left, right, lim)
+		lcs := g.forward()
+		diffs := g.fromlcs(lcs)
+		check(t, tx.a, lcs, tx.lcs)
+		checkDiffs(t, tx.a, diffs, tx.b)
+
+		g = newegraph(right, left, lim)
+		lcs = g.forward()
+		diffs = g.fromlcs(lcs)
+		check(t, tx.b, lcs, tx.lcs)
+		checkDiffs(t, tx.b, diffs, tx.a)
+	}
+}
+
+func TestBackwardOld(t *testing.T) {
+	for _, tx := range Btests {
+		lim := len(tx.a) + len(tx.b)
+		left, right := []byte(tx.a), []byte(tx.b)
+		g := newegraph(left, right, lim)
+		lcs := g.backward()
+		check(t, tx.a, lcs, tx.lcs)
+		diffs := g.fromlcs(lcs)
+		checkDiffs(t, tx.a, diffs, tx.b)
+
+		g = newegraph(right, left, lim)
+		lcs = g.backward()
+		diffs = g.fromlcs(lcs)
+		check(t, tx.b, lcs, tx.lcs)
+		checkDiffs(t, tx.b, diffs, tx.a)
+	}
+}
+
+func TestTwosidedOld(t *testing.T) {
+	// test both (a,b) and (b,a)
+	for _, tx := range Btests {
+		left, right := []byte(tx.a), []byte(tx.b)
+		lim := len(tx.a) + len(tx.b)
+		diffs, lcs := Compute(left, right, lim)
+		check(t, tx.a, lcs, tx.lcs)
+		checkDiffs(t, tx.a, diffs, tx.b)
+		diffs, lcs = Compute(right, left, lim)
+		check(t, tx.b, lcs, tx.lcs)
+		checkDiffs(t, tx.b, diffs, tx.a)
+	}
+}
+
+func TestIntOld(t *testing.T) {
+	// need to avoid any characters in btests
+	lfill, rfill := "AAAAAAAAAAAA", "BBBBBBBBBBBB"
+	for _, tx := range Btests {
+		if len(tx.a) < 2 || len(tx.b) < 2 {
+			continue
+		}
+		left := []byte(tx.a + lfill)
+		right := []byte(tx.b + rfill)
+		lim := len(tx.a) + len(tx.b)
+		diffs, lcs := Compute(left, right, lim)
+		check(t, string(left), lcs, tx.lcs)
+		checkDiffs(t, string(left), diffs, string(right))
+		diffs, lcs = Compute(right, left, lim)
+		check(t, string(right), lcs, tx.lcs)
+		checkDiffs(t, string(right), diffs, string(left))
+
+		left = []byte(lfill + tx.a)
+		right = []byte(rfill + tx.b)
+		diffs, lcs = Compute(left, right, lim)
+		check(t, string(left), lcs, tx.lcs)
+		checkDiffs(t, string(left), diffs, string(right))
+		diffs, lcs = Compute(right, left, lim)
+		check(t, string(right), lcs, tx.lcs)
+		checkDiffs(t, string(right), diffs, string(left))
+	}
+}
+
+func TestSpecialOld(t *testing.T) { // needs lcs.fix
+	a := []byte("golang.org/x/tools/intern")
+	b := []byte("github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/intern")
+	diffs, lcs := Compute(a, b, 4)
+	if !lcs.valid() {
+		t.Errorf("%d,%v", len(diffs), lcs)
+	}
+}
+
+func TestRegressionOld001(t *testing.T) {
+	a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+
+	b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n"
+	for i := 1; i < len(b); i++ {
+		diffs, lcs := Compute([]byte(a), []byte(b), int(i)) // 14 from gopls
+		if !lcs.valid() {
+			t.Errorf("%d,%v", len(diffs), lcs)
+		}
+		checkDiffs(t, a, diffs, b)
+	}
+}
+
+func TestRegressionOld002(t *testing.T) {
+	a := "n\"\n)\n"
+	b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n"
+	for i := 1; i <= len(b); i++ {
+		diffs, lcs := Compute([]byte(a), []byte(b), int(i))
+		if !lcs.valid() {
+			t.Errorf("%d,%v", len(diffs), lcs)
+		}
+		checkDiffs(t, a, diffs, b)
+	}
+}
+
+func TestRegressionOld003(t *testing.T) {
+	a := "golang.org/x/hello v1.0.0\nrequire golang.org/x/unused v1"
+	b := "golang.org/x/hello v1"
+	for i := 1; i <= len(a); i++ {
+		diffs, lcs := Compute([]byte(a), []byte(b), int(i))
+		if !lcs.valid() {
+			t.Errorf("%d,%v", len(diffs), lcs)
+		}
+		checkDiffs(t, a, diffs, b)
+	}
+}
+
+func TestRandOld(t *testing.T) {
+	rand.Seed(1)
+	for i := 0; i < 1000; i++ {
+		a := []rune(randstr("abω", 16))
+		b := []rune(randstr("abωc", 16))
+		g := newegraph(a, b, 24) // large enough to get true lcs
+		two := g.twosided()
+		forw := g.forward()
+		back := g.backward()
+		if lcslen(two) != lcslen(forw) || lcslen(forw) != lcslen(back) {
+			t.Logf("\n%v\n%v\n%v", forw, back, two)
+			t.Fatalf("%d forw:%d back:%d two:%d", i, lcslen(forw), lcslen(back), lcslen(two))
+		}
+		if !two.valid() || !forw.valid() || !back.valid() {
+			t.Errorf("check failure")
+		}
+	}
+}
+
+func BenchmarkTwoOld(b *testing.B) {
+	tests := genBench("abc", 96)
+	for i := 0; i < b.N; i++ {
+		for _, tt := range tests {
+			_, two := Compute([]byte(tt.before), []byte(tt.after), 100)
+			if !two.valid() {
+				b.Error("check failed")
+			}
+		}
+	}
+}
+
+func BenchmarkForwOld(b *testing.B) {
+	tests := genBench("abc", 96)
+	for i := 0; i < b.N; i++ {
+		for _, tt := range tests {
+			_, two := Compute([]byte(tt.before), []byte(tt.after), 100)
+			if !two.valid() {
+				b.Error("check failed")
+			}
+		}
+	}
+}
+
+func genBench(set string, n int) []struct{ before, after string } {
+	// before and after for benchmarks: 24 strings of length n, with
+	// before and after differing at least once, and in about 5% of positions
+	rand.Seed(3)
+	var ans []struct{ before, after string }
+	for i := 0; i < 24; i++ {
+		// maybe b should have an approximately known number of diffs
+		a := randstr(set, n)
+		cnt := 0
+		bb := make([]rune, 0, n)
+		for _, r := range a {
+			if rand.Float64() < .05 {
+				cnt++
+				r = 'N'
+			}
+			bb = append(bb, r)
+		}
+		if cnt == 0 {
+			// avoid == shortcut
+			bb[n/2] = 'N'
+		}
+		ans = append(ans, struct{ before, after string }{a, string(bb)})
+	}
+	return ans
+}
diff --git a/internal/lsp/diff/ndiff.go b/internal/lsp/diff/ndiff.go
new file mode 100644
index 0000000..8f7732d
--- /dev/null
+++ b/internal/lsp/diff/ndiff.go
@@ -0,0 +1,130 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/tools/internal/lsp/diff/lcs"
+	"golang.org/x/tools/internal/span"
+)
+
+// maxDiffs is a limit on how deeply the lcs algorithm should search;
+// the value is just a guess.
+const maxDiffs = 30
+
+// NComputeEdits computes TextEdits for strings
+// (both it and the diff in the myers package have type ComputeEdits, which
+// is why the arguments are strings, not []byte.)
+func NComputeEdits(uri span.URI, before, after string) ([]TextEdit, error) {
+	if before == after {
+		// very frequently true
+		return nil, nil
+	}
+	// The diffs returned by the lcs package use indexes into whatever slice
+	// was passed in. TextEdits need a span.Span which is computed with
+	// byte offsets, so rune or line offsets need to be converted.
+	if needrunes(before) || needrunes(after) {
+		diffs, _ := lcs.Compute([]rune(before), []rune(after), maxDiffs/2)
+		diffs = runeOffsets(diffs, []rune(before))
+		ans, err := convertDiffs(uri, diffs, []byte(before))
+		return ans, err
+	} else {
+		diffs, _ := lcs.Compute([]byte(before), []byte(after), maxDiffs/2)
+		ans, err := convertDiffs(uri, diffs, []byte(before))
+		return ans, err
+	}
+}
+
+// NComputeLineEdits computes TextEdits for []strings
+func NComputeLineEdits(uri span.URI, before, after []string) ([]TextEdit, error) {
+	diffs, _ := lcs.Compute(before, after, maxDiffs/2)
+	diffs = lineOffsets(diffs, before)
+	ans, err := convertDiffs(uri, diffs, []byte(strJoin(before)))
+	// the code is not coping with possible missing \ns at the ends
+	return ans, err
+}
+
+// convert diffs with byte offsets into diffs with line and column
+func convertDiffs(uri span.URI, diffs []lcs.Diff, src []byte) ([]TextEdit, error) {
+	ans := make([]TextEdit, len(diffs))
+	tf := span.NewTokenFile(uri.Filename(), src)
+	for i, d := range diffs {
+		s := newSpan(uri, d.Start, d.End)
+		s, err := s.WithPosition(tf)
+		if err != nil {
+			return nil, err
+		}
+		ans[i] = TextEdit{s, d.Text}
+	}
+	return ans, nil
+}
+
+// convert diffs with rune offsets into diffs with byte offsets
+func runeOffsets(diffs []lcs.Diff, src []rune) []lcs.Diff {
+	var idx int
+	var tmp strings.Builder // string because []byte([]rune) is illegal
+	for i, d := range diffs {
+		tmp.WriteString(string(src[idx:d.Start]))
+		v := tmp.Len()
+		tmp.WriteString(string(src[d.Start:d.End]))
+		d.Start = v
+		idx = d.End
+		d.End = tmp.Len()
+		diffs[i] = d
+	}
+	return diffs
+}
+
+// convert diffs with line offsets into diffs with byte offsets
+func lineOffsets(diffs []lcs.Diff, src []string) []lcs.Diff {
+	var idx int
+	var tmp strings.Builder // accumulates joined lines to compute byte offsets
+	for i, d := range diffs {
+		tmp.WriteString(strJoin(src[idx:d.Start]))
+		v := tmp.Len()
+		tmp.WriteString(strJoin(src[d.Start:d.End]))
+		d.Start = v
+		idx = d.End
+		d.End = tmp.Len()
+		diffs[i] = d
+	}
+	return diffs
+}
+
+// join lines. (strings.Join doesn't add a trailing separator)
+func strJoin(elems []string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+	n := 0
+	for i := 0; i < len(elems); i++ {
+		n += len(elems[i])
+	}
+
+	var b strings.Builder
+	b.Grow(n)
+	for _, s := range elems {
+		b.WriteString(s)
+		//b.WriteByte('\n')
+	}
+	return b.String()
+}
+
+func newSpan(uri span.URI, left, right int) span.Span {
+	return span.New(uri, span.NewPoint(0, 0, left), span.NewPoint(0, 0, right))
+}
+
+// needrunes reports whether the string must be converted to []rune
+// for random access (i.e., it contains non-ASCII bytes).
+func needrunes(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return true
+		}
+	}
+	return false
+}
diff --git a/internal/lsp/fake/client.go b/internal/lsp/fake/client.go
index fdc67a6..4c5f2a2 100644
--- a/internal/lsp/fake/client.go
+++ b/internal/lsp/fake/client.go
@@ -77,7 +77,7 @@
 		if item.Section != "gopls" {
 			continue
 		}
-		results[i] = c.editor.configuration()
+		results[i] = c.editor.settings()
 	}
 	return results, nil
 }
diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go
index 0fc99a0..bc2cb2f 100644
--- a/internal/lsp/fake/editor.go
+++ b/internal/lsp/fake/editor.go
@@ -17,31 +17,30 @@
 	"sync"
 
 	"golang.org/x/tools/internal/jsonrpc2"
+	"golang.org/x/tools/internal/jsonrpc2/servertest"
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/span"
+	"golang.org/x/tools/internal/xcontext"
 )
 
 // Editor is a fake editor client.  It keeps track of client state and can be
 // used for writing LSP tests.
 type Editor struct {
-	Config EditorConfig
 
 	// Server, client, and sandbox are concurrency safe and written only
 	// at construction time, so do not require synchronization.
 	Server     protocol.Server
+	cancelConn func()
 	serverConn jsonrpc2.Conn
 	client     *Client
 	sandbox    *Sandbox
 	defaultEnv map[string]string
 
-	// Since this editor is intended just for testing, we use very coarse
-	// locking.
-	mu sync.Mutex
-	// Editor state.
-	buffers map[string]buffer
-	// Capabilities / Options
-	serverCapabilities protocol.ServerCapabilities
+	mu                 sync.Mutex                  // guards config, buffers, serverCapabilities
+	config             EditorConfig                // editor configuration
+	buffers            map[string]buffer           // open buffers
+	serverCapabilities protocol.ServerCapabilities // capabilities / options
 
 	// Call metrics for the purpose of expectations. This is done in an ad-hoc
 	// manner for now. Perhaps in the future we should do something more
@@ -77,21 +76,11 @@
 //
 // The zero value for EditorConfig should correspond to its defaults.
 type EditorConfig struct {
-	Env        map[string]string
-	BuildFlags []string
-
-	// CodeLenses is a map defining whether codelens are enabled, keyed by the
-	// codeLens command. CodeLenses which are not present in this map are left in
-	// their default state.
-	CodeLenses map[string]bool
-
-	// SymbolMatcher is the config associated with the "symbolMatcher" gopls
-	// config option.
-	SymbolMatcher, SymbolStyle *string
-
-	// LimitWorkspaceScope is true if the user does not want to expand their
-	// workspace scope to the entire module.
-	LimitWorkspaceScope bool
+	// Env holds environment variables to apply on top of the default editor
+	// environment. When applying these variables, the special string
+	// $SANDBOX_WORKDIR is replaced by the absolute path to the sandbox working
+	// directory.
+	Env map[string]string
 
 	// WorkspaceFolders is the workspace folders to configure on the LSP server,
 	// relative to the sandbox workdir.
@@ -101,14 +90,6 @@
 	// To explicitly send no workspace folders, use an empty (non-nil) slice.
 	WorkspaceFolders []string
 
-	// AllExperiments sets the "allExperiments" configuration, which enables
-	// all of gopls's opt-in settings.
-	AllExperiments bool
-
-	// Whether to send the current process ID, for testing data that is joined to
-	// the PID. This can only be set by one test.
-	SendPID bool
-
 	// Whether to edit files with windows line endings.
 	WindowsLineEndings bool
 
@@ -120,14 +101,8 @@
 	//  "gotmpl" -> ".*tmpl"
 	FileAssociations map[string]string
 
-	// Settings holds arbitrary additional settings to apply to the gopls config.
-	// TODO(rfindley): replace existing EditorConfig fields with Settings.
+	// Settings holds user-provided configuration for the LSP server.
 	Settings map[string]interface{}
-
-	ImportShortcut                 string
-	DirectoryFilters               []string
-	VerboseOutput                  bool
-	ExperimentalUseInvalidMetadata bool
 }
 
 // NewEditor Creates a new Editor.
@@ -136,7 +111,7 @@
 		buffers:    make(map[string]buffer),
 		sandbox:    sandbox,
 		defaultEnv: sandbox.GoEnv(),
-		Config:     config,
+		config:     config,
 	}
 }
 
@@ -147,15 +122,20 @@
 // It returns the editor, so that it may be called as follows:
 //
 //	editor, err := NewEditor(s).Connect(ctx, conn)
-func (e *Editor) Connect(ctx context.Context, conn jsonrpc2.Conn, hooks ClientHooks) (*Editor, error) {
+func (e *Editor) Connect(ctx context.Context, connector servertest.Connector, hooks ClientHooks) (*Editor, error) {
+	bgCtx, cancelConn := context.WithCancel(xcontext.Detach(ctx))
+	conn := connector.Connect(bgCtx)
+	e.cancelConn = cancelConn
+
 	e.serverConn = conn
 	e.Server = protocol.ServerDispatcher(conn)
 	e.client = &Client{editor: e, hooks: hooks}
-	conn.Go(ctx,
+	conn.Go(bgCtx,
 		protocol.Handlers(
 			protocol.ClientHandler(e.client,
 				jsonrpc2.MethodNotFound)))
-	if err := e.initialize(ctx, e.Config.WorkspaceFolders); err != nil {
+
+	if err := e.initialize(ctx, e.config.WorkspaceFolders); err != nil {
 		return nil, err
 	}
 	e.sandbox.Workdir.AddWatcher(e.onFileChanges)
@@ -198,6 +178,10 @@
 	if err := e.Exit(ctx); err != nil {
 		return err
 	}
+	defer func() {
+		e.cancelConn()
+	}()
+
 	// called close on the editor should result in the connection closing
 	select {
 	case <-e.serverConn.Done():
@@ -213,65 +197,47 @@
 	return e.client
 }
 
-func (e *Editor) overlayEnv() map[string]string {
+// settings builds the settings map for use in LSP settings
+// RPCs.
+func (e *Editor) settings() map[string]interface{} {
+	e.mu.Lock()
+	defer e.mu.Unlock()
 	env := make(map[string]string)
 	for k, v := range e.defaultEnv {
+		env[k] = v
+	}
+	for k, v := range e.config.Env {
+		env[k] = v
+	}
+	for k, v := range env {
 		v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
 		env[k] = v
 	}
-	for k, v := range e.Config.Env {
-		v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename())
-		env[k] = v
-	}
-	return env
-}
 
-func (e *Editor) configuration() map[string]interface{} {
-	config := map[string]interface{}{
+	settings := map[string]interface{}{
+		"env": env,
+
+		// Use verbose progress reporting so that regtests can assert on
+		// asynchronous operations being completed (such as diagnosing a snapshot).
 		"verboseWorkDoneProgress": true,
-		"env":                     e.overlayEnv(),
-		"expandWorkspaceToModule": !e.Config.LimitWorkspaceScope,
-		"completionBudget":        "10s",
+
+		// Set a generous completion budget, so that tests don't flake because
+		// completions are too slow.
+		"completionBudget": "10s",
+
+		// Shorten the diagnostic delay to speed up test execution (else we'd add
+		// the default delay to each assertion about diagnostics)
+		"diagnosticsDelay": "10ms",
 	}
 
-	for k, v := range e.Config.Settings {
-		config[k] = v
+	for k, v := range e.config.Settings {
+		if k == "env" {
+			panic("must not provide env via the EditorConfig.Settings field: use the EditorConfig.Env field instead")
+		}
+		settings[k] = v
 	}
 
-	if e.Config.BuildFlags != nil {
-		config["buildFlags"] = e.Config.BuildFlags
-	}
-	if e.Config.DirectoryFilters != nil {
-		config["directoryFilters"] = e.Config.DirectoryFilters
-	}
-	if e.Config.ExperimentalUseInvalidMetadata {
-		config["experimentalUseInvalidMetadata"] = true
-	}
-	if e.Config.CodeLenses != nil {
-		config["codelenses"] = e.Config.CodeLenses
-	}
-	if e.Config.SymbolMatcher != nil {
-		config["symbolMatcher"] = *e.Config.SymbolMatcher
-	}
-	if e.Config.SymbolStyle != nil {
-		config["symbolStyle"] = *e.Config.SymbolStyle
-	}
-	if e.Config.AllExperiments {
-		config["allExperiments"] = true
-	}
-
-	if e.Config.VerboseOutput {
-		config["verboseOutput"] = true
-	}
-
-	if e.Config.ImportShortcut != "" {
-		config["importShortcut"] = e.Config.ImportShortcut
-	}
-
-	config["diagnosticsDelay"] = "10ms"
-
-	// ExperimentalWorkspaceModule is only set as a mode, not a configuration.
-	return config
+	return settings
 }
 
 func (e *Editor) initialize(ctx context.Context, workspaceFolders []string) error {
@@ -293,10 +259,7 @@
 	params.Capabilities.Window.WorkDoneProgress = true
 	// TODO: set client capabilities
 	params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated}
-	params.InitializationOptions = e.configuration()
-	if e.Config.SendPID {
-		params.ProcessID = int32(os.Getpid())
-	}
+	params.InitializationOptions = e.settings()
 
 	params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true
 	params.Capabilities.TextDocument.SemanticTokens.Requests.Full = true
@@ -397,20 +360,21 @@
 }
 
 func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content string) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
 	buf := buffer{
-		windowsLineEndings: e.Config.WindowsLineEndings,
+		windowsLineEndings: e.config.WindowsLineEndings,
 		version:            1,
 		path:               path,
 		lines:              lines(content),
 		dirty:              dirty,
 	}
-	e.mu.Lock()
-	defer e.mu.Unlock()
 	e.buffers[path] = buf
 
 	item := protocol.TextDocumentItem{
 		URI:        e.sandbox.Workdir.URI(buf.path),
-		LanguageID: e.languageID(buf.path),
+		LanguageID: languageID(buf.path, e.config.FileAssociations),
 		Version:    int32(buf.version),
 		Text:       buf.text(),
 	}
@@ -436,9 +400,11 @@
 	"gotmpl":  regexp.MustCompile(`^.*tmpl$`),
 }
 
-func (e *Editor) languageID(p string) string {
+// languageID returns the language identifier for the path p given the user
+// configured fileAssociations.
+func languageID(p string, fileAssociations map[string]string) string {
 	base := path.Base(p)
-	for lang, re := range e.Config.FileAssociations {
+	for lang, re := range fileAssociations {
 		re := regexp.MustCompile(re)
 		if re.MatchString(base) {
 			return lang
@@ -474,7 +440,7 @@
 
 	if e.Server != nil {
 		if err := e.Server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{
-			TextDocument: e.textDocumentIdentifier(path),
+			TextDocument: e.TextDocumentIdentifier(path),
 		}); err != nil {
 			return fmt.Errorf("DidClose: %w", err)
 		}
@@ -485,7 +451,7 @@
 	return nil
 }
 
-func (e *Editor) textDocumentIdentifier(path string) protocol.TextDocumentIdentifier {
+func (e *Editor) TextDocumentIdentifier(path string) protocol.TextDocumentIdentifier {
 	return protocol.TextDocumentIdentifier{
 		URI: e.sandbox.Workdir.URI(path),
 	}
@@ -517,7 +483,7 @@
 		includeText = syncOptions.Save.IncludeText
 	}
 
-	docID := e.textDocumentIdentifier(buf.path)
+	docID := e.TextDocumentIdentifier(buf.path)
 	if e.Server != nil {
 		if err := e.Server.WillSave(ctx, &protocol.WillSaveTextDocumentParams{
 			TextDocument: docID,
@@ -739,7 +705,7 @@
 	params := &protocol.DidChangeTextDocumentParams{
 		TextDocument: protocol.VersionedTextDocumentIdentifier{
 			Version:                int32(buf.version),
-			TextDocumentIdentifier: e.textDocumentIdentifier(buf.path),
+			TextDocumentIdentifier: e.TextDocumentIdentifier(buf.path),
 		},
 		ContentChanges: evts,
 	}
@@ -1054,7 +1020,7 @@
 		return nil, fmt.Errorf("buffer %q is not open", path)
 	}
 	params := &protocol.CodeLensParams{
-		TextDocument: e.textDocumentIdentifier(path),
+		TextDocument: e.TextDocumentIdentifier(path),
 	}
 	lens, err := e.Server.CodeLens(ctx, params)
 	if err != nil {
@@ -1076,7 +1042,7 @@
 	}
 	params := &protocol.CompletionParams{
 		TextDocumentPositionParams: protocol.TextDocumentPositionParams{
-			TextDocument: e.textDocumentIdentifier(path),
+			TextDocument: e.TextDocumentIdentifier(path),
 			Position:     pos.ToProtocolPosition(),
 		},
 	}
@@ -1126,7 +1092,7 @@
 		return nil, fmt.Errorf("buffer %q is not open", path)
 	}
 	params := &protocol.InlayHintParams{
-		TextDocument: e.textDocumentIdentifier(path),
+		TextDocument: e.TextDocumentIdentifier(path),
 	}
 	hints, err := e.Server.InlayHint(ctx, params)
 	if err != nil {
@@ -1148,7 +1114,7 @@
 	}
 	params := &protocol.ReferenceParams{
 		TextDocumentPositionParams: protocol.TextDocumentPositionParams{
-			TextDocument: e.textDocumentIdentifier(path),
+			TextDocument: e.TextDocumentIdentifier(path),
 			Position:     pos.ToProtocolPosition(),
 		},
 		Context: protocol.ReferenceContext{
@@ -1167,7 +1133,7 @@
 		return nil
 	}
 	params := &protocol.RenameParams{
-		TextDocument: e.textDocumentIdentifier(path),
+		TextDocument: e.TextDocumentIdentifier(path),
 		Position:     pos.ToProtocolPosition(),
 		NewName:      newName,
 	}
@@ -1205,6 +1171,30 @@
 	return e.EditBuffer(ctx, path, fakeEdits)
 }
 
+// Config returns the current editor configuration.
+func (e *Editor) Config() EditorConfig {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	return e.config
+}
+
+// ChangeConfiguration sets the new editor configuration, and if applicable
+// sends a didChangeConfiguration notification.
+//
+// An error is returned if the change notification failed to send.
+func (e *Editor) ChangeConfiguration(ctx context.Context, newConfig EditorConfig) error {
+	e.mu.Lock()
+	e.config = newConfig
+	e.mu.Unlock() // don't hold e.mu during server calls
+	if e.Server != nil {
+		var params protocol.DidChangeConfigurationParams // empty: gopls ignores the Settings field
+		if err := e.Server.DidChangeConfiguration(ctx, &params); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // CodeAction executes a codeAction request on the server.
 func (e *Editor) CodeAction(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) {
 	if e.Server == nil {
@@ -1217,7 +1207,7 @@
 		return nil, fmt.Errorf("buffer %q is not open", path)
 	}
 	params := &protocol.CodeActionParams{
-		TextDocument: e.textDocumentIdentifier(path),
+		TextDocument: e.TextDocumentIdentifier(path),
 		Context: protocol.CodeActionContext{
 			Diagnostics: diagnostics,
 		},
diff --git a/internal/lsp/fake/sandbox.go b/internal/lsp/fake/sandbox.go
index b439564..72b846c 100644
--- a/internal/lsp/fake/sandbox.go
+++ b/internal/lsp/fake/sandbox.go
@@ -9,9 +9,11 @@
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/testenv"
@@ -68,6 +70,10 @@
 // If rootDir is non-empty, it will be used as the root of temporary
 // directories created for the sandbox. Otherwise, a new temporary directory
 // will be used as root.
+//
+// TODO(rfindley): the sandbox abstraction doesn't seem to carry its weight.
+// Sandboxes should be composed out of their building-blocks, rather than via a
+// monolithic configuration.
 func NewSandbox(config *SandboxConfig) (_ *Sandbox, err error) {
 	if config == nil {
 		config = new(SandboxConfig)
@@ -157,6 +163,9 @@
 	dataMap := make(map[string][]byte)
 	archive := txtar.Parse([]byte(txt))
 	for _, f := range archive.Files {
+		if _, ok := dataMap[f.Name]; ok {
+			panic(fmt.Sprintf("found file %q twice", f.Name))
+		}
 		dataMap[f.Name] = f.Data
 	}
 	return dataMap
@@ -266,9 +275,36 @@
 	if sb.gopath != "" {
 		goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, false)
 	}
-	err := os.RemoveAll(sb.rootdir)
+	err := removeAll(sb.rootdir)
 	if err != nil || goCleanErr != nil {
 		return fmt.Errorf("error(s) cleaning sandbox: cleaning modcache: %v; removing files: %v", goCleanErr, err)
 	}
 	return nil
 }
+
+// removeAll is copied from GOROOT/src/testing/testing.go
+//
+// removeAll is like os.RemoveAll, but retries Windows "Access is denied."
+// errors up to an arbitrary timeout.
+//
+// See https://go.dev/issue/50051 for additional context.
+func removeAll(path string) error {
+	const arbitraryTimeout = 2 * time.Second
+	var (
+		start     time.Time
+		nextSleep = 1 * time.Millisecond
+	)
+	for {
+		err := os.RemoveAll(path)
+		if !isWindowsRetryable(err) {
+			return err
+		}
+		if start.IsZero() {
+			start = time.Now()
+		} else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
+			return err
+		}
+		time.Sleep(nextSleep)
+		nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
+	}
+}
diff --git a/internal/lsp/fake/workdir.go b/internal/lsp/fake/workdir.go
index 734f5fd..0a72083 100644
--- a/internal/lsp/fake/workdir.go
+++ b/internal/lsp/fake/workdir.go
@@ -77,6 +77,10 @@
 // on Windows.
 var isWindowsErrLockViolation = func(err error) bool { return false }
 
+// isWindowsRetryable reports whether err is a Windows error code
+// that may be fixed by retrying a failed filesystem operation.
+var isWindowsRetryable = func(err error) bool { return false }
+
 // Workdir is a temporary working directory for tests. It exposes file
 // operations in terms of relative paths, and fakes file watching by triggering
 // events on file operations.
diff --git a/internal/lsp/fake/workdir_windows.go b/internal/lsp/fake/workdir_windows.go
index bcd18b7..fc5ad1a 100644
--- a/internal/lsp/fake/workdir_windows.go
+++ b/internal/lsp/fake/workdir_windows.go
@@ -10,10 +10,31 @@
 )
 
 func init() {
-	// from https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
-	const ERROR_LOCK_VIOLATION syscall.Errno = 33
+	// constants copied from GOROOT/src/internal/syscall/windows/syscall_windows.go
+	const (
+		ERROR_SHARING_VIOLATION syscall.Errno = 32
+		ERROR_LOCK_VIOLATION    syscall.Errno = 33
+	)
 
 	isWindowsErrLockViolation = func(err error) bool {
 		return errors.Is(err, ERROR_LOCK_VIOLATION)
 	}
+
+	// Copied from GOROOT/src/testing/testing_windows.go
+	isWindowsRetryable = func(err error) bool {
+		for {
+			unwrapped := errors.Unwrap(err)
+			if unwrapped == nil {
+				break
+			}
+			err = unwrapped
+		}
+		if err == syscall.ERROR_ACCESS_DENIED {
+			return true // Observed in https://go.dev/issue/50051.
+		}
+		if err == ERROR_SHARING_VIOLATION {
+			return true // Observed in https://go.dev/issue/51442.
+		}
+		return false
+	}
 }
diff --git a/internal/lsp/fuzzy/matcher.go b/internal/lsp/fuzzy/matcher.go
index 265cdcf..92e1001 100644
--- a/internal/lsp/fuzzy/matcher.go
+++ b/internal/lsp/fuzzy/matcher.go
@@ -8,6 +8,7 @@
 import (
 	"bytes"
 	"fmt"
+	"go/ast"
 )
 
 const (
@@ -405,3 +406,30 @@
 	}
 	return false
 }
+
+// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the
+// given pattern. We return the identifier whose name is most similar to the pattern.
+func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr {
+	fuzz := NewMatcher(pattern)
+	var bestFuzz ast.Expr
+	highScore := float32(0) // minimum score is 0 (no match)
+	for _, ident := range idents {
+		// TODO: Improve scoring algorithm.
+		score := fuzz.Score(ident.Name)
+		if score > highScore {
+			highScore = score
+			bestFuzz = ident
+		} else if score == 0 {
+			// Order matters in the fuzzy matching algorithm. If we find no match
+			// when matching the target to the identifier, try matching the identifier
+			// to the target.
+			revFuzz := NewMatcher(ident.Name)
+			revScore := revFuzz.Score(pattern)
+			if revScore > highScore {
+				highScore = revScore
+				bestFuzz = ident
+			}
+		}
+	}
+	return bestFuzz
+}
diff --git a/internal/lsp/general.go b/internal/lsp/general.go
index 385a04a..8ea4d7f 100644
--- a/internal/lsp/general.go
+++ b/internal/lsp/general.go
@@ -249,17 +249,21 @@
 		}
 		work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil)
 		snapshot, release, err := s.addView(ctx, folder.Name, uri)
-		if err == source.ErrViewExists {
-			continue
-		}
 		if err != nil {
+			if err == source.ErrViewExists {
+				continue
+			}
 			viewErrors[uri] = err
 			work.End(ctx, fmt.Sprintf("Error loading packages: %s", err))
 			continue
 		}
+		// Inv: release() must be called once.
+
 		var swg sync.WaitGroup
 		swg.Add(1)
 		allFoldersWg.Add(1)
+		// TODO(adonovan): this looks fishy. Is AwaitInitialized
+		// supposed to be called once per folder?
 		go func() {
 			defer swg.Done()
 			defer allFoldersWg.Done()
@@ -271,6 +275,7 @@
 		buf := &bytes.Buffer{}
 		if err := snapshot.WriteEnv(ctx, buf); err != nil {
 			viewErrors[uri] = err
+			release()
 			continue
 		}
 		event.Log(ctx, buf.String())
@@ -474,8 +479,7 @@
 		release()
 		return nil, nil, false, func() {}, err
 	}
-	kind := snapshot.View().FileKind(fh)
-	if expectKind != source.UnknownKind && kind != expectKind {
+	if expectKind != source.UnknownKind && view.FileKind(fh) != expectKind {
 		// Wrong kind of file. Nothing to do.
 		release()
 		return nil, nil, false, func() {}, nil
@@ -483,6 +487,8 @@
 	return snapshot, fh, true, release, nil
 }
 
+// shutdown implements the 'shutdown' LSP handler. It releases resources
+// associated with the server and waits for all ongoing work to complete.
 func (s *Server) shutdown(ctx context.Context) error {
 	s.stateMu.Lock()
 	defer s.stateMu.Unlock()
diff --git a/internal/lsp/link.go b/internal/lsp/link.go
index 7bb09b4..65da8a5 100644
--- a/internal/lsp/link.go
+++ b/internal/lsp/link.go
@@ -49,6 +49,8 @@
 	if err != nil {
 		return nil, err
 	}
+	tokFile := pm.Mapper.TokFile
+
 	var links []protocol.DocumentLink
 	for _, req := range pm.File.Require {
 		if req.Syntax == nil {
@@ -66,9 +68,9 @@
 		}
 		// Shift the start position to the location of the
 		// dependency within the require statement.
-		start, end := token.Pos(s+i), token.Pos(s+i+len(dep))
+		start, end := tokFile.Pos(s+i), tokFile.Pos(s+i+len(dep))
 		target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "")
-		l, err := toProtocolLink(snapshot, pm.Mapper, target, start, end, source.Mod)
+		l, err := toProtocolLink(tokFile, pm.Mapper, target, start, end)
 		if err != nil {
 			return nil, err
 		}
@@ -78,6 +80,7 @@
 	if syntax := pm.File.Syntax; syntax == nil {
 		return links, nil
 	}
+
 	// Get all the links that are contained in the comments of the file.
 	for _, expr := range pm.File.Syntax.Stmt {
 		comments := expr.Comment()
@@ -86,7 +89,8 @@
 		}
 		for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} {
 			for _, comment := range section {
-				l, err := findLinksInString(ctx, snapshot, comment.Token, token.Pos(comment.Start.Byte), pm.Mapper, source.Mod)
+				start := tokFile.Pos(comment.Start.Byte)
+				l, err := findLinksInString(ctx, snapshot, comment.Token, start, tokFile, pm.Mapper)
 				if err != nil {
 					return nil, err
 				}
@@ -143,8 +147,8 @@
 			// Account for the quotation marks in the positions.
 			start := imp.Path.Pos() + 1
 			end := imp.Path.End() - 1
-			target = source.BuildLink(view.Options().LinkTarget, target, "")
-			l, err := toProtocolLink(snapshot, pgf.Mapper, target, start, end, source.Go)
+			targetURL := source.BuildLink(view.Options().LinkTarget, target, "")
+			l, err := toProtocolLink(pgf.Tok, pgf.Mapper, targetURL, start, end)
 			if err != nil {
 				return nil, err
 			}
@@ -152,7 +156,7 @@
 		}
 	}
 	for _, s := range str {
-		l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Mapper, source.Go)
+		l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Tok, pgf.Mapper)
 		if err != nil {
 			return nil, err
 		}
@@ -160,7 +164,7 @@
 	}
 	for _, commentGroup := range pgf.File.Comments {
 		for _, comment := range commentGroup.List {
-			l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Mapper, source.Go)
+			l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Tok, pgf.Mapper)
 			if err != nil {
 				return nil, err
 			}
@@ -193,7 +197,8 @@
 	"https": true,
 }
 
-func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, m *protocol.ColumnMapper, fileKind source.FileKind) ([]protocol.DocumentLink, error) {
+// tokFile may be a throwaway File for non-Go files.
+func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, tokFile *token.File, m *protocol.ColumnMapper) ([]protocol.DocumentLink, error) {
 	var links []protocol.DocumentLink
 	for _, index := range snapshot.View().Options().URLRegexp.FindAllIndex([]byte(src), -1) {
 		start, end := index[0], index[1]
@@ -216,7 +221,7 @@
 		if !acceptedSchemes[linkURL.Scheme] {
 			continue
 		}
-		l, err := toProtocolLink(snapshot, m, linkURL.String(), startPos, endPos, fileKind)
+		l, err := toProtocolLink(tokFile, m, linkURL.String(), startPos, endPos)
 		if err != nil {
 			return nil, err
 		}
@@ -233,8 +238,8 @@
 			continue
 		}
 		org, repo, number := matches[1], matches[2], matches[3]
-		target := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number)
-		l, err := toProtocolLink(snapshot, m, target, startPos, endPos, fileKind)
+		targetURL := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number)
+		l, err := toProtocolLink(tokFile, m, targetURL, startPos, endPos)
 		if err != nil {
 			return nil, err
 		}
@@ -255,37 +260,17 @@
 	issueRegexp *regexp.Regexp
 )
 
-func toProtocolLink(snapshot source.Snapshot, m *protocol.ColumnMapper, target string, start, end token.Pos, fileKind source.FileKind) (protocol.DocumentLink, error) {
-	var rng protocol.Range
-	switch fileKind {
-	case source.Go:
-		spn, err := span.NewRange(snapshot.FileSet(), start, end).Span()
-		if err != nil {
-			return protocol.DocumentLink{}, err
-		}
-		rng, err = m.Range(spn)
-		if err != nil {
-			return protocol.DocumentLink{}, err
-		}
-	case source.Mod:
-		s, e := int(start), int(end)
-		line, col, err := span.ToPosition(m.TokFile, s)
-		if err != nil {
-			return protocol.DocumentLink{}, err
-		}
-		start := span.NewPoint(line, col, s)
-		line, col, err = span.ToPosition(m.TokFile, e)
-		if err != nil {
-			return protocol.DocumentLink{}, err
-		}
-		end := span.NewPoint(line, col, e)
-		rng, err = m.Range(span.New(m.URI, start, end))
-		if err != nil {
-			return protocol.DocumentLink{}, err
-		}
+func toProtocolLink(tokFile *token.File, m *protocol.ColumnMapper, targetURL string, start, end token.Pos) (protocol.DocumentLink, error) {
+	spn, err := span.NewRange(tokFile, start, end).Span()
+	if err != nil {
+		return protocol.DocumentLink{}, err
+	}
+	rng, err := m.Range(spn)
+	if err != nil {
+		return protocol.DocumentLink{}, err
 	}
 	return protocol.DocumentLink{
 		Range:  rng,
-		Target: target,
+		Target: targetURL,
 	}, nil
 }
diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go
index e8febec..53890dc 100644
--- a/internal/lsp/lsp_test.go
+++ b/internal/lsp/lsp_test.go
@@ -49,7 +49,7 @@
 func testLSP(t *testing.T, datum *tests.Data) {
 	ctx := tests.Context(t)
 
-	cache := cache.New(nil)
+	cache := cache.New(nil, nil, nil)
 	session := cache.NewSession(ctx)
 	options := source.DefaultOptions().Clone()
 	tests.DefaultOptions(options)
@@ -487,7 +487,7 @@
 	}
 }
 
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
+func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []tests.SuggestedFix, expectedActions int) {
 	uri := spn.URI()
 	view, err := r.server.session.ViewOf(uri)
 	if err != nil {
@@ -516,9 +516,9 @@
 	}
 	codeActionKinds := []protocol.CodeActionKind{}
 	for _, k := range actionKinds {
-		codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k))
+		codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k.ActionKind))
 	}
-	actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
+	allActions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
 		TextDocument: protocol.TextDocumentIdentifier{
 			URI: protocol.URIFromSpanURI(uri),
 		},
@@ -531,6 +531,16 @@
 	if err != nil {
 		t.Fatalf("CodeAction %s failed: %v", spn, err)
 	}
+	var actions []protocol.CodeAction
+	for _, action := range allActions {
+		for _, fix := range actionKinds {
+			if strings.Contains(action.Title, fix.Title) {
+				actions = append(actions, action)
+				break
+			}
+		}
+
+	}
 	if len(actions) != expectedActions {
 		// Hack: We assume that we only get one code action per range.
 		var cmds []string
diff --git a/internal/lsp/lsppos/lsppos.go b/internal/lsp/lsppos/lsppos.go
index 35f6f13..6afad47 100644
--- a/internal/lsp/lsppos/lsppos.go
+++ b/internal/lsp/lsppos/lsppos.go
@@ -17,6 +17,7 @@
 package lsppos
 
 import (
+	"bytes"
 	"errors"
 	"sort"
 	"unicode/utf8"
@@ -36,9 +37,10 @@
 
 // NewMapper creates a new Mapper for the given content.
 func NewMapper(content []byte) *Mapper {
+	nlines := bytes.Count(content, []byte("\n"))
 	m := &Mapper{
 		content: content,
-		lines:   []int{0},
+		lines:   make([]int, 1, nlines+1), // initially []int{0}
 	}
 	for offset, b := range content {
 		if b == '\n' {
diff --git a/internal/lsp/lsprpc/lsprpc.go b/internal/lsp/lsprpc/lsprpc.go
index a85e791..7e37229 100644
--- a/internal/lsp/lsprpc/lsprpc.go
+++ b/internal/lsp/lsprpc/lsprpc.go
@@ -56,7 +56,9 @@
 		server := s.serverForTest
 		if server == nil {
 			server = lsp.NewServer(session, client)
-			debug.GetInstance(ctx).AddService(server, session)
+			if instance := debug.GetInstance(ctx); instance != nil {
+				instance.AddService(server, session)
+			}
 		}
 		return server
 	}
@@ -71,7 +73,9 @@
 	server := s.serverForTest
 	if server == nil {
 		server = lsp.NewServer(session, client)
-		debug.GetInstance(ctx).AddService(server, session)
+		if instance := debug.GetInstance(ctx); instance != nil {
+			instance.AddService(server, session)
+		}
 	}
 	// Clients may or may not send a shutdown message. Make sure the server is
 	// shut down.
diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/internal/lsp/lsprpc/lsprpc_test.go
index cde641c..b43629b 100644
--- a/internal/lsp/lsprpc/lsprpc_test.go
+++ b/internal/lsp/lsprpc/lsprpc_test.go
@@ -58,7 +58,7 @@
 	client := FakeClient{Logs: make(chan string, 10)}
 
 	ctx = debug.WithInstance(ctx, "", "")
-	ss := NewStreamServer(cache.New(nil), false)
+	ss := NewStreamServer(cache.New(nil, nil, nil), false)
 	ss.serverForTest = server
 	ts := servertest.NewPipeServer(ss, nil)
 	defer checkClose(t, ts.Close)
@@ -121,7 +121,7 @@
 func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (direct, forwarded servertest.Connector, cleanup func()) {
 	t.Helper()
 	serveCtx := debug.WithInstance(ctx, "", "")
-	ss := NewStreamServer(cache.New(nil), false)
+	ss := NewStreamServer(cache.New(nil, nil, nil), false)
 	ss.serverForTest = s
 	tsDirect := servertest.NewTCPServer(serveCtx, ss, nil)
 
@@ -216,7 +216,7 @@
 	clientCtx := debug.WithInstance(baseCtx, "", "")
 	serverCtx := debug.WithInstance(baseCtx, "", "")
 
-	cache := cache.New(nil)
+	cache := cache.New(nil, nil, nil)
 	ss := NewStreamServer(cache, false)
 	tsBackend := servertest.NewTCPServer(serverCtx, ss, nil)
 
@@ -226,14 +226,12 @@
 	}
 	tsForwarder := servertest.NewPipeServer(forwarder, nil)
 
-	conn1 := tsForwarder.Connect(clientCtx)
-	ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1, fake.ClientHooks{})
+	ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, tsForwarder, fake.ClientHooks{})
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer ed1.Close(clientCtx)
-	conn2 := tsBackend.Connect(baseCtx)
-	ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, conn2, fake.ClientHooks{})
+	ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, tsBackend, fake.ClientHooks{})
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/internal/lsp/mod/code_lens.go b/internal/lsp/mod/code_lens.go
index b26bae7..1de25c2 100644
--- a/internal/lsp/mod/code_lens.go
+++ b/internal/lsp/mod/code_lens.go
@@ -22,6 +22,7 @@
 		command.UpgradeDependency: upgradeLenses,
 		command.Tidy:              tidyLens,
 		command.Vendor:            vendorLens,
+		command.RunVulncheckExp:   vulncheckLenses,
 	}
 }
 
@@ -151,3 +152,29 @@
 	}
 	return source.LineToRange(pm.Mapper, fh.URI(), start, end)
 }
+
+func vulncheckLenses(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle) ([]protocol.CodeLens, error) {
+	pm, err := snapshot.ParseMod(ctx, fh)
+	if err != nil || pm.File == nil {
+		return nil, err
+	}
+	// Place the codelenses near the module statement.
+	// A module may not have the require block,
+	// but vulnerabilities can exist in standard libraries.
+	uri := protocol.URIFromSpanURI(fh.URI())
+	rng, err := moduleStmtRange(fh, pm)
+	if err != nil {
+		return nil, err
+	}
+
+	vulncheck, err := command.NewRunVulncheckExpCommand("Run govulncheck", command.VulncheckArgs{
+		URI:     uri,
+		Pattern: "./...",
+	})
+	if err != nil {
+		return nil, err
+	}
+	return []protocol.CodeLens{
+		{Range: rng, Command: vulncheck},
+	}, nil
+}
diff --git a/internal/lsp/mod/mod_test.go b/internal/lsp/mod/mod_test.go
index b2d257c..56af986 100644
--- a/internal/lsp/mod/mod_test.go
+++ b/internal/lsp/mod/mod_test.go
@@ -26,7 +26,7 @@
 	testenv.NeedsGo1Point(t, 14)
 
 	ctx := tests.Context(t)
-	cache := cache.New(nil)
+	cache := cache.New(nil, nil, nil)
 	session := cache.NewSession(ctx)
 	options := source.DefaultOptions().Clone()
 	tests.DefaultOptions(options)
@@ -46,10 +46,10 @@
 		t.Fatal(err)
 	}
 	_, _, release, err := session.NewView(ctx, "diagnostics_test", span.URIFromPath(folder), options)
-	release()
 	if err != nil {
 		t.Fatal(err)
 	}
+	release()
 	after, err := ioutil.ReadFile(filepath.Join(folder, "go.mod"))
 	if err != nil {
 		t.Fatal(err)
diff --git a/internal/lsp/regtest/env.go b/internal/lsp/regtest/env.go
index a37cbf6..502636a 100644
--- a/internal/lsp/regtest/env.go
+++ b/internal/lsp/regtest/env.go
@@ -14,25 +14,36 @@
 	"golang.org/x/tools/internal/jsonrpc2/servertest"
 	"golang.org/x/tools/internal/lsp/fake"
 	"golang.org/x/tools/internal/lsp/protocol"
-	"golang.org/x/tools/internal/xcontext"
 )
 
-// Env holds an initialized fake Editor, Workspace, and Server, which may be
-// used for writing tests. It also provides adapter methods that call t.Fatal
-// on any error, so that tests for the happy path may be written without
-// checking errors.
+// Env holds the building blocks of an editor testing environment, providing
+// wrapper methods that hide the boilerplate of plumbing contexts and checking
+// errors.
 type Env struct {
-	T   testing.TB
+	T   testing.TB // TODO(rfindley): rename to TB
 	Ctx context.Context
 
 	// Most tests should not need to access the scratch area, editor, server, or
 	// connection, but they are available if needed.
 	Sandbox *fake.Sandbox
-	Editor  *fake.Editor
 	Server  servertest.Connector
 
-	// mu guards the fields below, for the purpose of checking conditions on
-	// every change to diagnostics.
+	// Editor is owned by the Env, and shut down
+	Editor *fake.Editor
+
+	Awaiter *Awaiter
+}
+
+// An Awaiter keeps track of relevant LSP state, so that it may be asserted
+// upon with Expectations.
+//
+// Wire it into a fake.Editor using Awaiter.Hooks().
+//
+// TODO(rfindley): consider simply merging Awaiter with the fake.Editor. It
+// probably is not worth its own abstraction.
+type Awaiter struct {
+	workdir *fake.Workdir
+
 	mu sync.Mutex
 	// For simplicity, each waiter gets a unique ID.
 	nextWaiterID int
@@ -40,6 +51,32 @@
 	waiters      map[int]*condition
 }
 
+func NewAwaiter(workdir *fake.Workdir) *Awaiter {
+	return &Awaiter{
+		workdir: workdir,
+		state: State{
+			diagnostics:     make(map[string]*protocol.PublishDiagnosticsParams),
+			outstandingWork: make(map[protocol.ProgressToken]*workProgress),
+			startedWork:     make(map[string]uint64),
+			completedWork:   make(map[string]uint64),
+		},
+		waiters: make(map[int]*condition),
+	}
+}
+
+func (a *Awaiter) Hooks() fake.ClientHooks {
+	return fake.ClientHooks{
+		OnDiagnostics:            a.onDiagnostics,
+		OnLogMessage:             a.onLogMessage,
+		OnWorkDoneProgressCreate: a.onWorkDoneProgressCreate,
+		OnProgress:               a.onProgress,
+		OnShowMessage:            a.onShowMessage,
+		OnShowMessageRequest:     a.onShowMessageRequest,
+		OnRegistration:           a.onRegistration,
+		OnUnregistration:         a.onUnregistration,
+	}
+}
+
 // State encapsulates the server state TODO: explain more
 type State struct {
 	// diagnostics are a map of relative path->diagnostics params
@@ -108,99 +145,55 @@
 	verdict      chan Verdict
 }
 
-// NewEnv creates a new test environment using the given scratch environment
-// and gopls server.
-//
-// The resulting func must be called to close the jsonrpc2 connection.
-func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) (_ *Env, cleanup func()) {
-	tb.Helper()
+func (a *Awaiter) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	bgCtx, cleanupConn := context.WithCancel(xcontext.Detach(ctx))
-	conn := ts.Connect(bgCtx)
-
-	env := &Env{
-		T:       tb,
-		Ctx:     ctx,
-		Sandbox: sandbox,
-		Server:  ts,
-		state: State{
-			diagnostics:     make(map[string]*protocol.PublishDiagnosticsParams),
-			outstandingWork: make(map[protocol.ProgressToken]*workProgress),
-			startedWork:     make(map[string]uint64),
-			completedWork:   make(map[string]uint64),
-		},
-		waiters: make(map[int]*condition),
-	}
-	var hooks fake.ClientHooks
-	if withHooks {
-		hooks = fake.ClientHooks{
-			OnDiagnostics:            env.onDiagnostics,
-			OnLogMessage:             env.onLogMessage,
-			OnWorkDoneProgressCreate: env.onWorkDoneProgressCreate,
-			OnProgress:               env.onProgress,
-			OnShowMessage:            env.onShowMessage,
-			OnShowMessageRequest:     env.onShowMessageRequest,
-			OnRegistration:           env.onRegistration,
-			OnUnregistration:         env.onUnregistration,
-		}
-	}
-	editor, err := fake.NewEditor(sandbox, editorConfig).Connect(bgCtx, conn, hooks)
-	if err != nil {
-		tb.Fatal(err)
-	}
-	env.Editor = editor
-	return env, cleanupConn
-}
-
-func (e *Env) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	pth := e.Sandbox.Workdir.URIToPath(d.URI)
-	e.state.diagnostics[pth] = d
-	e.checkConditionsLocked()
+	pth := a.workdir.URIToPath(d.URI)
+	a.state.diagnostics[pth] = d
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
+func (a *Awaiter) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	e.state.showMessage = append(e.state.showMessage, m)
-	e.checkConditionsLocked()
+	a.state.showMessage = append(a.state.showMessage, m)
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
+func (a *Awaiter) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	e.state.showMessageRequest = append(e.state.showMessageRequest, m)
-	e.checkConditionsLocked()
+	a.state.showMessageRequest = append(a.state.showMessageRequest, m)
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
+func (a *Awaiter) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	e.state.logs = append(e.state.logs, m)
-	e.checkConditionsLocked()
+	a.state.logs = append(a.state.logs, m)
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
+func (a *Awaiter) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	e.state.outstandingWork[m.Token] = &workProgress{}
+	a.state.outstandingWork[m.Token] = &workProgress{}
 	return nil
 }
 
-func (e *Env) onProgress(_ context.Context, m *protocol.ProgressParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-	work, ok := e.state.outstandingWork[m.Token]
+func (a *Awaiter) onProgress(_ context.Context, m *protocol.ProgressParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	work, ok := a.state.outstandingWork[m.Token]
 	if !ok {
 		panic(fmt.Sprintf("got progress report for unknown report %v: %v", m.Token, m))
 	}
@@ -208,7 +201,7 @@
 	switch kind := v["kind"]; kind {
 	case "begin":
 		work.title = v["title"].(string)
-		e.state.startedWork[work.title] = e.state.startedWork[work.title] + 1
+		a.state.startedWork[work.title] = a.state.startedWork[work.title] + 1
 		if msg, ok := v["message"]; ok {
 			work.msg = msg.(string)
 		}
@@ -220,36 +213,36 @@
 			work.msg = msg.(string)
 		}
 	case "end":
-		title := e.state.outstandingWork[m.Token].title
-		e.state.completedWork[title] = e.state.completedWork[title] + 1
-		delete(e.state.outstandingWork, m.Token)
+		title := a.state.outstandingWork[m.Token].title
+		a.state.completedWork[title] = a.state.completedWork[title] + 1
+		delete(a.state.outstandingWork, m.Token)
 	}
-	e.checkConditionsLocked()
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) onRegistration(_ context.Context, m *protocol.RegistrationParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
+func (a *Awaiter) onRegistration(_ context.Context, m *protocol.RegistrationParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	e.state.registrations = append(e.state.registrations, m)
-	e.checkConditionsLocked()
+	a.state.registrations = append(a.state.registrations, m)
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error {
-	e.mu.Lock()
-	defer e.mu.Unlock()
+func (a *Awaiter) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
 
-	e.state.unregistrations = append(e.state.unregistrations, m)
-	e.checkConditionsLocked()
+	a.state.unregistrations = append(a.state.unregistrations, m)
+	a.checkConditionsLocked()
 	return nil
 }
 
-func (e *Env) checkConditionsLocked() {
-	for id, condition := range e.waiters {
-		if v, _ := checkExpectations(e.state, condition.expectations); v != Unmet {
-			delete(e.waiters, id)
+func (a *Awaiter) checkConditionsLocked() {
+	for id, condition := range a.waiters {
+		if v, _ := checkExpectations(a.state, condition.expectations); v != Unmet {
+			delete(a.waiters, id)
 			condition.verdict <- v
 		}
 	}
@@ -272,53 +265,62 @@
 // DiagnosticsFor returns the current diagnostics for the file. It is useful
 // after waiting on AnyDiagnosticAtCurrentVersion, when the desired diagnostic
 // is not simply described by DiagnosticAt.
-func (e *Env) DiagnosticsFor(name string) *protocol.PublishDiagnosticsParams {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-	return e.state.diagnostics[name]
+//
+// TODO(rfindley): this method is inherently racy. Replace usages of this
+// method with the atomic OnceMet(..., ReadDiagnostics) pattern.
+func (a *Awaiter) DiagnosticsFor(name string) *protocol.PublishDiagnosticsParams {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.state.diagnostics[name]
+}
+
+func (e *Env) Await(expectations ...Expectation) {
+	if err := e.Awaiter.Await(e.Ctx, expectations...); err != nil {
+		e.T.Fatal(err)
+	}
 }
 
 // Await waits for all expectations to simultaneously be met. It should only be
 // called from the main test goroutine.
-func (e *Env) Await(expectations ...Expectation) {
-	e.T.Helper()
-	e.mu.Lock()
+func (a *Awaiter) Await(ctx context.Context, expectations ...Expectation) error {
+	a.mu.Lock()
 	// Before adding the waiter, we check if the condition is currently met or
 	// failed to avoid a race where the condition was realized before Await was
 	// called.
-	switch verdict, summary := checkExpectations(e.state, expectations); verdict {
+	switch verdict, summary := checkExpectations(a.state, expectations); verdict {
 	case Met:
-		e.mu.Unlock()
-		return
+		a.mu.Unlock()
+		return nil
 	case Unmeetable:
-		failure := fmt.Sprintf("unmeetable expectations:\n%s\nstate:\n%v", summary, e.state)
-		e.mu.Unlock()
-		e.T.Fatal(failure)
+		err := fmt.Errorf("unmeetable expectations:\n%s\nstate:\n%v", summary, a.state)
+		a.mu.Unlock()
+		return err
 	}
 	cond := &condition{
 		expectations: expectations,
 		verdict:      make(chan Verdict),
 	}
-	e.waiters[e.nextWaiterID] = cond
-	e.nextWaiterID++
-	e.mu.Unlock()
+	a.waiters[a.nextWaiterID] = cond
+	a.nextWaiterID++
+	a.mu.Unlock()
 
 	var err error
 	select {
-	case <-e.Ctx.Done():
-		err = e.Ctx.Err()
+	case <-ctx.Done():
+		err = ctx.Err()
 	case v := <-cond.verdict:
 		if v != Met {
 			err = fmt.Errorf("condition has final verdict %v", v)
 		}
 	}
-	e.mu.Lock()
-	defer e.mu.Unlock()
-	_, summary := checkExpectations(e.state, expectations)
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	_, summary := checkExpectations(a.state, expectations)
 
 	// Debugging an unmet expectation can be tricky, so we put some effort into
 	// nicely formatting the failure.
 	if err != nil {
-		e.T.Fatalf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, e.state)
+		return fmt.Errorf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, a.state)
 	}
+	return nil
 }
diff --git a/internal/lsp/regtest/env_test.go b/internal/lsp/regtest/env_test.go
index fe5864c..f54f7f2 100644
--- a/internal/lsp/regtest/env_test.go
+++ b/internal/lsp/regtest/env_test.go
@@ -13,7 +13,7 @@
 )
 
 func TestProgressUpdating(t *testing.T) {
-	e := &Env{
+	a := &Awaiter{
 		state: State{
 			outstandingWork: make(map[protocol.ProgressToken]*workProgress),
 			startedWork:     make(map[string]uint64),
@@ -21,12 +21,12 @@
 		},
 	}
 	ctx := context.Background()
-	if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+	if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
 		Token: "foo",
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+	if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
 		Token: "bar",
 	}); err != nil {
 		t.Fatal(err)
@@ -53,14 +53,14 @@
 		if err := json.Unmarshal(data, &unmarshaled); err != nil {
 			t.Fatal(err)
 		}
-		if err := e.onProgress(ctx, &unmarshaled); err != nil {
+		if err := a.onProgress(ctx, &unmarshaled); err != nil {
 			t.Fatal(err)
 		}
 	}
-	if _, ok := e.state.outstandingWork["foo"]; ok {
+	if _, ok := a.state.outstandingWork["foo"]; ok {
 		t.Error("got work entry for \"foo\", want none")
 	}
-	got := *e.state.outstandingWork["bar"]
+	got := *a.state.outstandingWork["bar"]
 	want := workProgress{title: "bar work", percent: 42}
 	if got != want {
 		t.Errorf("work progress for \"bar\": %v, want %v", got, want)
diff --git a/internal/lsp/regtest/expectation.go b/internal/lsp/regtest/expectation.go
index 737f83d..a0a7d52 100644
--- a/internal/lsp/regtest/expectation.go
+++ b/internal/lsp/regtest/expectation.go
@@ -308,7 +308,7 @@
 	}
 	return SimpleExpectation{
 		check:       check,
-		description: fmt.Sprintf("outstanding work: %s", title),
+		description: fmt.Sprintf("outstanding work: %q containing %q", title, msg),
 	}
 }
 
@@ -617,24 +617,6 @@
 	}
 }
 
-// AnyDiagnosticAtCurrentVersion asserts that there is a diagnostic report for
-// the current edited version of the buffer corresponding to the given
-// workdir-relative pathname.
-func (e *Env) AnyDiagnosticAtCurrentVersion(name string) Expectation {
-	version := e.Editor.BufferVersion(name)
-	check := func(s State) Verdict {
-		diags, ok := s.diagnostics[name]
-		if ok && diags.Version == int32(version) {
-			return Met
-		}
-		return Unmet
-	}
-	return SimpleExpectation{
-		check:       check,
-		description: fmt.Sprintf("any diagnostics at version %d", version),
-	}
-}
-
 // DiagnosticAtRegexp expects that there is a diagnostic entry at the start
 // position matching the regexp search string re in the buffer specified by
 // name. Note that this currently ignores the end position.
diff --git a/internal/lsp/regtest/regtest.go b/internal/lsp/regtest/regtest.go
index 9ebc673..d499bde 100644
--- a/internal/lsp/regtest/regtest.go
+++ b/internal/lsp/regtest/regtest.go
@@ -8,6 +8,7 @@
 	"context"
 	"flag"
 	"fmt"
+	"go/token"
 	"io/ioutil"
 	"os"
 	"runtime"
@@ -16,6 +17,7 @@
 
 	"golang.org/x/tools/internal/lsp/cmd"
 	"golang.org/x/tools/internal/lsp/source"
+	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/testenv"
 	"golang.org/x/tools/internal/tool"
 )
@@ -77,38 +79,34 @@
 	}
 }
 
-// The regtests run significantly slower on these operating systems, due to (we
-// believe) kernel locking behavior. Only run in singleton mode on these
-// operating system when using -short.
-var slowGOOS = map[string]bool{
-	"darwin":  true,
-	"openbsd": true,
-	"plan9":   true,
-}
-
+// DefaultModes returns the default modes to run for each regression test (they
+// may be reconfigured by the tests themselves).
 func DefaultModes() Mode {
-	normal := Singleton | Experimental
-	if slowGOOS[runtime.GOOS] && testing.Short() {
-		normal = Singleton
+	modes := Default
+	if !testing.Short() {
+		modes |= Experimental | Forwarded
 	}
 	if *runSubprocessTests {
-		return normal | SeparateProcess
+		modes |= SeparateProcess
 	}
-	return normal
+	return modes
 }
 
 // Main sets up and tears down the shared regtest state.
 func Main(m *testing.M, hook func(*source.Options)) {
+	// If this magic environment variable is set, run gopls instead of the test
+	// suite. See the documentation for runTestAsGoplsEnvvar for more details.
+	if os.Getenv(runTestAsGoplsEnvvar) == "true" {
+		tool.Main(context.Background(), cmd.New("gopls", "", nil, hook), os.Args[1:])
+		os.Exit(0)
+	}
+
 	testenv.ExitIfSmallMachine()
 
 	// Disable GOPACKAGESDRIVER, as it can cause spurious test failures.
 	os.Setenv("GOPACKAGESDRIVER", "off")
 
 	flag.Parse()
-	if os.Getenv("_GOPLS_TEST_BINARY_RUN_AS_GOPLS") == "true" {
-		tool.Main(context.Background(), cmd.New("gopls", "", nil, nil), os.Args[1:])
-		os.Exit(0)
-	}
 
 	runner = &Runner{
 		DefaultModes:             DefaultModes(),
@@ -116,34 +114,38 @@
 		PrintGoroutinesOnFailure: *printGoroutinesOnFailure,
 		SkipCleanup:              *skipCleanup,
 		OptionsHook:              hook,
+		fset:                     token.NewFileSet(),
+		store:                    memoize.NewStore(memoize.NeverEvict),
 	}
-	if *runSubprocessTests {
-		goplsPath := *goplsBinaryPath
-		if goplsPath == "" {
-			var err error
-			goplsPath, err = os.Executable()
-			if err != nil {
-				panic(fmt.Sprintf("finding test binary path: %v", err))
-			}
+
+	runner.goplsPath = *goplsBinaryPath
+	if runner.goplsPath == "" {
+		var err error
+		runner.goplsPath, err = os.Executable()
+		if err != nil {
+			panic(fmt.Sprintf("finding test binary path: %v", err))
 		}
-		runner.GoplsPath = goplsPath
 	}
+
 	dir, err := ioutil.TempDir("", "gopls-regtest-")
 	if err != nil {
 		panic(fmt.Errorf("creating regtest temp directory: %v", err))
 	}
-	runner.TempDir = dir
+	runner.tempDir = dir
 
-	code := m.Run()
-	if err := runner.Close(); err != nil {
-		fmt.Fprintf(os.Stderr, "closing test runner: %v\n", err)
-		// Regtest cleanup is broken in go1.12 and earlier, and sometimes flakes on
-		// Windows due to file locking, but this is OK for our CI.
-		//
-		// Fail on go1.13+, except for windows and android which have shutdown problems.
-		if testenv.Go1Point() >= 13 && runtime.GOOS != "windows" && runtime.GOOS != "android" {
-			os.Exit(1)
+	var code int
+	defer func() {
+		if err := runner.Close(); err != nil {
+			fmt.Fprintf(os.Stderr, "closing test runner: %v\n", err)
+			// Regtest cleanup is broken in go1.12 and earlier, and sometimes flakes on
+			// Windows due to file locking, but this is OK for our CI.
+			//
+			// Fail on go1.13+, except for windows and android which have shutdown problems.
+			if testenv.Go1Point() >= 13 && runtime.GOOS != "windows" && runtime.GOOS != "android" {
+				os.Exit(1)
+			}
 		}
-	}
-	os.Exit(code)
+		os.Exit(code)
+	}()
+	code = m.Run()
 }
diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go
index bebec53..93bb139 100644
--- a/internal/lsp/regtest/runner.go
+++ b/internal/lsp/regtest/runner.go
@@ -8,11 +8,13 @@
 	"bytes"
 	"context"
 	"fmt"
+	"go/token"
 	"io"
 	"io/ioutil"
 	"net"
 	"os"
 	"path/filepath"
+	"runtime"
 	"runtime/pprof"
 	"strings"
 	"sync"
@@ -29,64 +31,111 @@
 	"golang.org/x/tools/internal/lsp/lsprpc"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/lsp/source"
+	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/testenv"
 	"golang.org/x/tools/internal/xcontext"
 )
 
 // Mode is a bitmask that defines for which execution modes a test should run.
+//
+// Each mode controls several aspects of gopls' configuration:
+//   - Which server options to use for gopls sessions
+//   - Whether to use a shared cache
+//   - Whether to use a shared server
+//   - Whether to run the server in-process or in a separate process
+//
+// The behavior of each mode with respect to these aspects is summarized below.
+// TODO(rfindley, cleanup): rather than using arbitrary names for these modes,
+// we can compose them explicitly out of the features described here, allowing
+// individual tests more freedom in constructing problematic execution modes.
+// For example, a test could assert on a certain behavior when running with
+// experimental options on a separate process. Moreover, we could unify 'Modes'
+// with 'Options', and use RunMultiple rather than a hard-coded loop through
+// modes.
+//
+// Mode            | Options      | Shared Cache? | Shared Server? | In-process?
+// ---------------------------------------------------------------------------
+// Default         | Default      | Y             | N              | Y
+// Forwarded       | Default      | Y             | Y              | Y
+// SeparateProcess | Default      | Y             | Y              | N
+// Experimental    | Experimental | N             | N              | Y
 type Mode int
 
 const (
-	// Singleton mode uses a separate in-process gopls instance for each test,
-	// and communicates over pipes to mimic the gopls sidecar execution mode,
-	// which communicates over stdin/stderr.
-	Singleton Mode = 1 << iota
-	// Forwarded forwards connections to a shared in-process gopls instance.
+	// Default mode runs gopls with the default options, communicating over pipes
+	// to emulate the lsp sidecar execution mode, which communicates over
+	// stdin/stdout.
+	//
+	// It uses separate servers for each test, but a shared cache, to avoid
+	// duplicating work when processing GOROOT.
+	Default Mode = 1 << iota
+
+	// Forwarded uses the default options, but forwards connections to a shared
+	// in-process gopls server.
 	Forwarded
-	// SeparateProcess forwards connection to a shared separate gopls process.
+
+// SeparateProcess uses the default options, but forwards connections to an
+	// external gopls daemon.
+	//
+	// Only supported on GOOS=linux.
 	SeparateProcess
+
 	// Experimental enables all of the experimental configurations that are
-	// being developed.
+	// being developed, and runs gopls in sidecar mode.
+	//
+	// It uses a separate cache for each test, to exercise races that may only
+	// appear with cache misses.
 	Experimental
 )
 
+func (m Mode) String() string {
+	switch m {
+	case Default:
+		return "default"
+	case Forwarded:
+		return "forwarded"
+	case SeparateProcess:
+		return "separate process"
+	case Experimental:
+		return "experimental"
+	default:
+		return "unknown mode"
+	}
+}
+
 // A Runner runs tests in gopls execution environments, as specified by its
 // modes. For modes that share state (for example, a shared cache or common
 // remote), any tests that execute on the same Runner will share the same
 // state.
 type Runner struct {
-	DefaultModes             Mode
-	Timeout                  time.Duration
-	GoplsPath                string
-	PrintGoroutinesOnFailure bool
-	TempDir                  string
-	SkipCleanup              bool
-	OptionsHook              func(*source.Options)
+	// Configuration
+	DefaultModes             Mode                  // modes to run for each test
+	Timeout                  time.Duration         // per-test timeout, if set
+	PrintGoroutinesOnFailure bool                  // whether to dump goroutines on test failure
+	SkipCleanup              bool                  // if set, don't delete test data directories when the test exits
+	OptionsHook              func(*source.Options) // if set, use these options when creating gopls sessions
 
-	mu        sync.Mutex
-	ts        *servertest.TCPServer
-	socketDir string
-	// closers is a queue of clean-up functions to run at the end of the entire
-	// test suite.
-	closers []io.Closer
+	// Immutable state shared across test invocations
+	goplsPath string         // path to the gopls executable (for SeparateProcess mode)
+	tempDir   string         // shared parent temp directory
+	fset      *token.FileSet // shared FileSet
+	store     *memoize.Store // shared store
+
+	// Lazily allocated resources
+	tsOnce sync.Once
+	ts     *servertest.TCPServer // shared in-process test server ("forwarded" mode)
+
+	startRemoteOnce sync.Once
+	remoteSocket    string // unix domain socket for shared daemon ("separate process" mode)
+	remoteErr       error
+	cancelRemote    func()
 }
 
 type runConfig struct {
-	editor           fake.EditorConfig
-	sandbox          fake.SandboxConfig
-	modes            Mode
-	noDefaultTimeout bool
-	debugAddr        string
-	skipLogs         bool
-	skipHooks        bool
-	optionsHook      func(*source.Options)
-}
-
-func (r *Runner) defaultConfig() *runConfig {
-	return &runConfig{
-		modes:       r.DefaultModes,
-		optionsHook: r.OptionsHook,
-	}
+	editor    fake.EditorConfig
+	sandbox   fake.SandboxConfig
+	modes     Mode
+	skipHooks bool
 }
 
 // A RunOption augments the behavior of the test runner.
@@ -100,15 +149,6 @@
 	f(opts)
 }
 
-// NoDefaultTimeout removes the timeout set by the -regtest_timeout flag, for
-// individual tests that are expected to run longer than is reasonable for
-// ordinary regression tests.
-func NoDefaultTimeout() RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.noDefaultTimeout = true
-	})
-}
-
 // ProxyFiles configures a file proxy using the given txtar-encoded string.
 func ProxyFiles(txt string) RunOption {
 	return optionSetter(func(opts *runConfig) {
@@ -117,36 +157,40 @@
 }
 
 // Modes configures the execution modes that the test should run in.
+//
+// By default, modes are configured by the test runner. If this option is set,
+// it overrides the set of default modes and the test runs in exactly these
+// modes.
 func Modes(modes Mode) RunOption {
 	return optionSetter(func(opts *runConfig) {
+		if opts.modes != 0 {
+			panic("modes set more than once")
+		}
 		opts.modes = modes
 	})
 }
 
-// Options configures the various server and user options.
-func Options(hook func(*source.Options)) RunOption {
+// WindowsLineEndings configures the editor to use Windows line endings.
+func WindowsLineEndings() RunOption {
 	return optionSetter(func(opts *runConfig) {
-		old := opts.optionsHook
-		opts.optionsHook = func(o *source.Options) {
-			if old != nil {
-				old(o)
-			}
-			hook(o)
-		}
+		opts.editor.WindowsLineEndings = true
 	})
 }
 
-func SendPID() RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.editor.SendPID = true
-	})
-}
+// Settings is a RunOption that sets user-provided configuration for the LSP
+// server.
+//
+// As a special case, the env setting must not be provided via Settings: use
+// EnvVars instead.
+type Settings map[string]interface{}
 
-// EditorConfig is a RunOption option that configured the regtest editor.
-type EditorConfig fake.EditorConfig
-
-func (c EditorConfig) set(opts *runConfig) {
-	opts.editor = fake.EditorConfig(c)
+func (s Settings) set(opts *runConfig) {
+	if opts.editor.Settings == nil {
+		opts.editor.Settings = make(map[string]interface{})
+	}
+	for k, v := range s {
+		opts.editor.Settings[k] = v
+	}
 }
 
 // WorkspaceFolders configures the workdir-relative workspace folders to send
@@ -163,6 +207,20 @@
 	})
 }
 
+// EnvVars sets environment variables for the LSP session. When applying these
+// variables to the session, the special string $SANDBOX_WORKDIR is replaced by
+// the absolute path to the sandbox working directory.
+type EnvVars map[string]string
+
+func (e EnvVars) set(opts *runConfig) {
+	if opts.editor.Env == nil {
+		opts.editor.Env = make(map[string]string)
+	}
+	for k, v := range e {
+		opts.editor.Env[k] = v
+	}
+}
+
 // InGOPATH configures the workspace working directory to be GOPATH, rather
 // than a separate working directory for use with modules.
 func InGOPATH() RunOption {
@@ -171,96 +229,49 @@
 	})
 }
 
-// DebugAddress configures a debug server bound to addr. This option is
-// currently only supported when executing in Singleton mode. It is intended to
-// be used for long-running stress tests.
-func DebugAddress(addr string) RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.debugAddr = addr
-	})
-}
-
-// SkipLogs skips the buffering of logs during test execution. It is intended
-// for long-running stress tests.
-func SkipLogs() RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.skipLogs = true
-	})
-}
-
-// InExistingDir runs the test in a pre-existing directory. If set, no initial
-// files may be passed to the runner. It is intended for long-running stress
-// tests.
-func InExistingDir(dir string) RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.sandbox.Workdir = dir
-	})
-}
-
-// SkipHooks allows for disabling the test runner's client hooks that are used
-// for instrumenting expectations (tracking diagnostics, logs, work done,
-// etc.). It is intended for performance-sensitive stress tests or benchmarks.
-func SkipHooks(skip bool) RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.skipHooks = skip
-	})
-}
-
-// GOPROXY configures the test environment to have an explicit proxy value.
-// This is intended for stress tests -- to ensure their isolation, regtests
-// should instead use WithProxyFiles.
-func GOPROXY(goproxy string) RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.sandbox.GOPROXY = goproxy
-	})
-}
-
-// LimitWorkspaceScope sets the LimitWorkspaceScope configuration.
-func LimitWorkspaceScope() RunOption {
-	return optionSetter(func(opts *runConfig) {
-		opts.editor.LimitWorkspaceScope = true
-	})
-}
-
 type TestFunc func(t *testing.T, env *Env)
 
 // Run executes the test function in the default configured gopls execution
 // modes. For each a test run, a new workspace is created containing the
 // un-txtared files specified by filedata.
 func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) {
+	// TODO(rfindley): this function has gotten overly complicated, and warrants
+	// refactoring.
 	t.Helper()
 	checkBuilder(t)
 
 	tests := []struct {
 		name      string
 		mode      Mode
-		getServer func(*testing.T, func(*source.Options)) jsonrpc2.StreamServer
+		getServer func(func(*source.Options)) jsonrpc2.StreamServer
 	}{
-		{"singleton", Singleton, singletonServer},
+		{"default", Default, r.defaultServer},
 		{"forwarded", Forwarded, r.forwardedServer},
 		{"separate_process", SeparateProcess, r.separateProcessServer},
-		{"experimental", Experimental, experimentalServer},
+		{"experimental", Experimental, r.experimentalServer},
 	}
 
 	for _, tc := range tests {
 		tc := tc
-		config := r.defaultConfig()
+		var config runConfig
 		for _, opt := range opts {
-			opt.set(config)
+			opt.set(&config)
 		}
-		if config.modes&tc.mode == 0 {
+		modes := r.DefaultModes
+		if config.modes != 0 {
+			modes = config.modes
+		}
+		if modes&tc.mode == 0 {
 			continue
 		}
-		if config.debugAddr != "" && tc.mode != Singleton {
-			// Debugging is useful for running stress tests, but since the daemon has
-			// likely already been started, it would be too late to debug.
-			t.Fatalf("debugging regtest servers only works in Singleton mode, "+
-				"got debug addr %q and mode %v", config.debugAddr, tc.mode)
-		}
 
 		t.Run(tc.name, func(t *testing.T) {
+			// TODO(rfindley): once jsonrpc2 shutdown is fixed, we should not leak
+			// goroutines in this test function.
+			// stacktest.NoLeak(t)
+
 			ctx := context.Background()
-			if r.Timeout != 0 && !config.noDefaultTimeout {
+			if r.Timeout != 0 {
 				var cancel context.CancelFunc
 				ctx, cancel = context.WithTimeout(ctx, r.Timeout)
 				defer cancel()
@@ -271,17 +282,14 @@
 				defer cancel()
 			}
 
+			// TODO(rfindley): do we need an instance at all? Can it be removed?
 			ctx = debug.WithInstance(ctx, "", "off")
-			if config.debugAddr != "" {
-				di := debug.GetInstance(ctx)
-				di.Serve(ctx, config.debugAddr)
-				di.MonitorMemory(ctx)
-			}
 
-			rootDir := filepath.Join(r.TempDir, filepath.FromSlash(t.Name()))
+			rootDir := filepath.Join(r.tempDir, filepath.FromSlash(t.Name()))
 			if err := os.MkdirAll(rootDir, 0755); err != nil {
 				t.Fatal(err)
 			}
+
 			files := fake.UnpackTxt(files)
 			if config.editor.WindowsLineEndings {
 				for name, data := range files {
@@ -294,22 +302,35 @@
 			if err != nil {
 				t.Fatal(err)
 			}
-			// Deferring the closure of ws until the end of the entire test suite
-			// has, in testing, given the LSP server time to properly shutdown and
-			// release any file locks held in workspace, which is a problem on
-			// Windows. This may still be flaky however, and in the future we need a
-			// better solution to ensure that all Go processes started by gopls have
-			// exited before we clean up.
-			r.AddCloser(sandbox)
-			ss := tc.getServer(t, config.optionsHook)
+			defer func() {
+				if !r.SkipCleanup {
+					if err := sandbox.Close(); err != nil {
+						pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+						t.Errorf("closing the sandbox: %v", err)
+					}
+				}
+			}()
+
+			ss := tc.getServer(r.OptionsHook)
+
 			framer := jsonrpc2.NewRawStream
 			ls := &loggingFramer{}
-			if !config.skipLogs {
-				framer = ls.framer(jsonrpc2.NewRawStream)
-			}
+			framer = ls.framer(jsonrpc2.NewRawStream)
 			ts := servertest.NewPipeServer(ss, framer)
-			env, cleanup := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks)
-			defer cleanup()
+
+			awaiter := NewAwaiter(sandbox.Workdir)
+			editor, err := fake.NewEditor(sandbox, config.editor).Connect(ctx, ts, awaiter.Hooks())
+			if err != nil {
+				t.Fatal(err)
+			}
+			env := &Env{
+				T:       t,
+				Ctx:     ctx,
+				Sandbox: sandbox,
+				Editor:  editor,
+				Server:  ts,
+				Awaiter: awaiter,
+			}
 			defer func() {
 				if t.Failed() && r.PrintGoroutinesOnFailure {
 					pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
@@ -319,9 +340,12 @@
 				}
 				// For tests that failed due to a timeout, don't fail to shutdown
 				// because ctx is done.
-				closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second)
-				defer cancel()
-				if err := env.Editor.Close(closeCtx); err != nil {
+				//
+				// There is little point to setting an arbitrary timeout for closing
+				// the editor: in general we want to clean up before proceeding to the
+				// next test, and if there is a deadlock preventing closing it will
+				// eventually be handled by the `go test` timeout.
+				if err := editor.Close(xcontext.Detach(ctx)); err != nil {
 					t.Errorf("closing editor: %v", err)
 				}
 			}()
@@ -407,11 +431,13 @@
 	fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname)
 }
 
-func singletonServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
-	return lsprpc.NewStreamServer(cache.New(optsHook), false)
+// defaultServer handles the Default execution mode.
+func (r *Runner) defaultServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+	return lsprpc.NewStreamServer(cache.New(r.fset, r.store, optsHook), false)
 }
 
-func experimentalServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
+// experimentalServer handles the Experimental execution mode.
+func (r *Runner) experimentalServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
 	options := func(o *source.Options) {
 		optsHook(o)
 		o.EnableAllExperiments()
@@ -419,33 +445,65 @@
 		// source.Options.EnableAllExperiments, but we want to test it.
 		o.ExperimentalWorkspaceModule = true
 	}
-	return lsprpc.NewStreamServer(cache.New(options), false)
+	return lsprpc.NewStreamServer(cache.New(nil, nil, options), false)
 }
 
-func (r *Runner) forwardedServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
-	ts := r.getTestServer(optsHook)
-	return newForwarder("tcp", ts.Addr)
-}
-
-// getTestServer gets the shared test server instance to connect to, or creates
-// one if it doesn't exist.
-func (r *Runner) getTestServer(optsHook func(*source.Options)) *servertest.TCPServer {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.ts == nil {
+// forwardedServer handles the Forwarded execution mode.
+func (r *Runner) forwardedServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+	r.tsOnce.Do(func() {
 		ctx := context.Background()
 		ctx = debug.WithInstance(ctx, "", "off")
-		ss := lsprpc.NewStreamServer(cache.New(optsHook), false)
+		ss := lsprpc.NewStreamServer(cache.New(nil, nil, optsHook), false)
 		r.ts = servertest.NewTCPServer(ctx, ss, nil)
-	}
-	return r.ts
+	})
+	return newForwarder("tcp", r.ts.Addr)
 }
 
-func (r *Runner) separateProcessServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer {
-	// TODO(rfindley): can we use the autostart behavior here, instead of
-	// pre-starting the remote?
-	socket := r.getRemoteSocket(t)
-	return newForwarder("unix", socket)
+// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
+// tests. It's a trick to allow tests to find a binary to use to start a gopls
+// subprocess.
+const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS"
+
+// separateProcessServer handles the SeparateProcess execution mode.
+func (r *Runner) separateProcessServer(optsHook func(*source.Options)) jsonrpc2.StreamServer {
+	if runtime.GOOS != "linux" {
+		panic("separate process execution mode is only supported on linux")
+	}
+
+	r.startRemoteOnce.Do(func() {
+		socketDir, err := ioutil.TempDir(r.tempDir, "gopls-regtest-socket")
+		if err != nil {
+			r.remoteErr = err
+			return
+		}
+		r.remoteSocket = filepath.Join(socketDir, "gopls-test-daemon")
+
+		// The server should be killed when the test runner exits, but to be
+		// conservative also set a listen timeout.
+		args := []string{"serve", "-listen", "unix;" + r.remoteSocket, "-listen.timeout", "1m"}
+
+		ctx, cancel := context.WithCancel(context.Background())
+		cmd := exec.CommandContext(ctx, r.goplsPath, args...)
+		cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true")
+
+		// Start the external gopls process. This is still somewhat racy, as we
+		// don't know when gopls binds to the socket, but the gopls forwarder
+		// client has built-in retry behavior that should mostly mitigate this
+		// problem (and if it doesn't, we probably want to improve the retry
+		// behavior).
+		if err := cmd.Start(); err != nil {
+			cancel()
+			r.remoteSocket = ""
+			r.remoteErr = err
+		} else {
+			r.cancelRemote = cancel
+			// Spin off a goroutine to wait, so that we free up resources when the
+			// server exits.
+			go cmd.Wait()
+		}
+	})
+
+	return newForwarder("unix", r.remoteSocket)
 }
 
 func newForwarder(network, address string) *lsprpc.Forwarder {
@@ -457,73 +515,19 @@
 	return server
 }
 
-// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
-// tests. It's a trick to allow tests to find a binary to use to start a gopls
-// subprocess.
-const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS"
-
-func (r *Runner) getRemoteSocket(t *testing.T) string {
-	t.Helper()
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	const daemonFile = "gopls-test-daemon"
-	if r.socketDir != "" {
-		return filepath.Join(r.socketDir, daemonFile)
-	}
-
-	if r.GoplsPath == "" {
-		t.Fatal("cannot run tests with a separate process unless a path to a gopls binary is configured")
-	}
-	var err error
-	r.socketDir, err = ioutil.TempDir(r.TempDir, "gopls-regtest-socket")
-	if err != nil {
-		t.Fatalf("creating tempdir: %v", err)
-	}
-	socket := filepath.Join(r.socketDir, daemonFile)
-	args := []string{"serve", "-listen", "unix;" + socket, "-listen.timeout", "10s"}
-	cmd := exec.Command(r.GoplsPath, args...)
-	cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true")
-	var stderr bytes.Buffer
-	cmd.Stderr = &stderr
-	go func() {
-		if err := cmd.Run(); err != nil {
-			panic(fmt.Sprintf("error running external gopls: %v\nstderr:\n%s", err, stderr.String()))
-		}
-	}()
-	return socket
-}
-
-// AddCloser schedules a closer to be closed at the end of the test run. This
-// is useful for Windows in particular, as
-func (r *Runner) AddCloser(closer io.Closer) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	r.closers = append(r.closers, closer)
-}
-
 // Close cleans up resource that have been allocated to this workspace.
 func (r *Runner) Close() error {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-
 	var errmsgs []string
 	if r.ts != nil {
 		if err := r.ts.Close(); err != nil {
 			errmsgs = append(errmsgs, err.Error())
 		}
 	}
-	if r.socketDir != "" {
-		if err := os.RemoveAll(r.socketDir); err != nil {
-			errmsgs = append(errmsgs, err.Error())
-		}
+	if r.cancelRemote != nil {
+		r.cancelRemote()
 	}
 	if !r.SkipCleanup {
-		for _, closer := range r.closers {
-			if err := closer.Close(); err != nil {
-				errmsgs = append(errmsgs, err.Error())
-			}
-		}
-		if err := os.RemoveAll(r.TempDir); err != nil {
+		if err := os.RemoveAll(r.tempDir); err != nil {
 			errmsgs = append(errmsgs, err.Error())
 		}
 	}
diff --git a/internal/lsp/regtest/wrappers.go b/internal/lsp/regtest/wrappers.go
index 96e2de9..d8c080c 100644
--- a/internal/lsp/regtest/wrappers.go
+++ b/internal/lsp/regtest/wrappers.go
@@ -7,7 +7,6 @@
 import (
 	"encoding/json"
 	"path"
-	"testing"
 
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/fake"
@@ -427,31 +426,10 @@
 	return actions
 }
 
-func (e *Env) ChangeConfiguration(t *testing.T, config *fake.EditorConfig) {
-	e.Editor.Config = *config
-	if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, &protocol.DidChangeConfigurationParams{
-		// gopls currently ignores the Settings field
-	}); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// ChangeEnv modifies the editor environment and reconfigures the LSP client.
-// TODO: extend this to "ChangeConfiguration", once we refactor the way editor
-// configuration is defined.
-func (e *Env) ChangeEnv(overlay map[string]string) {
+// ChangeConfiguration updates the editor config, calling t.Fatal on any error.
+func (e *Env) ChangeConfiguration(newConfig fake.EditorConfig) {
 	e.T.Helper()
-	// TODO: to be correct, this should probably be synchronized, but right now
-	// configuration is only ever modified synchronously in a regtest, so this
-	// correctness can wait for the previously mentioned refactoring.
-	if e.Editor.Config.Env == nil {
-		e.Editor.Config.Env = make(map[string]string)
-	}
-	for k, v := range overlay {
-		e.Editor.Config.Env[k] = v
-	}
-	var params protocol.DidChangeConfigurationParams
-	if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, &params); err != nil {
+	if err := e.Editor.ChangeConfiguration(e.Ctx, newConfig); err != nil {
 		e.T.Fatal(err)
 	}
 }
diff --git a/internal/lsp/semantic.go b/internal/lsp/semantic.go
index 286d2fd..648d5c4 100644
--- a/internal/lsp/semantic.go
+++ b/internal/lsp/semantic.go
@@ -107,14 +107,16 @@
 		return nil, err
 	}
 	e := &encoded{
-		ctx:      ctx,
-		pgf:      pgf,
-		rng:      rng,
-		ti:       pkg.GetTypesInfo(),
-		pkg:      pkg,
-		fset:     snapshot.FileSet(),
-		tokTypes: s.session.Options().SemanticTypes,
-		tokMods:  s.session.Options().SemanticMods,
+		ctx:       ctx,
+		pgf:       pgf,
+		rng:       rng,
+		ti:        pkg.GetTypesInfo(),
+		pkg:       pkg,
+		fset:      snapshot.FileSet(),
+		tokTypes:  s.session.Options().SemanticTypes,
+		tokMods:   s.session.Options().SemanticMods,
+		noStrings: vv.Options().NoSemanticString,
+		noNumbers: vv.Options().NoSemanticNumber,
 	}
 	if err := e.init(); err != nil {
 		// e.init should never return an error, unless there's some
@@ -186,7 +188,7 @@
 	}
 	// want a line and column from start (in LSP coordinates)
 	// [//line directives should be ignored]
-	rng := source.NewMappedRange(e.fset, e.pgf.Mapper, start, start+token.Pos(leng))
+	rng := source.NewMappedRange(e.pgf.Tok, e.pgf.Mapper, start, start+token.Pos(leng))
 	lspRange, err := rng.Range()
 	if err != nil {
 		// possibly a //line directive. TODO(pjw): fix this somehow
@@ -223,6 +225,9 @@
 	// the generated data
 	items []semItem
 
+	noStrings bool
+	noNumbers bool
+
 	ctx               context.Context
 	tokTypes, tokMods []string
 	pgf               *source.ParsedGoFile
@@ -299,11 +304,6 @@
 		what := tokNumber
 		if x.Kind == token.STRING {
 			what = tokString
-			if _, ok := e.stack[len(e.stack)-2].(*ast.Field); ok {
-				// struct tags (this is probably pointless, as the
-				// TextMate grammar will treat all the other comments the same)
-				what = tokComment
-			}
 		}
 		e.token(x.Pos(), ln, what, nil)
 	case *ast.BinaryExpr:
@@ -832,29 +832,36 @@
 	var j int
 	var last semItem
 	for i := 0; i < len(e.items); i++ {
-		typ, ok := typeMap[e.items[i].typeStr]
+		item := e.items[i]
+		typ, ok := typeMap[item.typeStr]
 		if !ok {
 			continue // client doesn't want typeStr
 		}
+		if item.typeStr == tokString && e.noStrings {
+			continue
+		}
+		if item.typeStr == tokNumber && e.noNumbers {
+			continue
+		}
 		if j == 0 {
 			x[0] = e.items[0].line
 		} else {
-			x[j] = e.items[i].line - last.line
+			x[j] = item.line - last.line
 		}
-		x[j+1] = e.items[i].start
+		x[j+1] = item.start
 		if j > 0 && x[j] == 0 {
-			x[j+1] = e.items[i].start - last.start
+			x[j+1] = item.start - last.start
 		}
-		x[j+2] = e.items[i].len
+		x[j+2] = item.len
 		x[j+3] = uint32(typ)
 		mask := 0
-		for _, s := range e.items[i].mods {
+		for _, s := range item.mods {
 			// modMap[s] is 0 if the client doesn't want this modifier
 			mask |= modMap[s]
 		}
 		x[j+4] = uint32(mask)
 		j += 5
-		last = e.items[i]
+		last = item
 	}
 	return x[:j]
 }
diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go
index 4e2183c..0b3b3d1 100755
--- a/internal/lsp/source/api_json.go
+++ b/internal/lsp/source/api_json.go
@@ -116,7 +116,7 @@
 			{
 				Name:      "linkTarget",
 				Type:      "string",
-				Doc:       "linkTarget controls where documentation links go.\nIt might be one of:\n\n* `\"godoc.org\"`\n* `\"pkg.go.dev\"`\n\nIf company chooses to use its own `godoc.org`, its address can be used as well.\n",
+				Doc:       "linkTarget controls where documentation links go.\nIt might be one of:\n\n* `\"godoc.org\"`\n* `\"pkg.go.dev\"`\n\nIf company chooses to use its own `godoc.org`, its address can be used as well.\n\nModules matching the GOPRIVATE environment variable will not have\ndocumentation links in hover.\n",
 				Default:   "\"pkg.go.dev\"",
 				Hierarchy: "ui.documentation",
 			},
@@ -379,6 +379,11 @@
 							Default: "true",
 						},
 						{
+							Name:    "\"timeformat\"",
+							Doc:     "check for calls of (time.Time).Format or time.Parse with 2006-02-01\n\nThe timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)\nformat. Internationally, \"yyyy-dd-mm\" does not occur in common calendar date\nstandards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.\n",
+							Default: "true",
+						},
+						{
 							Name:    "\"unmarshal\"",
 							Doc:     "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.",
 							Default: "true",
@@ -434,6 +439,11 @@
 							Default: "true",
 						},
 						{
+							Name:    "\"unusedvariable\"",
+							Doc:     "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n",
+							Default: "false",
+						},
+						{
 							Name:    "\"fillstruct\"",
 							Doc:     "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n",
 							Default: "true",
@@ -512,37 +522,37 @@
 				EnumKeys: EnumKeys{Keys: []EnumKey{
 					{
 						Name:    "\"assignVariableTypes\"",
-						Doc:     "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1",
+						Doc:     "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```",
 						Default: "false",
 					},
 					{
 						Name:    "\"compositeLiteralFields\"",
-						Doc:     "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}",
+						Doc:     "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```",
 						Default: "false",
 					},
 					{
 						Name:    "\"compositeLiteralTypes\"",
-						Doc:     "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}",
+						Doc:     "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```",
 						Default: "false",
 					},
 					{
 						Name:    "\"constantValues\"",
-						Doc:     "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone   Kind = iota/* = 0*/\n\t\tKindPrint/*  = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)",
+						Doc:     "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone   Kind = iota/* = 0*/\n\t\tKindPrint/*  = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```",
 						Default: "false",
 					},
 					{
 						Name:    "\"functionTypeParameters\"",
-						Doc:     "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")",
+						Doc:     "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```",
 						Default: "false",
 					},
 					{
 						Name:    "\"parameterNames\"",
-						Doc:     "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)",
+						Doc:     "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```",
 						Default: "false",
 					},
 					{
 						Name:    "\"rangeVariableTypes\"",
-						Doc:     "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}",
+						Doc:     "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```",
 						Default: "false",
 					},
 				}},
@@ -573,6 +583,11 @@
 							Default: "true",
 						},
 						{
+							Name:    "\"run_vulncheck_exp\"",
+							Doc:     "Run vulnerability check (`govulncheck`).",
+							Default: "false",
+						},
+						{
 							Name:    "\"test\"",
 							Doc:     "Runs `go test` for a specific set of test or benchmark functions.",
 							Default: "false",
@@ -606,6 +621,22 @@
 				Hierarchy: "ui",
 			},
 			{
+				Name:      "noSemanticString",
+				Type:      "bool",
+				Doc:       "noSemanticString turns off the sending of the semantic token 'string'\n",
+				Default:   "false",
+				Status:    "experimental",
+				Hierarchy: "ui",
+			},
+			{
+				Name:      "noSemanticNumber",
+				Type:      "bool",
+				Doc:       "noSemanticNumber  turns off the sending of the semantic token 'number'\n",
+				Default:   "false",
+				Status:    "experimental",
+				Hierarchy: "ui",
+			},
+			{
 				Name:      "local",
 				Type:      "string",
 				Doc:       "local is the equivalent of the `goimports -local` flag, which puts\nimports beginning with this string after third-party packages. It should\nbe the prefix of the import path whose imports should be grouped\nseparately.\n",
@@ -716,11 +747,10 @@
 			ArgDoc:  "{\n\t// The test file containing the tests to run.\n\t\"URI\": string,\n\t// Specific test names to run, e.g. TestFoo.\n\t\"Tests\": []string,\n\t// Specific benchmarks to run, e.g. BenchmarkFoo.\n\t\"Benchmarks\": []string,\n}",
 		},
 		{
-			Command:   "gopls.run_vulncheck_exp",
-			Title:     "Run vulncheck (experimental)",
-			Doc:       "Run vulnerability check (`govulncheck`).",
-			ArgDoc:    "{\n\t// Dir is the directory from which vulncheck will run from.\n\t\"Dir\": string,\n\t// Package pattern. E.g. \"\", \".\", \"./...\".\n\t\"Pattern\": string,\n}",
-			ResultDoc: "{\n\t\"Vuln\": []{\n\t\t\"ID\": string,\n\t\t\"Details\": string,\n\t\t\"Aliases\": []string,\n\t\t\"Symbol\": string,\n\t\t\"PkgPath\": string,\n\t\t\"ModPath\": string,\n\t\t\"URL\": string,\n\t\t\"CurrentVersion\": string,\n\t\t\"FixedVersion\": string,\n\t\t\"CallStacks\": [][]golang.org/x/tools/internal/lsp/command.StackEntry,\n\t\t\"CallStackSummaries\": []string,\n\t},\n}",
+			Command: "gopls.run_vulncheck_exp",
+			Title:   "Run vulncheck (experimental)",
+			Doc:     "Run vulnerability check (`govulncheck`).",
+			ArgDoc:  "{\n\t// Any document in the directory from which govulncheck will run.\n\t\"URI\": string,\n\t// Package pattern. E.g. \"\", \".\", \"./...\".\n\t\"Pattern\": string,\n}",
 		},
 		{
 			Command:   "gopls.start_debugging",
@@ -783,6 +813,11 @@
 			Doc:   "Regenerates cgo definitions.",
 		},
 		{
+			Lens:  "run_vulncheck_exp",
+			Title: "Run vulncheck (experimental)",
+			Doc:   "Run vulnerability check (`govulncheck`).",
+		},
+		{
 			Lens:  "test",
 			Title: "Run test(s) (legacy)",
 			Doc:   "Runs `go test` for a specific set of test or benchmark functions.",
@@ -962,6 +997,11 @@
 			Default: true,
 		},
 		{
+			Name:    "timeformat",
+			Doc:     "check for calls of (time.Time).Format or time.Parse with 2006-02-01\n\nThe timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm)\nformat. Internationally, \"yyyy-dd-mm\" does not occur in common calendar date\nstandards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended.\n",
+			Default: true,
+		},
+		{
 			Name:    "unmarshal",
 			Doc:     "report passing non-pointer or non-interface values to unmarshal\n\nThe unmarshal analysis reports calls to functions such as json.Unmarshal\nin which the argument type is not a pointer or an interface.",
 			Default: true,
@@ -1014,6 +1054,10 @@
 			Default: true,
 		},
 		{
+			Name: "unusedvariable",
+			Doc:  "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n",
+		},
+		{
 			Name:    "fillstruct",
 			Doc:     "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n",
 			Default: true,
@@ -1027,31 +1071,31 @@
 	Hints: []*HintJSON{
 		{
 			Name: "assignVariableTypes",
-			Doc:  "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1",
+			Doc:  "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```",
 		},
 		{
 			Name: "compositeLiteralFields",
-			Doc:  "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}",
+			Doc:  "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```",
 		},
 		{
 			Name: "compositeLiteralTypes",
-			Doc:  "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}",
+			Doc:  "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```",
 		},
 		{
 			Name: "constantValues",
-			Doc:  "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone   Kind = iota/* = 0*/\n\t\tKindPrint/*  = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)",
+			Doc:  "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone   Kind = iota/* = 0*/\n\t\tKindPrint/*  = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```",
 		},
 		{
 			Name: "functionTypeParameters",
-			Doc:  "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")",
+			Doc:  "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```",
 		},
 		{
 			Name: "parameterNames",
-			Doc:  "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)",
+			Doc:  "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```",
 		},
 		{
 			Name: "rangeVariableTypes",
-			Doc:  "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}",
+			Doc:  "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```",
 		},
 	},
 }
diff --git a/internal/lsp/source/call_hierarchy.go b/internal/lsp/source/call_hierarchy.go
index c2c8a18..4e7daf0 100644
--- a/internal/lsp/source/call_hierarchy.go
+++ b/internal/lsp/source/call_hierarchy.go
@@ -152,12 +152,12 @@
 		kind = protocol.Function
 	}
 
-	nameStart, nameEnd := nameIdent.NamePos, nameIdent.NamePos+token.Pos(len(nameIdent.Name))
+	nameStart, nameEnd := nameIdent.Pos(), nameIdent.End()
 	if funcLit != nil {
 		nameStart, nameEnd = funcLit.Type.Func, funcLit.Type.Params.Pos()
 		kind = protocol.Function
 	}
-	rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, nameStart, nameEnd).Range()
+	rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, nameStart, nameEnd).Range()
 	if err != nil {
 		return protocol.CallHierarchyItem{}, err
 	}
@@ -194,14 +194,22 @@
 	if _, ok := identifier.Declaration.obj.Type().Underlying().(*types.Signature); !ok {
 		return nil, nil
 	}
-	if identifier.Declaration.node == nil {
+	node := identifier.Declaration.node
+	if node == nil {
 		return nil, nil
 	}
 	if len(identifier.Declaration.MappedRange) == 0 {
 		return nil, nil
 	}
 	declMappedRange := identifier.Declaration.MappedRange[0]
-	callExprs, err := collectCallExpressions(snapshot.FileSet(), declMappedRange.m, identifier.Declaration.node)
+	// TODO(adonovan): avoid Fileset.File call by somehow getting at
+	// declMappedRange.spanRange.TokFile, or making Identifier retain the
+	// token.File of the identifier and its declaration, since it looks up both anyway.
+	tokFile := snapshot.FileSet().File(node.Pos())
+	if tokFile == nil {
+		return nil, fmt.Errorf("no file for position")
+	}
+	callExprs, err := collectCallExpressions(tokFile, declMappedRange.m, node)
 	if err != nil {
 		return nil, err
 	}
@@ -210,7 +218,7 @@
 }
 
 // collectCallExpressions collects call expression ranges inside a function.
-func collectCallExpressions(fset *token.FileSet, mapper *protocol.ColumnMapper, node ast.Node) ([]protocol.Range, error) {
+func collectCallExpressions(tokFile *token.File, mapper *protocol.ColumnMapper, node ast.Node) ([]protocol.Range, error) {
 	type callPos struct {
 		start, end token.Pos
 	}
@@ -240,7 +248,7 @@
 
 	callRanges := []protocol.Range{}
 	for _, call := range callPositions {
-		callRange, err := NewMappedRange(fset, mapper, call.start, call.end).Range()
+		callRange, err := NewMappedRange(tokFile, mapper, call.start, call.end).Range()
 		if err != nil {
 			return nil, err
 		}
diff --git a/internal/lsp/source/code_lens.go b/internal/lsp/source/code_lens.go
index 0ab857a..85a0a2f 100644
--- a/internal/lsp/source/code_lens.go
+++ b/internal/lsp/source/code_lens.go
@@ -67,7 +67,7 @@
 			return nil, err
 		}
 		// add a code lens to the top of the file which runs all benchmarks in the file
-		rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range()
+		rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, pgf.File.Package, pgf.File.Package).Range()
 		if err != nil {
 			return nil, err
 		}
@@ -111,7 +111,7 @@
 			continue
 		}
 
-		rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, d.Pos(), fn.End()).Range()
+		rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, fn.Pos(), fn.End()).Range()
 		if err != nil {
 			return out, err
 		}
@@ -177,7 +177,7 @@
 			if !strings.HasPrefix(l.Text, ggDirective) {
 				continue
 			}
-			rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, l.Pos(), l.Pos()+token.Pos(len(ggDirective))).Range()
+			rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, l.Pos(), l.Pos()+token.Pos(len(ggDirective))).Range()
 			if err != nil {
 				return nil, err
 			}
@@ -214,7 +214,7 @@
 	if c == nil {
 		return nil, nil
 	}
-	rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, c.Pos(), c.EndPos).Range()
+	rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, c.Pos(), c.End()).Range()
 	if err != nil {
 		return nil, err
 	}
@@ -231,7 +231,11 @@
 	if err != nil {
 		return nil, err
 	}
-	rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range()
+	if !pgf.File.Package.IsValid() {
+		// Without a package name we have nowhere to put the codelens, so give up.
+		return nil, nil
+	}
+	rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, pgf.File.Package, pgf.File.Package).Range()
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go
index 0c1ff3f..be613d3 100644
--- a/internal/lsp/source/completion/completion.go
+++ b/internal/lsp/source/completion/completion.go
@@ -173,8 +173,9 @@
 	// file is the AST of the file associated with this completion request.
 	file *ast.File
 
-	// pos is the position at which the request was triggered.
-	pos token.Pos
+	// (tokFile, pos) is the position at which the request was triggered.
+	tokFile *token.File
+	pos     token.Pos
 
 	// path is the path of AST nodes enclosing the position.
 	path []ast.Node
@@ -325,7 +326,7 @@
 		content: ident.Name,
 		cursor:  c.pos,
 		// Overwrite the prefix only.
-		rng: span.NewRange(c.snapshot.FileSet(), ident.Pos(), ident.End()),
+		rng: span.NewRange(c.tokFile, ident.Pos(), ident.End()),
 	}
 
 	c.setMatcherFromPrefix(c.surrounding.Prefix())
@@ -347,7 +348,7 @@
 		c.surrounding = &Selection{
 			content: "",
 			cursor:  c.pos,
-			rng:     span.NewRange(c.snapshot.FileSet(), c.pos, c.pos),
+			rng:     span.NewRange(c.tokFile, c.pos, c.pos),
 		}
 	}
 	return c.surrounding
@@ -486,7 +487,7 @@
 					qual := types.RelativeTo(pkg.GetTypes())
 					objStr = types.ObjectString(obj, qual)
 				}
-				ans, sel := definition(path, obj, snapshot.FileSet(), fh)
+				ans, sel := definition(path, obj, pgf.Tok, fh)
 				if ans != nil {
 					sort.Slice(ans, func(i, j int) bool {
 						return ans[i].Score > ans[j].Score
@@ -513,6 +514,7 @@
 		},
 		fh:                        fh,
 		filename:                  fh.URI().Filename(),
+		tokFile:                   pgf.Tok,
 		file:                      pgf.File,
 		path:                      path,
 		pos:                       pos,
@@ -798,7 +800,7 @@
 	c.surrounding = &Selection{
 		content: content,
 		cursor:  c.pos,
-		rng:     span.NewRange(c.snapshot.FileSet(), start, end),
+		rng:     span.NewRange(c.tokFile, start, end),
 	}
 
 	seenImports := make(map[string]struct{})
@@ -1018,7 +1020,7 @@
 	c.surrounding = &Selection{
 		content: cursorComment.Text[start:end],
 		cursor:  c.pos,
-		rng:     span.NewRange(c.snapshot.FileSet(), token.Pos(int(cursorComment.Slash)+start), token.Pos(int(cursorComment.Slash)+end)),
+		rng:     span.NewRange(c.tokFile, token.Pos(int(cursorComment.Slash)+start), token.Pos(int(cursorComment.Slash)+end)),
 	}
 	c.setMatcherFromPrefix(c.surrounding.Prefix())
 }
@@ -2314,7 +2316,7 @@
 // matchesVariadic returns true if we are completing a variadic
 // parameter and candType is a compatible slice type.
 func (ci candidateInference) matchesVariadic(candType types.Type) bool {
-	return ci.variadic && ci.objType != nil && types.AssignableTo(candType, types.NewSlice(ci.objType))
+	return ci.variadic && ci.objType != nil && assignableTo(candType, types.NewSlice(ci.objType))
 }
 
 // findSwitchStmt returns an *ast.CaseClause's corresponding *ast.SwitchStmt or
@@ -2640,7 +2642,7 @@
 			return false
 		}
 
-		if ci.convertibleTo != nil && types.ConvertibleTo(candType, ci.convertibleTo) {
+		if ci.convertibleTo != nil && convertibleTo(candType, ci.convertibleTo) {
 			return true
 		}
 
@@ -2728,7 +2730,7 @@
 		return false
 	}
 
-	if !types.ConvertibleTo(from, to) {
+	if !convertibleTo(from, to) {
 		return false
 	}
 
@@ -2777,7 +2779,7 @@
 
 	// AssignableTo covers the case where the types are equal, but also handles
 	// cases like assigning a concrete type to an interface type.
-	return types.AssignableTo(candType, expType)
+	return assignableTo(candType, expType)
 }
 
 // kindMatches reports whether candType's kind matches our expected
@@ -2840,7 +2842,7 @@
 			assignee = ci.assignees[i]
 		}
 
-		if assignee == nil {
+		if assignee == nil || assignee == types.Typ[types.Invalid] {
 			continue
 		}
 
@@ -2894,7 +2896,7 @@
 		//
 		// Where our expected type is "[]int", and we expect a type name.
 		if c.inference.objType != nil {
-			return types.AssignableTo(candType, c.inference.objType)
+			return assignableTo(candType, c.inference.objType)
 		}
 
 		// Default to saying any type name is a match.
diff --git a/internal/lsp/source/completion/definition.go b/internal/lsp/source/completion/definition.go
index 44d5a33..7644fc4 100644
--- a/internal/lsp/source/completion/definition.go
+++ b/internal/lsp/source/completion/definition.go
@@ -23,7 +23,7 @@
 // BenchmarkFoo(b *testing.B), FuzzFoo(f *testing.F)
 
 // path[0] is known to be *ast.Ident
-func definition(path []ast.Node, obj types.Object, fset *token.FileSet, fh source.FileHandle) ([]CompletionItem, *Selection) {
+func definition(path []ast.Node, obj types.Object, tokFile *token.File, fh source.FileHandle) ([]CompletionItem, *Selection) {
 	if _, ok := obj.(*types.Func); !ok {
 		return nil, nil // not a function at all
 	}
@@ -40,7 +40,7 @@
 	sel := &Selection{
 		content: "",
 		cursor:  pos,
-		rng:     span.NewRange(fset, pos, pos),
+		rng:     span.NewRange(tokFile, pos, pos),
 	}
 	var ans []CompletionItem
 
diff --git a/internal/lsp/source/completion/format.go b/internal/lsp/source/completion/format.go
index 72498cc..d34cee2 100644
--- a/internal/lsp/source/completion/format.go
+++ b/internal/lsp/source/completion/format.go
@@ -242,10 +242,7 @@
 		return item, nil
 	}
 
-	decl, err := c.snapshot.PosToDecl(ctx, pkg, obj.Pos())
-	if err != nil {
-		return CompletionItem{}, err
-	}
+	decl, _ := source.FindDeclAndField(pkg.GetSyntax(), obj.Pos()) // may be nil
 	hover, err := source.FindHoverContext(ctx, c.snapshot, pkg, obj, decl, nil)
 	if err != nil {
 		event.Error(ctx, "failed to find Hover", err, tag.URI.Of(uri))
diff --git a/internal/lsp/source/completion/package.go b/internal/lsp/source/completion/package.go
index 21244ef..566d8ee 100644
--- a/internal/lsp/source/completion/package.go
+++ b/internal/lsp/source/completion/package.go
@@ -104,7 +104,7 @@
 			return &Selection{
 				content: name.Name,
 				cursor:  cursor,
-				rng:     span.NewRange(fset, name.Pos(), name.End()),
+				rng:     span.NewRange(tok, name.Pos(), name.End()),
 			}, nil
 		}
 	}
@@ -141,7 +141,7 @@
 				return &Selection{
 					content: content,
 					cursor:  cursor,
-					rng:     span.NewRange(fset, start, end),
+					rng:     span.NewRange(tok, start, end),
 				}, nil
 			}
 		}
@@ -154,7 +154,7 @@
 	}
 
 	// If the cursor is in a comment, don't offer any completions.
-	if cursorInComment(fset, cursor, pgf.Src) {
+	if cursorInComment(fset.File(cursor), cursor, pgf.Src) {
 		return nil, fmt.Errorf("cursor in comment")
 	}
 
@@ -168,13 +168,13 @@
 	return &Selection{
 		content: "",
 		cursor:  cursor,
-		rng:     span.NewRange(fset, start, end),
+		rng:     span.NewRange(tok, start, end),
 	}, nil
 }
 
-func cursorInComment(fset *token.FileSet, cursor token.Pos, src []byte) bool {
+func cursorInComment(file *token.File, cursor token.Pos, src []byte) bool {
 	var s scanner.Scanner
-	s.Init(fset.File(cursor), src, func(_ token.Position, _ string) {}, scanner.ScanComments)
+	s.Init(file, src, func(_ token.Position, _ string) {}, scanner.ScanComments)
 	for {
 		pos, tok, lit := s.Scan()
 		if pos <= cursor && cursor <= token.Pos(int(pos)+len(lit)) {
diff --git a/internal/lsp/source/completion/postfix_snippets.go b/internal/lsp/source/completion/postfix_snippets.go
index d7f0d90..aa8454f 100644
--- a/internal/lsp/source/completion/postfix_snippets.go
+++ b/internal/lsp/source/completion/postfix_snippets.go
@@ -150,6 +150,14 @@
 }
 {{end}}`,
 }, {
+	label:   "range",
+	details: "range over channel",
+	body: `{{if and (eq .Kind "chan") .StmtOK -}}
+for {{.VarName .ElemType "e"}} := range {{.X}} {
+	{{.Cursor}}
+}
+{{- end}}`,
+}, {
 	label:   "var",
 	details: "assign to variables",
 	body: `{{if and (eq .Kind "tuple") .StmtOK -}}
diff --git a/internal/lsp/source/completion/util.go b/internal/lsp/source/completion/util.go
index cd7849a..e0a264b 100644
--- a/internal/lsp/source/completion/util.go
+++ b/internal/lsp/source/completion/util.go
@@ -311,7 +311,7 @@
 }
 
 func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.TextEdit, error) {
-	rng := source.NewMappedRange(c.snapshot.FileSet(), c.mapper, from, to)
+	rng := source.NewMappedRange(c.tokFile, c.mapper, from, to)
 	spn, err := rng.Span()
 	if err != nil {
 		return nil, err
@@ -321,3 +321,23 @@
 		NewText: newText,
 	}})
 }
+
+// assignableTo is like types.AssignableTo, but returns false if
+// either type is invalid.
+func assignableTo(x, to types.Type) bool {
+	if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] {
+		return false
+	}
+
+	return types.AssignableTo(x, to)
+}
+
+// convertibleTo is like types.ConvertibleTo, but returns false if
+// either type is invalid.
+func convertibleTo(x, to types.Type) bool {
+	if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] {
+		return false
+	}
+
+	return types.ConvertibleTo(x, to)
+}
diff --git a/internal/lsp/source/extract.go b/internal/lsp/source/extract.go
index 90999d8..a4e0a14 100644
--- a/internal/lsp/source/extract.go
+++ b/internal/lsp/source/extract.go
@@ -18,11 +18,13 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/ast/astutil"
 	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/lsp/bug"
 	"golang.org/x/tools/internal/lsp/safetoken"
 	"golang.org/x/tools/internal/span"
 )
 
 func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, _ *types.Package, info *types.Info) (*analysis.SuggestedFix, error) {
+	tokFile := fset.File(file.Pos())
 	expr, path, ok, err := CanExtractVariable(rng, file)
 	if !ok {
 		return nil, fmt.Errorf("extractVariable: cannot extract %s: %v", fset.Position(rng.Start), err)
@@ -60,11 +62,7 @@
 	if insertBeforeStmt == nil {
 		return nil, fmt.Errorf("cannot find location to insert extraction")
 	}
-	tok := fset.File(expr.Pos())
-	if tok == nil {
-		return nil, fmt.Errorf("no file for pos %v", fset.Position(file.Pos()))
-	}
-	indent, err := calculateIndentation(src, tok, insertBeforeStmt)
+	indent, err := calculateIndentation(src, tokFile, insertBeforeStmt)
 	if err != nil {
 		return nil, err
 	}
@@ -217,7 +215,12 @@
 	if isMethod {
 		errorPrefix = "extractMethod"
 	}
-	p, ok, methodOk, err := CanExtractFunction(fset, rng, src, file)
+
+	tok := fset.File(file.Pos())
+	if tok == nil {
+		return nil, bug.Errorf("no file for position")
+	}
+	p, ok, methodOk, err := CanExtractFunction(tok, rng, src, file)
 	if (!ok && !isMethod) || (!methodOk && isMethod) {
 		return nil, fmt.Errorf("%s: cannot extract %s: %v", errorPrefix,
 			fset.Position(rng.Start), err)
@@ -344,7 +347,7 @@
 		if v.obj.Parent() == nil {
 			return nil, fmt.Errorf("parent nil")
 		}
-		isUsed, firstUseAfter := objUsed(info, span.NewRange(fset, rng.End, v.obj.Parent().End()), v.obj)
+		isUsed, firstUseAfter := objUsed(info, span.NewRange(tok, rng.End, v.obj.Parent().End()), v.obj)
 		if v.assigned && isUsed && !varOverridden(info, firstUseAfter, v.obj, v.free, outer) {
 			returnTypes = append(returnTypes, &ast.Field{Type: typ})
 			returns = append(returns, identifier)
@@ -941,14 +944,10 @@
 
 // CanExtractFunction reports whether the code in the given range can be
 // extracted to a function.
-func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) {
+func CanExtractFunction(tok *token.File, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) {
 	if rng.Start == rng.End {
 		return nil, false, false, fmt.Errorf("start and end are equal")
 	}
-	tok := fset.File(file.Pos())
-	if tok == nil {
-		return nil, false, false, fmt.Errorf("no file for pos %v", fset.Position(file.Pos()))
-	}
 	var err error
 	rng, err = adjustRangeForWhitespace(rng, tok, src)
 	if err != nil {
diff --git a/internal/lsp/source/fix.go b/internal/lsp/source/fix.go
index 6a7f77d..dce279e 100644
--- a/internal/lsp/source/fix.go
+++ b/internal/lsp/source/fix.go
@@ -14,6 +14,7 @@
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/internal/lsp/analysis/fillstruct"
 	"golang.org/x/tools/internal/lsp/analysis/undeclaredname"
+	"golang.org/x/tools/internal/lsp/bug"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/span"
 )
@@ -84,7 +85,15 @@
 	fset := snapshot.FileSet()
 	editsPerFile := map[span.URI]*protocol.TextDocumentEdit{}
 	for _, edit := range suggestion.TextEdits {
-		spn, err := span.NewRange(fset, edit.Pos, edit.End).Span()
+		tokFile := fset.File(edit.Pos)
+		if tokFile == nil {
+			return nil, bug.Errorf("no file for edit position")
+		}
+		end := edit.End
+		if !end.IsValid() {
+			end = edit.Pos
+		}
+		spn, err := span.NewRange(tokFile, edit.Pos, end).Span()
 		if err != nil {
 			return nil, err
 		}
diff --git a/internal/lsp/source/folding_range.go b/internal/lsp/source/folding_range.go
index 576308f..b70cb4d 100644
--- a/internal/lsp/source/folding_range.go
+++ b/internal/lsp/source/folding_range.go
@@ -41,13 +41,11 @@
 		return nil, nil
 	}
 
-	fset := snapshot.FileSet()
-
 	// Get folding ranges for comments separately as they are not walked by ast.Inspect.
-	ranges = append(ranges, commentsFoldingRange(fset, pgf.Mapper, pgf.File)...)
+	ranges = append(ranges, commentsFoldingRange(pgf.Tok, pgf.Mapper, pgf.File)...)
 
 	visit := func(n ast.Node) bool {
-		rng := foldingRangeFunc(fset, pgf.Mapper, n, lineFoldingOnly)
+		rng := foldingRangeFunc(pgf.Tok, pgf.Mapper, n, lineFoldingOnly)
 		if rng != nil {
 			ranges = append(ranges, rng)
 		}
@@ -66,7 +64,7 @@
 }
 
 // foldingRangeFunc calculates the line folding range for ast.Node n
-func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo {
+func foldingRangeFunc(tokFile *token.File, m *protocol.ColumnMapper, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo {
 	// TODO(suzmue): include trailing empty lines before the closing
 	// parenthesis/brace.
 	var kind protocol.FoldingRangeKind
@@ -78,7 +76,7 @@
 		if num := len(n.List); num != 0 {
 			startList, endList = n.List[0].Pos(), n.List[num-1].End()
 		}
-		start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly)
+		start, end = validLineFoldingRange(tokFile, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly)
 	case *ast.CaseClause:
 		// Fold from position of ":" to end.
 		start, end = n.Colon+1, n.End()
@@ -94,7 +92,7 @@
 		if num := len(n.List); num != 0 {
 			startList, endList = n.List[0].Pos(), n.List[num-1].End()
 		}
-		start, end = validLineFoldingRange(fset, n.Opening, n.Closing, startList, endList, lineFoldingOnly)
+		start, end = validLineFoldingRange(tokFile, n.Opening, n.Closing, startList, endList, lineFoldingOnly)
 	case *ast.GenDecl:
 		// If this is an import declaration, set the kind to be protocol.Imports.
 		if n.Tok == token.IMPORT {
@@ -105,7 +103,7 @@
 		if num := len(n.Specs); num != 0 {
 			startSpecs, endSpecs = n.Specs[0].Pos(), n.Specs[num-1].End()
 		}
-		start, end = validLineFoldingRange(fset, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly)
+		start, end = validLineFoldingRange(tokFile, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly)
 	case *ast.BasicLit:
 		// Fold raw string literals from position of "`" to position of "`".
 		if n.Kind == token.STRING && len(n.Value) >= 2 && n.Value[0] == '`' && n.Value[len(n.Value)-1] == '`' {
@@ -117,7 +115,7 @@
 		if num := len(n.Elts); num != 0 {
 			startElts, endElts = n.Elts[0].Pos(), n.Elts[num-1].End()
 		}
-		start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly)
+		start, end = validLineFoldingRange(tokFile, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly)
 	}
 
 	// Check that folding positions are valid.
@@ -125,18 +123,18 @@
 		return nil
 	}
 	// in line folding mode, do not fold if the start and end lines are the same.
-	if lineFoldingOnly && fset.Position(start).Line == fset.Position(end).Line {
+	if lineFoldingOnly && tokFile.Line(start) == tokFile.Line(end) {
 		return nil
 	}
 	return &FoldingRangeInfo{
-		MappedRange: NewMappedRange(fset, m, start, end),
+		MappedRange: NewMappedRange(tokFile, m, start, end),
 		Kind:        kind,
 	}
 }
 
 // validLineFoldingRange returns start and end token.Pos for folding range if the range is valid.
 // returns token.NoPos otherwise, which fails token.IsValid check
-func validLineFoldingRange(fset *token.FileSet, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) {
+func validLineFoldingRange(tokFile *token.File, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) {
 	if lineFoldingOnly {
 		if !open.IsValid() || !close.IsValid() {
 			return token.NoPos, token.NoPos
@@ -146,8 +144,8 @@
 		// as an example, the example below should *not* fold:
 		// var x = [2]string{"d",
 		// "e" }
-		if fset.Position(open).Line == fset.Position(start).Line ||
-			fset.Position(close).Line == fset.Position(end).Line {
+		if tokFile.Line(open) == tokFile.Line(start) ||
+			tokFile.Line(close) == tokFile.Line(end) {
 			return token.NoPos, token.NoPos
 		}
 
@@ -159,25 +157,25 @@
 // commentsFoldingRange returns the folding ranges for all comment blocks in file.
 // The folding range starts at the end of the first line of the comment block, and ends at the end of the
 // comment block and has kind protocol.Comment.
-func commentsFoldingRange(fset *token.FileSet, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) {
+func commentsFoldingRange(tokFile *token.File, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) {
 	for _, commentGrp := range file.Comments {
-		startGrp, endGrp := fset.Position(commentGrp.Pos()), fset.Position(commentGrp.End())
-		if startGrp.Line == endGrp.Line {
+		startGrpLine, endGrpLine := tokFile.Line(commentGrp.Pos()), tokFile.Line(commentGrp.End())
+		if startGrpLine == endGrpLine {
 			// Don't fold single line comments.
 			continue
 		}
 
 		firstComment := commentGrp.List[0]
 		startPos, endLinePos := firstComment.Pos(), firstComment.End()
-		startCmmnt, endCmmnt := fset.Position(startPos), fset.Position(endLinePos)
-		if startCmmnt.Line != endCmmnt.Line {
+		startCmmntLine, endCmmntLine := tokFile.Line(startPos), tokFile.Line(endLinePos)
+		if startCmmntLine != endCmmntLine {
 			// If the first comment spans multiple lines, then we want to have the
 			// folding range start at the end of the first line.
 			endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0]))
 		}
 		comments = append(comments, &FoldingRangeInfo{
 			// Fold from the end of the first line comment to the end of the comment block.
-			MappedRange: NewMappedRange(fset, m, endLinePos, commentGrp.End()),
+			MappedRange: NewMappedRange(tokFile, m, endLinePos, commentGrp.End()),
 			Kind:        protocol.Comment,
 		})
 	}
diff --git a/internal/lsp/source/hover.go b/internal/lsp/source/hover.go
index 58ea969..c5af019 100644
--- a/internal/lsp/source/hover.go
+++ b/internal/lsp/source/hover.go
@@ -610,11 +610,7 @@
 				break
 			}
 
-			field, err := s.PosToField(ctx, pkg, obj.Pos())
-			if err != nil {
-				return nil, err
-			}
-
+			_, field := FindDeclAndField(pkg.GetSyntax(), obj.Pos())
 			if field != nil {
 				comment := field.Doc
 				if comment.Text() == "" {
@@ -876,3 +872,99 @@
 	}
 	return false
 }
+
+// FindDeclAndField returns the var/func/type/const Decl that declares
+// the identifier at pos, searching the given list of file syntax
+// trees. If pos is the position of an ast.Field or one of its Names
+// or Ellipsis.Elt, the field is returned, along with the innermost
+// enclosing Decl, which could be only loosely related---consider:
+//
+//	var decl = f(  func(field int) {}  )
+//
+// It returns (nil, nil) if no Field or Decl is found at pos.
+func FindDeclAndField(files []*ast.File, pos token.Pos) (decl ast.Decl, field *ast.Field) {
+	// panic(nil) breaks off the traversal and
+	// causes the function to return normally.
+	defer func() {
+		if x := recover(); x != nil {
+			panic(x)
+		}
+	}()
+
+	// Visit the files in search of the node at pos.
+	var stack []ast.Node
+	for _, file := range files {
+		ast.Inspect(file, func(n ast.Node) bool {
+			if n != nil {
+				stack = append(stack, n) // push
+			} else {
+				stack = stack[:len(stack)-1] // pop
+				return false
+			}
+
+			// Skip subtrees (incl. files) that don't contain the search point.
+			if !(n.Pos() <= pos && pos < n.End()) {
+				return false
+			}
+
+			switch n := n.(type) {
+			case *ast.Field:
+				checkField := func(f ast.Node) {
+					if f.Pos() == pos {
+						field = n
+						for i := len(stack) - 1; i >= 0; i-- {
+							if d, ok := stack[i].(ast.Decl); ok {
+								decl = d // innermost enclosing decl
+								break
+							}
+						}
+						panic(nil) // found
+					}
+				}
+
+				// Check *ast.Field itself. This handles embedded
+				// fields which have no associated *ast.Ident name.
+				checkField(n)
+
+				// Check each field name since you can have
+				// multiple names for the same type expression.
+				for _, name := range n.Names {
+					checkField(name)
+				}
+
+				// Also check "X" in "...X". This makes it easy
+				// to format variadic signature params properly.
+				if ell, ok := n.Type.(*ast.Ellipsis); ok && ell.Elt != nil {
+					checkField(ell.Elt)
+				}
+
+			case *ast.FuncDecl:
+				if n.Name.Pos() == pos {
+					decl = n
+					panic(nil) // found
+				}
+
+			case *ast.GenDecl:
+				for _, spec := range n.Specs {
+					switch spec := spec.(type) {
+					case *ast.TypeSpec:
+						if spec.Name.Pos() == pos {
+							decl = n
+							panic(nil) // found
+						}
+					case *ast.ValueSpec:
+						for _, id := range spec.Names {
+							if id.Pos() == pos {
+								decl = n
+								panic(nil) // found
+							}
+						}
+					}
+				}
+			}
+			return true
+		})
+	}
+
+	return nil, nil
+}
diff --git a/internal/lsp/source/identifier.go b/internal/lsp/source/identifier.go
index 40655e2..5378ae8 100644
--- a/internal/lsp/source/identifier.go
+++ b/internal/lsp/source/identifier.go
@@ -226,7 +226,7 @@
 
 		// The builtin package isn't in the dependency graph, so the usual
 		// utilities won't work here.
-		rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, decl.Pos(), decl.Pos()+token.Pos(len(result.Name)))
+		rng := NewMappedRange(builtin.Tok, builtin.Mapper, decl.Pos(), decl.Pos()+token.Pos(len(result.Name)))
 		result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng)
 		return result, nil
 	}
@@ -267,7 +267,7 @@
 			}
 			name := method.Names[0].Name
 			result.Declaration.node = method
-			rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, method.Pos(), method.Pos()+token.Pos(len(name)))
+			rng := NewMappedRange(builtin.Tok, builtin.Mapper, method.Pos(), method.Pos()+token.Pos(len(name)))
 			result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng)
 			return result, nil
 		}
@@ -292,9 +292,8 @@
 	if err != nil {
 		return nil, err
 	}
-	if result.Declaration.node, err = snapshot.PosToDecl(ctx, declPkg, result.Declaration.obj.Pos()); err != nil {
-		return nil, err
-	}
+	result.Declaration.node, _ = FindDeclAndField(declPkg.GetSyntax(), result.Declaration.obj.Pos()) // may be nil
+
 	// Ensure that we have the full declaration, in case the declaration was
 	// parsed in ParseExported and therefore could be missing information.
 	if result.Declaration.fullDecl, err = fullNode(snapshot, result.Declaration.obj, declPkg); err != nil {
diff --git a/internal/lsp/source/implementation.go b/internal/lsp/source/implementation.go
index 6666605..39a9289 100644
--- a/internal/lsp/source/implementation.go
+++ b/internal/lsp/source/implementation.go
@@ -235,17 +235,23 @@
 	if err != nil {
 		return nil, err
 	}
-	return qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, map[objSearchKey]bool{})
+	return qualifiedObjsAtLocation(ctx, s, positionKey{uri, offset}, map[positionKey]bool{})
 }
 
-type objSearchKey struct {
+// A positionKey identifies a byte offset within a file (URI).
+//
+// When a file has been parsed multiple times in the same FileSet,
+// there may be multiple token.Pos values denoting the same logical
+// position. In such situations, a positionKey may be used for
+// de-duplication.
+type positionKey struct {
 	uri    span.URI
 	offset int
 }
 
 // qualifiedObjsAtLocation finds all objects referenced at offset in uri, across
 // all packages in the snapshot.
-func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key objSearchKey, seen map[objSearchKey]bool) ([]qualifiedObject, error) {
+func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key positionKey, seen map[positionKey]bool) ([]qualifiedObject, error) {
 	if seen[key] {
 		return nil, nil
 	}
@@ -343,21 +349,8 @@
 			// is in another package, but this should be good enough to find all
 			// uses.
 
-			pos := obj.Pos()
-			var uri span.URI
-			offset := -1
-			for _, pgf := range pkg.CompiledGoFiles() {
-				if pgf.Tok.Base() <= int(pos) && int(pos) <= pgf.Tok.Base()+pgf.Tok.Size() {
-					var err error
-					offset, err = safetoken.Offset(pgf.Tok, pos)
-					if err != nil {
-						return nil, err
-					}
-					uri = pgf.URI
-				}
-			}
-			if offset >= 0 {
-				otherObjs, err := qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, seen)
+			if key, found := packagePositionKey(pkg, obj.Pos()); found {
+				otherObjs, err := qualifiedObjsAtLocation(ctx, s, key, seen)
 				if err != nil {
 					return nil, err
 				}
@@ -380,6 +373,19 @@
 	return qualifiedObjs, nil
 }
 
+// packagePositionKey finds the positionKey for the given pos within
+// one of pkg's compiled Go files.
+//
+// The second result reports whether the position was found.
+func packagePositionKey(pkg Package, pos token.Pos) (positionKey, bool) {
+	for _, pgf := range pkg.CompiledGoFiles() {
+		// safetoken.Offset fails when pos does not lie within
+		// pgf.Tok's range; in that case, try the next file.
+		offset, err := safetoken.Offset(pgf.Tok, pos)
+		if err == nil {
+			return positionKey{pgf.URI, offset}, true
+		}
+	}
+	return positionKey{}, false
+}
+
 // pathEnclosingObjNode returns the AST path to the object-defining
 // node associated with pos. "Object-defining" means either an
 // *ast.Ident mapped directly to a types.Object or an ast.Node mapped
diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go
index 967752b..4fb1cfb 100644
--- a/internal/lsp/source/inlay_hint.go
+++ b/internal/lsp/source/inlay_hint.go
@@ -44,63 +44,38 @@
 var AllInlayHints = map[string]*Hint{
 	AssignVariableTypes: {
 		Name: AssignVariableTypes,
-		Doc: `Enable/disable inlay hints for variable types in assign statements:
-
-	i/* int/*, j/* int/* := 0, len(r)-1`,
-		Run: assignVariableTypes,
+		Doc:  "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```",
+		Run:  assignVariableTypes,
 	},
 	ParameterNames: {
 		Name: ParameterNames,
-		Doc: `Enable/disable inlay hints for parameter names:
-
-	parseInt(/* str: */ "123", /* radix: */ 8)`,
-		Run: parameterNames,
+		Doc:  "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```",
+		Run:  parameterNames,
 	},
 	ConstantValues: {
 		Name: ConstantValues,
-		Doc: `Enable/disable inlay hints for constant values:
-
-	const (
-		KindNone   Kind = iota/* = 0*/
-		KindPrint/*  = 1*/
-		KindPrintf/* = 2*/
-		KindErrorf/* = 3*/
-	)`,
-		Run: constantValues,
+		Doc:  "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone   Kind = iota/* = 0*/\n\t\tKindPrint/*  = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```",
+		Run:  constantValues,
 	},
 	RangeVariableTypes: {
 		Name: RangeVariableTypes,
-		Doc: `Enable/disable inlay hints for variable types in range statements:
-
-	for k/* int*/, v/* string/* := range []string{} {
-		fmt.Println(k, v)
-	}`,
-		Run: rangeVariableTypes,
+		Doc:  "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```",
+		Run:  rangeVariableTypes,
 	},
 	CompositeLiteralTypes: {
 		Name: CompositeLiteralTypes,
-		Doc: `Enable/disable inlay hints for composite literal types:
-
-	for _, c := range []struct {
-		in, want string
-	}{
-		/*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"},
-	}`,
-		Run: compositeLiteralTypes,
+		Doc:  "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```",
+		Run:  compositeLiteralTypes,
 	},
 	CompositeLiteralFieldNames: {
 		Name: CompositeLiteralFieldNames,
-		Doc: `Enable/disable inlay hints for composite literal field names:
-
-	{in: "Hello, world", want: "dlrow ,olleH"}`,
-		Run: compositeLiteralFields,
+		Doc:  "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```",
+		Run:  compositeLiteralFields,
 	},
 	FunctionTypeParameters: {
 		Name: FunctionTypeParameters,
-		Doc: `Enable/disable inlay hints for implicit type parameters on generic functions:
-
-	myFoo/*[int, string]*/(1, "hello")`,
-		Run: funcTypeParams,
+		Doc:  "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```",
+		Run:  funcTypeParams,
 	},
 }
 
@@ -179,17 +154,24 @@
 		if i > params.Len()-1 {
 			break
 		}
-		value := params.At(i).Name()
+		param := params.At(i)
 		// param.Name is empty for built-ins like append
-		if value == "" {
+		if param.Name() == "" {
 			continue
 		}
+		// Skip the parameter name hint if the arg matches
+		// the parameter name.
+		if i, ok := v.(*ast.Ident); ok && i.Name == param.Name() {
+			continue
+		}
+
+		label := param.Name()
 		if signature.Variadic() && i == params.Len()-1 {
-			value = value + "..."
+			label = label + "..."
 		}
 		hints = append(hints, protocol.InlayHint{
 			Position:     &start,
-			Label:        buildLabel(value + ":"),
+			Label:        buildLabel(label + ":"),
 			Kind:         protocol.Parameter,
 			PaddingRight: true,
 		})
diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go
index 5da14eb..2f40b59 100644
--- a/internal/lsp/source/options.go
+++ b/internal/lsp/source/options.go
@@ -43,6 +43,7 @@
 	"golang.org/x/tools/go/analysis/passes/structtag"
 	"golang.org/x/tools/go/analysis/passes/testinggoroutine"
 	"golang.org/x/tools/go/analysis/passes/tests"
+	"golang.org/x/tools/go/analysis/passes/timeformat"
 	"golang.org/x/tools/go/analysis/passes/unmarshal"
 	"golang.org/x/tools/go/analysis/passes/unreachable"
 	"golang.org/x/tools/go/analysis/passes/unsafeptr"
@@ -61,6 +62,7 @@
 	"golang.org/x/tools/internal/lsp/analysis/stubmethods"
 	"golang.org/x/tools/internal/lsp/analysis/undeclaredname"
 	"golang.org/x/tools/internal/lsp/analysis/unusedparams"
+	"golang.org/x/tools/internal/lsp/analysis/unusedvariable"
 	"golang.org/x/tools/internal/lsp/analysis/useany"
 	"golang.org/x/tools/internal/lsp/command"
 	"golang.org/x/tools/internal/lsp/diff"
@@ -153,6 +155,7 @@
 						string(command.GCDetails):         false,
 						string(command.UpgradeDependency): true,
 						string(command.Vendor):            true,
+						// TODO(hyangah): enable command.RunVulncheckExp.
 					},
 				},
 			},
@@ -203,6 +206,7 @@
 	RelatedInformationSupported                bool
 	CompletionTags                             bool
 	CompletionDeprecated                       bool
+	SupportedResourceOperations                []protocol.ResourceOperationKind
 }
 
 // ServerOptions holds LSP-specific configuration that is provided by the
@@ -314,6 +318,12 @@
 	// SemanticTokens controls whether the LSP server will send
 	// semantic tokens to the client.
 	SemanticTokens bool `status:"experimental"`
+
+	// NoSemanticString turns off the sending of the semantic token 'string'
+	NoSemanticString bool `status:"experimental"`
+
+	// NoSemanticNumber turns off the sending of the semantic token 'number'
+	NoSemanticNumber bool `status:"experimental"`
 }
 
 type CompletionOptions struct {
@@ -349,6 +359,9 @@
 	// * `"pkg.go.dev"`
 	//
 	// If company chooses to use its own `godoc.org`, its address can be used as well.
+	//
+	// Modules matching the GOPRIVATE environment variable will not have
+	// documentation links in hover.
 	LinkTarget string
 
 	// LinksInHover toggles the presence of links to documentation in hover.
@@ -476,7 +489,7 @@
 	// LicensesText holds third party licenses for software used by gopls.
 	LicensesText string
 
-	// TODO(rfindley): is this even necessary?
+	// GoDiff is used in gopls/hooks to get Myers' diff
 	GoDiff bool
 
 	// Whether staticcheck is supported.
@@ -502,7 +515,7 @@
 	StaticcheckAnalyzers map[string]*Analyzer
 
 	// Govulncheck is the implementation of the Govulncheck gopls command.
-	Govulncheck func(context.Context, *packages.Config, command.VulncheckArgs) (command.VulncheckResult, error)
+	Govulncheck func(context.Context, *packages.Config, string) (command.VulncheckResult, error)
 }
 
 // InternalOptions contains settings that are not intended for use by the
@@ -561,6 +574,23 @@
 	// on the server.
 	// This option applies only during initialization.
 	ShowBugReports bool
+
+	// NewDiff controls the choice of the new diff implementation.
+	// It can be 'new', 'checked', or 'old' which is the default.
+	// 'checked' computes diffs with both algorithms, checks
+	// that the new algorithm has worked, and writes some summary
+	// statistics to a file in os.TempDir().
+	NewDiff string
+
+	// ChattyDiagnostics controls whether to report file diagnostics for each
+	// file change. If unset, gopls only reports diagnostics when they change, or
+	// when a file is opened or closed.
+	//
+	// TODO(rfindley): it seems that for many clients this should be true by
+	// default. For example, coc.nvim seems to get confused if diagnostics are
+	// not re-published. Switch the default to true after some period of internal
+	// testing.
+	ChattyDiagnostics bool
 }
 
 type ImportShortcut string
@@ -690,6 +720,9 @@
 
 func (o *Options) ForClientCapabilities(caps protocol.ClientCapabilities) {
 	// Check if the client supports snippets in completion items.
+	if caps.Workspace.WorkspaceEdit != nil {
+		o.SupportedResourceOperations = caps.Workspace.WorkspaceEdit.ResourceOperations
+	}
 	if c := caps.TextDocument.Completion; c.CompletionItem.SnippetSupport {
 		o.InsertTextFormat = protocol.SnippetTextFormat
 	}
@@ -787,10 +820,10 @@
 // should be enabled in enableAllExperimentMaps.
 func (o *Options) EnableAllExperiments() {
 	o.SemanticTokens = true
-	o.ExperimentalPostfixCompletions = true
 	o.ExperimentalUseInvalidMetadata = true
 	o.ExperimentalWatchedFileDelay = 50 * time.Millisecond
-	o.SymbolMatcher = SymbolFastFuzzy
+	o.NewDiff = "checked"
+	o.ChattyDiagnostics = true
 }
 
 func (o *Options) enableAllExperimentMaps() {
@@ -800,6 +833,33 @@
 	if _, ok := o.Analyses[unusedparams.Analyzer.Name]; !ok {
 		o.Analyses[unusedparams.Analyzer.Name] = true
 	}
+	if _, ok := o.Analyses[unusedvariable.Analyzer.Name]; !ok {
+		o.Analyses[unusedvariable.Analyzer.Name] = true
+	}
+}
+
+// validateDirectoryFilter validates that the filter string
+// - is not empty,
+// - starts with either + or -, and
+// - doesn't contain currently unsupported glob operators: *, ?
+//
+// It returns the filter converted to the host OS path separator form,
+// with any trailing separators removed, or an error describing why the
+// filter is invalid.
+func validateDirectoryFilter(ifilter string) (string, error) {
+	filter := fmt.Sprint(ifilter)
+	if filter == "" || (filter[0] != '+' && filter[0] != '-') {
+		return "", fmt.Errorf("invalid filter %v, must start with + or -", filter)
+	}
+	// Check each path segment after the leading +/- sign; "**" is the
+	// only glob-like segment that is supported.
+	segs := strings.Split(filter[1:], "/")
+	unsupportedOps := [...]string{"?", "*"}
+	for _, seg := range segs {
+		if seg != "**" {
+			for _, op := range unsupportedOps {
+				// NOTE(review): this error string ends with punctuation and
+				// contains a capitalized sentence, contrary to Go error-string
+				// convention; left unchanged since it is runtime behavior.
+				if strings.Contains(seg, op) {
+					return "", fmt.Errorf("invalid filter %v, operator %v not supported. If you want to have this operator supported, consider filing an issue.", filter, op)
+				}
+			}
+		}
+	}
+
+	return strings.TrimRight(filepath.FromSlash(filter), "/"), nil
 }
 
 func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult {
@@ -846,9 +906,9 @@
 		}
 		var filters []string
 		for _, ifilter := range ifilters {
-			filter := fmt.Sprint(ifilter)
-			if filter == "" || (filter[0] != '+' && filter[0] != '-') {
-				result.errorf("invalid filter %q, must start with + or -", filter)
+			filter, err := validateDirectoryFilter(fmt.Sprintf("%v", ifilter))
+			if err != nil {
+				result.errorf(err.Error())
 				return result
 			}
 			filters = append(filters, strings.TrimRight(filepath.FromSlash(filter), "/"))
@@ -980,6 +1040,12 @@
 	case "semanticTokens":
 		result.setBool(&o.SemanticTokens)
 
+	case "noSemanticString":
+		result.setBool(&o.NoSemanticString)
+
+	case "noSemanticNumber":
+		result.setBool(&o.NoSemanticNumber)
+
 	case "expandWorkspaceToModule":
 		result.setBool(&o.ExpandWorkspaceToModule)
 
@@ -1031,6 +1097,12 @@
 		// This setting should be handled before all of the other options are
 		// processed, so do nothing here.
 
+	case "newDiff":
+		result.setString(&o.NewDiff)
+
+	case "chattyDiagnostics":
+		result.setBool(&o.ChattyDiagnostics)
+
 	// Replaced settings.
 	case "experimentalDisabledAnalyses":
 		result.deprecated("analyses")
@@ -1270,6 +1342,10 @@
 			Fix:      UndeclaredName,
 			Enabled:  true,
 		},
+		unusedvariable.Analyzer.Name: {
+			Analyzer: unusedvariable.Analyzer,
+			Enabled:  false,
+		},
 	}
 }
 
@@ -1331,6 +1407,7 @@
 		useany.Analyzer.Name:           {Analyzer: useany.Analyzer, Enabled: false},
 		infertypeargs.Analyzer.Name:    {Analyzer: infertypeargs.Analyzer, Enabled: true},
 		embeddirective.Analyzer.Name:   {Analyzer: embeddirective.Analyzer, Enabled: true},
+		timeformat.Analyzer.Name:       {Analyzer: timeformat.Analyzer, Enabled: true},
 
 		// gofmt -s suite:
 		simplifycompositelit.Analyzer.Name: {
diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go
index 85bf41a..2bbdc07 100644
--- a/internal/lsp/source/references.go
+++ b/internal/lsp/source/references.go
@@ -16,8 +16,8 @@
 	"strconv"
 
 	"golang.org/x/tools/internal/event"
+	"golang.org/x/tools/internal/lsp/bug"
 	"golang.org/x/tools/internal/lsp/protocol"
-	"golang.org/x/tools/internal/lsp/safetoken"
 	"golang.org/x/tools/internal/span"
 )
 
@@ -31,6 +31,18 @@
 	isDeclaration bool
 }
 
+// isInPackageName reports whether the file's package name surrounds the
+// given position pp (e.g. "foo" surrounds the cursor in "package foo").
+//
+// NOTE(review): the ctx, s, and f parameters are not used by the body;
+// only pgf and pp are consulted.
+func isInPackageName(ctx context.Context, s Snapshot, f FileHandle, pgf *ParsedGoFile, pp protocol.Position) (bool, error) {
+	// Convert the protocol position to a token.Pos in this file.
+	cursorPos, err := pgf.Mapper.Pos(pp)
+	if err != nil {
+		return false, err
+	}
+
+	// The position is "in" the package name if it lies anywhere within
+	// the name identifier of the file's package clause (inclusive of End,
+	// so a cursor immediately after the name also counts).
+	return pgf.File.Name.Pos() <= cursorPos && cursorPos <= pgf.File.Name.End(), nil
+}
+
 // References returns a list of references for a given identifier within the packages
 // containing i.File. Declarations appear first in the result.
 func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, includeDeclaration bool) ([]*ReferenceInfo, error) {
@@ -43,22 +55,13 @@
 		return nil, err
 	}
 
-	cursorOffset, err := pgf.Mapper.Offset(pp)
+	packageName := pgf.File.Name.Name // from package decl
+	inPackageName, err := isInPackageName(ctx, s, f, pgf, pp)
 	if err != nil {
 		return nil, err
 	}
 
-	packageNameStart, err := safetoken.Offset(pgf.Tok, pgf.File.Name.Pos())
-	if err != nil {
-		return nil, err
-	}
-
-	packageNameEnd, err := safetoken.Offset(pgf.Tok, pgf.File.Name.End())
-	if err != nil {
-		return nil, err
-	}
-
-	if packageNameStart <= cursorOffset && cursorOffset < packageNameEnd {
+	if inPackageName {
 		renamingPkg, err := s.PackageForFile(ctx, f.URI(), TypecheckAll, NarrowestPackage)
 		if err != nil {
 			return nil, err
@@ -75,8 +78,8 @@
 				for _, imp := range f.File.Imports {
 					if path, err := strconv.Unquote(imp.Path.Value); err == nil && path == renamingPkg.PkgPath() {
 						refs = append(refs, &ReferenceInfo{
-							Name:        pgf.File.Name.Name,
-							MappedRange: NewMappedRange(s.FileSet(), f.Mapper, imp.Pos(), imp.End()),
+							Name:        packageName,
+							MappedRange: NewMappedRange(f.Tok, f.Mapper, imp.Pos(), imp.End()),
 						})
 					}
 				}
@@ -86,8 +89,8 @@
 		// Find internal references to the package within the package itself
 		for _, f := range renamingPkg.CompiledGoFiles() {
 			refs = append(refs, &ReferenceInfo{
-				Name:        pgf.File.Name.Name,
-				MappedRange: NewMappedRange(s.FileSet(), f.Mapper, f.File.Name.Pos(), f.File.Name.End()),
+				Name:        packageName,
+				MappedRange: NewMappedRange(f.Tok, f.Mapper, f.File.Name.Pos(), f.File.Name.End()),
 			})
 		}
 
@@ -126,7 +129,7 @@
 func references(ctx context.Context, snapshot Snapshot, qos []qualifiedObject, includeDeclaration, includeInterfaceRefs, includeEmbeddedRefs bool) ([]*ReferenceInfo, error) {
 	var (
 		references []*ReferenceInfo
-		seen       = make(map[token.Pos]bool)
+		seen       = make(map[positionKey]bool)
 	)
 
 	pos := qos[0].obj.Pos()
@@ -188,10 +191,15 @@
 						continue
 					}
 				}
-				if seen[ident.Pos()] {
+				key, found := packagePositionKey(pkg, ident.Pos())
+				if !found {
+					bug.Reportf("ident %v (pos: %v) not found in package %v", ident.Name, ident.Pos(), pkg.Name())
 					continue
 				}
-				seen[ident.Pos()] = true
+				if seen[key] {
+					continue
+				}
+				seen[key] = true
 				rng, err := posToMappedRange(snapshot, pkg, ident.Pos(), ident.End())
 				if err != nil {
 					return nil, err
diff --git a/internal/lsp/source/rename.go b/internal/lsp/source/rename.go
index 6312bcb..6bbe91a 100644
--- a/internal/lsp/source/rename.go
+++ b/internal/lsp/source/rename.go
@@ -49,6 +49,29 @@
 // the prepare fails. Probably we could eliminate the redundancy in returning
 // two errors, but for now this is done defensively.
 func PrepareRename(ctx context.Context, snapshot Snapshot, f FileHandle, pp protocol.Position) (_ *PrepareItem, usererr, err error) {
+	fileRenameSupported := false
+	for _, op := range snapshot.View().Options().SupportedResourceOperations {
+		if op == protocol.Rename {
+			fileRenameSupported = true
+			break
+		}
+	}
+
+	// Find position of the package name declaration
+	pgf, err := snapshot.ParseGo(ctx, f, ParseFull)
+	if err != nil {
+		return nil, err, err
+	}
+	inPackageName, err := isInPackageName(ctx, snapshot, f, pgf, pp)
+	if err != nil {
+		return nil, err, err
+	}
+
+	if inPackageName && !fileRenameSupported {
+		err := errors.New("can't rename packages: LSP client does not support file renaming")
+		return nil, err, err
+	}
+
 	ctx, done := event.Start(ctx, "source.PrepareRename")
 	defer done()
 
@@ -95,6 +118,41 @@
 	ctx, done := event.Start(ctx, "source.Rename")
 	defer done()
 
+	pgf, err := s.ParseGo(ctx, f, ParseFull)
+	if err != nil {
+		return nil, err
+	}
+	inPackageName, err := isInPackageName(ctx, s, f, pgf, pp)
+	if err != nil {
+		return nil, err
+	}
+
+	if inPackageName {
+		renamingPkg, err := s.PackageForFile(ctx, f.URI(), TypecheckAll, NarrowestPackage)
+		if err != nil {
+			return nil, err
+		}
+
+		result := make(map[span.URI][]protocol.TextEdit)
+		// Rename internal references to the package in the renaming package
+		// TODO(dle): need more investigation on case when pkg.GoFiles != pkg.CompiledGoFiles if using cgo.
+		for _, f := range renamingPkg.CompiledGoFiles() {
+			pkgNameMappedRange := NewMappedRange(f.Tok, f.Mapper, f.File.Name.Pos(), f.File.Name.End())
+			rng, err := pkgNameMappedRange.Range()
+			if err != nil {
+				return nil, err
+			}
+			result[f.URI] = []protocol.TextEdit{
+				{
+					Range:   rng,
+					NewText: newName,
+				},
+			}
+		}
+
+		return result, nil
+	}
+
 	qos, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp)
 	if err != nil {
 		return nil, err
@@ -159,6 +217,7 @@
 	if err != nil {
 		return nil, err
 	}
+
 	result := make(map[span.URI][]protocol.TextEdit)
 	for uri, edits := range changes {
 		// These edits should really be associated with FileHandles for maximal correctness.
@@ -238,15 +297,15 @@
 				continue
 			}
 			lines := strings.Split(comment.Text, "\n")
-			tok := r.fset.File(comment.Pos())
-			commentLine := tok.Position(comment.Pos()).Line
+			tokFile := r.fset.File(comment.Pos())
+			commentLine := tokFile.Line(comment.Pos())
 			for i, line := range lines {
 				lineStart := comment.Pos()
 				if i > 0 {
-					lineStart = tok.LineStart(commentLine + i)
+					lineStart = tokFile.LineStart(commentLine + i)
 				}
 				for _, locs := range docRegexp.FindAllIndex([]byte(line), -1) {
-					rng := span.NewRange(r.fset, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1]))
+					rng := span.NewRange(tokFile, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1]))
 					spn, err := rng.Span()
 					if err != nil {
 						return nil, err
@@ -265,7 +324,7 @@
 
 // docComment returns the doc for an identifier.
 func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup {
-	_, nodes, _ := pathEnclosingInterval(r.fset, pkg, id.Pos(), id.End())
+	_, tokFile, nodes, _ := pathEnclosingInterval(r.fset, pkg, id.Pos(), id.End())
 	for _, node := range nodes {
 		switch decl := node.(type) {
 		case *ast.FuncDecl:
@@ -294,25 +353,14 @@
 				return nil
 			}
 
-			var file *ast.File
-			for _, f := range pkg.GetSyntax() {
-				if f.Pos() <= id.Pos() && id.Pos() <= f.End() {
-					file = f
-					break
-				}
-			}
-			if file == nil {
-				return nil
-			}
-
-			identLine := r.fset.Position(id.Pos()).Line
-			for _, comment := range file.Comments {
+			identLine := tokFile.Line(id.Pos())
+			for _, comment := range nodes[len(nodes)-1].(*ast.File).Comments {
 				if comment.Pos() > id.Pos() {
 					// Comment is after the identifier.
 					continue
 				}
 
-				lastCommentLine := r.fset.Position(comment.End()).Line
+				lastCommentLine := tokFile.Line(comment.End())
 				if lastCommentLine+1 == identLine {
 					return comment
 				}
@@ -328,7 +376,7 @@
 func (r *renamer) updatePkgName(pkgName *types.PkgName) (*diff.TextEdit, error) {
 	// Modify ImportSpec syntax to add or remove the Name as needed.
 	pkg := r.packages[pkgName.Pkg()]
-	_, path, _ := pathEnclosingInterval(r.fset, pkg, pkgName.Pos(), pkgName.Pos())
+	_, tokFile, path, _ := pathEnclosingInterval(r.fset, pkg, pkgName.Pos(), pkgName.Pos())
 	if len(path) < 2 {
 		return nil, fmt.Errorf("no path enclosing interval for %s", pkgName.Name())
 	}
@@ -350,7 +398,7 @@
 		EndPos: spec.EndPos,
 	}
 
-	rng := span.NewRange(r.fset, spec.Pos(), spec.End())
+	rng := span.NewRange(tokFile, spec.Pos(), spec.End())
 	spn, err := rng.Span()
 	if err != nil {
 		return nil, err
diff --git a/internal/lsp/source/rename_check.go b/internal/lsp/source/rename_check.go
index b17f9b8..6fb7ddf 100644
--- a/internal/lsp/source/rename_check.go
+++ b/internal/lsp/source/rename_check.go
@@ -372,7 +372,7 @@
 	if !ok {
 		return
 	}
-	pkg, path, _ := pathEnclosingInterval(r.fset, fromPkg, from.Pos(), from.Pos())
+	pkg, _, path, _ := pathEnclosingInterval(r.fset, fromPkg, from.Pos(), from.Pos())
 	if pkg == nil || path == nil {
 		return
 	}
@@ -821,13 +821,13 @@
 	return nil
 }
 
-// pathEnclosingInterval returns the Package and ast.Node that
+// pathEnclosingInterval returns the Package, token.File, and ast.Node that
 // contain source interval [start, end), and all the node's ancestors
 // up to the AST root.  It searches all ast.Files of all packages.
 // exact is defined as for astutil.PathEnclosingInterval.
 //
 // The zero value is returned if not found.
-func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, path []ast.Node, exact bool) {
+func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, tokFile *token.File, path []ast.Node, exact bool) {
 	pkgs := []Package{pkg}
 	for _, f := range pkg.GetSyntax() {
 		for _, imp := range f.Imports {
@@ -840,35 +840,36 @@
 			}
 			importPkg, err := pkg.GetImport(importPath)
 			if err != nil {
-				return nil, nil, false
+				return nil, nil, nil, false
 			}
 			pkgs = append(pkgs, importPkg)
 		}
 	}
 	for _, p := range pkgs {
 		for _, f := range p.GetSyntax() {
-			if f.Pos() == token.NoPos {
+			if !f.Pos().IsValid() {
 				// This can happen if the parser saw
 				// too many errors and bailed out.
 				// (Use parser.AllErrors to prevent that.)
 				continue
 			}
-			if !tokenFileContainsPos(fset.File(f.Pos()), start) {
+			tokFile := fset.File(f.Pos())
+			if !tokenFileContainsPos(tokFile, start) {
 				continue
 			}
 			if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
-				return pkg, path, exact
+				return pkg, tokFile, path, exact
 			}
 		}
 	}
-	return nil, nil, false
+	return nil, nil, nil, false
 }
 
 // TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
 func tokenFileContainsPos(tf *token.File, pos token.Pos) bool {
 	p := int(pos)
 	base := tf.Base()
-	return base <= p && p < base+tf.Size()
+	return base <= p && p <= base+tf.Size()
 }
 
 func objectKind(obj types.Object) string {
diff --git a/internal/lsp/source/signature_help.go b/internal/lsp/source/signature_help.go
index 813f67e..5b087e8 100644
--- a/internal/lsp/source/signature_help.go
+++ b/internal/lsp/source/signature_help.go
@@ -98,20 +98,8 @@
 		if err != nil {
 			return nil, 0, err
 		}
-		node, err := snapshot.PosToDecl(ctx, declPkg, obj.Pos())
-		if err != nil {
-			return nil, 0, err
-		}
-		rng, err := objToMappedRange(snapshot, pkg, obj)
-		if err != nil {
-			return nil, 0, err
-		}
-		decl := Declaration{
-			obj:  obj,
-			node: node,
-		}
-		decl.MappedRange = append(decl.MappedRange, rng)
-		d, err := FindHoverContext(ctx, snapshot, pkg, decl.obj, decl.node, nil)
+		node, _ := FindDeclAndField(declPkg.GetSyntax(), obj.Pos()) // may be nil
+		d, err := FindHoverContext(ctx, snapshot, pkg, obj, node, nil)
 		if err != nil {
 			return nil, 0, err
 		}
diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go
index 9218f9d..5fdcc0f 100644
--- a/internal/lsp/source/source_test.go
+++ b/internal/lsp/source/source_test.go
@@ -49,16 +49,16 @@
 func testSource(t *testing.T, datum *tests.Data) {
 	ctx := tests.Context(t)
 
-	cache := cache.New(nil)
+	cache := cache.New(nil, nil, nil)
 	session := cache.NewSession(ctx)
 	options := source.DefaultOptions().Clone()
 	tests.DefaultOptions(options)
 	options.SetEnvSlice(datum.Config.Env)
 	view, _, release, err := session.NewView(ctx, "source_test", span.URIFromPath(datum.Config.Dir), options)
-	release()
 	if err != nil {
 		t.Fatal(err)
 	}
+	release()
 	defer view.Shutdown(ctx)
 
 	// Enable type error analyses for tests.
@@ -968,14 +968,12 @@
 }
 
 // These are pure LSP features, no source level functionality to be tested.
-func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {}
-
-func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) {
-}
-func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {}
-func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span)   {}
-func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens)   {}
-func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string)     {}
+func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link)                          {}
+func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actions []tests.SuggestedFix, want int) {}
+func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span)                  {}
+func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span)                    {}
+func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens)                    {}
+func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string)                      {}
 
 func spanToRange(data *tests.Data, spn span.Span) (*protocol.ColumnMapper, protocol.Range, error) {
 	m, err := data.Mapper(spn.URI())
diff --git a/internal/lsp/source/types_format.go b/internal/lsp/source/types_format.go
index 93344e0..756d02d 100644
--- a/internal/lsp/source/types_format.go
+++ b/internal/lsp/source/types_format.go
@@ -259,10 +259,11 @@
 		return types.TypeString(obj.Type(), qf)
 	}
 
-	expr, err := varType(ctx, snapshot, pkg, obj)
-	if err != nil {
+	_, field := FindDeclAndField(pkg.GetSyntax(), obj.Pos())
+	if field == nil {
 		return types.TypeString(obj.Type(), qf)
 	}
+	expr := field.Type
 
 	// If the given expr refers to a type parameter, then use the
 	// object's Type instead of the type parameter declaration. This helps
@@ -286,18 +287,6 @@
 	return fmted
 }
 
-// varType returns the type expression for a *types.Var.
-func varType(ctx context.Context, snapshot Snapshot, pkg Package, obj *types.Var) (ast.Expr, error) {
-	field, err := snapshot.PosToField(ctx, pkg, obj.Pos())
-	if err != nil {
-		return nil, err
-	}
-	if field == nil {
-		return nil, fmt.Errorf("no declaration for object %s", obj.Name())
-	}
-	return field.Type, nil
-}
-
 // qualifyExpr applies the "pkgName." prefix to any *ast.Ident in the expr.
 func qualifyExpr(expr ast.Expr, srcpkg, pkg Package, clonedInfo map[token.Pos]*types.PkgName, qf types.Qualifier) ast.Expr {
 	ast.Inspect(expr, func(n ast.Node) bool {
diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go
index b8a7fc9..78448af 100644
--- a/internal/lsp/source/util.go
+++ b/internal/lsp/source/util.go
@@ -30,26 +30,22 @@
 	m         *protocol.ColumnMapper // a mapper of the edited source (package.GoFiles)
 }
 
-// NewMappedRange returns a MappedRange for the given start and end token.Pos.
+// NewMappedRange returns a MappedRange for the given file and valid start/end token.Pos.
 //
 // By convention, start and end are assumed to be positions in the compiled (==
 // type checked) source, whereas the column mapper m maps positions in the
-// user-edited source. Note that these may not be the same, as when using CGo:
+// user-edited source. Note that these may not be the same, as when using goyacc or CGo:
 // CompiledGoFiles contains generated files, whose positions (via
 // token.File.Position) point to locations in the edited file -- the file
 // containing `import "C"`.
-func NewMappedRange(fset *token.FileSet, m *protocol.ColumnMapper, start, end token.Pos) MappedRange {
-	if tf := fset.File(start); tf == nil {
-		bug.Report("nil file", nil)
-	} else {
-		mapped := m.TokFile.Name()
-		adjusted := tf.PositionFor(start, true) // adjusted position
-		if adjusted.Filename != mapped {
-			bug.Reportf("mapped file %q does not match start position file %q", mapped, adjusted.Filename)
-		}
+func NewMappedRange(file *token.File, m *protocol.ColumnMapper, start, end token.Pos) MappedRange {
+	mapped := m.TokFile.Name()
+	adjusted := file.PositionFor(start, true) // adjusted position
+	if adjusted.Filename != mapped {
+		bug.Reportf("mapped file %q does not match start position file %q", mapped, adjusted.Filename)
 	}
 	return MappedRange{
-		spanRange: span.NewRange(fset, start, end),
+		spanRange: span.NewRange(file, start, end),
 		m:         m,
 	}
 }
@@ -134,7 +130,10 @@
 	return mrng.Range()
 }
 
+// objToMappedRange returns the MappedRange for the object's declaring
+// identifier (or string literal, for an import).
 func objToMappedRange(snapshot Snapshot, pkg Package, obj types.Object) (MappedRange, error) {
+	nameLen := len(obj.Name())
 	if pkgName, ok := obj.(*types.PkgName); ok {
 		// An imported Go package has a package-local, unqualified name.
 		// When the name matches the imported package name, there is no
@@ -147,29 +146,35 @@
 		// When the identifier does not appear in the source, have the range
 		// of the object be the import path, including quotes.
 		if pkgName.Imported().Name() == pkgName.Name() {
-			return posToMappedRange(snapshot, pkg, obj.Pos(), obj.Pos()+token.Pos(len(pkgName.Imported().Path())+2))
+			nameLen = len(pkgName.Imported().Path()) + len(`""`)
 		}
 	}
-	return nameToMappedRange(snapshot, pkg, obj.Pos(), obj.Name())
+	return posToMappedRange(snapshot, pkg, obj.Pos(), obj.Pos()+token.Pos(nameLen))
 }
 
-func nameToMappedRange(snapshot Snapshot, pkg Package, pos token.Pos, name string) (MappedRange, error) {
-	return posToMappedRange(snapshot, pkg, pos, pos+token.Pos(len(name)))
-}
-
+// posToMappedRange returns the MappedRange for the given [start, end) span,
+// which must be among the transitive dependencies of pkg.
 func posToMappedRange(snapshot Snapshot, pkg Package, pos, end token.Pos) (MappedRange, error) {
-	logicalFilename := snapshot.FileSet().File(pos).Position(pos).Filename
+	tokFile := snapshot.FileSet().File(pos)
+	// Subtle: it is not safe to simplify this to tokFile.Name
+	// because, due to //line directives, a Position within a
+	// token.File may have a different filename than the File itself.
+	logicalFilename := tokFile.Position(pos).Filename
 	pgf, _, err := findFileInDeps(pkg, span.URIFromPath(logicalFilename))
 	if err != nil {
 		return MappedRange{}, err
 	}
 	if !pos.IsValid() {
-		return MappedRange{}, fmt.Errorf("invalid position for %v", pos)
+		return MappedRange{}, fmt.Errorf("invalid start position")
 	}
 	if !end.IsValid() {
-		return MappedRange{}, fmt.Errorf("invalid position for %v", end)
+		return MappedRange{}, fmt.Errorf("invalid end position")
 	}
-	return NewMappedRange(snapshot.FileSet(), pgf.Mapper, pos, end), nil
+	// It is fishy that pgf.Mapper (from the parsed Go file) is
+	// accompanied here not by pgf.Tok but by tokFile from the global
+	// FileSet, which is a distinct token.File that doesn't
+	// contain [pos,end). TODO(adonovan): clean this up.
+	return NewMappedRange(tokFile, pgf.Mapper, pos, end), nil
 }
 
 // Matches cgo generated comment as well as the proposed standard:
@@ -306,15 +311,15 @@
 	for _, pkg := range pkgs {
 		parsed, err := pkg.File(uri)
 		if err != nil {
+			// TODO(adonovan): should this be a bug.Report or log.Fatal?
+			// The logic in Identifier seems to think so.
+			// Should it be a postcondition of PackagesForFile?
+			// And perhaps PackagesForFile should return the PGFs too.
 			return nil, err
 		}
-		if parsed == nil {
-			continue
+		if parsed != nil && parsed.Tok.Base() == tok.Base() {
+			return pkg, nil
 		}
-		if parsed.Tok.Base() != tok.Base() {
-			continue
-		}
-		return pkg, nil
 	}
 	return nil, fmt.Errorf("no package for given file position")
 }
@@ -545,9 +550,13 @@
 	if i == -1 {
 		return true
 	}
+	// TODO(rfindley): this looks wrong: IsCommandLineArguments is meant to
+	// operate on package IDs, not package paths.
 	if IsCommandLineArguments(string(pkgPath)) {
 		return true
 	}
+	// TODO(rfindley): this is wrong. mod.testx/p should not be able to
+	// import mod.test/internal: https://go.dev/play/p/-Ca6P-E4V4q
 	return strings.HasPrefix(string(pkgPath), string(importPkgPath[:i]))
 }
 
@@ -555,6 +564,8 @@
 // "command-line-arguments" package, which is a package with an unknown ID
 // created by the go command. It can have a test variant, which is why callers
 // should not check that a value equals "command-line-arguments" directly.
+//
+// TODO(rfindley): this should accept a PackageID.
 func IsCommandLineArguments(s string) bool {
 	return strings.Contains(s, "command-line-arguments")
 }
diff --git a/internal/lsp/source/util_test.go b/internal/lsp/source/util_test.go
index 5d4e98f..fe505e4 100644
--- a/internal/lsp/source/util_test.go
+++ b/internal/lsp/source/util_test.go
@@ -41,7 +41,7 @@
 
 	start := cf.Pos(bytes.Index(compiled, []byte("a𐐀b")))
 	end := start + token.Pos(len("a𐐀b"))
-	mr := NewMappedRange(fset, mapper, start, end)
+	mr := NewMappedRange(cf, mapper, start, end)
 	gotRange, err := mr.Range()
 	if err != nil {
 		t.Fatal(err)
diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go
index 73e1b7f..f0d22c7 100644
--- a/internal/lsp/source/view.go
+++ b/internal/lsp/source/view.go
@@ -79,17 +79,6 @@
 	// If the file is not available, returns nil and an error.
 	ParseGo(ctx context.Context, fh FileHandle, mode ParseMode) (*ParsedGoFile, error)
 
-	// PosToField is a cache of *ast.Fields by token.Pos. This allows us
-	// to quickly find corresponding *ast.Field node given a *types.Var.
-	// We must refer to the AST to render type aliases properly when
-	// formatting signatures and other types.
-	PosToField(ctx context.Context, pkg Package, pos token.Pos) (*ast.Field, error)
-
-	// PosToDecl maps certain objects' positions to their surrounding
-	// ast.Decl. This mapping is used when building the documentation
-	// string for the objects.
-	PosToDecl(ctx context.Context, pkg Package, pos token.Pos) (ast.Decl, error)
-
 	// DiagnosePackage returns basic diagnostics, including list, parse, and type errors
 	// for pkg, grouped by file.
 	DiagnosePackage(ctx context.Context, pkg Package) (map[span.URI][]*Diagnostic, error)
@@ -147,8 +136,8 @@
 	// IsBuiltin reports whether uri is part of the builtin package.
 	IsBuiltin(ctx context.Context, uri span.URI) bool
 
-	// PackagesForFile returns the packages that this file belongs to, checked
-	// in mode.
+	// PackagesForFile returns an unordered list of packages that contain
+	// the file denoted by uri, type checked in the specified mode.
 	PackagesForFile(ctx context.Context, uri span.URI, mode TypecheckMode, includeTestVariants bool) ([]Package, error)
 
 	// PackageForFile returns a single package that this file belongs to,
@@ -256,10 +245,14 @@
 	// original one will be.
 	SetOptions(context.Context, *Options) (View, error)
 
-	// Snapshot returns the current snapshot for the view.
+	// Snapshot returns the current snapshot for the view, and a
+	// release function that must be called when the Snapshot is
+	// no longer needed.
 	Snapshot(ctx context.Context) (Snapshot, func())
 
-	// Rebuild rebuilds the current view, replacing the original view in its session.
+	// Rebuild rebuilds the current view, replacing the original
+	// view in its session.  It returns a Snapshot and a release
+	// function that must be called when the Snapshot is no longer needed.
 	Rebuild(ctx context.Context) (Snapshot, func(), error)
 
 	// IsGoPrivatePath reports whether target is a private import path, as identified
@@ -293,6 +286,7 @@
 	// Source code used to build the AST. It may be different from the
 	// actual content of the file if we have fixed the AST.
 	Src      []byte
+	Fixed    bool
 	Mapper   *protocol.ColumnMapper
 	ParseErr scanner.ErrorList
 }
@@ -343,7 +337,8 @@
 	// NewView creates a new View, returning it and its first snapshot. If a
 	// non-empty tempWorkspace directory is provided, the View will record a copy
 	// of its gopls workspace module in that directory, so that client tooling
-	// can execute in the same main module.
+	// can execute in the same main module.  On success it also returns a release
+	// function that must be called when the Snapshot is no longer needed.
 	NewView(ctx context.Context, name string, folder span.URI, options *Options) (View, Snapshot, func(), error)
 
 	// Cache returns the cache that created this session, for debugging only.
@@ -367,7 +362,9 @@
 	// DidModifyFile reports a file modification to the session. It returns
 	// the new snapshots after the modifications have been applied, paired with
 	// the affected file URIs for those snapshots.
-	DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, []func(), error)
+	// On success, it returns a release function that
+	// must be called when the snapshots are no longer needed.
+	DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, func(), error)
 
 	// ExpandModificationsToDirectories returns the set of changes with the
 	// directory changes removed and expanded to include all of the files in
@@ -478,6 +475,10 @@
 	ParseFull
 )
 
+// AllParseModes contains all possible values of ParseMode.
+// It is used for cache invalidation on a file content change.
+var AllParseModes = []ParseMode{ParseHeader, ParseExported, ParseFull}
+
 // TypecheckMode controls what kind of parsing should be done (see ParseMode)
 // while type checking a package.
 type TypecheckMode int
@@ -647,11 +648,15 @@
 	ParseMode() ParseMode
 }
 
+// A CriticalError is a workspace-wide error that generally prevents gopls from
+// functioning correctly. In the presence of critical errors, other diagnostics
+// in the workspace may not make sense.
 type CriticalError struct {
 	// MainError is the primary error. Must be non-nil.
 	MainError error
-	// DiagList contains any supplemental (structured) diagnostics.
-	DiagList []*Diagnostic
+
+	// Diagnostics contains any supplemental (structured) diagnostics.
+	Diagnostics []*Diagnostic
 }
 
 // An Diagnostic corresponds to an LSP Diagnostic.
diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go
index 0822de0..e9da569 100644
--- a/internal/lsp/source/workspace_symbol.go
+++ b/internal/lsp/source/workspace_symbol.go
@@ -8,7 +8,9 @@
 	"context"
 	"fmt"
 	"go/types"
+	"path"
 	"path/filepath"
+	"regexp"
 	"runtime"
 	"sort"
 	"strings"
@@ -83,17 +85,19 @@
 // []string{"myType.field"} or []string{"myType.", "field"}.
 //
 // See the comment for symbolCollector for more information.
-type symbolizer func(name string, pkg Metadata, m matcherFunc) ([]string, float64)
+//
+// The space argument is an empty slice with spare capacity that may be used
+// to allocate the result.
+type symbolizer func(space []string, name string, pkg Metadata, m matcherFunc) ([]string, float64)
 
-func fullyQualifiedSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
-	_, score := dynamicSymbolMatch(name, pkg, matcher)
-	if score > 0 {
-		return []string{pkg.PackagePath(), ".", name}, score
+func fullyQualifiedSymbolMatch(space []string, name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
+	if _, score := dynamicSymbolMatch(space, name, pkg, matcher); score > 0 {
+		return append(space, pkg.PackagePath(), ".", name), score
 	}
 	return nil, 0
 }
 
-func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
+func dynamicSymbolMatch(space []string, name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
 	var score float64
 
 	endsInPkgName := strings.HasSuffix(pkg.PackagePath(), pkg.PackageName())
@@ -101,14 +105,14 @@
 	// If the package path does not end in the package name, we need to check the
 	// package-qualified symbol as an extra pass first.
 	if !endsInPkgName {
-		pkgQualified := []string{pkg.PackageName(), ".", name}
+		pkgQualified := append(space, pkg.PackageName(), ".", name)
 		idx, score := matcher(pkgQualified)
 		nameStart := len(pkg.PackageName()) + 1
 		if score > 0 {
 			// If our match is contained entirely within the unqualified portion,
 			// just return that.
 			if idx >= nameStart {
-				return []string{name}, score
+				return append(space, name), score
 			}
 			// Lower the score for matches that include the package name.
 			return pkgQualified, score * 0.8
@@ -116,13 +120,13 @@
 	}
 
 	// Now try matching the fully qualified symbol.
-	fullyQualified := []string{pkg.PackagePath(), ".", name}
+	fullyQualified := append(space, pkg.PackagePath(), ".", name)
 	idx, score := matcher(fullyQualified)
 
 	// As above, check if we matched just the unqualified symbol name.
 	nameStart := len(pkg.PackagePath()) + 1
 	if idx >= nameStart {
-		return []string{name}, score
+		return append(space, name), score
 	}
 
 	// If our package path ends in the package name, we'll have skipped the
@@ -131,7 +135,7 @@
 	if endsInPkgName && idx >= 0 {
 		pkgStart := len(pkg.PackagePath()) - len(pkg.PackageName())
 		if idx >= pkgStart {
-			return []string{pkg.PackageName(), ".", name}, score
+			return append(space, pkg.PackageName(), ".", name), score
 		}
 	}
 
@@ -140,8 +144,8 @@
 	return fullyQualified, score * 0.6
 }
 
-func packageSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
-	qualified := []string{pkg.PackageName(), ".", name}
+func packageSymbolMatch(space []string, name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
+	qualified := append(space, pkg.PackageName(), ".", name)
 	if _, s := matcher(qualified); s > 0 {
 		return qualified, s
 	}
@@ -303,11 +307,12 @@
 		roots = append(roots, strings.TrimRight(string(v.Folder()), "/"))
 
 		filters := v.Options().DirectoryFilters
+		filterer := NewFilterer(filters)
 		folder := filepath.ToSlash(v.Folder().Filename())
 		for uri, syms := range snapshot.Symbols(ctx) {
 			norm := filepath.ToSlash(uri.Filename())
 			nm := strings.TrimPrefix(norm, folder)
-			if FiltersDisallow(nm, filters) {
+			if filterer.Disallow(nm) {
 				continue
 			}
 			// Only scan each file once.
@@ -356,28 +361,69 @@
 	return unified.results(), nil
 }
 
-// FilterDisallow is code from the body of cache.pathExcludedByFilter in cache/view.go
-// Exporting and using that function would cause an import cycle.
-// Moving it here and exporting it would leave behind view_test.go.
-// (This code is exported and used in the body of cache.pathExcludedByFilter)
-func FiltersDisallow(path string, filters []string) bool {
+type Filterer struct {
+	// Whether a filter is excluded depends on the operator (first char of the raw filter).
+	// The filters and excluded slices must therefore have the same length.
+	filters  []*regexp.Regexp
+	excluded []bool
+}
+
+// NewFilterer computes the regular expression form of each raw filter.
+func NewFilterer(rawFilters []string) *Filterer {
+	var f Filterer
+	for _, filter := range rawFilters {
+		filter = path.Clean(filepath.ToSlash(filter))
+		op, prefix := filter[0], filter[1:]
+		// convertFilterToRegexp adds "/" at the end of prefix to handle cases where a filter is a prefix of another filter.
+		// For example, it prevents [+foobar, -foo] from excluding "foobar".
+		f.filters = append(f.filters, convertFilterToRegexp(filepath.ToSlash(prefix)))
+		f.excluded = append(f.excluded, op == '-')
+	}
+
+	return &f
+}
+
+// Disallow reports whether the path is excluded by the filterer's filters.
+func (f *Filterer) Disallow(path string) bool {
 	path = strings.TrimPrefix(path, "/")
 	var excluded bool
-	for _, filter := range filters {
-		op, prefix := filter[0], filter[1:]
-		// Non-empty prefixes have to be precise directory matches.
-		if prefix != "" {
-			prefix = prefix + "/"
-			path = path + "/"
+
+	for i, filter := range f.filters {
+		path := path
+		if !strings.HasSuffix(path, "/") {
+			path += "/"
 		}
-		if !strings.HasPrefix(path, prefix) {
+		if !filter.MatchString(path) {
 			continue
 		}
-		excluded = op == '-'
+		excluded = f.excluded[i]
 	}
+
 	return excluded
 }
 
+// convertFilterToRegexp replaces glob-like operator substrings in a file path with their equivalent regex forms.
+// Supported glob-like operators:
+//   - **: match zero or more complete path segments
+func convertFilterToRegexp(filter string) *regexp.Regexp {
+	if filter == "" {
+		return regexp.MustCompile(".*")
+	}
+	var ret strings.Builder
+	ret.WriteString("^")
+	segs := strings.Split(filter, "/")
+	for _, seg := range segs {
+		if seg == "**" {
+			ret.WriteString(".*")
+		} else {
+			ret.WriteString(regexp.QuoteMeta(seg))
+		}
+		ret.WriteString("/")
+	}
+
+	return regexp.MustCompile(ret.String())
+}
+
 // symbolFile holds symbol information for a single file.
 type symbolFile struct {
 	uri  span.URI
@@ -387,8 +433,9 @@
 
 // matchFile scans a symbol file and adds matching symbols to the store.
 func matchFile(store *symbolStore, symbolizer symbolizer, matcher matcherFunc, roots []string, i symbolFile) {
+	space := make([]string, 0, 3)
 	for _, sym := range i.syms {
-		symbolParts, score := symbolizer(sym.Name, i.md, matcher)
+		symbolParts, score := symbolizer(space, sym.Name, i.md, matcher)
 
 		// Check if the score is too low before applying any downranking.
 		if store.tooLow(score) {
diff --git a/internal/lsp/source/workspace_symbol_test.go b/internal/lsp/source/workspace_symbol_test.go
index 314ef78..24fb8b4 100644
--- a/internal/lsp/source/workspace_symbol_test.go
+++ b/internal/lsp/source/workspace_symbol_test.go
@@ -44,3 +44,93 @@
 		}
 	}
 }
+
+func TestFiltererDisallow(t *testing.T) {
+	tests := []struct {
+		filters  []string
+		included []string
+		excluded []string
+	}{
+		{
+			[]string{"+**/c.go"},
+			[]string{"a/c.go", "a/b/c.go"},
+			[]string{},
+		},
+		{
+			[]string{"+a/**/c.go"},
+			[]string{"a/b/c.go", "a/b/d/c.go", "a/c.go"},
+			[]string{},
+		},
+		{
+			[]string{"-a/c.go", "+a/**"},
+			[]string{"a/c.go"},
+			[]string{},
+		},
+		{
+			[]string{"+a/**/c.go", "-**/c.go"},
+			[]string{},
+			[]string{"a/b/c.go"},
+		},
+		{
+			[]string{"+a/**/c.go", "-a/**"},
+			[]string{},
+			[]string{"a/b/c.go"},
+		},
+		{
+			[]string{"+**/c.go", "-a/**/c.go"},
+			[]string{},
+			[]string{"a/b/c.go"},
+		},
+		{
+			[]string{"+foobar", "-foo"},
+			[]string{"foobar", "foobar/a"},
+			[]string{"foo", "foo/a"},
+		},
+		{
+			[]string{"+", "-"},
+			[]string{},
+			[]string{"foobar", "foobar/a", "foo", "foo/a"},
+		},
+		{
+			[]string{"-", "+"},
+			[]string{"foobar", "foobar/a", "foo", "foo/a"},
+			[]string{},
+		},
+		{
+			[]string{"-a/**/b/**/c.go"},
+			[]string{},
+			[]string{"a/x/y/z/b/f/g/h/c.go"},
+		},
+		// tests for unsupported glob operators
+		{
+			[]string{"+**/c.go", "-a/*/c.go"},
+			[]string{"a/b/c.go"},
+			[]string{},
+		},
+		{
+			[]string{"+**/c.go", "-a/?/c.go"},
+			[]string{"a/b/c.go"},
+			[]string{},
+		},
+		{
+			[]string{"-b"}, // should only filter paths prefixed with the "b" directory
+			[]string{"a/b/c.go", "bb"},
+			[]string{"b/c/d.go", "b"},
+		},
+	}
+
+	for _, test := range tests {
+		filterer := NewFilterer(test.filters)
+		for _, inc := range test.included {
+			if filterer.Disallow(inc) {
+				t.Errorf("Filters %v excluded %v, wanted included", test.filters, inc)
+			}
+		}
+
+		for _, exc := range test.excluded {
+			if !filterer.Disallow(exc) {
+				t.Errorf("Filters %v included %v, wanted excluded", test.filters, exc)
+			}
+		}
+	}
+}
diff --git a/internal/lsp/testdata/deep/deep.go b/internal/lsp/testdata/deep/deep.go
index 6ed5ff8..6908824 100644
--- a/internal/lsp/testdata/deep/deep.go
+++ b/internal/lsp/testdata/deep/deep.go
@@ -29,6 +29,13 @@
 }
 
 func _() {
+	var cork struct{ err error }
+	cork.err         //@item(deepCorkErr, "cork.err", "error", "field")
+	context          //@item(deepContextPkg, "context", "\"context\"", "package")
+	var _ error = co //@rank(" //", deepCorkErr, deepContextPkg)
+}
+
+func _() {
 	// deepCircle is circular.
 	type deepCircle struct {
 		*deepCircle
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
index c49e5d6..cbb70a0 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
+++ b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go
@@ -1,6 +1,6 @@
 package extract
 
 func _() {
-	var _ = 1 + 2 //@suggestedfix("1", "refactor.extract")
-	var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract")
+	var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "")
+	var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "")
 }
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
index 00ee7b4..3fd9b32 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
+++ b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden
@@ -3,16 +3,16 @@
 
 func _() {
 	x := 1
-	var _ = x + 2 //@suggestedfix("1", "refactor.extract")
-	var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract")
+	var _ = x + 2 //@suggestedfix("1", "refactor.extract", "")
+	var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "")
 }
 
 -- suggestedfix_extract_basic_lit_5_10 --
 package extract
 
 func _() {
-	var _ = 1 + 2 //@suggestedfix("1", "refactor.extract")
+	var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "")
 	x := 3 + 4
-	var _ = x //@suggestedfix("3 + 4", "refactor.extract")
+	var _ = x //@suggestedfix("3 + 4", "refactor.extract", "")
 }
 
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
index badc010..a20b45f 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
+++ b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go
@@ -3,7 +3,7 @@
 import "strconv"
 
 func _() {
-	x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
 	str := "1"
-	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
+	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
 }
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
index 74df67e..4423fc9 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
+++ b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden
@@ -5,9 +5,9 @@
 
 func _() {
 	x0 := append([]int{}, 1)
-	a := x0 //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	a := x0 //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
 	str := "1"
-	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
+	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
 }
 
 -- suggestedfix_extract_func_call_6_8 --
@@ -17,9 +17,9 @@
 
 func _() {
 	x := append([]int{}, 1)
-	x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
 	str := "1"
-	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
+	b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
 }
 
 -- suggestedfix_extract_func_call_8_12 --
@@ -28,9 +28,9 @@
 import "strconv"
 
 func _() {
-	x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract")
+	x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "")
 	str := "1"
 	x, x1 := strconv.Atoi(str)
-	b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract")
+	b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "")
 }
 
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go b/internal/lsp/testdata/extract/extract_variable/extract_scope.go
index 5dfcc36..c14ad70 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go
+++ b/internal/lsp/testdata/extract/extract_variable/extract_scope.go
@@ -5,9 +5,9 @@
 func _() {
 	x0 := 0
 	if true {
-		y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
+		y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "")
 	}
 	if true {
-		x1 := !false //@suggestedfix("!false", "refactor.extract")
+		x1 := !false //@suggestedfix("!false", "refactor.extract", "")
 	}
 }
diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
index e0e6464..1c2f64b 100644
--- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
+++ b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden
@@ -6,11 +6,11 @@
 func _() {
 	x0 := 0
 	if true {
-		y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
+		y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "")
 	}
 	if true {
 		x := !false
-		x1 := x //@suggestedfix("!false", "refactor.extract")
+		x1 := x //@suggestedfix("!false", "refactor.extract", "")
 	}
 }
 
@@ -23,10 +23,10 @@
 	x0 := 0
 	if true {
 		x := ast.CompositeLit{}
-		y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract")
+		y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "")
 	}
 	if true {
-		x1 := !false //@suggestedfix("!false", "refactor.extract")
+		x1 := !false //@suggestedfix("!false", "refactor.extract", "")
 	}
 }
 
diff --git a/internal/lsp/testdata/fillstruct/a.go b/internal/lsp/testdata/fillstruct/a.go
index 5c6df6c..4fb855d 100644
--- a/internal/lsp/testdata/fillstruct/a.go
+++ b/internal/lsp/testdata/fillstruct/a.go
@@ -8,20 +8,20 @@
 	foo int
 }
 
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type twoArgStruct struct {
 	foo int
 	bar string
 }
 
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type nestedStruct struct {
 	bar   string
 	basic basicStruct
 }
 
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
diff --git a/internal/lsp/testdata/fillstruct/a.go.golden b/internal/lsp/testdata/fillstruct/a.go.golden
index 5d6dbce..76789f0 100644
--- a/internal/lsp/testdata/fillstruct/a.go.golden
+++ b/internal/lsp/testdata/fillstruct/a.go.golden
@@ -11,23 +11,23 @@
 
 var _ = basicStruct{
 	foo: 0,
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type twoArgStruct struct {
 	foo int
 	bar string
 }
 
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type nestedStruct struct {
 	bar   string
 	basic basicStruct
 }
 
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a_18_22 --
 package fillstruct
@@ -40,7 +40,7 @@
 	foo int
 }
 
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type twoArgStruct struct {
 	foo int
@@ -50,16 +50,16 @@
 var _ = twoArgStruct{
 	foo: 0,
 	bar: "",
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type nestedStruct struct {
 	bar   string
 	basic basicStruct
 }
 
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a_25_22 --
 package fillstruct
@@ -72,14 +72,14 @@
 	foo int
 }
 
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type twoArgStruct struct {
 	foo int
 	bar string
 }
 
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type nestedStruct struct {
 	bar   string
@@ -89,9 +89,9 @@
 var _ = nestedStruct{
 	bar:   "",
 	basic: basicStruct{},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
-var _ = data.B{} //@suggestedfix("}", "refactor.rewrite")
+var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a_27_16 --
 package fillstruct
@@ -104,23 +104,23 @@
 	foo int
 }
 
-var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type twoArgStruct struct {
 	foo int
 	bar string
 }
 
-var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type nestedStruct struct {
 	bar   string
 	basic basicStruct
 }
 
-var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = data.B{
 	ExportedInt: 0,
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
diff --git a/internal/lsp/testdata/fillstruct/a2.go b/internal/lsp/testdata/fillstruct/a2.go
index 8e12a6b..b5e30a8 100644
--- a/internal/lsp/testdata/fillstruct/a2.go
+++ b/internal/lsp/testdata/fillstruct/a2.go
@@ -8,22 +8,22 @@
 	a  [2]string
 }
 
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStruct struct {
 	fn func(i int) int
 }
 
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructCompex struct {
 	fn func(i int, s string) (string, int)
 }
 
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructEmpty struct {
 	fn func()
 }
 
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
diff --git a/internal/lsp/testdata/fillstruct/a2.go.golden b/internal/lsp/testdata/fillstruct/a2.go.golden
index 78a6ee2..2eca3e3 100644
--- a/internal/lsp/testdata/fillstruct/a2.go.golden
+++ b/internal/lsp/testdata/fillstruct/a2.go.golden
@@ -15,25 +15,25 @@
 	c:  make(chan int),
 	c1: make(<-chan int),
 	a:  [2]string{},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStruct struct {
 	fn func(i int) int
 }
 
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructCompex struct {
 	fn func(i int, s string) (string, int)
 }
 
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructEmpty struct {
 	fn func()
 }
 
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a2_17_19 --
 package fillstruct
@@ -46,7 +46,7 @@
 	a  [2]string
 }
 
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStruct struct {
 	fn func(i int) int
@@ -55,19 +55,19 @@
 var _ = funStruct{
 	fn: func(i int) int {
 	},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructCompex struct {
 	fn func(i int, s string) (string, int)
 }
 
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructEmpty struct {
 	fn func()
 }
 
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a2_23_25 --
 package fillstruct
@@ -80,13 +80,13 @@
 	a  [2]string
 }
 
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStruct struct {
 	fn func(i int) int
 }
 
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructCompex struct {
 	fn func(i int, s string) (string, int)
@@ -95,13 +95,13 @@
 var _ = funStructCompex{
 	fn: func(i int, s string) (string, int) {
 	},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructEmpty struct {
 	fn func()
 }
 
-var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a2_29_24 --
 package fillstruct
@@ -114,19 +114,19 @@
 	a  [2]string
 }
 
-var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStruct struct {
 	fn func(i int) int
 }
 
-var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructCompex struct {
 	fn func(i int, s string) (string, int)
 }
 
-var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite")
+var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type funStructEmpty struct {
 	fn func()
@@ -135,5 +135,5 @@
 var _ = funStructEmpty{
 	fn: func() {
 	},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
diff --git a/internal/lsp/testdata/fillstruct/a3.go b/internal/lsp/testdata/fillstruct/a3.go
index 730db30..59cd9fa 100644
--- a/internal/lsp/testdata/fillstruct/a3.go
+++ b/internal/lsp/testdata/fillstruct/a3.go
@@ -14,7 +14,7 @@
 	Y *Foo
 }
 
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type importedStruct struct {
 	m  map[*ast.CompositeLit]ast.Field
@@ -25,7 +25,7 @@
 	st ast.CompositeLit
 }
 
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type pointerBuiltinStruct struct {
 	b *bool
@@ -33,10 +33,10 @@
 	i *int
 }
 
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = []ast.BasicLit{
-	{}, //@suggestedfix("}", "refactor.rewrite")
+	{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
diff --git a/internal/lsp/testdata/fillstruct/a3.go.golden b/internal/lsp/testdata/fillstruct/a3.go.golden
index 1d86729..a7c7baa 100644
--- a/internal/lsp/testdata/fillstruct/a3.go.golden
+++ b/internal/lsp/testdata/fillstruct/a3.go.golden
@@ -18,7 +18,7 @@
 var _ = Bar{
 	X: &Foo{},
 	Y: &Foo{},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type importedStruct struct {
 	m  map[*ast.CompositeLit]ast.Field
@@ -29,7 +29,7 @@
 	st ast.CompositeLit
 }
 
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type pointerBuiltinStruct struct {
 	b *bool
@@ -37,13 +37,13 @@
 	i *int
 }
 
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = []ast.BasicLit{
-	{}, //@suggestedfix("}", "refactor.rewrite")
+	{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a3_28_24 --
 package fillstruct
@@ -62,7 +62,7 @@
 	Y *Foo
 }
 
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type importedStruct struct {
 	m  map[*ast.CompositeLit]ast.Field
@@ -81,7 +81,7 @@
 	fn: func(ast_decl ast.DeclStmt) ast.Ellipsis {
 	},
 	st: ast.CompositeLit{},
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type pointerBuiltinStruct struct {
 	b *bool
@@ -89,13 +89,13 @@
 	i *int
 }
 
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = []ast.BasicLit{
-	{}, //@suggestedfix("}", "refactor.rewrite")
+	{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a3_36_30 --
 package fillstruct
@@ -114,7 +114,7 @@
 	Y *Foo
 }
 
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type importedStruct struct {
 	m  map[*ast.CompositeLit]ast.Field
@@ -125,7 +125,7 @@
 	st ast.CompositeLit
 }
 
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type pointerBuiltinStruct struct {
 	b *bool
@@ -137,13 +137,13 @@
 	b: new(bool),
 	s: new(string),
 	i: new(int),
-} //@suggestedfix("}", "refactor.rewrite")
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = []ast.BasicLit{
-	{}, //@suggestedfix("}", "refactor.rewrite")
+	{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a3_39_3 --
 package fillstruct
@@ -162,7 +162,7 @@
 	Y *Foo
 }
 
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type importedStruct struct {
 	m  map[*ast.CompositeLit]ast.Field
@@ -173,7 +173,7 @@
 	st ast.CompositeLit
 }
 
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type pointerBuiltinStruct struct {
 	b *bool
@@ -181,17 +181,17 @@
 	i *int
 }
 
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = []ast.BasicLit{
 	{
 		ValuePos: 0,
 		Kind:     0,
 		Value:    "",
-	}, //@suggestedfix("}", "refactor.rewrite")
+	}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
-var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite")
+var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 -- suggestedfix_a3_42_25 --
 package fillstruct
@@ -210,7 +210,7 @@
 	Y *Foo
 }
 
-var _ = Bar{} //@suggestedfix("}", "refactor.rewrite")
+var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type importedStruct struct {
 	m  map[*ast.CompositeLit]ast.Field
@@ -221,7 +221,7 @@
 	st ast.CompositeLit
 }
 
-var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 type pointerBuiltinStruct struct {
 	b *bool
@@ -229,15 +229,15 @@
 	i *int
 }
 
-var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite")
+var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 var _ = []ast.BasicLit{
-	{}, //@suggestedfix("}", "refactor.rewrite")
+	{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
 var _ = []ast.BasicLit{{
 	ValuePos: 0,
 	Kind:     0,
 	Value:    "",
-}} //@suggestedfix("}", "refactor.rewrite")
+}} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
diff --git a/internal/lsp/testdata/fillstruct/a4.go b/internal/lsp/testdata/fillstruct/a4.go
index 7833d33..5f52a55 100644
--- a/internal/lsp/testdata/fillstruct/a4.go
+++ b/internal/lsp/testdata/fillstruct/a4.go
@@ -22,18 +22,18 @@
 
 func fill() {
 	var x int
-	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var s string
-	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var n int
 	_ = []int{}
 	if true {
 		arr := []int{1, 2}
 	}
-	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var node *ast.CompositeLit
-	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
diff --git a/internal/lsp/testdata/fillstruct/a4.go.golden b/internal/lsp/testdata/fillstruct/a4.go.golden
index 109c6b5..b1e376f 100644
--- a/internal/lsp/testdata/fillstruct/a4.go.golden
+++ b/internal/lsp/testdata/fillstruct/a4.go.golden
@@ -25,20 +25,20 @@
 	var x int
 	var _ = iStruct{
 		X: x,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var s string
-	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var n int
 	_ = []int{}
 	if true {
 		arr := []int{1, 2}
 	}
-	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var node *ast.CompositeLit
-	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
 -- suggestedfix_a4_28_18 --
@@ -66,22 +66,22 @@
 
 func fill() {
 	var x int
-	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var s string
 	var _ = sStruct{
 		str: s,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var n int
 	_ = []int{}
 	if true {
 		arr := []int{1, 2}
 	}
-	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var node *ast.CompositeLit
-	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
 -- suggestedfix_a4_35_20 --
@@ -109,10 +109,10 @@
 
 func fill() {
 	var x int
-	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var s string
-	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var n int
 	_ = []int{}
@@ -123,10 +123,10 @@
 		num:   n,
 		strin: s,
 		arr:   []int{},
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var node *ast.CompositeLit
-	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
 -- suggestedfix_a4_38_23 --
@@ -154,21 +154,21 @@
 
 func fill() {
 	var x int
-	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var s string
-	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var n int
 	_ = []int{}
 	if true {
 		arr := []int{1, 2}
 	}
-	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite")
+	var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 
 	var node *ast.CompositeLit
 	var _ = assignStruct{
 		n: node,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go b/internal/lsp/testdata/fillstruct/fill_struct.go
index fccec13..3da9047 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct.go
@@ -17,10 +17,10 @@
 }
 
 func fill() {
-	a := StructA{}  //@suggestedfix("}", "refactor.rewrite")
-	b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
-	c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
+	a := StructA{}  //@suggestedfix("}", "refactor.rewrite", "Fill")
+	b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	if true {
-		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
+		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go.golden b/internal/lsp/testdata/fillstruct/fill_struct.go.golden
index 8d99703..de01a40 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct.go.golden
@@ -24,11 +24,11 @@
 		MapA:               map[int]string{},
 		Array:              []int{},
 		StructB:            StructB{},
-	}  //@suggestedfix("}", "refactor.rewrite")
-	b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
-	c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
+	}  //@suggestedfix("}", "refactor.rewrite", "Fill")
+	b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	if true {
-		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
+		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
 
@@ -52,13 +52,13 @@
 }
 
 func fill() {
-	a := StructA{}  //@suggestedfix("}", "refactor.rewrite")
+	a := StructA{}  //@suggestedfix("}", "refactor.rewrite", "Fill")
 	b := StructA2{
 		B: &StructB{},
-	} //@suggestedfix("}", "refactor.rewrite")
-	c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	if true {
-		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
+		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
 
@@ -82,13 +82,13 @@
 }
 
 func fill() {
-	a := StructA{}  //@suggestedfix("}", "refactor.rewrite")
-	b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
+	a := StructA{}  //@suggestedfix("}", "refactor.rewrite", "Fill")
+	b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	c := StructA3{
 		B: StructB{},
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	if true {
-		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite")
+		_ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
 
@@ -112,13 +112,13 @@
 }
 
 func fill() {
-	a := StructA{}  //@suggestedfix("}", "refactor.rewrite")
-	b := StructA2{} //@suggestedfix("}", "refactor.rewrite")
-	c := StructA3{} //@suggestedfix("}", "refactor.rewrite")
+	a := StructA{}  //@suggestedfix("}", "refactor.rewrite", "Fill")
+	b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	if true {
 		_ = StructA3{
 			B: StructB{},
-		} //@suggestedfix("}", "refactor.rewrite")
+		} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go b/internal/lsp/testdata/fillstruct/fill_struct_anon.go
index b5d2337..2c099a8 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct_anon.go
@@ -10,5 +10,5 @@
 }
 
 func fill() {
-	_ := StructAnon{} //@suggestedfix("}", "refactor.rewrite")
+	_ := StructAnon{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
index eb6ffd6..7cc9ac2 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden
@@ -15,6 +15,6 @@
 		a: struct{}{},
 		b: map[string]interface{}{},
 		c: map[string]struct{d int; e bool}{},
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go b/internal/lsp/testdata/fillstruct/fill_struct_nested.go
index 79eb84b..ab7be5a 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct_nested.go
@@ -10,6 +10,6 @@
 
 func nested() {
 	c := StructB{
-		StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite")
+		StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
index 30061a5..c902ee7 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden
@@ -13,7 +13,7 @@
 	c := StructB{
 		StructC: StructC{
 			unexportedInt: 0,
-		}, //@suggestedfix("}", "refactor.rewrite")
+		}, //@suggestedfix("}", "refactor.rewrite", "Fill")
 	}
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go b/internal/lsp/testdata/fillstruct/fill_struct_package.go
index 71f1248..edb88c4 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_package.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct_package.go
@@ -7,6 +7,6 @@
 )
 
 func unexported() {
-	a := data.B{}   //@suggestedfix("}", "refactor.rewrite")
-	_ = h2.Client{} //@suggestedfix("}", "refactor.rewrite")
+	a := data.B{}   //@suggestedfix("}", "refactor.rewrite", "Fill")
+	_ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
index 13c8570..57b2613 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden
@@ -10,8 +10,8 @@
 func unexported() {
 	a := data.B{
 		ExportedInt: 0,
-	}   //@suggestedfix("}", "refactor.rewrite")
-	_ = h2.Client{} //@suggestedfix("}", "refactor.rewrite")
+	}   //@suggestedfix("}", "refactor.rewrite", "Fill")
+	_ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
 -- suggestedfix_fill_struct_package_11_16 --
@@ -24,13 +24,13 @@
 )
 
 func unexported() {
-	a := data.B{}   //@suggestedfix("}", "refactor.rewrite")
+	a := data.B{}   //@suggestedfix("}", "refactor.rewrite", "Fill")
 	_ = h2.Client{
 		Transport: nil,
 		CheckRedirect: func(req *h2.Request, via []*h2.Request) error {
 		},
 		Jar:     nil,
 		Timeout: 0,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go b/internal/lsp/testdata/fillstruct/fill_struct_partial.go
index 97b517d..5de1722 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct_partial.go
@@ -14,11 +14,11 @@
 func fill() {
 	a := StructPartialA{
 		PrefilledInt: 5,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	b := StructPartialB{
 		/* this comment should disappear */
 		PrefilledInt: 7, // This comment should be blown away.
 		/* As should
 		this one */
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
index 2d063c1..3aa437a 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden
@@ -17,13 +17,13 @@
 		PrefilledInt:   5,
 		UnfilledInt:    0,
 		StructPartialB: StructPartialB{},
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	b := StructPartialB{
 		/* this comment should disappear */
 		PrefilledInt: 7, // This comment should be blown away.
 		/* As should
 		this one */
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
 -- suggestedfix_fill_struct_partial_23_2 --
@@ -43,10 +43,10 @@
 func fill() {
 	a := StructPartialA{
 		PrefilledInt: 5,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 	b := StructPartialB{
 		PrefilledInt: 7,
 		UnfilledInt:  0,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
index d5d1bbb..6a468cd 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go
@@ -5,5 +5,5 @@
 }
 
 func spaces() {
-	d := StructD{} //@suggestedfix("}", "refactor.rewrite")
+	d := StructD{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
index 0d75533..590c916 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden
@@ -8,6 +8,6 @@
 func spaces() {
 	d := StructD{
 		ExportedIntField: 0,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go
index 50877e9..f5e42a4 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go
+++ b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go
@@ -8,5 +8,5 @@
 }
 
 func fill() {
-	_ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite")
+	_ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
diff --git a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden
index 9936954..7e8e195 100644
--- a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden
+++ b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden
@@ -12,6 +12,6 @@
 	_ := unsafeStruct{
 		x: 0,
 		p: nil,
-	} //@suggestedfix("}", "refactor.rewrite")
+	} //@suggestedfix("}", "refactor.rewrite", "Fill")
 }
 
diff --git a/internal/lsp/testdata/fillstruct/typeparams.go b/internal/lsp/testdata/fillstruct/typeparams.go
new file mode 100644
index 0000000..c60cd68
--- /dev/null
+++ b/internal/lsp/testdata/fillstruct/typeparams.go
@@ -0,0 +1,38 @@
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	_ = x
+}
diff --git a/internal/lsp/testdata/fillstruct/typeparams.go.golden b/internal/lsp/testdata/fillstruct/typeparams.go.golden
new file mode 100644
index 0000000..9b2b90c
--- /dev/null
+++ b/internal/lsp/testdata/fillstruct/typeparams.go.golden
@@ -0,0 +1,328 @@
+-- suggestedfix_typeparams_11_40 --
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{
+	foo: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	var _ = basicStructWithTypeParams[T]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	_ = x
+}
+
+-- suggestedfix_typeparams_14_40 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{
+	foo: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	_ = x
+}
+
+-- suggestedfix_typeparams_18_49 --
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{
+	foo: "",
+	bar: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	var _ = basicStructWithTypeParams[T]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	_ = x
+}
+
+-- suggestedfix_typeparams_20_49 --
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = basicStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{
+	foo: "",
+	bar: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	_ = x
+}
+
+-- suggestedfix_typeparams_21_49 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{
+	foo: "",
+	bar: 0,
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	_ = x
+}
+
+-- suggestedfix_typeparams_22_1 --
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	foo: 0,
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	var _ = basicStructWithTypeParams[T]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+	_ = x
+}
+
+-- suggestedfix_typeparams_24_1 --
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = basicStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	foo: 0,
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	_ = x
+}
+
+-- suggestedfix_typeparams_25_1 --
+//go:build go1.18
+// +build go1.18
+
+package fillstruct
+
+type emptyStructWithTypeParams[A any] struct{}
+
+var _ = emptyStructWithTypeParams[int]{}
+
+type basicStructWithTypeParams[T any] struct {
+	foo T
+}
+
+var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type twoArgStructWithTypeParams[F, B any] struct {
+	foo F
+	bar B
+}
+
+var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+var _ = twoArgStructWithTypeParams[int, string]{
+	foo: 0,
+	bar: "bar",
+} //@suggestedfix("}", "refactor.rewrite", "Fill")
+
+type nestedStructWithTypeParams struct {
+	bar   string
+	basic basicStructWithTypeParams[int]
+}
+
+var _ = nestedStructWithTypeParams{}
+
+func _[T any]() {
+	type S struct{ t T }
+	x := S{}
+	_ = x
+}
+
diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go b/internal/lsp/testdata/inlay_hint/parameter_names.go
index 6fba235..0d930e5 100644
--- a/internal/lsp/testdata/inlay_hint/parameter_names.go
+++ b/internal/lsp/testdata/inlay_hint/parameter_names.go
@@ -42,4 +42,9 @@
 	kipp("a", "b", "c")
 	plex("a", "b", "c")
 	tars("a", "b", "c")
+	foo, bar, baz := "a", "b", "c"
+	kipp(foo, bar, baz)
+	plex("a", bar, baz)
+	tars(foo+foo, (bar), "c")
+
 }
diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden
index 46d3ea4..4e93a4f 100644
--- a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden
+++ b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden
@@ -43,5 +43,10 @@
 	kipp(<foo: >"a", <bar: >"b", <baz: >"c")
 	plex(<foo: >"a", <bar: >"b", <baz: >"c")
 	tars(<foo: >"a", <bar: >"b", <baz: >"c")
+	foo< string>, bar< string>, baz< string> := "a", "b", "c"
+	kipp(foo, bar, baz)
+	plex(<foo: >"a", bar, baz)
+	tars(<foo: >foo+foo, <bar: >(bar), <baz: >"c")
+
 }
 
diff --git a/internal/lsp/testdata/missingfunction/channels.go b/internal/lsp/testdata/missingfunction/channels.go
index 436491c..303770c 100644
--- a/internal/lsp/testdata/missingfunction/channels.go
+++ b/internal/lsp/testdata/missingfunction/channels.go
@@ -1,7 +1,7 @@
 package missingfunction
 
 func channels(s string) {
-	undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix")
+	undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "")
 }
 
 func c() (<-chan string, chan string) {
diff --git a/internal/lsp/testdata/missingfunction/channels.go.golden b/internal/lsp/testdata/missingfunction/channels.go.golden
index f5078fe..998ce58 100644
--- a/internal/lsp/testdata/missingfunction/channels.go.golden
+++ b/internal/lsp/testdata/missingfunction/channels.go.golden
@@ -2,7 +2,7 @@
 package missingfunction
 
 func channels(s string) {
-	undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix")
+	undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "")
 }
 
 func undefinedChannels(ch1 <-chan string, ch2 chan string) {
diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go b/internal/lsp/testdata/missingfunction/consecutive_params.go
index d2ec3be..f2fb3c0 100644
--- a/internal/lsp/testdata/missingfunction/consecutive_params.go
+++ b/internal/lsp/testdata/missingfunction/consecutive_params.go
@@ -2,5 +2,5 @@
 
 func consecutiveParams() {
 	var s string
-	undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix")
+	undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden b/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
index 14a7664..4b852ce 100644
--- a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
+++ b/internal/lsp/testdata/missingfunction/consecutive_params.go.golden
@@ -3,7 +3,7 @@
 
 func consecutiveParams() {
 	var s string
-	undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix")
+	undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "")
 }
 
 func undefinedConsecutiveParams(s1, s2 string) {
diff --git a/internal/lsp/testdata/missingfunction/error_param.go b/internal/lsp/testdata/missingfunction/error_param.go
index 9fd943f..d0484f0 100644
--- a/internal/lsp/testdata/missingfunction/error_param.go
+++ b/internal/lsp/testdata/missingfunction/error_param.go
@@ -2,5 +2,5 @@
 
 func errorParam() {
 	var err error
-	undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix")
+	undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/error_param.go.golden b/internal/lsp/testdata/missingfunction/error_param.go.golden
index 2e12711..de78646 100644
--- a/internal/lsp/testdata/missingfunction/error_param.go.golden
+++ b/internal/lsp/testdata/missingfunction/error_param.go.golden
@@ -3,7 +3,7 @@
 
 func errorParam() {
 	var err error
-	undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix")
+	undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "")
 }
 
 func undefinedErrorParam(err error) {
diff --git a/internal/lsp/testdata/missingfunction/literals.go b/internal/lsp/testdata/missingfunction/literals.go
index e276eae..0099b1a 100644
--- a/internal/lsp/testdata/missingfunction/literals.go
+++ b/internal/lsp/testdata/missingfunction/literals.go
@@ -3,5 +3,5 @@
 type T struct{}
 
 func literals() {
-	undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix")
+	undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/literals.go.golden b/internal/lsp/testdata/missingfunction/literals.go.golden
index 04782b9..599f020 100644
--- a/internal/lsp/testdata/missingfunction/literals.go.golden
+++ b/internal/lsp/testdata/missingfunction/literals.go.golden
@@ -8,7 +8,7 @@
 type T struct{}
 
 func literals() {
-	undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix")
+	undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "")
 }
 
 func undefinedLiterals(s string, t1 T, t2 *T) {
@@ -20,7 +20,7 @@
 type T struct{}
 
 func literals() {
-	undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix")
+	undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "")
 }
 
 func undefinedLiterals(s string, t1 T, t2 *T) {
diff --git a/internal/lsp/testdata/missingfunction/operation.go b/internal/lsp/testdata/missingfunction/operation.go
index 0408219..a4913ec 100644
--- a/internal/lsp/testdata/missingfunction/operation.go
+++ b/internal/lsp/testdata/missingfunction/operation.go
@@ -3,5 +3,5 @@
 import "time"
 
 func operation() {
-	undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix")
+	undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/operation.go.golden b/internal/lsp/testdata/missingfunction/operation.go.golden
index 5e35f30..fce69b1 100644
--- a/internal/lsp/testdata/missingfunction/operation.go.golden
+++ b/internal/lsp/testdata/missingfunction/operation.go.golden
@@ -8,7 +8,7 @@
 import "time"
 
 func operation() {
-	undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix")
+	undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "")
 }
 
 func undefinedOperation(duration time.Duration) {
@@ -20,7 +20,7 @@
 import "time"
 
 func operation() {
-	undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix")
+	undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "")
 }
 
 func undefinedOperation(duration time.Duration) {
diff --git a/internal/lsp/testdata/missingfunction/selector.go b/internal/lsp/testdata/missingfunction/selector.go
index afd1ab6..93a0402 100644
--- a/internal/lsp/testdata/missingfunction/selector.go
+++ b/internal/lsp/testdata/missingfunction/selector.go
@@ -2,5 +2,5 @@
 
 func selector() {
 	m := map[int]bool{}
-	undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix")
+	undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/selector.go.golden b/internal/lsp/testdata/missingfunction/selector.go.golden
index c48691c..44e2dde 100644
--- a/internal/lsp/testdata/missingfunction/selector.go.golden
+++ b/internal/lsp/testdata/missingfunction/selector.go.golden
@@ -3,7 +3,7 @@
 
 func selector() {
 	m := map[int]bool{}
-	undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix")
+	undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "")
 }
 
 func undefinedSelector(b bool) {
diff --git a/internal/lsp/testdata/missingfunction/slice.go b/internal/lsp/testdata/missingfunction/slice.go
index 4a562a2..48b1a52 100644
--- a/internal/lsp/testdata/missingfunction/slice.go
+++ b/internal/lsp/testdata/missingfunction/slice.go
@@ -1,5 +1,5 @@
 package missingfunction
 
 func slice() {
-	undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix")
+	undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/slice.go.golden b/internal/lsp/testdata/missingfunction/slice.go.golden
index 0ccb861..2a05d9a 100644
--- a/internal/lsp/testdata/missingfunction/slice.go.golden
+++ b/internal/lsp/testdata/missingfunction/slice.go.golden
@@ -2,7 +2,7 @@
 package missingfunction
 
 func slice() {
-	undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix")
+	undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "")
 }
 
 func undefinedSlice(i []int) {
diff --git a/internal/lsp/testdata/missingfunction/tuple.go b/internal/lsp/testdata/missingfunction/tuple.go
index 1c4782c..4059ced 100644
--- a/internal/lsp/testdata/missingfunction/tuple.go
+++ b/internal/lsp/testdata/missingfunction/tuple.go
@@ -1,7 +1,7 @@
 package missingfunction
 
 func tuple() {
-	undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix")
+	undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "")
 }
 
 func b() (string, error) {
diff --git a/internal/lsp/testdata/missingfunction/tuple.go.golden b/internal/lsp/testdata/missingfunction/tuple.go.golden
index 1e12bb7..e1118a3 100644
--- a/internal/lsp/testdata/missingfunction/tuple.go.golden
+++ b/internal/lsp/testdata/missingfunction/tuple.go.golden
@@ -2,7 +2,7 @@
 package missingfunction
 
 func tuple() {
-	undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix")
+	undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "")
 }
 
 func undefinedTuple(s string, err error) {
diff --git a/internal/lsp/testdata/missingfunction/unique_params.go b/internal/lsp/testdata/missingfunction/unique_params.go
index ffaba3f..00479bf 100644
--- a/internal/lsp/testdata/missingfunction/unique_params.go
+++ b/internal/lsp/testdata/missingfunction/unique_params.go
@@ -3,5 +3,5 @@
 func uniqueArguments() {
 	var s string
 	var i int
-	undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix")
+	undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "")
 }
diff --git a/internal/lsp/testdata/missingfunction/unique_params.go.golden b/internal/lsp/testdata/missingfunction/unique_params.go.golden
index 74fb91a..4797b3b 100644
--- a/internal/lsp/testdata/missingfunction/unique_params.go.golden
+++ b/internal/lsp/testdata/missingfunction/unique_params.go.golden
@@ -8,7 +8,7 @@
 func uniqueArguments() {
 	var s string
 	var i int
-	undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix")
+	undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "")
 }
 
 func undefinedUniqueArguments(s1 string, i int, s2 string) {
@@ -21,7 +21,7 @@
 func uniqueArguments() {
 	var s string
 	var i int
-	undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix")
+	undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "")
 }
 
 func undefinedUniqueArguments(s1 string, i int, s2 string) {
diff --git a/internal/lsp/testdata/semantic/a.go.golden b/internal/lsp/testdata/semantic/a.go.golden
index 19dd412..071dd17 100644
--- a/internal/lsp/testdata/semantic/a.go.golden
+++ b/internal/lsp/testdata/semantic/a.go.golden
@@ -27,7 +27,7 @@
 )
 
 /*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/A /*⇒6,keyword,[]*/struct {
-	/*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,comment,[]*/`foof`
+	/*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,string,[]*/`foof`
 }
 /*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/B /*⇒9,keyword,[]*/interface {
 	/*⇒1,type,[]*/A
diff --git a/internal/lsp/testdata/stub/stub_add_selector.go b/internal/lsp/testdata/stub/stub_add_selector.go
index a15afd7..4037b7a 100644
--- a/internal/lsp/testdata/stub/stub_add_selector.go
+++ b/internal/lsp/testdata/stub/stub_add_selector.go
@@ -7,6 +7,6 @@
 // then our implementation must add the import/package selector
 // in the concrete method if the concrete type is outside of the interface
 // package
-var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite")
+var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "")
 
 type readerFrom struct{}
diff --git a/internal/lsp/testdata/stub/stub_add_selector.go.golden b/internal/lsp/testdata/stub/stub_add_selector.go.golden
index e885483..8f08ca1 100644
--- a/internal/lsp/testdata/stub/stub_add_selector.go.golden
+++ b/internal/lsp/testdata/stub/stub_add_selector.go.golden
@@ -8,7 +8,7 @@
 // then our implementation must add the import/package selector
 // in the concrete method if the concrete type is outside of the interface
 // package
-var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite")
+var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "")
 
 type readerFrom struct{}
 
diff --git a/internal/lsp/testdata/stub/stub_assign.go b/internal/lsp/testdata/stub/stub_assign.go
index 9336361..d3f0931 100644
--- a/internal/lsp/testdata/stub/stub_assign.go
+++ b/internal/lsp/testdata/stub/stub_assign.go
@@ -4,7 +4,7 @@
 
 func main() {
 	var br io.ByteWriter
-	br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite")
+	br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type byteWriter struct{}
diff --git a/internal/lsp/testdata/stub/stub_assign.go.golden b/internal/lsp/testdata/stub/stub_assign.go.golden
index a52a823..f153542 100644
--- a/internal/lsp/testdata/stub/stub_assign.go.golden
+++ b/internal/lsp/testdata/stub/stub_assign.go.golden
@@ -5,7 +5,7 @@
 
 func main() {
 	var br io.ByteWriter
-	br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite")
+	br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type byteWriter struct{}
diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go b/internal/lsp/testdata/stub/stub_assign_multivars.go
index 01b330f..bd36d68 100644
--- a/internal/lsp/testdata/stub/stub_assign_multivars.go
+++ b/internal/lsp/testdata/stub/stub_assign_multivars.go
@@ -5,7 +5,7 @@
 func main() {
 	var br io.ByteWriter
 	var i int
-	i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite")
+	i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type multiByteWriter struct{}
diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden b/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
index e1e71ad..425d117 100644
--- a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
+++ b/internal/lsp/testdata/stub/stub_assign_multivars.go.golden
@@ -6,7 +6,7 @@
 func main() {
 	var br io.ByteWriter
 	var i int
-	i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite")
+	i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type multiByteWriter struct{}
diff --git a/internal/lsp/testdata/stub/stub_call_expr.go b/internal/lsp/testdata/stub/stub_call_expr.go
index 775b0e5..0c30946 100644
--- a/internal/lsp/testdata/stub/stub_call_expr.go
+++ b/internal/lsp/testdata/stub/stub_call_expr.go
@@ -1,7 +1,7 @@
 package stub
 
 func main() {
-	check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite")
+	check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 func check(err error) {
diff --git a/internal/lsp/testdata/stub/stub_call_expr.go.golden b/internal/lsp/testdata/stub/stub_call_expr.go.golden
index 2d12f86..c82d224 100644
--- a/internal/lsp/testdata/stub/stub_call_expr.go.golden
+++ b/internal/lsp/testdata/stub/stub_call_expr.go.golden
@@ -2,7 +2,7 @@
 package stub
 
 func main() {
-	check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite")
+	check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 func check(err error) {
diff --git a/internal/lsp/testdata/stub/stub_embedded.go b/internal/lsp/testdata/stub/stub_embedded.go
index 6d6a986..f66989e 100644
--- a/internal/lsp/testdata/stub/stub_embedded.go
+++ b/internal/lsp/testdata/stub/stub_embedded.go
@@ -5,7 +5,7 @@
 	"sort"
 )
 
-var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite")
+var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "")
 
 type embeddedConcrete struct{}
 
diff --git a/internal/lsp/testdata/stub/stub_embedded.go.golden b/internal/lsp/testdata/stub/stub_embedded.go.golden
index c258eba..3c5347e 100644
--- a/internal/lsp/testdata/stub/stub_embedded.go.golden
+++ b/internal/lsp/testdata/stub/stub_embedded.go.golden
@@ -6,7 +6,7 @@
 	"sort"
 )
 
-var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite")
+var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "")
 
 type embeddedConcrete struct{}
 
diff --git a/internal/lsp/testdata/stub/stub_err.go b/internal/lsp/testdata/stub/stub_err.go
index 908c7d3..121f0e7 100644
--- a/internal/lsp/testdata/stub/stub_err.go
+++ b/internal/lsp/testdata/stub/stub_err.go
@@ -1,7 +1,7 @@
 package stub
 
 func main() {
-	var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite")
+	var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type customErr struct{}
diff --git a/internal/lsp/testdata/stub/stub_err.go.golden b/internal/lsp/testdata/stub/stub_err.go.golden
index 717aed8..0b441bd 100644
--- a/internal/lsp/testdata/stub/stub_err.go.golden
+++ b/internal/lsp/testdata/stub/stub_err.go.golden
@@ -2,7 +2,7 @@
 package stub
 
 func main() {
-	var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite")
+	var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type customErr struct{}
diff --git a/internal/lsp/testdata/stub/stub_function_return.go b/internal/lsp/testdata/stub/stub_function_return.go
index bbf0588..41f1764 100644
--- a/internal/lsp/testdata/stub/stub_function_return.go
+++ b/internal/lsp/testdata/stub/stub_function_return.go
@@ -5,7 +5,7 @@
 )
 
 func newCloser() io.Closer {
-	return closer{} //@suggestedfix("c", "refactor.rewrite")
+	return closer{} //@suggestedfix("c", "refactor.rewrite", "")
 }
 
 type closer struct{}
diff --git a/internal/lsp/testdata/stub/stub_function_return.go.golden b/internal/lsp/testdata/stub/stub_function_return.go.golden
index f80874d..e90712e 100644
--- a/internal/lsp/testdata/stub/stub_function_return.go.golden
+++ b/internal/lsp/testdata/stub/stub_function_return.go.golden
@@ -6,7 +6,7 @@
 )
 
 func newCloser() io.Closer {
-	return closer{} //@suggestedfix("c", "refactor.rewrite")
+	return closer{} //@suggestedfix("c", "refactor.rewrite", "")
 }
 
 type closer struct{}
diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go b/internal/lsp/testdata/stub/stub_generic_receiver.go
index 64e90fc..1c00569 100644
--- a/internal/lsp/testdata/stub/stub_generic_receiver.go
+++ b/internal/lsp/testdata/stub/stub_generic_receiver.go
@@ -7,7 +7,7 @@
 
 // This file tests that that the stub method generator accounts for concrete
 // types that have type parameters defined.
-var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite")
+var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom")
 
 type genReader[T, Y any] struct {
 	T T
diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden b/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
index 1fc7157..97935d4 100644
--- a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
+++ b/internal/lsp/testdata/stub/stub_generic_receiver.go.golden
@@ -8,7 +8,7 @@
 
 // This file tests that that the stub method generator accounts for concrete
 // types that have type parameters defined.
-var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite")
+var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom")
 
 type genReader[T, Y any] struct {
 	T T
diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go b/internal/lsp/testdata/stub/stub_ignored_imports.go
index 8f6ec73..ca95d2a 100644
--- a/internal/lsp/testdata/stub/stub_ignored_imports.go
+++ b/internal/lsp/testdata/stub/stub_ignored_imports.go
@@ -12,7 +12,7 @@
 
 var (
 	_ Reader
-	_ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite")
+	_ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "")
 )
 
 type ignoredResetter struct{}
diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden b/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
index a0ddc17..33aba53 100644
--- a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
+++ b/internal/lsp/testdata/stub/stub_ignored_imports.go.golden
@@ -14,7 +14,7 @@
 
 var (
 	_ Reader
-	_ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite")
+	_ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "")
 )
 
 type ignoredResetter struct{}
diff --git a/internal/lsp/testdata/stub/stub_multi_var.go b/internal/lsp/testdata/stub/stub_multi_var.go
index 4276b79..06702b2 100644
--- a/internal/lsp/testdata/stub/stub_multi_var.go
+++ b/internal/lsp/testdata/stub/stub_multi_var.go
@@ -6,6 +6,6 @@
 // has multiple values on the same line can still be
 // analyzed correctly to target the interface implementation
 // diagnostic.
-var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite")
+var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "")
 
 type multiVar struct{}
diff --git a/internal/lsp/testdata/stub/stub_multi_var.go.golden b/internal/lsp/testdata/stub/stub_multi_var.go.golden
index b9ac423..804c7ee 100644
--- a/internal/lsp/testdata/stub/stub_multi_var.go.golden
+++ b/internal/lsp/testdata/stub/stub_multi_var.go.golden
@@ -7,7 +7,7 @@
 // has multiple values on the same line can still be
 // analyzed correctly to target the interface implementation
 // diagnostic.
-var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite")
+var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "")
 
 type multiVar struct{}
 
diff --git a/internal/lsp/testdata/stub/stub_pointer.go b/internal/lsp/testdata/stub/stub_pointer.go
index 2b3681b..e9d8bc6 100644
--- a/internal/lsp/testdata/stub/stub_pointer.go
+++ b/internal/lsp/testdata/stub/stub_pointer.go
@@ -3,7 +3,7 @@
 import "io"
 
 func getReaderFrom() io.ReaderFrom {
-	return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite")
+	return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type pointerImpl struct{}
diff --git a/internal/lsp/testdata/stub/stub_pointer.go.golden b/internal/lsp/testdata/stub/stub_pointer.go.golden
index c4133d7..a4d765d 100644
--- a/internal/lsp/testdata/stub/stub_pointer.go.golden
+++ b/internal/lsp/testdata/stub/stub_pointer.go.golden
@@ -4,7 +4,7 @@
 import "io"
 
 func getReaderFrom() io.ReaderFrom {
-	return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite")
+	return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "")
 }
 
 type pointerImpl struct{}
diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go b/internal/lsp/testdata/stub/stub_renamed_import.go
index eaebe25..54dd598 100644
--- a/internal/lsp/testdata/stub/stub_renamed_import.go
+++ b/internal/lsp/testdata/stub/stub_renamed_import.go
@@ -5,7 +5,7 @@
 	myio "io"
 )
 
-var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite")
+var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "")
 var _ myio.Reader
 
 type myIO struct{}
diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go.golden b/internal/lsp/testdata/stub/stub_renamed_import.go.golden
index 48ff4f1..8182d2b 100644
--- a/internal/lsp/testdata/stub/stub_renamed_import.go.golden
+++ b/internal/lsp/testdata/stub/stub_renamed_import.go.golden
@@ -6,7 +6,7 @@
 	myio "io"
 )
 
-var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite")
+var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "")
 var _ myio.Reader
 
 type myIO struct{}
diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go b/internal/lsp/testdata/stub/stub_renamed_import_iface.go
index 96caf54..26142d0 100644
--- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go
+++ b/internal/lsp/testdata/stub/stub_renamed_import_iface.go
@@ -8,6 +8,6 @@
 // method references an import from its own package
 // that the concrete type does not yet import, and that import happens
 // to be renamed, then we prefer the renaming of the interface.
-var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite")
+var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "")
 
 type otherInterfaceImpl struct{}
diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden b/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
index 9ba2cb4..134c24b 100644
--- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
+++ b/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden
@@ -11,7 +11,7 @@
 // method references an import from its own package
 // that the concrete type does not yet import, and that import happens
 // to be renamed, then we prefer the renaming of the interface.
-var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite")
+var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "")
 
 type otherInterfaceImpl struct{}
 
diff --git a/internal/lsp/testdata/stub/stub_stdlib.go b/internal/lsp/testdata/stub/stub_stdlib.go
index 0d54a6d..463cf78 100644
--- a/internal/lsp/testdata/stub/stub_stdlib.go
+++ b/internal/lsp/testdata/stub/stub_stdlib.go
@@ -4,6 +4,6 @@
 	"io"
 )
 
-var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite")
+var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "")
 
 type writer struct{}
diff --git a/internal/lsp/testdata/stub/stub_stdlib.go.golden b/internal/lsp/testdata/stub/stub_stdlib.go.golden
index 8636cea..5559250 100644
--- a/internal/lsp/testdata/stub/stub_stdlib.go.golden
+++ b/internal/lsp/testdata/stub/stub_stdlib.go.golden
@@ -5,7 +5,7 @@
 	"io"
 )
 
-var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite")
+var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "")
 
 type writer struct{}
 
diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
index e06dce0..7ff5244 100644
--- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
+++ b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go
@@ -6,6 +6,6 @@
 
 func goodbye() {
 	s := "hiiiiiii"
-	s = s //@suggestedfix("s = s", "quickfix")
+	s = s //@suggestedfix("s = s", "quickfix", "")
 	log.Print(s)
 }
diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
index 9ccaa19..e7e84fc 100644
--- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
+++ b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden
@@ -7,7 +7,7 @@
 
 func goodbye() {
 	s := "hiiiiiii"
-	 //@suggestedfix("s = s", "quickfix")
+	 //@suggestedfix("s = s", "quickfix", "")
 	log.Print(s)
 }
 
diff --git a/internal/lsp/testdata/summary.txt.golden b/internal/lsp/testdata/summary.txt.golden
index 0247551..b6c6c07 100644
--- a/internal/lsp/testdata/summary.txt.golden
+++ b/internal/lsp/testdata/summary.txt.golden
@@ -6,7 +6,7 @@
 UnimportedCompletionsCount = 5
 DeepCompletionsCount = 5
 FuzzyCompletionsCount = 8
-RankedCompletionsCount = 163
+RankedCompletionsCount = 164
 CaseSensitiveCompletionsCount = 4
 DiagnosticsCount = 37
 FoldingRangesCount = 2
diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden
index 7e8da12..668d5fb 100644
--- a/internal/lsp/testdata/summary_go1.18.txt.golden
+++ b/internal/lsp/testdata/summary_go1.18.txt.golden
@@ -6,14 +6,14 @@
 UnimportedCompletionsCount = 5
 DeepCompletionsCount = 5
 FuzzyCompletionsCount = 8
-RankedCompletionsCount = 173
+RankedCompletionsCount = 174
 CaseSensitiveCompletionsCount = 4
 DiagnosticsCount = 37
 FoldingRangesCount = 2
 FormatCount = 6
 ImportCount = 8
 SemanticTokenCount = 3
-SuggestedFixCount = 64
+SuggestedFixCount = 67
 FunctionExtractionCount = 25
 MethodExtractionCount = 6
 DefinitionsCount = 108
diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go b/internal/lsp/testdata/typeerrors/noresultvalues.go
index 84234c4..729e7bb 100644
--- a/internal/lsp/testdata/typeerrors/noresultvalues.go
+++ b/internal/lsp/testdata/typeerrors/noresultvalues.go
@@ -1,5 +1,5 @@
 package typeerrors
 
-func x() { return nil } //@suggestedfix("nil", "quickfix")
+func x() { return nil } //@suggestedfix("nil", "quickfix", "")
 
-func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix")
+func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "")
diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden b/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
index 07c54d4..48409a0 100644
--- a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
+++ b/internal/lsp/testdata/typeerrors/noresultvalues.go.golden
@@ -1,14 +1,14 @@
 -- suggestedfix_noresultvalues_3_19 --
 package typeerrors
 
-func x() { return } //@suggestedfix("nil", "quickfix")
+func x() { return } //@suggestedfix("nil", "quickfix", "")
 
-func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix")
+func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "")
 
 -- suggestedfix_noresultvalues_5_19 --
 package typeerrors
 
-func x() { return nil } //@suggestedfix("nil", "quickfix")
+func x() { return nil } //@suggestedfix("nil", "quickfix", "")
 
-func y() { return } //@suggestedfix("nil", "quickfix")
+func y() { return } //@suggestedfix("nil", "quickfix", "")
 
diff --git a/internal/lsp/testdata/undeclared/var.go b/internal/lsp/testdata/undeclared/var.go
index b5f9287..e27a733 100644
--- a/internal/lsp/testdata/undeclared/var.go
+++ b/internal/lsp/testdata/undeclared/var.go
@@ -1,13 +1,13 @@
 package undeclared
 
 func m() int {
-	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
+	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "")
 	if 100 < 90 {
 		z = 1
-	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
+	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "")
 		z = 4
 	}
-	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
+	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "")
 	}
 	r() //@diag("r", "compiler", "undeclared name: r", "error")
 	return z
diff --git a/internal/lsp/testdata/undeclared/var.go.golden b/internal/lsp/testdata/undeclared/var.go.golden
index 74adbe8..a266df7 100644
--- a/internal/lsp/testdata/undeclared/var.go.golden
+++ b/internal/lsp/testdata/undeclared/var.go.golden
@@ -2,14 +2,14 @@
 package undeclared
 
 func m() int {
-	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
+	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "")
 	if 100 < 90 {
 		z = 1
-	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
+	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "")
 		z = 4
 	}
 	i := 
-	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
+	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "")
 	}
 	r() //@diag("r", "compiler", "undeclared name: r", "error")
 	return z
@@ -20,13 +20,13 @@
 
 func m() int {
 	y := 
-	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
+	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "")
 	if 100 < 90 {
 		z = 1
-	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
+	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "")
 		z = 4
 	}
-	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
+	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "")
 	}
 	r() //@diag("r", "compiler", "undeclared name: r", "error")
 	return z
@@ -36,14 +36,14 @@
 package undeclared
 
 func m() int {
-	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix")
+	z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "")
 	n := 
 	if 100 < 90 {
 		z = 1
-	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix")
+	} else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "")
 		z = 4
 	}
-	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix")
+	for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "")
 	}
 	r() //@diag("r", "compiler", "undeclared name: r", "error")
 	return z
diff --git a/internal/lsp/tests/README.md b/internal/lsp/tests/README.md
index 2c18675..64ced79 100644
--- a/internal/lsp/tests/README.md
+++ b/internal/lsp/tests/README.md
@@ -11,7 +11,7 @@
 and the latter is the expected output.
 
 Each input file contains annotations like
-`//@suggestedfix("}", "refactor.rewrite")`. These annotations are interpreted by
+`//@suggestedfix("}", "refactor.rewrite", "Fill anonymous struct")`. These annotations are interpreted by
 test runners to perform certain actions. The expected output after those actions
 is encoded in the golden file.
 
diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go
index ec804e5..4c3201a 100644
--- a/internal/lsp/tests/tests.go
+++ b/internal/lsp/tests/tests.go
@@ -69,7 +69,7 @@
 type Formats []span.Span
 type Imports []span.Span
 type SemanticTokens []span.Span
-type SuggestedFixes map[span.Span][]string
+type SuggestedFixes map[span.Span][]SuggestedFix
 type FunctionExtractions map[span.Span]span.Span
 type MethodExtractions map[span.Span]span.Span
 type Definitions map[span.Span]Definition
@@ -152,7 +152,7 @@
 	Format(*testing.T, span.Span)
 	Import(*testing.T, span.Span)
 	SemanticTokens(*testing.T, span.Span)
-	SuggestedFix(*testing.T, span.Span, []string, int)
+	SuggestedFix(*testing.T, span.Span, []SuggestedFix, int)
 	FunctionExtraction(*testing.T, span.Span, span.Span)
 	MethodExtraction(*testing.T, span.Span, span.Span)
 	Definition(*testing.T, span.Span, Definition)
@@ -232,6 +232,10 @@
 	NotePosition token.Position
 }
 
+type SuggestedFix struct {
+	ActionKind, Title string
+}
+
 type Golden struct {
 	Filename string
 	Archive  *txtar.Archive
@@ -265,6 +269,7 @@
 	o.HierarchicalDocumentSymbolSupport = true
 	o.ExperimentalWorkspaceModule = true
 	o.SemanticTokens = true
+	o.InternalOptions.NewDiff = "both"
 }
 
 func RunTests(t *testing.T, dataDir string, includeMultiModule bool, f func(*testing.T, *Data)) {
@@ -1197,11 +1202,8 @@
 	data.SemanticTokens = append(data.SemanticTokens, spn)
 }
 
-func (data *Data) collectSuggestedFixes(spn span.Span, actionKind string) {
-	if _, ok := data.SuggestedFixes[spn]; !ok {
-		data.SuggestedFixes[spn] = []string{}
-	}
-	data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], actionKind)
+func (data *Data) collectSuggestedFixes(spn span.Span, actionKind, fix string) {
+	data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], SuggestedFix{actionKind, fix})
 }
 
 func (data *Data) collectFunctionExtractions(start span.Span, end span.Span) {
diff --git a/internal/lsp/text_synchronization.go b/internal/lsp/text_synchronization.go
index 3276a47..dd67145 100644
--- a/internal/lsp/text_synchronization.go
+++ b/internal/lsp/text_synchronization.go
@@ -286,21 +286,38 @@
 		return errors.New("server is shut down")
 	}
 	s.stateMu.Unlock()
+
 	// If the set of changes included directories, expand those directories
 	// to their files.
 	modifications = s.session.ExpandModificationsToDirectories(ctx, modifications)
 
-	snapshots, releases, err := s.session.DidModifyFiles(ctx, modifications)
+	// Build a lookup map for file modifications, so that we can later join
+	// with the snapshot file associations.
+	modMap := make(map[span.URI]source.FileModification)
+	for _, mod := range modifications {
+		modMap[mod.URI] = mod
+	}
+
+	snapshots, release, err := s.session.DidModifyFiles(ctx, modifications)
 	if err != nil {
 		close(diagnoseDone)
 		return err
 	}
 
+	// golang/go#50267: diagnostics should be re-sent after an open or close. For
+	// some clients, it may be helpful to re-send after each change.
+	for snapshot, uris := range snapshots {
+		for _, uri := range uris {
+			mod := modMap[uri]
+			if snapshot.View().Options().ChattyDiagnostics || mod.Action == source.Open || mod.Action == source.Close {
+				s.mustPublishDiagnostics(uri)
+			}
+		}
+	}
+
 	go func() {
 		s.diagnoseSnapshots(snapshots, onDisk)
-		for _, release := range releases {
-			release()
-		}
+		release()
 		close(diagnoseDone)
 	}()
 
diff --git a/internal/lsp/workspace.go b/internal/lsp/workspace.go
index a1f837e..b41406d 100644
--- a/internal/lsp/workspace.go
+++ b/internal/lsp/workspace.go
@@ -26,16 +26,18 @@
 	return s.addFolders(ctx, event.Added)
 }
 
+// addView returns a Snapshot and a release function that must be
+// called when it is no longer needed.
 func (s *Server) addView(ctx context.Context, name string, uri span.URI) (source.Snapshot, func(), error) {
 	s.stateMu.Lock()
 	state := s.state
 	s.stateMu.Unlock()
 	if state < serverInitialized {
-		return nil, func() {}, fmt.Errorf("addView called before server initialized")
+		return nil, nil, fmt.Errorf("addView called before server initialized")
 	}
 	options := s.session.Options().Clone()
 	if err := s.fetchConfig(ctx, name, uri, options); err != nil {
-		return nil, func() {}, err
+		return nil, nil, err
 	}
 	_, snapshot, release, err := s.session.NewView(ctx, name, uri, options)
 	return snapshot, release, err
diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go
index a758dee..e56af3b 100644
--- a/internal/memoize/memoize.go
+++ b/internal/memoize/memoize.go
@@ -2,16 +2,24 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package memoize supports memoizing the return values of functions with
-// idempotent results that are expensive to compute.
+// Package memoize defines a "promise" abstraction that enables
+// memoization of the result of calling an expensive but idempotent
+// function.
 //
-// To use this package, build a store and use it to acquire handles with the
-// Bind method.
+// Call p = NewPromise(f) to obtain a promise for the future result of
+// calling f(), and call p.Get() to obtain that result. All calls to
+// p.Get return the result of a single call of f().
+// Get blocks if the function has not finished (or started).
+//
+// A Store is a map of arbitrary keys to promises. Use Store.Promise
+// to create a promise in the store. All calls to Handle(k) return the
+// same promise as long as it is in the store. These promises are
+// reference-counted and must be explicitly released. Once the last
+// reference is released, the promise is removed from the store.
 package memoize
 
 import (
 	"context"
-	"flag"
 	"fmt"
 	"reflect"
 	"runtime/trace"
@@ -21,130 +29,61 @@
 	"golang.org/x/tools/internal/xcontext"
 )
 
-var (
-	panicOnDestroyed = flag.Bool("memoize_panic_on_destroyed", false,
-		"Panic when a destroyed generation is read rather than returning an error. "+
-			"Panicking may make it easier to debug lifetime errors, especially when "+
-			"used with GOTRACEBACK=crash to see all running goroutines.")
-)
-
-// Store binds keys to functions, returning handles that can be used to access
-// the functions results.
-type Store struct {
-	handlesMu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu
-	handles   map[interface{}]*Handle
-}
-
-// Generation creates a new Generation associated with s. Destroy must be
-// called on the returned Generation once it is no longer in use. name is
-// for debugging purposes only.
-func (s *Store) Generation(name string) *Generation {
-	return &Generation{store: s, name: name}
-}
-
-// A Generation is a logical point in time of the cache life-cycle. Cache
-// entries associated with a Generation will not be removed until the
-// Generation is destroyed.
-type Generation struct {
-	// destroyed is 1 after the generation is destroyed. Atomic.
-	destroyed uint32
-	store     *Store
-	name      string
-	// destroyedBy describes the caller that togged destroyed from 0 to 1.
-	destroyedBy string
-	// wg tracks the reference count of this generation.
-	wg sync.WaitGroup
-}
-
-// Destroy waits for all operations referencing g to complete, then removes
-// all references to g from cache entries. Cache entries that no longer
-// reference any non-destroyed generation are removed. Destroy must be called
-// exactly once for each generation, and destroyedBy describes the caller.
-func (g *Generation) Destroy(destroyedBy string) {
-	g.wg.Wait()
-
-	prevDestroyedBy := g.destroyedBy
-	g.destroyedBy = destroyedBy
-	if ok := atomic.CompareAndSwapUint32(&g.destroyed, 0, 1); !ok {
-		panic("Destroy on generation " + g.name + " already destroyed by " + prevDestroyedBy)
-	}
-
-	g.store.handlesMu.Lock()
-	defer g.store.handlesMu.Unlock()
-	for _, h := range g.store.handles {
-		if !h.trackGenerations {
-			continue
-		}
-		h.mu.Lock()
-		if _, ok := h.generations[g]; ok {
-			delete(h.generations, g) // delete even if it's dead, in case of dangling references to the entry.
-			if len(h.generations) == 0 {
-				h.destroy(g.store)
-			}
-		}
-		h.mu.Unlock()
-	}
-}
-
-// Acquire creates a new reference to g, and returns a func to release that
-// reference.
-func (g *Generation) Acquire() func() {
-	destroyed := atomic.LoadUint32(&g.destroyed)
-	if destroyed != 0 {
-		panic("acquire on generation " + g.name + " destroyed by " + g.destroyedBy)
-	}
-	g.wg.Add(1)
-	return g.wg.Done
-}
-
-// Arg is a marker interface that can be embedded to indicate a type is
-// intended for use as a Function argument.
-type Arg interface{ memoizeArg() }
-
-// Function is the type for functions that can be memoized.
-// The result must be a pointer.
-type Function func(ctx context.Context, arg Arg) interface{}
-
-type state int
-
-// TODO(rfindley): remove stateDestroyed; Handles should not need to know
-// whether or not they have been destroyed.
+// Function is the type of a function that can be memoized.
 //
-// TODO(rfindley): also consider removing stateIdle. Why create a handle if you
-// aren't certain you're going to need its result? And if you know you need its
-// result, why wait to begin computing it?
-const (
-	stateIdle = iota
-	stateRunning
-	stateCompleted
-	stateDestroyed
-)
-
-// Handle is returned from a store when a key is bound to a function.
-// It is then used to access the results of that function.
+// If the arg is a RefCounted, its Acquire/Release operations are called.
 //
-// A Handle starts out in idle state, waiting for something to demand its
-// evaluation. It then transitions into running state. While it's running,
-// waiters tracks the number of Get calls waiting for a result, and the done
-// channel is used to notify waiters of the next state transition. Once the
-// evaluation finishes, value is set, state changes to completed, and done
-// is closed, unblocking waiters. Alternatively, as Get calls are cancelled,
-// they decrement waiters. If it drops to zero, the inner context is cancelled,
-// computation is abandoned, and state resets to idle to start the process over
-// again.
+// The argument must not materially affect the result of the function
+// in ways that are not captured by the promise's key, since if
+// Promise.Get is called twice concurrently, with the same (implicit)
+// key but different arguments, the Function is called only once but
+// its result must be suitable for both callers.
 //
-// Handles may be tracked by generations, or directly reference counted, as
-// determined by the trackGenerations field. See the field comments for more
-// information about the differences between these two forms.
+// The main purpose of the argument is to avoid the Function closure
+// needing to retain large objects (in practice: the snapshot) in
+// memory that can be supplied at call time by any caller.
+type Function func(ctx context.Context, arg interface{}) interface{}
+
+// A RefCounted is a value whose functional lifetime is determined by
+// reference counting.
 //
-// TODO(rfindley): eliminate generational handles.
-type Handle struct {
-	key interface{}
-	mu  sync.Mutex // lock ordering: Store.handlesMu before Handle.mu
+// Its Acquire method is called before the Function is invoked, and
+// the corresponding release is called when the Function returns.
+// Usually both events happen within a single call to Get, so Get
+// would be fine with a "borrowed" reference, but if the context is
+// cancelled, Get may return before the Function is complete, causing
+// the argument to escape, and potential premature destruction of the
+// value. For a reference-counted type, this requires a pair of
+// increment/decrement operations to extend its life.
+type RefCounted interface {
+	// Acquire prevents the value from being destroyed until the
+	// returned function is called.
+	Acquire() func()
+}
 
-	// generations is the set of generations in which this handle is valid.
-	generations map[*Generation]struct{}
+// A Promise represents the future result of a call to a function.
+type Promise struct {
+	debug string // for observability
 
+	// refcount is the reference count in the containing Store, used by
+	// Store.Promise. It is guarded by Store.promisesMu on the containing Store.
+	refcount int32
+
+	mu sync.Mutex
+
+	// A Promise starts out IDLE, waiting for something to demand
+	// its evaluation. It then transitions into RUNNING state.
+	//
+	// While RUNNING, waiters tracks the number of Get calls
+	// waiting for a result, and the done channel is used to
+	// notify waiters of the next state transition. Once
+	// evaluation finishes, value is set, state changes to
+	// COMPLETED, and done is closed, unblocking waiters.
+	//
+	// Alternatively, as Get calls are cancelled, they decrement
+	// waiters. If it drops to zero, the inner context is
+	// cancelled, computation is abandoned, and state resets to
+	// IDLE to start the process over again.
 	state state
 	// done is set in running state, and closed when exiting it.
 	done chan struct{}
@@ -156,248 +95,89 @@
 	function Function
 	// value is set in completed state.
 	value interface{}
-	// cleanup, if non-nil, is used to perform any necessary clean-up on values
-	// produced by function.
-	//
-	// cleanup is never set for reference counted handles.
-	//
-	// TODO(rfindley): remove this field once workspace folders no longer need to
-	// be tracked.
-	cleanup func(interface{})
-
-	// If trackGenerations is set, this handle tracks generations in which it
-	// is valid, via the generations field. Otherwise, it is explicitly reference
-	// counted via the refCounter field.
-	trackGenerations bool
-	refCounter       int32
 }
 
-// Bind returns a handle for the given key and function.
+// NewPromise returns a promise for the future result of calling the
+// specified function.
 //
-// Each call to bind will return the same handle if it is already bound. Bind
-// will always return a valid handle, creating one if needed. Each key can
-// only have one handle at any given time. The value will be held at least
-// until the associated generation is destroyed. Bind does not cause the value
-// to be generated.
-//
-// If cleanup is non-nil, it will be called on any non-nil values produced by
-// function when they are no longer referenced.
-//
-// It is responsibility of the caller to call Inherit on the handler whenever
-// it should still be accessible by a next generation.
-func (g *Generation) Bind(key interface{}, function Function, cleanup func(interface{})) *Handle {
-	return g.getHandle(key, function, cleanup, true)
-}
-
-// GetHandle returns a handle for the given key and function with similar
-// properties and behavior as Bind.
-//
-// As in opposite to Bind it returns a release callback which has to be called
-// once this reference to handle is not needed anymore.
-func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, func()) {
-	h := g.getHandle(key, function, nil, false)
-	store := g.store
-	release := func() {
-		// Acquire store.handlesMu before mutating refCounter
-		store.handlesMu.Lock()
-		defer store.handlesMu.Unlock()
-
-		h.mu.Lock()
-		defer h.mu.Unlock()
-
-		h.refCounter--
-		if h.refCounter == 0 {
-			// Don't call h.destroy: for reference counted handles we can't know when
-			// they are no longer reachable from runnable goroutines. For example,
-			// gopls could have a current operation that is using a packageHandle.
-			// Destroying the handle here would cause that operation to hang.
-			delete(store.handles, h.key)
-		}
-	}
-	return h, release
-}
-
-func (g *Generation) getHandle(key interface{}, function Function, cleanup func(interface{}), trackGenerations bool) *Handle {
-	// panic early if the function is nil
-	// it would panic later anyway, but in a way that was much harder to debug
+// The debug string is used to classify promises in logs and metrics.
+// It should be drawn from a small set.
+func NewPromise(debug string, function Function) *Promise {
 	if function == nil {
-		panic("the function passed to bind must not be nil")
+		panic("nil function")
 	}
-	if atomic.LoadUint32(&g.destroyed) != 0 {
-		panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy)
-	}
-	g.store.handlesMu.Lock()
-	defer g.store.handlesMu.Unlock()
-	h, ok := g.store.handles[key]
-	if !ok {
-		h = &Handle{
-			key:              key,
-			function:         function,
-			cleanup:          cleanup,
-			trackGenerations: trackGenerations,
-		}
-		if trackGenerations {
-			h.generations = make(map[*Generation]struct{}, 1)
-		}
-
-		if g.store.handles == nil {
-			g.store.handles = map[interface{}]*Handle{}
-		}
-		g.store.handles[key] = h
-	}
-
-	h.incrementRef(g)
-	return h
-}
-
-// Stats returns the number of each type of value in the store.
-func (s *Store) Stats() map[reflect.Type]int {
-	result := map[reflect.Type]int{}
-
-	s.handlesMu.Lock()
-	defer s.handlesMu.Unlock()
-
-	for k := range s.handles {
-		result[reflect.TypeOf(k)]++
-	}
-	return result
-}
-
-// DebugOnlyIterate iterates through all live cache entries and calls f on them.
-// It should only be used for debugging purposes.
-func (s *Store) DebugOnlyIterate(f func(k, v interface{})) {
-	s.handlesMu.Lock()
-	defer s.handlesMu.Unlock()
-
-	for k, h := range s.handles {
-		var v interface{}
-		h.mu.Lock()
-		if h.state == stateCompleted {
-			v = h.value
-		}
-		h.mu.Unlock()
-		if v == nil {
-			continue
-		}
-		f(k, v)
+	return &Promise{
+		debug:    debug,
+		function: function,
 	}
 }
 
-// Inherit makes h valid in generation g. It is concurrency-safe.
-func (g *Generation) Inherit(h *Handle) {
-	if atomic.LoadUint32(&g.destroyed) != 0 {
-		panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy)
-	}
-	if !h.trackGenerations {
-		panic("called Inherit on handle not created by Generation.Bind")
-	}
+type state int
 
-	h.incrementRef(g)
-}
+const (
+	stateIdle      = iota // newly constructed, or last waiter was cancelled
+	stateRunning          // start was called and not cancelled
+	stateCompleted        // function call ran to completion
+)
 
-// destroy marks h as destroyed. h.mu and store.handlesMu must be held.
-func (h *Handle) destroy(store *Store) {
-	h.state = stateDestroyed
-	if h.cleanup != nil && h.value != nil {
-		h.cleanup(h.value)
-	}
-	delete(store.handles, h.key)
-}
-
-func (h *Handle) incrementRef(g *Generation) {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-
-	if h.state == stateDestroyed {
-		panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name))
-	}
-
-	if h.trackGenerations {
-		h.generations[g] = struct{}{}
-	} else {
-		h.refCounter++
-	}
-}
-
-// hasRefLocked reports whether h is valid in generation g. h.mu must be held.
-func (h *Handle) hasRefLocked(g *Generation) bool {
-	if !h.trackGenerations {
-		return true
-	}
-
-	_, ok := h.generations[g]
-	return ok
-}
-
-// Cached returns the value associated with a handle.
+// Cached returns the value associated with a promise.
 //
 // It will never cause the value to be generated.
 // It will return the cached value, if present.
-func (h *Handle) Cached(g *Generation) interface{} {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	if !h.hasRefLocked(g) {
-		return nil
-	}
-	if h.state == stateCompleted {
-		return h.value
+func (p *Promise) Cached() interface{} {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.state == stateCompleted {
+		return p.value
 	}
 	return nil
 }
 
-// Get returns the value associated with a handle.
+// Get returns the value associated with a promise.
+//
+// All calls to Promise.Get on a given promise return the
+// same result but the function is called (to completion) at most once.
 //
 // If the value is not yet ready, the underlying function will be invoked.
-// If ctx is cancelled, Get returns nil.
-func (h *Handle) Get(ctx context.Context, g *Generation, arg Arg) (interface{}, error) {
-	release := g.Acquire()
-	defer release()
-
+//
+// If ctx is cancelled, Get returns (nil, Canceled).
+// If all concurrent calls to Get are cancelled, the context provided
+// to the function is cancelled. A later call to Get may attempt to
+// call the function again.
+func (p *Promise) Get(ctx context.Context, arg interface{}) (interface{}, error) {
 	if ctx.Err() != nil {
 		return nil, ctx.Err()
 	}
-	h.mu.Lock()
-	if !h.hasRefLocked(g) {
-		h.mu.Unlock()
-
-		err := fmt.Errorf("reading key %#v: generation %v is not known", h.key, g.name)
-		if *panicOnDestroyed && ctx.Err() != nil {
-			panic(err)
-		}
-		return nil, err
-	}
-	switch h.state {
+	p.mu.Lock()
+	switch p.state {
 	case stateIdle:
-		return h.run(ctx, g, arg)
+		return p.run(ctx, arg)
 	case stateRunning:
-		return h.wait(ctx)
+		return p.wait(ctx)
 	case stateCompleted:
-		defer h.mu.Unlock()
-		return h.value, nil
-	case stateDestroyed:
-		h.mu.Unlock()
-		err := fmt.Errorf("Get on destroyed entry %#v (type %T) in generation %v", h.key, h.key, g.name)
-		if *panicOnDestroyed {
-			panic(err)
-		}
-		return nil, err
+		defer p.mu.Unlock()
+		return p.value, nil
 	default:
 		panic("unknown state")
 	}
 }
 
-// run starts h.function and returns the result. h.mu must be locked.
-func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, error) {
+// run starts p.function and returns the result. p.mu must be locked.
+func (p *Promise) run(ctx context.Context, arg interface{}) (interface{}, error) {
 	childCtx, cancel := context.WithCancel(xcontext.Detach(ctx))
-	h.cancel = cancel
-	h.state = stateRunning
-	h.done = make(chan struct{})
-	function := h.function // Read under the lock
+	p.cancel = cancel
+	p.state = stateRunning
+	p.done = make(chan struct{})
+	function := p.function // Read under the lock
 
-	// Make sure that the generation isn't destroyed while we're running in it.
-	release := g.Acquire()
+	// Make sure that the argument isn't destroyed while we're running in it.
+	release := func() {}
+	if rc, ok := arg.(RefCounted); ok {
+		release = rc.Acquire()
+	}
+
 	go func() {
-		trace.WithRegion(childCtx, fmt.Sprintf("Handle.run %T", h.key), func() {
+		trace.WithRegion(childCtx, fmt.Sprintf("Promise.run %s", p.debug), func() {
 			defer release()
 			// Just in case the function does something expensive without checking
 			// the context, double-check we're still alive.
@@ -406,69 +186,150 @@
 			}
 			v := function(childCtx, arg)
 			if childCtx.Err() != nil {
-				// It's possible that v was computed despite the context cancellation. In
-				// this case we should ensure that it is cleaned up.
-				if h.cleanup != nil && v != nil {
-					h.cleanup(v)
-				}
 				return
 			}
 
-			h.mu.Lock()
-			defer h.mu.Unlock()
-			// It's theoretically possible that the handle has been cancelled out
+			p.mu.Lock()
+			defer p.mu.Unlock()
+			// It's theoretically possible that the promise has been cancelled out
 			// of the run that started us, and then started running again since we
 			// checked childCtx above. Even so, that should be harmless, since each
 			// run should produce the same results.
-			if h.state != stateRunning {
-				// v will never be used, so ensure that it is cleaned up.
-				if h.cleanup != nil && v != nil {
-					h.cleanup(v)
-				}
+			if p.state != stateRunning {
 				return
 			}
 
-			if h.cleanup != nil && h.value != nil {
-				// Clean up before overwriting an existing value.
-				h.cleanup(h.value)
-			}
-
-			// At this point v will be cleaned up whenever h is destroyed.
-			h.value = v
-			h.function = nil
-			h.state = stateCompleted
-			close(h.done)
+			p.value = v
+			p.function = nil // aid GC
+			p.state = stateCompleted
+			close(p.done)
 		})
 	}()
 
-	return h.wait(ctx)
+	return p.wait(ctx)
 }
 
-// wait waits for the value to be computed, or ctx to be cancelled. h.mu must be locked.
-func (h *Handle) wait(ctx context.Context) (interface{}, error) {
-	h.waiters++
-	done := h.done
-	h.mu.Unlock()
+// wait waits for the value to be computed, or ctx to be cancelled. p.mu must be locked.
+func (p *Promise) wait(ctx context.Context) (interface{}, error) {
+	p.waiters++
+	done := p.done
+	p.mu.Unlock()
 
 	select {
 	case <-done:
-		h.mu.Lock()
-		defer h.mu.Unlock()
-		if h.state == stateCompleted {
-			return h.value, nil
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		if p.state == stateCompleted {
+			return p.value, nil
 		}
 		return nil, nil
 	case <-ctx.Done():
-		h.mu.Lock()
-		defer h.mu.Unlock()
-		h.waiters--
-		if h.waiters == 0 && h.state == stateRunning {
-			h.cancel()
-			close(h.done)
-			h.state = stateIdle
-			h.done = nil
-			h.cancel = nil
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		p.waiters--
+		if p.waiters == 0 && p.state == stateRunning {
+			p.cancel()
+			close(p.done)
+			p.state = stateIdle
+			p.done = nil
+			p.cancel = nil
 		}
 		return nil, ctx.Err()
 	}
 }
+
+// An EvictionPolicy controls the eviction behavior of keys in a Store when
+// they no longer have any references.
+type EvictionPolicy int
+
+const (
+	// ImmediatelyEvict evicts keys as soon as they no longer have references.
+	ImmediatelyEvict EvictionPolicy = iota
+
+	// NeverEvict does not evict keys.
+	NeverEvict
+)
+
+// A Store maps arbitrary keys to reference-counted promises.
+//
+// The zero value is a valid Store, though a store may also be created via
+// NewStore if a custom EvictionPolicy is required.
+type Store struct {
+	evictionPolicy EvictionPolicy
+
+	promisesMu sync.Mutex
+	promises   map[interface{}]*Promise
+}
+
+// NewStore creates a new store with the given eviction policy.
+func NewStore(policy EvictionPolicy) *Store {
+	return &Store{evictionPolicy: policy}
+}
+
+// Promise returns a reference-counted promise for the future result of
+// calling the specified function.
+//
+// Calls to Promise with the same key return the same promise, incrementing its
+// reference count.  The caller must call the returned function to decrement
+// the promise's reference count when it is no longer needed. The returned
+// function must not be called more than once.
+//
+// Once the last reference has been released, the promise is removed from the
+// store.
+func (store *Store) Promise(key interface{}, function Function) (*Promise, func()) {
+	store.promisesMu.Lock()
+	p, ok := store.promises[key]
+	if !ok {
+		p = NewPromise(reflect.TypeOf(key).String(), function)
+		if store.promises == nil {
+			store.promises = map[interface{}]*Promise{}
+		}
+		store.promises[key] = p
+	}
+	p.refcount++
+	store.promisesMu.Unlock()
+
+	var released int32
+	release := func() {
+		if !atomic.CompareAndSwapInt32(&released, 0, 1) {
+			panic("release called more than once")
+		}
+		store.promisesMu.Lock()
+
+		p.refcount--
+		if p.refcount == 0 && store.evictionPolicy != NeverEvict {
+			// Inv: if p.refcount > 0, then store.promises[key] == p.
+			delete(store.promises, key)
+		}
+		store.promisesMu.Unlock()
+	}
+
+	return p, release
+}
+
+// Stats returns the number of each type of key in the store.
+func (s *Store) Stats() map[reflect.Type]int {
+	result := map[reflect.Type]int{}
+
+	s.promisesMu.Lock()
+	defer s.promisesMu.Unlock()
+
+	for k := range s.promises {
+		result[reflect.TypeOf(k)]++
+	}
+	return result
+}
+
+// DebugOnlyIterate iterates through the store and, for each completed
+// promise, calls f(k, v) for the map key k and function result v.  It
+// should only be used for debugging purposes.
+func (s *Store) DebugOnlyIterate(f func(k, v interface{})) {
+	s.promisesMu.Lock()
+	defer s.promisesMu.Unlock()
+
+	for k, p := range s.promises {
+		if v := p.Cached(); v != nil {
+			f(k, v)
+		}
+	}
+}
diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go
index ae387b8..c54572d 100644
--- a/internal/memoize/memoize_test.go
+++ b/internal/memoize/memoize_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"context"
-	"strings"
 	"sync"
 	"testing"
 	"time"
@@ -15,127 +14,74 @@
 )
 
 func TestGet(t *testing.T) {
-	s := &memoize.Store{}
-	g := s.Generation("x")
+	var store memoize.Store
 
 	evaled := 0
 
-	h := g.Bind("key", func(context.Context, memoize.Arg) interface{} {
+	h, release := store.Promise("key", func(context.Context, interface{}) interface{} {
 		evaled++
 		return "res"
-	}, nil)
-	expectGet(t, h, g, "res")
-	expectGet(t, h, g, "res")
+	})
+	defer release()
+	expectGet(t, h, "res")
+	expectGet(t, h, "res")
 	if evaled != 1 {
 		t.Errorf("got %v calls to function, wanted 1", evaled)
 	}
 }
 
-func expectGet(t *testing.T, h *memoize.Handle, g *memoize.Generation, wantV interface{}) {
+func expectGet(t *testing.T, h *memoize.Promise, wantV interface{}) {
 	t.Helper()
-	gotV, gotErr := h.Get(context.Background(), g, nil)
+	gotV, gotErr := h.Get(context.Background(), nil)
 	if gotV != wantV || gotErr != nil {
 		t.Fatalf("Get() = %v, %v, wanted %v, nil", gotV, gotErr, wantV)
 	}
 }
 
-func expectGetError(t *testing.T, h *memoize.Handle, g *memoize.Generation, substr string) {
-	gotV, gotErr := h.Get(context.Background(), g, nil)
-	if gotErr == nil || !strings.Contains(gotErr.Error(), substr) {
-		t.Fatalf("Get() = %v, %v, wanted err %q", gotV, gotErr, substr)
+func TestNewPromise(t *testing.T) {
+	calls := 0
+	f := func(context.Context, interface{}) interface{} {
+		calls++
+		return calls
 	}
+
+	// All calls to Get on the same promise return the same result.
+	p1 := memoize.NewPromise("debug", f)
+	expectGet(t, p1, 1)
+	expectGet(t, p1, 1)
+
+	// A new promise calls the function again.
+	p2 := memoize.NewPromise("debug", f)
+	expectGet(t, p2, 2)
+	expectGet(t, p2, 2)
+
+	// The original promise is unchanged.
+	expectGet(t, p1, 1)
 }
 
-func TestGenerations(t *testing.T) {
-	s := &memoize.Store{}
-	// Evaluate key in g1.
-	g1 := s.Generation("g1")
-	h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }, nil)
-	expectGet(t, h1, g1, "res")
-
-	// Get key in g2. It should inherit the value from g1.
-	g2 := s.Generation("g2")
-	h2 := g2.Bind("key", func(context.Context, memoize.Arg) interface{} {
-		t.Fatal("h2 should not need evaluation")
-		return "error"
-	}, nil)
-	expectGet(t, h2, g2, "res")
-
-	// With g1 destroyed, g2 should still work.
-	g1.Destroy("TestGenerations")
-	expectGet(t, h2, g2, "res")
-
-	// With all generations destroyed, key should be re-evaluated.
-	g2.Destroy("TestGenerations")
-	g3 := s.Generation("g3")
-	h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }, nil)
-	expectGet(t, h3, g3, "new res")
-}
-
-func TestCleanup(t *testing.T) {
-	s := &memoize.Store{}
-	g1 := s.Generation("g1")
+func TestStoredPromiseRefCounting(t *testing.T) {
+	var store memoize.Store
 	v1 := false
 	v2 := false
-	cleanup := func(v interface{}) {
-		*(v.(*bool)) = true
-	}
-	h1 := g1.Bind("key1", func(context.Context, memoize.Arg) interface{} {
-		return &v1
-	}, nil)
-	h2 := g1.Bind("key2", func(context.Context, memoize.Arg) interface{} {
-		return &v2
-	}, cleanup)
-	expectGet(t, h1, g1, &v1)
-	expectGet(t, h2, g1, &v2)
-	g2 := s.Generation("g2")
-	g2.Inherit(h1)
-	g2.Inherit(h2)
-
-	g1.Destroy("TestCleanup")
-	expectGet(t, h1, g2, &v1)
-	expectGet(t, h2, g2, &v2)
-	for k, v := range map[string]*bool{"key1": &v1, "key2": &v2} {
-		if got, want := *v, false; got != want {
-			t.Errorf("after destroying g1, bound value %q is cleaned up", k)
-		}
-	}
-	g2.Destroy("TestCleanup")
-	if got, want := v1, false; got != want {
-		t.Error("after destroying g2, v1 is cleaned up")
-	}
-	if got, want := v2, true; got != want {
-		t.Error("after destroying g2, v2 is not cleaned up")
-	}
-}
-
-func TestHandleRefCounting(t *testing.T) {
-	s := &memoize.Store{}
-	g1 := s.Generation("g1")
-	v1 := false
-	v2 := false
-	h1, release1 := g1.GetHandle("key1", func(context.Context, memoize.Arg) interface{} {
+	p1, release1 := store.Promise("key1", func(context.Context, interface{}) interface{} {
 		return &v1
 	})
-	h2, release2 := g1.GetHandle("key2", func(context.Context, memoize.Arg) interface{} {
+	p2, release2 := store.Promise("key2", func(context.Context, interface{}) interface{} {
 		return &v2
 	})
-	expectGet(t, h1, g1, &v1)
-	expectGet(t, h2, g1, &v2)
+	expectGet(t, p1, &v1)
+	expectGet(t, p2, &v2)
 
-	g2 := s.Generation("g2")
-	expectGet(t, h1, g2, &v1)
-	g1.Destroy("by test")
-	expectGet(t, h2, g2, &v2)
+	expectGet(t, p1, &v1)
+	expectGet(t, p2, &v2)
 
-	h2Copy, release2Copy := g2.GetHandle("key2", func(context.Context, memoize.Arg) interface{} {
+	p2Copy, release2Copy := store.Promise("key2", func(context.Context, interface{}) interface{} {
 		return &v1
 	})
-	if h2 != h2Copy {
-		t.Error("NewHandle returned a new value while old is not destroyed yet")
+	if p2 != p2Copy {
+		t.Error("Promise returned a new value while old is not destroyed yet")
 	}
-	expectGet(t, h2Copy, g2, &v2)
-	g2.Destroy("by test")
+	expectGet(t, p2Copy, &v2)
 
 	release2()
 	if got, want := v2, false; got != want {
@@ -147,27 +93,23 @@
 	}
 	release1()
 
-	g3 := s.Generation("g3")
-	h2Copy, release2Copy = g3.GetHandle("key2", func(context.Context, memoize.Arg) interface{} {
+	p2Copy, release2Copy = store.Promise("key2", func(context.Context, interface{}) interface{} {
 		return &v2
 	})
-	if h2 == h2Copy {
-		t.Error("NewHandle returned previously destroyed value")
+	if p2 == p2Copy {
+		t.Error("Promise returned previously destroyed value")
 	}
 	release2Copy()
-	g3.Destroy("by test")
 }
 
-func TestHandleDestroyedWhileRunning(t *testing.T) {
-	// Test that calls to Handle.Get return even if the handle is destroyed while
-	// running.
+func TestPromiseDestroyedWhileRunning(t *testing.T) {
+	// Test that calls to Promise.Get return even if the promise is destroyed while running.
 
-	s := &memoize.Store{}
-	g := s.Generation("g")
+	var store memoize.Store
 	c := make(chan int)
 
 	var v int
-	h, release := g.GetHandle("key", func(ctx context.Context, _ memoize.Arg) interface{} {
+	h, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} {
 		<-c
 		<-c
 		if err := ctx.Err(); err != nil {
@@ -184,13 +126,13 @@
 	var got interface{}
 	var err error
 	go func() {
-		got, err = h.Get(ctx, g, nil)
+		got, err = h.Get(ctx, nil)
 		wg.Done()
 	}()
 
-	c <- 0    // send once to enter the handle function
-	release() // release before the handle function returns
-	c <- 0    // let the handle function proceed
+	c <- 0    // send once to enter the promise function
+	release() // release before the promise function returns
+	c <- 0    // let the promise function proceed
 
 	wg.Wait()
 
@@ -201,3 +143,24 @@
 		t.Errorf("Get() = %v, want %v", got, v)
 	}
 }
+
+func TestDoubleReleasePanics(t *testing.T) {
+	var store memoize.Store
+	_, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} { return 0 })
+
+	panicked := false
+
+	func() {
+		defer func() {
+			if recover() != nil {
+				panicked = true
+			}
+		}()
+		release()
+		release()
+	}()
+
+	if !panicked {
+		t.Errorf("calling release() twice did not panic")
+	}
+}
diff --git a/internal/persistent/map.go b/internal/persistent/map.go
index 9c17ad0..f5dd102 100644
--- a/internal/persistent/map.go
+++ b/internal/persistent/map.go
@@ -28,6 +28,8 @@
 // client-provided function that implements a strict weak order.
 //
 // Maps can be Cloned in constant time.
+// Get, Store, and Delete operations are done on average in logarithmic time.
+// Maps can be Updated in O(m log(n/m)) time for maps of size n and m, where m < n.
 //
 // Values are reference counted, and a client-supplied release function
 // is called when a value is no longer referenced by a map or any clone.
@@ -118,10 +120,17 @@
 	}
 }
 
-// Destroy the persistent map.
+// Destroy destroys the map.
 //
 // After Destroy, the Map should not be used again.
 func (pm *Map) Destroy() {
+	// The implementation of these two functions is the same,
+	// but their intent is different.
+	pm.Clear()
+}
+
+// Clear removes all entries from the map.
+func (pm *Map) Clear() {
 	pm.root.decref()
 	pm.root = nil
 }
@@ -156,6 +165,15 @@
 	return nil, false
 }
 
+// SetAll updates the map with key/value pairs from the other map, overwriting existing keys.
+// It is equivalent to calling Set for each entry in the other map but is more efficient.
+// Both maps must have the same comparison function, otherwise behavior is undefined.
+func (pm *Map) SetAll(other *Map) {
+	root := pm.root
+	pm.root = union(root, other.root, pm.less, true)
+	root.decref()
+}
+
 // Set updates the value associated with the specified key.
 // If release is non-nil, it will be called with entry's key and value once the
 // key is no longer contained in the map or any clone.
@@ -185,7 +203,7 @@
 		second, first, overwrite = first, second, !overwrite
 	}
 
-	left, mid, right := split(second, first.key, less)
+	left, mid, right := split(second, first.key, less, false)
 	var result *mapNode
 	if overwrite && mid != nil {
 		result = mid.shallowCloneWithRef()
@@ -205,23 +223,31 @@
 // Return three new trees: left with all nodes with smaller than key, mid with
 // the node matching the key, right with all nodes larger than key.
 // If there are no nodes in one of trees, return nil instead of it.
+// If requireMid is set (such as during deletion), then all return arguments
+// are nil if mid is not found.
 //
 // split(n:-0) (left:+1, mid:+1, right:+1)
 // Split borrows n without affecting its refcount, and returns three
 // new references that that caller is expected to call decref.
-func split(n *mapNode, key interface{}, less func(a, b interface{}) bool) (left, mid, right *mapNode) {
+func split(n *mapNode, key interface{}, less func(a, b interface{}) bool, requireMid bool) (left, mid, right *mapNode) {
 	if n == nil {
 		return nil, nil, nil
 	}
 
 	if less(n.key, key) {
-		left, mid, right := split(n.right, key, less)
+		left, mid, right := split(n.right, key, less, requireMid)
+		if requireMid && mid == nil {
+			return nil, nil, nil
+		}
 		newN := n.shallowCloneWithRef()
 		newN.left = n.left.incref()
 		newN.right = left
 		return newN, mid, right
 	} else if less(key, n.key) {
-		left, mid, right := split(n.left, key, less)
+		left, mid, right := split(n.left, key, less, requireMid)
+		if requireMid && mid == nil {
+			return nil, nil, nil
+		}
 		newN := n.shallowCloneWithRef()
 		newN.left = right
 		newN.right = n.right.incref()
@@ -234,7 +260,10 @@
 // Delete deletes the value for a key.
 func (pm *Map) Delete(key interface{}) {
 	root := pm.root
-	left, mid, right := split(root, key, pm.less)
+	left, mid, right := split(root, key, pm.less, true)
+	if mid == nil {
+		return
+	}
 	pm.root = merge(left, right)
 	left.decref()
 	mid.decref()
diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go
index 059f0da..1c413d7 100644
--- a/internal/persistent/map_test.go
+++ b/internal/persistent/map_test.go
@@ -71,6 +71,15 @@
 	m1.remove(t, 1)
 	validateRef(t, m1, m2)
 
+	gotAllocs := int(testing.AllocsPerRun(10, func() {
+		m1.impl.Delete(100)
+		m1.impl.Delete(1)
+	}))
+	wantAllocs := 0
+	if gotAllocs != wantAllocs {
+		t.Errorf("wanted %d allocs, got %d", wantAllocs, gotAllocs)
+	}
+
 	for i := 10; i < 14; i++ {
 		m1.set(t, i, i)
 		validateRef(t, m1, m2)
@@ -142,6 +151,31 @@
 	assertSameMap(t, seenEntries, deletedEntries)
 }
 
+func TestUpdate(t *testing.T) {
+	deletedEntries := make(map[mapEntry]struct{})
+	seenEntries := make(map[mapEntry]struct{})
+
+	m1 := &validatedMap{
+		impl: NewMap(func(a, b interface{}) bool {
+			return a.(int) < b.(int)
+		}),
+		expected: make(map[int]int),
+		deleted:  deletedEntries,
+		seen:     seenEntries,
+	}
+	m2 := m1.clone()
+
+	m1.set(t, 1, 1)
+	m1.set(t, 2, 2)
+	m2.set(t, 2, 20)
+	m2.set(t, 3, 3)
+	m1.setAll(t, m2)
+
+	m1.destroy()
+	m2.destroy()
+	assertSameMap(t, seenEntries, deletedEntries)
+}
+
 func (vm *validatedMap) onDelete(t *testing.T, key, value int) {
 	entry := mapEntry{key: key, value: value}
 	if _, ok := vm.deleted[entry]; ok {
@@ -245,6 +279,14 @@
 	validateNode(t, node.right, less)
 }
 
+func (vm *validatedMap) setAll(t *testing.T, other *validatedMap) {
+	vm.impl.SetAll(other.impl)
+	for key, value := range other.expected {
+		vm.expected[key] = value
+	}
+	vm.validate(t)
+}
+
 func (vm *validatedMap) set(t *testing.T, key, value int) {
 	vm.seen[mapEntry{key: key, value: value}] = struct{}{}
 	vm.impl.Set(key, value, func(deletedKey, deletedValue interface{}) {
@@ -298,19 +340,3 @@
 		t.Fatalf("different maps:\n%v\nvs\n%v", map1, map2)
 	}
 }
-
-func isSameMap(map1, map2 reflect.Value) bool {
-	if map1.Len() != map2.Len() {
-		return false
-	}
-	iter := map1.MapRange()
-	for iter.Next() {
-		key := iter.Key()
-		value1 := iter.Value()
-		value2 := map2.MapIndex(key)
-		if value2.IsZero() || !reflect.DeepEqual(value1.Interface(), value2.Interface()) {
-			return false
-		}
-	}
-	return true
-}
diff --git a/internal/span/token.go b/internal/span/token.go
index af01d7b..c35a512 100644
--- a/internal/span/token.go
+++ b/internal/span/token.go
@@ -12,28 +12,40 @@
 )
 
 // Range represents a source code range in token.Pos form.
-// It also carries the FileSet that produced the positions, so that it is
+// It also carries the token.File that produced the positions, so that it is
 // self contained.
 type Range struct {
-	Start token.Pos
-	End   token.Pos
-
-	// TokFile may be nil if Start or End is invalid.
-	// TODO: Eventually we should guarantee that it is non-nil.
-	TokFile *token.File
+	TokFile    *token.File // non-nil
+	Start, End token.Pos   // both IsValid()
 }
 
-// NewRange creates a new Range from a FileSet and two positions.
-// To represent a point pass a 0 as the end pos.
-func NewRange(fset *token.FileSet, start, end token.Pos) Range {
-	tf := fset.File(start)
-	if tf == nil {
-		bug.Reportf("nil file")
+// NewRange creates a new Range from a token.File and two valid positions within it.
+//
+// (If you only have a token.FileSet, use file = fset.File(start). But
+// most callers know exactly which token.File they're dealing with and
+// should pass it explicitly. Not only does this save a lookup, but it
+// brings us a step closer to eliminating the global FileSet.)
+func NewRange(file *token.File, start, end token.Pos) Range {
+	if file == nil {
+		panic("nil *token.File")
 	}
+	if !start.IsValid() || !end.IsValid() {
+		panic("invalid start/end token.Pos")
+	}
+
+	// TODO(adonovan): ideally we would make this stronger assertion:
+	//
+	//   // Assert that file is non-nil and contains start and end.
+	//   _ = file.Offset(start)
+	//   _ = file.Offset(end)
+	//
+	// but some callers (e.g. packageCompletionSurrounding,
+	// posToMappedRange) don't ensure this precondition.
+
 	return Range{
+		TokFile: file,
 		Start:   start,
 		End:     end,
-		TokFile: tf,
 	}
 }
 
@@ -164,15 +176,16 @@
 	return line, col, err
 }
 
-// ToOffset converts a 1-base line and utf-8 column index into a byte offset in
-// the file corresponding to tf.
+// ToOffset converts a 1-based line and utf-8 column index into a byte offset
+// in the file corresponding to tf.
 func ToOffset(tf *token.File, line, col int) (int, error) {
-	if line < 0 {
-		return -1, fmt.Errorf("line is not valid")
+	if line < 1 { // token.File.LineStart panics if line < 1
+		return -1, fmt.Errorf("invalid line: %d", line)
 	}
+
 	lineMax := tf.LineCount() + 1
 	if line > lineMax {
-		return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
+		return -1, fmt.Errorf("line %d is beyond end of file %v", line, lineMax)
 	} else if line == lineMax {
 		if col > 1 {
 			return -1, fmt.Errorf("column is beyond end of file")
@@ -182,7 +195,9 @@
 	}
 	pos := tf.LineStart(line)
 	if !pos.IsValid() {
-		return -1, fmt.Errorf("line is not in file")
+		// Use bug.Errorf here because LineStart panics on out-of-bounds input, and so
+		// should never return invalid positions.
+		return -1, bug.Errorf("line is not in file")
 	}
 	// we assume that column is in bytes here, and that the first byte of a
 	// line is at column 1