internal/lsp: track diagnostics by reporting source

Our handling of diagnostics has gotten complicated and has recently been
a source of bugs. Heschi and I took a step back and refactored the
diagnostic pass, starting with a new data structure for tracking
diagnostics.

The LSP server now holds a map of URI to collection of diagnostic
reports, which is used to track diagnostics as they are computed by
multiple sources. Additionally, this collection holds a hash of the
last published diagnostic set.

This new information allows us to implement an algorithm for
incrementally updating diagnostics on the server: diagnostic reports
are stored as they are computed for a snapshot, and then published in
(possibly multiple) passes, with the last pass for a snapshot being
marked as 'final'.  In non-final passes, 'publishReports' publishes any
diagnostics that have already been computed for the snapshot, but does
nothing with URIs for which no diagnostics have been computed. In final
passes all diagnostics are reported, with empty diagnostics being
published for any URIs with no diagnostic reports. Any URIs for which no
diagnostic reports were computed are pruned from the diagnostic set. In
all cases, tracking the hash of published diagnostics prevents duplicate
publication.

This enables some simplifications of the existing diagnostic logic.
Computing new diagnostics and tracking sent diagnostics are now handled
by the same algorithm, avoiding some bookkeeping. It is also no longer
necessary to explicitly clear diagnostics for deletions. This fixes some
previously broken edge cases, for example when packages go out of scope
due to go.mod or gopls.mod changes.

Fixes golang/go#42198

Change-Id: Id0d8d0f7a60f6ffa8c33f0ae41763461f61dab7b
Reviewed-on: https://go-review.googlesource.com/c/tools/+/269677
Run-TryBot: Robert Findley <rfindley@google.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
Trust: Robert Findley <rfindley@google.com>
diff --git a/gopls/internal/regtest/workspace_test.go b/gopls/internal/regtest/workspace_test.go
index 1887ab4..28ef0d0 100644
--- a/gopls/internal/regtest/workspace_test.go
+++ b/gopls/internal/regtest/workspace_test.go
@@ -470,15 +470,10 @@
 replace a.com => %s/moda/a
 `, workdir))
 		env.Await(CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), 1))
-		// TODO: diagnostics are not being cleared from the old go.mod location,
-		// because it's not treated as a 'deleted' file. Uncomment this after
-		// fixing.
-		/*
-			env.Await(OnceMet(
-				CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), 1),
-				EmptyDiagnostics("modb/go.mod"),
-			))
-		*/
+		env.Await(OnceMet(
+			CompletedWork(lsp.DiagnosticWorkTitle(lsp.FromDidChange), 1),
+			EmptyDiagnostics("modb/go.mod"),
+		))
 
 		// Just as before, check that we now jump to the module cache.
 		location, _ = env.GoToDefinition("moda/a/a.go", env.RegexpSearch("moda/a/a.go", "Hello"))
diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go
index 4e22543..0a27af3 100644
--- a/internal/lsp/cache/session.go
+++ b/internal/lsp/cache/session.go
@@ -405,7 +405,7 @@
 }
 
 func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error {
-	_, _, releases, _, err := s.DidModifyFiles(ctx, changes)
+	_, _, releases, err := s.DidModifyFiles(ctx, changes)
 	for _, release := range releases {
 		release()
 	}
@@ -418,13 +418,9 @@
 	fileHandle source.VersionedFileHandle
 }
 
-func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[span.URI]source.View, map[source.View]source.Snapshot, []func(), []span.URI, error) {
+func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[span.URI]source.View, map[source.View]source.Snapshot, []func(), error) {
 	views := make(map[*View]map[span.URI]*fileChange)
 	bestViews := map[span.URI]source.View{}
-	// Keep track of deleted files so that we can clear their diagnostics.
-	// A file might be re-created after deletion, so only mark files that
-	// have truly been deleted.
-	deletions := map[span.URI]struct{}{}
 
 	// If the set of changes included directories, expand those directories
 	// to their files.
@@ -432,7 +428,7 @@
 
 	overlays, err := s.updateOverlays(ctx, changes)
 	if err != nil {
-		return nil, nil, nil, nil, err
+		return nil, nil, nil, err
 	}
 	var forceReloadMetadata bool
 	for _, c := range changes {
@@ -443,7 +439,7 @@
 		// Build the list of affected views.
 		bestView, err := s.viewOf(c.URI)
 		if err != nil {
-			return nil, nil, nil, nil, err
+			return nil, nil, nil, err
 		}
 		bestViews[c.URI] = bestView
 
@@ -465,7 +461,7 @@
 		for _, view := range changedViews {
 			// Make sure that the file is added to the view.
 			if _, err := view.getFile(c.URI); err != nil {
-				return nil, nil, nil, nil, err
+				return nil, nil, nil, err
 			}
 			if _, ok := views[view]; !ok {
 				views[view] = make(map[span.URI]*fileChange)
@@ -476,11 +472,10 @@
 					exists:     true,
 					fileHandle: fh,
 				}
-				delete(deletions, c.URI)
 			} else {
 				fsFile, err := s.cache.getFile(ctx, c.URI)
 				if err != nil {
-					return nil, nil, nil, nil, err
+					return nil, nil, nil, err
 				}
 				content, err := fsFile.Read()
 				fh := &closedFile{fsFile}
@@ -489,11 +484,6 @@
 					exists:     err == nil,
 					fileHandle: fh,
 				}
-				// If there was an error reading the file, assume it has been
-				// deleted.
-				if err != nil {
-					deletions[c.URI] = struct{}{}
-				}
 			}
 		}
 	}
@@ -505,11 +495,7 @@
 		snapshots[view] = snapshot
 		releases = append(releases, release)
 	}
-	var deletionsSlice []span.URI
-	for uri := range deletions {
-		deletionsSlice = append(deletionsSlice, uri)
-	}
-	return bestViews, snapshots, releases, deletionsSlice, nil
+	return bestViews, snapshots, releases, nil
 }
 
 // expandChangesToDirectories returns the set of changes with the directory
diff --git a/internal/lsp/command.go b/internal/lsp/command.go
index a8a26aa..b56930c 100644
--- a/internal/lsp/command.go
+++ b/internal/lsp/command.go
@@ -226,6 +226,7 @@
 		s.gcOptimizationDetailsMu.Lock()
 		if _, ok := s.gcOptimizationDetails[pkgDir]; ok {
 			delete(s.gcOptimizationDetails, pkgDir)
+			s.clearDiagnosticSource(gcDetailsSource)
 		} else {
 			s.gcOptimizationDetails[pkgDir] = struct{}{}
 		}
diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go
index 49298bc..ad91973 100644
--- a/internal/lsp/diagnostics.go
+++ b/internal/lsp/diagnostics.go
@@ -23,63 +23,56 @@
 	errors "golang.org/x/xerrors"
 )
 
-// idWithAnalysis is used to track if the diagnostics for a given file were
-// computed with analyses.
-type idWithAnalysis struct {
-	id              source.VersionedFileIdentity
-	includeAnalysis bool
+// diagnosticSource differentiates different sources of diagnostics.
+type diagnosticSource int
+
+const (
+	modSource diagnosticSource = iota
+	gcDetailsSource
+	analysisSource
+	typeCheckSource
+	orphanedSource
+)
+
+// A diagnosticReport holds results for a single diagnostic source.
+type diagnosticReport struct {
+	snapshotID    uint64
+	publishedHash string
+	diags         map[string]*source.Diagnostic
 }
 
-// A reportSet collects diagnostics for publication, sorting them by file and
-// de-duplicating.
-type reportSet struct {
-	mu sync.Mutex
-	// lazily allocated
-	reports map[idWithAnalysis]map[string]*source.Diagnostic
+// fileReports holds a collection of diagnostic reports for a single file, as
+// well as the hash of the last published set of diagnostics.
+type fileReports struct {
+	publishedHash string
+	reports       map[diagnosticSource]diagnosticReport
 }
 
-func (s *reportSet) add(id source.VersionedFileIdentity, includeAnalysis bool, diags ...*source.Diagnostic) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if s.reports == nil {
-		s.reports = make(map[idWithAnalysis]map[string]*source.Diagnostic)
-	}
-	key := idWithAnalysis{
-		id:              id,
-		includeAnalysis: includeAnalysis,
-	}
-	if _, ok := s.reports[key]; !ok {
-		s.reports[key] = map[string]*source.Diagnostic{}
-	}
+// hashDiagnostics computes a hash to identify diags.
+func hashDiagnostics(diags ...*source.Diagnostic) string {
+	source.SortDiagnostics(diags)
+	h := sha256.New()
 	for _, d := range diags {
-		s.reports[key][diagnosticKey(d)] = d
+		for _, t := range d.Tags {
+			fmt.Fprintf(h, "%s", t)
+		}
+		for _, r := range d.Related {
+			fmt.Fprintf(h, "%s%s%s", r.URI, r.Message, r.Range)
+		}
+		fmt.Fprintf(h, "%s%s%s%s", d.Message, d.Range, d.Severity, d.Source)
 	}
-}
-
-// diagnosticKey creates a unique identifier for a given diagnostic, since we
-// cannot use source.Diagnostics as map keys. This is used to de-duplicate
-// diagnostics.
-func diagnosticKey(d *source.Diagnostic) string {
-	var tags, related string
-	for _, t := range d.Tags {
-		tags += fmt.Sprintf("%s", t)
-	}
-	for _, r := range d.Related {
-		related += fmt.Sprintf("%s%s%s", r.URI, r.Message, r.Range)
-	}
-	key := fmt.Sprintf("%s%s%s%s%s%s", d.Message, d.Range, d.Severity, d.Source, tags, related)
-	return fmt.Sprintf("%x", sha256.Sum256([]byte(key)))
+	return fmt.Sprintf("%x", h.Sum(nil))
 }
 
 func (s *Server) diagnoseDetached(snapshot source.Snapshot) {
 	ctx := snapshot.View().BackgroundContext()
 	ctx = xcontext.Detach(ctx)
-	reports, shows := s.diagnose(ctx, snapshot, false)
+	shows := s.diagnose(ctx, snapshot, false)
 	if shows != nil {
 		// If a view has been created or the configuration changed, warn the user.
 		s.client.ShowMessage(ctx, shows)
 	}
-	s.publishReports(ctx, snapshot, reports, false)
+	s.publishDiagnostics(ctx, true, snapshot)
 }
 
 func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) {
@@ -93,26 +86,21 @@
 		// by file modifications (no analysis).
 		//
 		// The second phase does everything, and is debounced by the configured delay.
-		reports, err := s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk)
-		if err != nil {
-			if !errors.Is(err, context.Canceled) {
-				event.Error(ctx, "diagnosing changed files", err)
-			}
-		}
-		s.publishReports(ctx, snapshot, reports, true)
+		s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk)
+		s.publishDiagnostics(ctx, false, snapshot)
 		s.debouncer.debounce(snapshot.View().Name(), snapshot.ID(), delay, func() {
-			reports, _ := s.diagnose(ctx, snapshot, false)
-			s.publishReports(ctx, snapshot, reports, false)
+			s.diagnose(ctx, snapshot, false)
+			s.publishDiagnostics(ctx, true, snapshot)
 		})
 		return
 	}
 
 	// Ignore possible workspace configuration warnings in the normal flow.
-	reports, _ := s.diagnose(ctx, snapshot, false)
-	s.publishReports(ctx, snapshot, reports, false)
+	s.diagnose(ctx, snapshot, false)
+	s.publishDiagnostics(ctx, true, snapshot)
 }
 
-func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) (*reportSet, error) {
+func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) {
 	ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles")
 	defer done()
 	packages := make(map[source.Package]struct{})
@@ -124,7 +112,7 @@
 		}
 		pkgs, err := snapshot.PackagesForFile(ctx, uri, source.TypecheckWorkspace)
 		if err != nil {
-			// TODO (rFindley): we should probably do something with the error here,
+			// TODO (findleyr): we should probably do something with the error here,
 			// but as of now this can fail repeatedly if load fails, so can be too
 			// noisy to log (and we'll handle things later in the slow pass).
 			continue
@@ -133,41 +121,34 @@
 			packages[pkg] = struct{}{}
 		}
 	}
-	reports := new(reportSet)
 	for pkg := range packages {
-		pkgReports, _, err := source.Diagnostics(ctx, snapshot, pkg, false)
-		if err != nil {
-			return nil, err
-		}
-		for id, diags := range pkgReports {
-			reports.add(id, false, diags...)
+		typeCheckResults := source.GetTypeCheckDiagnostics(ctx, snapshot, pkg)
+		for uri, diags := range typeCheckResults.Diagnostics {
+			s.storeDiagnostics(snapshot, uri, typeCheckSource, diags)
 		}
 	}
-	return reports, nil
 }
 
 // diagnose is a helper function for running diagnostics with a given context.
 // Do not call it directly.
-func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysAnalyze bool) (diagReports *reportSet, _ *protocol.ShowMessageParams) {
+func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysAnalyze bool) (_ *protocol.ShowMessageParams) {
 	ctx, done := event.Start(ctx, "Server.diagnose")
 	defer done()
 
 	// Wait for a free diagnostics slot.
 	select {
 	case <-ctx.Done():
-		return nil, nil
+		return nil
 	case s.diagnosticsSema <- struct{}{}:
 	}
 	defer func() {
 		<-s.diagnosticsSema
 	}()
 
-	reports := new(reportSet)
-
 	// First, diagnose the go.mod file.
 	modReports, modErr := mod.Diagnostics(ctx, snapshot)
 	if ctx.Err() != nil {
-		return nil, nil
+		return nil
 	}
 	if modErr != nil {
 		event.Error(ctx, "warning: diagnose go.mod", modErr, tag.Directory.Of(snapshot.View().Folder().Filename()), tag.Snapshot.Of(snapshot.ID()))
@@ -177,13 +158,13 @@
 			event.Error(ctx, "missing URI for module diagnostics", fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename()))
 			continue
 		}
-		reports.add(id, true, diags...) // treat go.mod diagnostics like analyses
+		s.storeDiagnostics(snapshot, id.URI, modSource, diags)
 	}
 
 	// Diagnose all of the packages in the workspace.
 	wsPkgs, err := snapshot.WorkspacePackages(ctx)
 	if s.shouldIgnoreError(ctx, snapshot, err) {
-		return nil, nil
+		return nil
 	}
 	// Show the error as a progress error report so that it appears in the
 	// status bar. If a client doesn't support progress reports, the error
@@ -194,16 +175,18 @@
 	if err != nil {
 		// Some error messages can also be displayed as diagnostics.
 		if errList := (source.ErrorList)(nil); errors.As(err, &errList) {
-			_ = errorsToDiagnostic(ctx, snapshot, errList, reports)
+			s.storeErrorDiagnostics(ctx, snapshot, typeCheckSource, errList)
 		}
 		event.Error(ctx, "errors diagnosing workspace", err, tag.Snapshot.Of(snapshot.ID()), tag.Directory.Of(snapshot.View().Folder()))
-		return reports, nil
+		return nil
 	}
 
 	var (
 		showMsgMu sync.Mutex
 		showMsg   *protocol.ShowMessageParams
 		wg        sync.WaitGroup
+		seenMu    sync.Mutex
+		seen      = map[span.URI]bool{}
 	)
 	for _, pkg := range wsPkgs {
 		wg.Add(1)
@@ -213,6 +196,9 @@
 			includeAnalysis := alwaysAnalyze // only run analyses for packages with open files
 			var gcDetailsDir span.URI        // find the package's optimization details, if available
 			for _, pgf := range pkg.CompiledGoFiles() {
+				seenMu.Lock()
+				seen[pgf.URI] = true
+				seenMu.Unlock()
 				if snapshot.IsOpen(pgf.URI) {
 					includeAnalysis = true
 				}
@@ -227,11 +213,7 @@
 				}
 			}
 
-			pkgReports, warn, err := source.Diagnostics(ctx, snapshot, pkg, includeAnalysis)
-
-			// Check if might want to warn the user about their build configuration.
-			// Our caller decides whether to send the message.
-			if warn && !snapshot.ValidBuildConfiguration() {
+			if shouldWarn(snapshot, pkg) {
 				showMsgMu.Lock()
 				showMsg = &protocol.ShowMessageParams{
 					Type:    protocol.Warning,
@@ -239,24 +221,34 @@
 				}
 				showMsgMu.Unlock()
 			}
-			if err != nil {
-				event.Error(ctx, "warning: diagnose package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
-				return
-			}
 
-			// Add all reports to the global map, checking for duplicates.
-			for id, diags := range pkgReports {
-				reports.add(id, includeAnalysis, diags...)
+			typeCheckResults := source.GetTypeCheckDiagnostics(ctx, snapshot, pkg)
+			for uri, diags := range typeCheckResults.Diagnostics {
+				s.storeDiagnostics(snapshot, uri, typeCheckSource, diags)
+			}
+			if includeAnalysis && !typeCheckResults.HasParseOrListErrors {
+				reports, err := source.Analyze(ctx, snapshot, pkg, typeCheckResults)
+				if err != nil {
+					event.Error(ctx, "warning: diagnose package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
+					return
+				}
+				for uri, diags := range reports {
+					s.storeDiagnostics(snapshot, uri, analysisSource, diags)
+				}
 			}
 			// If gc optimization details are available, add them to the
 			// diagnostic reports.
 			if gcDetailsDir != "" {
 				gcReports, err := source.GCOptimizationDetails(ctx, snapshot, gcDetailsDir)
 				if err != nil {
-					event.Error(ctx, "warning: gc details", err, tag.Snapshot.Of(snapshot.ID()))
+					event.Error(ctx, "warning: gc details", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
 				}
 				for id, diags := range gcReports {
-					reports.add(id, includeAnalysis, diags...)
+					fh := snapshot.FindFile(id.URI)
+					if fh == nil {
+						continue
+					}
+					s.storeDiagnostics(snapshot, id.URI, gcDetailsSource, diags)
 				}
 			}
 		}(pkg)
@@ -266,27 +258,91 @@
 	// the workspace). Otherwise, add a diagnostic to the file.
 	if len(wsPkgs) > 0 {
 		for _, o := range s.session.Overlays() {
-			// Check if we already have diagnostic reports for the given file,
-			// meaning that we have already seen its package.
-			var seen bool
-			for _, includeAnalysis := range []bool{true, false} {
-				_, ok := reports.reports[idWithAnalysis{
-					id:              o.VersionedFileIdentity(),
-					includeAnalysis: includeAnalysis,
-				}]
-				seen = seen || ok
-			}
-			if seen {
+			if seen[o.URI()] {
 				continue
 			}
 			diagnostic := s.checkForOrphanedFile(ctx, snapshot, o)
 			if diagnostic == nil {
 				continue
 			}
-			reports.add(o.VersionedFileIdentity(), true, diagnostic)
+			s.storeDiagnostics(snapshot, o.URI(), orphanedSource, []*source.Diagnostic{diagnostic})
 		}
 	}
-	return reports, showMsg
+	return showMsg
+}
+
+// storeDiagnostics stores results from a single diagnostic source. It merges
+// results into any existing results for the same snapshot.
+func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic) {
+	// Safeguard: ensure that the file actually exists in the snapshot
+	// (see golang.org/issues/38602).
+	fh := snapshot.FindFile(uri)
+	if fh == nil {
+		return
+	}
+	s.diagnosticsMu.Lock()
+	defer s.diagnosticsMu.Unlock()
+	if s.diagnostics[uri] == nil {
+		s.diagnostics[uri] = &fileReports{
+			publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
+			reports:       map[diagnosticSource]diagnosticReport{},
+		}
+	}
+	report := s.diagnostics[uri].reports[dsource]
+	// Don't set obsolete diagnostics.
+	if report.snapshotID > snapshot.ID() {
+		return
+	}
+	if report.diags == nil || report.snapshotID != snapshot.ID() {
+		report.diags = map[string]*source.Diagnostic{}
+	}
+	report.snapshotID = snapshot.ID()
+	for _, d := range diags {
+		report.diags[hashDiagnostics(d)] = d
+	}
+	s.diagnostics[uri].reports[dsource] = report
+}
+
+// shouldWarn reports whether we should warn the user about their build
+// configuration.
+func shouldWarn(snapshot source.Snapshot, pkg source.Package) bool {
+	if snapshot.ValidBuildConfiguration() {
+		return false
+	}
+	if len(pkg.MissingDependencies()) > 0 {
+		return true
+	}
+	if len(pkg.CompiledGoFiles()) == 1 && hasUndeclaredErrors(pkg) {
+		return true // The user likely opened a single file.
+	}
+	return false
+}
+
+// clearDiagnosticSource clears all diagnostics for a given source type. It is
+// necessary for cases where diagnostics have been invalidated by something
+// other than a snapshot change, for example when gc_details is toggled.
+func (s *Server) clearDiagnosticSource(dsource diagnosticSource) {
+	s.diagnosticsMu.Lock()
+	defer s.diagnosticsMu.Unlock()
+	for _, reports := range s.diagnostics {
+		delete(reports.reports, dsource)
+	}
+}
+
+// hasUndeclaredErrors returns true if a package has a type error
+// about an undeclared symbol.
+//
+// TODO(findleyr): switch to using error codes in 1.16
+func hasUndeclaredErrors(pkg source.Package) bool {
+	for _, err := range pkg.GetErrors() {
+		if err.Kind != source.TypeError {
+			continue
+		}
+		if strings.Contains(err.Message, "undeclared name:") {
+			return true
+		}
+	}
+	return false
 }
 
 // showCriticalErrorStatus shows the error as a progress report.
@@ -356,7 +412,7 @@
 	}
 }
 
-func errorsToDiagnostic(ctx context.Context, snapshot source.Snapshot, errors []*source.Error, reports *reportSet) error {
+func (s *Server) storeErrorDiagnostics(ctx context.Context, snapshot source.Snapshot, dsource diagnosticSource, errors []*source.Error) {
 	for _, e := range errors {
 		diagnostic := &source.Diagnostic{
 			Range:    e.Range,
@@ -365,103 +421,71 @@
 			Severity: protocol.SeverityError,
 			Source:   e.Category,
 		}
-		fh, err := snapshot.GetVersionedFile(ctx, e.URI)
-		if err != nil {
-			return err
-		}
-		reports.add(fh.VersionedFileIdentity(), true, diagnostic)
+		s.storeDiagnostics(snapshot, e.URI, dsource, []*source.Diagnostic{diagnostic})
 	}
-	return nil
 }
 
-func (s *Server) publishReports(ctx context.Context, snapshot source.Snapshot, reports *reportSet, isFirstPass bool) {
-	// Check for context cancellation before publishing diagnostics.
-	if ctx.Err() != nil || reports == nil {
-		return
-	}
-
-	s.deliveredMu.Lock()
-	defer s.deliveredMu.Unlock()
-
-	for key, diagnosticsMap := range reports.reports {
-		// Don't deliver diagnostics if the context has already been canceled.
-		if ctx.Err() != nil {
-			break
-		}
-		// Pre-sort diagnostics to avoid extra work when we compare them.
-		var diagnostics []*source.Diagnostic
-		for _, d := range diagnosticsMap {
-			diagnostics = append(diagnostics, d)
-		}
-		source.SortDiagnostics(diagnostics)
-		toSend := sentDiagnostics{
-			id:              key.id,
-			sorted:          diagnostics,
-			includeAnalysis: key.includeAnalysis,
-			snapshotID:      snapshot.ID(),
+// publishDiagnostics collects and publishes any unpublished diagnostic reports.
+func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) {
+	s.diagnosticsMu.Lock()
+	defer s.diagnosticsMu.Unlock()
+	for uri, r := range s.diagnostics {
+		anyReportsChanged := false
+		reportHashes := map[diagnosticSource]string{}
+		var diags []*source.Diagnostic
+		for dsource, report := range r.reports {
+			// Note: it might be worth adding special handling for the case where
+			// report.snapshotID > snapshot.ID(), in which case this publish pass is
+			// operating on stale data, but for now this is handled by virtue of
+			// cancelling the old snapshot context before diagnosing the next.
+			if report.snapshotID != snapshot.ID() {
+				continue
+			}
+			var reportDiags []*source.Diagnostic
+			for _, d := range report.diags {
+				diags = append(diags, d)
+				reportDiags = append(reportDiags, d)
+			}
+			hash := hashDiagnostics(reportDiags...)
+			if hash != report.publishedHash {
+				anyReportsChanged = true
+			}
+			reportHashes[dsource] = hash
 		}
 
-		// We use the zero values if this is an unknown file.
-		delivered := s.delivered[key.id.URI]
-
-		// Snapshot IDs are always increasing, so we use them instead of file
-		// versions to create the correct order for diagnostics.
-
-		// If we've already delivered diagnostics for a future snapshot for
-		// this file, do not deliver them.
-		if delivered.snapshotID > toSend.snapshotID {
-			// Do not update the delivered map since it already contains newer
-			// diagnostics.
+		if !final && !anyReportsChanged {
+			// Don't invalidate existing reports on the client if we haven't got any
+			// new information.
 			continue
 		}
-
-		// Check if we should reuse the cached diagnostics.
-		if equalDiagnostics(delivered.sorted, diagnostics) {
-			// Make sure to update the delivered map.
-			s.delivered[key.id.URI] = toSend
+		source.SortDiagnostics(diags)
+		hash := hashDiagnostics(diags...)
+		if hash == r.publishedHash {
 			continue
 		}
-
-		// If we've already delivered diagnostics for this file, at this
-		// snapshot, with analyses, do not send diagnostics without analyses.
-		if delivered.snapshotID == toSend.snapshotID && delivered.id == toSend.id &&
-			delivered.includeAnalysis && !toSend.includeAnalysis {
-			// Do not update the delivered map since it already contains better diagnostics.
-			continue
+		version := float64(0)
+		if fh := snapshot.FindFile(uri); fh != nil { // file may have been deleted
+			version = fh.Version()
 		}
-
-		// If we've previously delivered non-empty diagnostics and this is a
-		// first diagnostic pass, wait for a subsequent pass to complete before
-		// sending empty diagnostics to avoid flickering diagnostics.
-		if isFirstPass && delivered.includeAnalysis && !toSend.includeAnalysis && len(toSend.sorted) == 0 {
-			continue
-		}
-
 		if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
-			Diagnostics: toProtocolDiagnostics(diagnostics),
-			URI:         protocol.URIFromSpanURI(key.id.URI),
-			Version:     key.id.Version,
-		}); err != nil {
-			event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(key.id.URI))
-			continue
-		}
-		// Update the delivered map.
-		s.delivered[key.id.URI] = toSend
-	}
-}
-
-// equalDiagnostics returns true if the 2 lists of diagnostics are equal.
-// It assumes that both a and b are already sorted.
-func equalDiagnostics(a, b []*source.Diagnostic) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i := 0; i < len(a); i++ {
-		if source.CompareDiagnostic(a[i], b[i]) != 0 {
-			return false
+			Diagnostics: toProtocolDiagnostics(diags),
+			URI:         protocol.URIFromSpanURI(uri),
+			Version:     version,
+		}); err == nil {
+			r.publishedHash = hash
+			for dsource, hash := range reportHashes {
+				report := r.reports[dsource]
+				report.publishedHash = hash
+				r.reports[dsource] = report
+			}
+		} else {
+			if ctx.Err() != nil {
+				// Publish may have failed due to a cancelled context.
+				return
+			}
+			event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(uri))
 		}
 	}
-	return true
 }
 
 func toProtocolDiagnostics(diagnostics []*source.Diagnostic) []protocol.Diagnostic {
diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go
index 0303491..705e26a 100644
--- a/internal/lsp/lsp_test.go
+++ b/internal/lsp/lsp_test.go
@@ -1158,20 +1158,21 @@
 	defer release()
 
 	// Always run diagnostics with analysis.
-	reports, _ := r.server.diagnose(r.ctx, snapshot, true)
-	r.server.publishReports(r.ctx, snapshot, reports, false)
-	for uri, sent := range r.server.delivered {
+	r.server.diagnose(r.ctx, snapshot, true)
+	for uri, reports := range r.server.diagnostics {
 		var diagnostics []*source.Diagnostic
-		for _, d := range sent.sorted {
-			diagnostics = append(diagnostics, &source.Diagnostic{
-				Range:    d.Range,
-				Message:  d.Message,
-				Related:  d.Related,
-				Severity: d.Severity,
-				Source:   d.Source,
-				Tags:     d.Tags,
-			})
+		for _, report := range reports.reports {
+			for _, d := range report.diags {
+				diagnostics = append(diagnostics, &source.Diagnostic{
+					Range:    d.Range,
+					Message:  d.Message,
+					Related:  d.Related,
+					Severity: d.Severity,
+					Source:   d.Source,
+					Tags:     d.Tags,
+				})
+			}
+			r.diagnostics[uri] = diagnostics
 		}
-		r.diagnostics[uri] = diagnostics
 	}
 }
diff --git a/internal/lsp/server.go b/internal/lsp/server.go
index 905a408..6df2d2d 100644
--- a/internal/lsp/server.go
+++ b/internal/lsp/server.go
@@ -23,7 +23,7 @@
 // messages on on the supplied stream.
 func NewServer(session source.Session, client protocol.Client) *Server {
 	return &Server{
-		delivered:             make(map[span.URI]sentDiagnostics),
+		diagnostics:           map[span.URI]*fileReports{},
 		gcOptimizationDetails: make(map[span.URI]struct{}),
 		watchedDirectories:    make(map[span.URI]struct{}),
 		changedFiles:          make(map[span.URI]struct{}),
@@ -86,9 +86,8 @@
 	watchedDirectories     map[span.URI]struct{}
 	watchRegistrationCount uint64
 
-	// delivered is a cache of the diagnostics that the server has sent.
-	deliveredMu sync.Mutex
-	delivered   map[span.URI]sentDiagnostics
+	diagnosticsMu sync.Mutex
+	diagnostics   map[span.URI]*fileReports
 
 	// gcOptimizationDetails describes the packages for which we want
 	// optimization details to be included in the diagnostics. The key is the
@@ -111,14 +110,6 @@
 	criticalErrorStatus   *workDone
 }
 
-// sentDiagnostics is used to cache diagnostics that have been sent for a given file.
-type sentDiagnostics struct {
-	id              source.VersionedFileIdentity
-	sorted          []*source.Diagnostic
-	includeAnalysis bool
-	snapshotID      uint64
-}
-
 func (s *Server) workDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) error {
 	return s.progress.cancel(ctx, params.Token)
 }
diff --git a/internal/lsp/source/diagnostics.go b/internal/lsp/source/diagnostics.go
index 33693a8..a9bffbd 100644
--- a/internal/lsp/source/diagnostics.go
+++ b/internal/lsp/source/diagnostics.go
@@ -6,15 +6,12 @@
 
 import (
 	"context"
-	"fmt"
-	"strings"
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/lsp/debug/tag"
 	"golang.org/x/tools/internal/lsp/protocol"
 	"golang.org/x/tools/internal/span"
-	errors "golang.org/x/xerrors"
 )
 
 type Diagnostic struct {
@@ -39,32 +36,15 @@
 	Message string
 }
 
-func Diagnostics(ctx context.Context, snapshot Snapshot, pkg Package, withAnalysis bool) (map[VersionedFileIdentity][]*Diagnostic, bool, error) {
+func GetTypeCheckDiagnostics(ctx context.Context, snapshot Snapshot, pkg Package) TypeCheckDiagnostics {
 	onlyIgnoredFiles := true
 	for _, pgf := range pkg.CompiledGoFiles() {
 		onlyIgnoredFiles = onlyIgnoredFiles && snapshot.IgnoredFile(pgf.URI)
 	}
 	if onlyIgnoredFiles {
-		return nil, false, nil
+		return TypeCheckDiagnostics{}
 	}
 
-	// If we are missing dependencies, it may because the user's workspace is
-	// not correctly configured. Report errors, if possible.
-	var warn bool
-	if len(pkg.MissingDependencies()) > 0 {
-		warn = true
-	}
-	// If we have a package with a single file and errors about "undeclared" symbols,
-	// we may have an ad-hoc package with multiple files. Show a warning message.
-	// TODO(golang/go#36416): Remove this when golang.org/cl/202277 is merged.
-	if len(pkg.CompiledGoFiles()) == 1 && hasUndeclaredErrors(pkg) {
-		warn = true
-	}
-	// Prepare the reports we will send for the files in this package.
-	reports := make(map[VersionedFileIdentity][]*Diagnostic)
-	for _, pgf := range pkg.CompiledGoFiles() {
-		clearReports(snapshot, reports, pgf.URI)
-	}
 	// Prepare any additional reports for the errors in this package.
 	for _, e := range pkg.GetErrors() {
 		// We only need to handle lower-level errors.
@@ -79,51 +59,82 @@
 				}
 			}
 		}
-		clearReports(snapshot, reports, e.URI)
 	}
-	// Run diagnostics for the package that this URI belongs to.
-	hadDiagnostics, hadTypeErrors, err := diagnostics(ctx, snapshot, reports, pkg, len(pkg.MissingDependencies()) > 0)
-	if err != nil {
-		return nil, warn, err
-	}
-	if hadDiagnostics || !withAnalysis {
-		return reports, warn, nil
-	}
+	return typeCheckDiagnostics(ctx, snapshot, pkg)
+}
+
+func Analyze(ctx context.Context, snapshot Snapshot, pkg Package, typeCheckResult TypeCheckDiagnostics) (map[span.URI][]*Diagnostic, error) {
 	// Exit early if the context has been canceled. This also protects us
 	// from a race on Options, see golang/go#36699.
 	if ctx.Err() != nil {
-		return nil, warn, ctx.Err()
+		return nil, ctx.Err()
 	}
 	// If we don't have any list or parse errors, run analyses.
-	analyzers := pickAnalyzers(snapshot, hadTypeErrors)
-	if err := analyses(ctx, snapshot, reports, pkg, analyzers); err != nil {
-		event.Error(ctx, "analyses failed", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(pkg.ID()))
-		if ctx.Err() != nil {
-			return nil, warn, ctx.Err()
+	analyzers := pickAnalyzers(snapshot, typeCheckResult.HasTypeErrors)
+	analysisErrors, err := snapshot.Analyze(ctx, pkg.ID(), analyzers...)
+	if err != nil {
+		return nil, err
+	}
+
+	reports := emptyDiagnostics(pkg)
+	// Report diagnostics and errors from root analyzers.
+	for _, e := range analysisErrors {
+		// If the diagnostic comes from a "convenience" analyzer, it is not
+		// meant to provide diagnostics, but rather only suggested fixes.
+		// Skip these types of errors in diagnostics; we will use their
+		// suggested fixes when providing code actions.
+		if isConvenienceAnalyzer(e.Category) {
+			continue
+		}
+		// This is a bit of a hack, but clients > 3.15 will be able to grey out unnecessary code.
+		// If we are deleting code as part of all of our suggested fixes, assume that this is dead code.
+		// TODO(golang/go#34508): Return these codes from the diagnostics themselves.
+		var tags []protocol.DiagnosticTag
+		if onlyDeletions(e.SuggestedFixes) {
+			tags = append(tags, protocol.Unnecessary)
+		}
+		// Type error analyzers only alter the tags for existing type errors.
+		if _, ok := snapshot.View().Options().TypeErrorAnalyzers[e.Category]; ok {
+			existingDiagnostics := typeCheckResult.Diagnostics[e.URI]
+			for _, d := range existingDiagnostics {
+				if r := protocol.CompareRange(e.Range, d.Range); r != 0 {
+					continue
+				}
+				if e.Message != d.Message {
+					continue
+				}
+				d.Tags = append(d.Tags, tags...)
+			}
+		} else {
+			reports[e.URI] = append(reports[e.URI], &Diagnostic{
+				Range:    e.Range,
+				Message:  e.Message,
+				Source:   e.Category,
+				Severity: protocol.SeverityWarning,
+				Tags:     tags,
+				Related:  e.Related,
+			})
 		}
 	}
-	return reports, warn, nil
+	return reports, nil
 }
 
-func pickAnalyzers(snapshot Snapshot, hadTypeErrors bool) map[string]Analyzer {
-	analyzers := make(map[string]Analyzer)
-
+func pickAnalyzers(snapshot Snapshot, hadTypeErrors bool) []*analysis.Analyzer {
 	// Always run convenience analyzers.
-	for k, v := range snapshot.View().Options().ConvenienceAnalyzers {
-		analyzers[k] = v
-	}
+	categories := []map[string]Analyzer{snapshot.View().Options().ConvenienceAnalyzers}
 	// If we had type errors, only run type error analyzers.
 	if hadTypeErrors {
-		for k, v := range snapshot.View().Options().TypeErrorAnalyzers {
-			analyzers[k] = v
+		categories = append(categories, snapshot.View().Options().TypeErrorAnalyzers)
+	} else {
+		categories = append(categories, snapshot.View().Options().DefaultAnalyzers, snapshot.View().Options().StaticcheckAnalyzers)
+	}
+	var analyzers []*analysis.Analyzer
+	for _, m := range categories {
+		for _, a := range m {
+			if a.IsEnabled(snapshot.View()) {
+				analyzers = append(analyzers, a.Analyzer)
+			}
 		}
-		return analyzers
-	}
-	for k, v := range snapshot.View().Options().DefaultAnalyzers {
-		analyzers[k] = v
-	}
-	for k, v := range snapshot.View().Options().StaticcheckAnalyzers {
-		analyzers[k] = v
 	}
 	return analyzers
 }
@@ -137,22 +148,29 @@
 	if err != nil {
 		return VersionedFileIdentity{}, nil, err
 	}
-	reports, _, err := Diagnostics(ctx, snapshot, pkg, true)
-	if err != nil {
-		return VersionedFileIdentity{}, nil, err
-	}
-	diagnostics, ok := reports[fh.VersionedFileIdentity()]
-	if !ok {
-		return VersionedFileIdentity{}, nil, errors.Errorf("no diagnostics for %s", uri)
+	typeCheckResults := GetTypeCheckDiagnostics(ctx, snapshot, pkg)
+	diagnostics := typeCheckResults.Diagnostics[fh.URI()]
+	if !typeCheckResults.HasParseOrListErrors {
+		reports, err := Analyze(ctx, snapshot, pkg, typeCheckResults)
+		if err != nil {
+			return VersionedFileIdentity{}, nil, err
+		}
+		diagnostics = append(diagnostics, reports[fh.URI()]...)
 	}
 	return fh.VersionedFileIdentity(), diagnostics, nil
 }
 
+type TypeCheckDiagnostics struct {
+	HasTypeErrors        bool
+	HasParseOrListErrors bool
+	Diagnostics          map[span.URI][]*Diagnostic
+}
+
 type diagnosticSet struct {
 	listErrors, parseErrors, typeErrors []*Diagnostic
 }
 
-func diagnostics(ctx context.Context, snapshot Snapshot, reports map[VersionedFileIdentity][]*Diagnostic, pkg Package, hasMissingDeps bool) (bool, bool, error) {
+func typeCheckDiagnostics(ctx context.Context, snapshot Snapshot, pkg Package) TypeCheckDiagnostics {
 	ctx, done := event.Start(ctx, "source.diagnostics", tag.Package.Of(pkg.ID()))
 	_ = ctx // circumvent SA4006
 	defer done()
@@ -182,104 +200,37 @@
 			diag.Source = "go list"
 		}
 	}
-	var nonEmptyDiagnostics, hasTypeErrors bool // track if we actually send non-empty diagnostics
+	typecheck := TypeCheckDiagnostics{
+		Diagnostics: emptyDiagnostics(pkg),
+	}
 	for uri, set := range diagSets {
 		// Don't report type errors if there are parse errors or list errors.
 		diags := set.typeErrors
-		if len(set.parseErrors) > 0 {
-			diags, nonEmptyDiagnostics = set.parseErrors, true
-		} else if len(set.listErrors) > 0 {
-			// Only show list errors if the package has missing dependencies.
-			if hasMissingDeps {
-				diags, nonEmptyDiagnostics = set.listErrors, true
+		switch {
+		case len(set.parseErrors) > 0:
+			typecheck.HasParseOrListErrors = true
+			diags = set.parseErrors
+		case len(set.listErrors) > 0:
+			typecheck.HasParseOrListErrors = true
+			if len(pkg.MissingDependencies()) > 0 {
+				diags = set.listErrors
 			}
-		} else if len(set.typeErrors) > 0 {
-			hasTypeErrors = true
+		case len(set.typeErrors) > 0:
+			typecheck.HasTypeErrors = true
 		}
-		if err := addReports(snapshot, reports, uri, diags...); err != nil {
-			return false, false, err
-		}
+		typecheck.Diagnostics[uri] = diags
 	}
-	return nonEmptyDiagnostics, hasTypeErrors, nil
+	return typecheck
 }
 
-func analyses(ctx context.Context, snapshot Snapshot, reports map[VersionedFileIdentity][]*Diagnostic, pkg Package, analyses map[string]Analyzer) error {
-	var analyzers []*analysis.Analyzer
-	for _, a := range analyses {
-		if !a.IsEnabled(snapshot.View()) {
-			continue
-		}
-		analyzers = append(analyzers, a.Analyzer)
-	}
-	analysisErrors, err := snapshot.Analyze(ctx, pkg.ID(), analyzers...)
-	if err != nil {
-		return err
-	}
-
-	// Report diagnostics and errors from root analyzers.
-	for _, e := range analysisErrors {
-		// If the diagnostic comes from a "convenience" analyzer, it is not
-		// meant to provide diagnostics, but rather only suggested fixes.
-		// Skip these types of errors in diagnostics; we will use their
-		// suggested fixes when providing code actions.
-		if isConvenienceAnalyzer(e.Category) {
-			continue
-		}
-		// This is a bit of a hack, but clients > 3.15 will be able to grey out unnecessary code.
-		// If we are deleting code as part of all of our suggested fixes, assume that this is dead code.
-		// TODO(golang/go#34508): Return these codes from the diagnostics themselves.
-		var tags []protocol.DiagnosticTag
-		if onlyDeletions(e.SuggestedFixes) {
-			tags = append(tags, protocol.Unnecessary)
-		}
-		if err := addReports(snapshot, reports, e.URI, &Diagnostic{
-			Range:    e.Range,
-			Message:  e.Message,
-			Source:   e.Category,
-			Severity: protocol.SeverityWarning,
-			Tags:     tags,
-			Related:  e.Related,
-		}); err != nil {
-			return err
+func emptyDiagnostics(pkg Package) map[span.URI][]*Diagnostic {
+	diags := map[span.URI][]*Diagnostic{}
+	for _, pgf := range pkg.CompiledGoFiles() {
+		if _, ok := diags[pgf.URI]; !ok {
+			diags[pgf.URI] = nil
 		}
 	}
-	return nil
-}
-
-func clearReports(snapshot Snapshot, reports map[VersionedFileIdentity][]*Diagnostic, uri span.URI) {
-	fh := snapshot.FindFile(uri)
-	if fh == nil {
-		return
-	}
-	reports[fh.VersionedFileIdentity()] = []*Diagnostic{}
-}
-
-func addReports(snapshot Snapshot, reports map[VersionedFileIdentity][]*Diagnostic, uri span.URI, diagnostics ...*Diagnostic) error {
-	fh := snapshot.FindFile(uri)
-	if fh == nil {
-		return nil
-	}
-	existingDiagnostics, ok := reports[fh.VersionedFileIdentity()]
-	if !ok {
-		return fmt.Errorf("diagnostics for unexpected file %s", uri)
-	}
-	if len(diagnostics) == 1 {
-		d1 := diagnostics[0]
-		if _, ok := snapshot.View().Options().TypeErrorAnalyzers[d1.Source]; ok {
-			for i, d2 := range existingDiagnostics {
-				if r := protocol.CompareRange(d1.Range, d2.Range); r != 0 {
-					continue
-				}
-				if d1.Message != d2.Message {
-					continue
-				}
-				reports[fh.VersionedFileIdentity()][i].Tags = append(reports[fh.VersionedFileIdentity()][i].Tags, d1.Tags...)
-			}
-			return nil
-		}
-	}
-	reports[fh.VersionedFileIdentity()] = append(reports[fh.VersionedFileIdentity()], diagnostics...)
-	return nil
+	return diags
 }
 
 // onlyDeletions returns true if all of the suggested fixes are deletions.
@@ -299,20 +250,6 @@
 	return len(fixes) > 0
 }
 
-// hasUndeclaredErrors returns true if a package has a type error
-// about an undeclared symbol.
-func hasUndeclaredErrors(pkg Package) bool {
-	for _, err := range pkg.GetErrors() {
-		if err.Kind != TypeError {
-			continue
-		}
-		if strings.Contains(err.Message, "undeclared name:") {
-			return true
-		}
-	}
-	return false
-}
-
 func isConvenienceAnalyzer(category string) bool {
 	for _, a := range DefaultOptions().ConvenienceAnalyzers {
 		if category == a.Analyzer.Name {
diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go
index ff4a4ca..43157f7 100644
--- a/internal/lsp/source/view.go
+++ b/internal/lsp/source/view.go
@@ -291,7 +291,7 @@
 
 	// DidModifyFile reports a file modification to the session. It returns the
 	// resulting snapshots, a guaranteed one per view.
-	DidModifyFiles(ctx context.Context, changes []FileModification) (map[span.URI]View, map[View]Snapshot, []func(), []span.URI, error)
+	DidModifyFiles(ctx context.Context, changes []FileModification) (map[span.URI]View, map[View]Snapshot, []func(), error)
 
 	// Overlays returns a slice of file overlays for the session.
 	Overlays() []Overlay
diff --git a/internal/lsp/text_synchronization.go b/internal/lsp/text_synchronization.go
index fc35ff5..7281c90 100644
--- a/internal/lsp/text_synchronization.go
+++ b/internal/lsp/text_synchronization.go
@@ -186,22 +186,11 @@
 			}()
 		}()
 	}
-	views, snapshots, releases, deletions, err := s.session.DidModifyFiles(ctx, modifications)
+	views, snapshots, releases, err := s.session.DidModifyFiles(ctx, modifications)
 	if err != nil {
 		return err
 	}
 
-	// Clear out diagnostics for deleted files.
-	for _, uri := range deletions {
-		if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
-			URI:         protocol.URIFromSpanURI(uri),
-			Diagnostics: []protocol.Diagnostic{},
-			Version:     0,
-		}); err != nil {
-			return err
-		}
-	}
-
 	// Check if the user is trying to modify a generated file.
 	for _, mod := range modifications {
 		if mod.OnDisk || mod.Action != source.Change {