gopls/internal/lsp/cache: record unloadable files if loading completes

As reported in golang/go#59318, orphaned file reloading may be
accidentally quadratic: as long as each load makes some progress,
files that remain unloaded are never marked unloadable, and so are
re-requested on every subsequent reload.

Adjust the logic to record unloadable files whenever loading completes
without cancellation. This fixes golang/go#59184, but is only half of
the fix for golang/go#59318: we must also fix the package IDs used for
command-line-arguments packages.
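
To illustrate the shape of the change outside of gopls, here is a
minimal, self-contained sketch of the idea; the names (unloadable,
reloadOrphans) and the load signature are invented for illustration
and are not gopls APIs:

package main

import (
	"context"
	"fmt"
)

// unloadable records files that a completed load failed to produce a
// package for; such files are skipped until their content changes.
var unloadable = map[string]bool{}

// reloadOrphans loads the given orphaned files, then marks as unloadable
// any file the load did not cover, but only if the load was not cancelled.
func reloadOrphans(ctx context.Context, orphans []string, load func(context.Context, []string) (map[string]bool, error)) error {
	var scopes []string
	for _, f := range orphans {
		if !unloadable[f] {
			scopes = append(scopes, f)
		}
	}
	if len(scopes) == 0 {
		return nil // nothing left to try
	}

	loaded, err := load(ctx, scopes)

	// A cancelled load is inconclusive: don't mark anything unloadable.
	if ctx.Err() != nil {
		return ctx.Err()
	}

	// The load completed: record every file it failed to cover so that
	// the next pass does not re-request it, keeping repeated reloads
	// linear overall.
	for _, f := range scopes {
		if !loaded[f] {
			unloadable[f] = true
		}
	}

	if err != nil {
		fmt.Println("load error:", err) // logged rather than returned, as in the patch
	}
	return nil
}

func main() {
	// A load that only ever makes progress on one file per call.
	load := func(_ context.Context, scopes []string) (map[string]bool, error) {
		return map[string]bool{scopes[0]: true}, nil
	}
	files := []string{"a.go", "b.go", "c.go"}
	_ = reloadOrphans(context.Background(), files, load)
	// b.go and c.go are now unloadable; a second pass issues no new load.
	fmt.Println(unloadable) // map[b.go:true c.go:true]
}

Under the old behavior, which only recorded unloadable files when the
load returned an error, b.go and c.go in this sketch would be
re-requested on every reload.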

Fixes golang/go#59184
For golang/go#59318

Change-Id: Ibbb572b1f65832306b6046d29befa40817be3a28
Reviewed-on: https://go-review.googlesource.com/c/tools/+/480197
TryBot-Result: Gopher Robot <gobot@golang.org>
gopls-CI: kokoro <noreply+kokoro@google.com>
Reviewed-by: Alan Donovan <adonovan@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index 1f530b4..7771ce9 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -1553,6 +1553,11 @@
 	return err
 }
 
+// reloadOrphanedOpenFiles attempts to load a package for each open file that
+// does not yet have an associated package. If loading finishes without being
+// canceled, any files still not contained in a package are marked as unloadable.
+//
+// An error is returned if the load is canceled.
 func (s *snapshot) reloadOrphanedOpenFiles(ctx context.Context) error {
 	// When we load ./... or a package path directly, we may not get packages
 	// that exist only in overlays. As a workaround, we search all of the files
@@ -1586,22 +1591,35 @@
 	// mark the failures so we don't bother retrying until the file's
 	// content changes.
 	//
-	// TODO(rstambler): This may be an overestimate if the load stopped
-	// early for an unrelated errors. Add a fallback?
+	// TODO(rfindley): is it possible that the load stopped early for an
+	// unrelated error? If so, add a fallback?
 	//
 	// Check for context cancellation so that we don't incorrectly mark files
 	// as unloadable, but don't return before setting all workspace packages.
-	if ctx.Err() == nil && err != nil {
-		event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
-		s.mu.Lock()
-		for _, scope := range scopes {
-			uri := span.URI(scope.(fileLoadScope))
-			if s.noValidMetadataForURILocked(uri) {
-				s.unloadableFiles[uri] = struct{}{}
-			}
-		}
-		s.mu.Unlock()
+	if ctx.Err() != nil {
+		return ctx.Err()
 	}
+
+	// If the context was not canceled, we assume that the result of loading
+	// packages is deterministic (though as we saw in golang/go#59318, it may
+	// not be in the presence of bugs). Marking all unloaded files as
+	// unloadable here prevents us from repeatedly reloading them while making
+	// only a bit of progress each time.
+	s.mu.Lock()
+	for _, scope := range scopes {
+		// TODO(rfindley): instead of locking here, we should have load return the
+		// metadata graph that resulted from loading.
+		uri := span.URI(scope.(fileLoadScope))
+		if s.noValidMetadataForURILocked(uri) {
+			s.unloadableFiles[uri] = struct{}{}
+		}
+	}
+	s.mu.Unlock()
+
+	if err != nil && !errors.Is(err, errNoPackages) {
+		event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
+	}
+
 	return nil
 }