internal/lsp/cache: support loading multiple orphaned files
go/packages returns at most one command-line-arguments package per
query. Previously, this led to N^2 loading of orphaned files. CL 480197
fixed this by marking all queried files as unloadable after a load
completes. However, as a result, gopls would fail to load multiple
orphaned files.
Fix this properly by calling Load once for each orphaned file.
Fixes golang/go#59318
Change-Id: Ibfb3742fcb70ea3976d8b1b5b384fe6b97350cf4
Reviewed-on: https://go-review.googlesource.com/c/tools/+/494401
Reviewed-by: Alan Donovan <adonovan@google.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index 7e9a06f..71f6563 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -1595,28 +1595,34 @@
// available in the snapshot and reload their metadata individually using a
// file= query if the metadata is unavailable.
files := s.orphanedOpenFiles()
-
- // Files without a valid package declaration can't be loaded. Don't try.
- var scopes []loadScope
- for _, file := range files {
- pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
- if err != nil {
- continue
- }
- if !pgf.File.Package.IsValid() {
- continue
- }
-
- scopes = append(scopes, fileLoadScope(file.URI()))
- }
-
- if len(scopes) == 0 {
+ if len(files) == 0 {
return nil
}
- // The regtests match this exact log message, keep them in sync.
- event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes))
- err := s.load(ctx, false, scopes...)
+ var uris []span.URI
+ for _, file := range files {
+ uris = append(uris, file.URI())
+ }
+
+ event.Log(ctx, "reloadOrphanedFiles reloading", tag.Files.Of(uris))
+
+ var g errgroup.Group
+
+ cpulimit := runtime.GOMAXPROCS(0)
+ g.SetLimit(cpulimit)
+
+ // Load files one-at-a-time. go/packages can return at most one
+ // command-line-arguments package per query.
+ for _, file := range files {
+ file := file
+ g.Go(func() error {
+ pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
+ if err != nil || !pgf.File.Package.IsValid() {
+ return nil // need a valid header
+ }
+ return s.load(ctx, false, fileLoadScope(file.URI()))
+ })
+ }
// If we failed to load some files, i.e. they have no metadata,
// mark the failures so we don't bother retrying until the file's
@@ -1624,11 +1630,17 @@
//
// TODO(rfindley): is it possible that the load stopped early for an
// unrelated errors? If so, add a fallback?
- //
- // Check for context cancellation so that we don't incorrectly mark files
- // as unloadable, but don't return before setting all workspace packages.
- if ctx.Err() != nil {
- return ctx.Err()
+
+ if err := g.Wait(); err != nil {
+ // Check for context cancellation so that we don't incorrectly mark files
+ // as unloadable, but don't return before setting all workspace packages.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ if !errors.Is(err, errNoPackages) {
+ event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Files.Of(uris))
+ }
}
// If the context was not canceled, we assume that the result of loading
@@ -1637,19 +1649,15 @@
// prevents us from falling into recursive reloading where we only make a bit
// of progress each time.
s.mu.Lock()
- for _, scope := range scopes {
+ defer s.mu.Unlock()
+ for _, file := range files {
// TODO(rfindley): instead of locking here, we should have load return the
// metadata graph that resulted from loading.
- uri := span.URI(scope.(fileLoadScope))
+ uri := file.URI()
if s.noValidMetadataForURILocked(uri) {
s.unloadableFiles[uri] = struct{}{}
}
}
- s.mu.Unlock()
-
- if err != nil && !errors.Is(err, errNoPackages) {
- event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
- }
return nil
}