gopls/internal/lsp/cache: actually preload files in parallel
snapshot.ReadFile holds the snapshot lock, so calling it concurrently
was pointless. Instead, add a new preloadFiles helper that delegates
to the underlying FileSource.
For golang/go#57987
Change-Id: If3259ca45260b8a24542d40f7558b5d6bf5018d1
Reviewed-on: https://go-review.googlesource.com/c/tools/+/477975
gopls-CI: kokoro <noreply+kokoro@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
Reviewed-by: Alan Donovan <adonovan@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
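
For readers skimming the diff, the sketch below shows the same
bounded-parallelism pattern in isolation: a buffered channel acting as
an I/O semaphore plus a sync.WaitGroup, with files read outside any
lock. It is an illustrative stand-in, not gopls code; the preload
function, the maxIO constant, and the use of os.ReadFile are
assumptions made for the example.

	// Sketch only: mirrors the structure of snapshot.preloadFiles using
	// plain os.ReadFile. The names preload and maxIO are hypothetical.
	package main

	import (
		"context"
		"fmt"
		"os"
		"sync"
	)

	// preload reads paths in parallel, capping concurrent reads with a
	// semaphore channel. Results are indexed like the input; entries
	// whose reads failed stay nil, as in preloadFiles.
	func preload(ctx context.Context, paths []string) [][]byte {
		const maxIO = 20 // concurrency cap, matching the CL's iolimit
		results := make([][]byte, len(paths))
		sem := make(chan struct{}, maxIO)
		var wg sync.WaitGroup
		for i, path := range paths {
			wg.Add(1)
			sem <- struct{}{} // acquire a slot before spawning the read
			go func(i int, path string) {
				defer wg.Done()
				data, err := os.ReadFile(path)
				<-sem // release the slot once the read is done
				if err != nil && ctx.Err() == nil {
					fmt.Fprintf(os.Stderr, "reading %s: %v\n", path, err)
					return
				}
				results[i] = data
			}(i, path)
		}
		wg.Wait()
		return results
	}

	func main() {
		for i, data := range preload(context.Background(), []string{"go.mod", "main.go"}) {
			fmt.Printf("file %d: %d bytes\n", i, len(data))
		}
	}

The property this CL is after is that nothing holds the snapshot lock
while the reads are in flight; in the real helper the lock is taken
only after wg.Wait(), to record the successfully read handles that are
not already cached.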
diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go
index 36b88db..269ab31 100644
--- a/gopls/internal/lsp/cache/load.go
+++ b/gopls/internal/lsp/cache/load.go
@@ -12,7 +12,6 @@
"path/filepath"
"sort"
"strings"
- "sync"
"sync/atomic"
"time"
@@ -255,7 +254,7 @@
s.dumpWorkspace("load")
s.mu.Unlock()
- // Opt: pre-fetch loaded files in parallel.
+ // Opt: preload files in parallel.
//
// Requesting files in batch optimizes the underlying filesystem reads.
// However, this is also currently necessary for correctness: populating all
@@ -266,16 +265,7 @@
// TODO(rfindley, golang/go#57558): determine the set of directories based on
// loaded packages, so that reading files here is not necessary for
// correctness.
- var wg sync.WaitGroup
- for _, uri := range files {
- uri := uri
- wg.Add(1)
- go func() {
- s.ReadFile(ctx, uri) // ignore result
- wg.Done()
- }()
- }
- wg.Wait()
+ s.preloadFiles(ctx, files)
if len(moduleErrs) > 0 {
return &moduleErrorMap{moduleErrs}
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index bf026b6..225f3dd 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -1240,6 +1240,42 @@
return lockedSnapshot{s}.ReadFile(ctx, uri)
}
+// preloadFiles delegates to the view FileSource to read the requested uris in
+// parallel, without holding the snapshot lock.
+func (s *snapshot) preloadFiles(ctx context.Context, uris []span.URI) {
+ files := make([]source.FileHandle, len(uris))
+ var wg sync.WaitGroup
+ iolimit := make(chan struct{}, 20) // I/O concurrency limiting semaphore
+ for i, uri := range uris {
+ wg.Add(1)
+ iolimit <- struct{}{}
+ go func(i int, uri span.URI) {
+ defer wg.Done()
+ fh, err := s.view.fs.ReadFile(ctx, uri)
+ <-iolimit
+ if err != nil && ctx.Err() == nil {
+ event.Error(ctx, fmt.Sprintf("reading %s", uri), err)
+ return
+ }
+ files[i] = fh
+ }(i, uri)
+ }
+ wg.Wait()
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ for i, fh := range files {
+ if fh == nil {
+ continue // error logged above
+ }
+ uri := uris[i]
+ if _, ok := s.files.Get(uri); !ok {
+ s.files.Set(uri, fh)
+ }
+ }
+}
+
// A lockedSnapshot implements the source.FileSource interface while holding
// the lock for the wrapped snapshot.
type lockedSnapshot struct{ wrapped *snapshot }