1// Copyright 2019 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package cache
6
7import (
8 "bytes"
9 "context"
10 "errors"
11 "fmt"
12 "go/ast"
13 "go/token"
14 "go/types"
15 "io"
16 "io/ioutil"
17 "log"
18 "os"
19 "path/filepath"
20 "regexp"
21 "runtime"
22 "sort"
23 "strconv"
24 "strings"
25 "sync"
26 "sync/atomic"
27 "unsafe"
28
29 "golang.org/x/mod/modfile"
30 "golang.org/x/mod/module"
31 "golang.org/x/mod/semver"
32 "golang.org/x/sync/errgroup"
33 "golang.org/x/tools/go/packages"
34 "golang.org/x/tools/gopls/internal/lsp/source"
35 "golang.org/x/tools/gopls/internal/span"
36 "golang.org/x/tools/internal/bug"
37 "golang.org/x/tools/internal/event"
38 "golang.org/x/tools/internal/event/tag"
39 "golang.org/x/tools/internal/gocommand"
40 "golang.org/x/tools/internal/memoize"
41 "golang.org/x/tools/internal/packagesinternal"
42 "golang.org/x/tools/internal/persistent"
43 "golang.org/x/tools/internal/typesinternal"
44)
45
46type snapshot struct {
47 id uint64
48 view *View
49
50 cancel func()
51 backgroundCtx context.Context
52
53 store *memoize.Store // cache of handles shared by all snapshots
54
55 refcount sync.WaitGroup // number of references
56 destroyedBy *string // atomically set to non-nil in Destroy once refcount = 0
57
 58 // initialized reports whether the snapshot has been initialized.
 59 //
 60 // Each snapshot is initialized at most once: concurrent initialization
 61 // is guarded by view.initializationSema.
62 initialized bool
63 // initializedErr holds the last error resulting from initialization. If
 64 // initialization fails, we only retry when the workspace modules change,
65 // to avoid too many go/packages calls.
66 initializedErr *source.CriticalError
67
68 // mu guards all of the maps in the snapshot, as well as the builtin URI.
69 mu sync.Mutex
70
71 // builtin pins the AST and package for builtin.go in memory.
72 builtin span.URI
73
74 // meta holds loaded metadata.
75 //
76 // meta is guarded by mu, but the metadataGraph itself is immutable.
77 // TODO(rfindley): in many places we hold mu while operating on meta, even
78 // though we only need to hold mu while reading the pointer.
79 meta *metadataGraph
80
81 // files maps file URIs to their corresponding FileHandles.
 82 // It may be invalidated when a file's content changes.
83 files filesMap
84
85 // parsedGoFiles maps a parseKey to the handle of the future result of parsing it.
86 parsedGoFiles *persistent.Map // from parseKey to *memoize.Promise[parseGoResult]
87
88 // parseKeysByURI records the set of keys of parsedGoFiles that
89 // need to be invalidated for each URI.
90 // TODO(adonovan): opt: parseKey = ParseMode + URI, so this could
91 // be just a set of ParseModes, or we could loop over AllParseModes.
92 parseKeysByURI parseKeysByURIMap
93
94 // symbolizeHandles maps each file URI to a handle for the future
95 // result of computing the symbols declared in that file.
96 symbolizeHandles *persistent.Map // from span.URI to *memoize.Promise[symbolizeResult]
97
98 // packages maps a packageKey to a *packageHandle.
99 // It may be invalidated when a file's content changes.
100 //
101 // Invariants to preserve:
102 // - packages.Get(id).m.Metadata == meta.metadata[id].Metadata for all ids
103 // - if a package is in packages, then all of its dependencies should also
104 // be in packages, unless there is a missing import
105 packages *persistent.Map // from packageKey to *memoize.Promise[*packageHandle]
106
 107 // isActivePackageCache maps a package ID to the cached result of whether it is active.
108 // It may be invalidated when metadata changes or a new file is opened or closed.
109 isActivePackageCache isActivePackageCacheMap
110
111 // actions maps an actionKey to the handle for the future
 112 // result of executing an analysis pass on a package.
113 actions *persistent.Map // from actionKey to *actionHandle
114
115 // workspacePackages contains the workspace's packages, which are loaded
116 // when the view is created.
117 workspacePackages map[PackageID]PackagePath
118
119 // shouldLoad tracks packages that need to be reloaded, mapping a PackageID
120 // to the package paths that should be used to reload it
121 //
122 // When we try to load a package, we clear it from the shouldLoad map
123 // regardless of whether the load succeeded, to prevent endless loads.
124 shouldLoad map[PackageID][]PackagePath
125
126 // unloadableFiles keeps track of files that we've failed to load.
127 unloadableFiles map[span.URI]struct{}
128
129 // parseModHandles keeps track of any parseModHandles for the snapshot.
130 // The handles need not refer to only the view's go.mod file.
131 parseModHandles *persistent.Map // from span.URI to *memoize.Promise[parseModResult]
132
133 // parseWorkHandles keeps track of any parseWorkHandles for the snapshot.
134 // The handles need not refer to only the view's go.work file.
135 parseWorkHandles *persistent.Map // from span.URI to *memoize.Promise[parseWorkResult]
136
137 // Preserve go.mod-related handles to avoid garbage-collecting the results
138 // of various calls to the go command. The handles need not refer to only
139 // the view's go.mod file.
140 modTidyHandles *persistent.Map // from span.URI to *memoize.Promise[modTidyResult]
141 modWhyHandles *persistent.Map // from span.URI to *memoize.Promise[modWhyResult]
142
143 workspace *workspace // (not guarded by mu)
144
145 // The cached result of makeWorkspaceDir, created on demand and deleted by Snapshot.Destroy.
146 workspaceDir string
147 workspaceDirErr error
148
149 // knownSubdirs is the set of subdirectories in the workspace, used to
150 // create glob patterns for file watching.
151 knownSubdirs knownDirsSet
152 knownSubdirsPatternCache string
153 // unprocessedSubdirChanges are any changes that might affect the set of
 154 // subdirectories in the workspace. They are not applied to knownSubdirs
 155 // during snapshot cloning, because doing so can slow down cloning.
156 unprocessedSubdirChanges []*fileChange
157}
158
159var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted
160
161// Acquire prevents the snapshot from being destroyed until the returned function is called.
162//
163// (s.Acquire().release() could instead be expressed as a pair of
164// method calls s.IncRef(); s.DecRef(). The latter has the advantage
165// that the DecRefs are fungible and don't require holding anything in
166// addition to the refcounted object s, but paradoxically that is also
167// an advantage of the current approach, which forces the caller to
168// consider the release function at every stage, making a reference
169// leak more obvious.)
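//
// For example, a typical caller pairs Acquire with a deferred call to the
// returned release function so that the snapshot cannot be destroyed while
// it is still in use (a sketch; useSnapshot stands in for any work done
// against s):
//
//	release := s.Acquire()
//	defer release()
//	useSnapshot(s)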
170func (s *snapshot) Acquire() func() {
171 type uP = unsafe.Pointer
172 if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil {
173 log.Panicf("%d: acquire() after Destroy(%q)", s.id, *(*string)(destroyedBy))
174 }
175 s.refcount.Add(1)
176 return s.refcount.Done
177}
178
179func (s *snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) {
180 return p.Get(ctx, s)
181}
182
183// destroy waits for all leases on the snapshot to expire then releases
184// any resources (reference counts and files) associated with it.
185// Snapshots being destroyed can be awaited using v.destroyWG.
186//
187// TODO(adonovan): move this logic into the release function returned
188// by Acquire when the reference count becomes zero. (This would cost
189// us the destroyedBy debug info, unless we add it to the signature of
190// memoize.RefCounted.Acquire.)
191//
192// The destroyedBy argument is used for debugging.
193//
194// v.snapshotMu must be held while calling this function, in order to preserve
195// the invariants described by the docstring for v.snapshot.
196func (v *View) destroy(s *snapshot, destroyedBy string) {
197 v.snapshotWG.Add(1)
198 go func() {
199 defer v.snapshotWG.Done()
200 s.destroy(destroyedBy)
201 }()
202}
203
204func (s *snapshot) destroy(destroyedBy string) {
205 // Wait for all leases to end before commencing destruction.
206 s.refcount.Wait()
207
208 // Report bad state as a debugging aid.
209 // Not foolproof: another thread could acquire() at this moment.
210 type uP = unsafe.Pointer // looking forward to generics...
211 if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil {
212 log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.id, destroyedBy, *(*string)(old))
213 }
214
215 s.packages.Destroy()
216 s.isActivePackageCache.Destroy()
217 s.actions.Destroy()
218 s.files.Destroy()
219 s.parsedGoFiles.Destroy()
220 s.parseKeysByURI.Destroy()
221 s.knownSubdirs.Destroy()
222 s.symbolizeHandles.Destroy()
223 s.parseModHandles.Destroy()
224 s.parseWorkHandles.Destroy()
225 s.modTidyHandles.Destroy()
226 s.modWhyHandles.Destroy()
227
228 if s.workspaceDir != "" {
229 if err := os.RemoveAll(s.workspaceDir); err != nil {
230 event.Error(context.Background(), "cleaning workspace dir", err)
231 }
232 }
233}
234
235func (s *snapshot) ID() uint64 {
236 return s.id
237}
238
239func (s *snapshot) View() source.View {
240 return s.view
241}
242
243func (s *snapshot) BackgroundContext() context.Context {
244 return s.backgroundCtx
245}
246
247func (s *snapshot) FileSet() *token.FileSet {
248 return s.view.session.cache.fset
249}
250
251func (s *snapshot) ModFiles() []span.URI {
252 var uris []span.URI
253 for modURI := range s.workspace.getActiveModFiles() {
254 uris = append(uris, modURI)
255 }
256 return uris
257}
258
259func (s *snapshot) WorkFile() span.URI {
260 return s.workspace.workFile
261}
262
263func (s *snapshot) Templates() map[span.URI]source.VersionedFileHandle {
264 s.mu.Lock()
265 defer s.mu.Unlock()
266
267 tmpls := map[span.URI]source.VersionedFileHandle{}
268 s.files.Range(func(k span.URI, fh source.VersionedFileHandle) {
269 if s.view.FileKind(fh) == source.Tmpl {
270 tmpls[k] = fh
271 }
272 })
273 return tmpls
274}
275
276func (s *snapshot) ValidBuildConfiguration() bool {
277 // Since we only really understand the `go` command, if the user has a
278 // different GOPACKAGESDRIVER, assume that their configuration is valid.
279 if s.view.hasGopackagesDriver {
280 return true
281 }
282 // Check if the user is working within a module or if we have found
283 // multiple modules in the workspace.
284 if len(s.workspace.getActiveModFiles()) > 0 {
285 return true
286 }
 287 // The user may have multiple directories in their GOPATH.
288 // Check if the workspace is within any of them.
289 for _, gp := range filepath.SplitList(s.view.gopath) {
290 if source.InDir(filepath.Join(gp, "src"), s.view.rootURI.Filename()) {
291 return true
292 }
293 }
294 return false
295}
296
297// workspaceMode describes the way in which the snapshot's workspace should
298// be loaded.
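//
// Callers test individual bits of the returned mode; for example (a sketch,
// mirroring the tempModfile check in RunGoCommands below):
//
//	if s.workspaceMode()&tempModfile != 0 {
//		// the -modfile flag may be used for this snapshot
//	}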
299func (s *snapshot) workspaceMode() workspaceMode {
300 var mode workspaceMode
301
302 // If the view has an invalid configuration, don't build the workspace
303 // module.
304 validBuildConfiguration := s.ValidBuildConfiguration()
305 if !validBuildConfiguration {
306 return mode
307 }
308 // If the view is not in a module and contains no modules, but still has a
309 // valid workspace configuration, do not create the workspace module.
310 // It could be using GOPATH or a different build system entirely.
311 if len(s.workspace.getActiveModFiles()) == 0 && validBuildConfiguration {
312 return mode
313 }
314 mode |= moduleMode
315 options := s.view.Options()
316 // The -modfile flag is available for Go versions >= 1.14.
317 if options.TempModfile && s.view.workspaceInformation.goversion >= 14 {
318 mode |= tempModfile
319 }
320 return mode
321}
322
323// config returns the configuration used for the snapshot's interaction with
324// the go/packages API. It uses the given working directory.
325//
326// TODO(rstambler): go/packages requires that we do not provide overlays for
327// multiple modules in one config, so buildOverlay needs to filter overlays by
328// module.
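//
// The returned config is suitable for passing to go/packages; for example
// (a sketch only; the actual load happens elsewhere in this package, and the
// pattern is illustrative):
//
//	cfg := s.config(ctx, inv)
//	pkgs, err := packages.Load(cfg, "./...")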
329func (s *snapshot) config(ctx context.Context, inv *gocommand.Invocation) *packages.Config {
330 s.view.optionsMu.Lock()
331 verboseOutput := s.view.options.VerboseOutput
332 s.view.optionsMu.Unlock()
333
334 cfg := &packages.Config{
335 Context: ctx,
336 Dir: inv.WorkingDir,
337 Env: inv.Env,
338 BuildFlags: inv.BuildFlags,
339 Mode: packages.NeedName |
340 packages.NeedFiles |
341 packages.NeedCompiledGoFiles |
342 packages.NeedImports |
343 packages.NeedDeps |
344 packages.NeedTypesSizes |
345 packages.NeedModule |
346 packages.LoadMode(packagesinternal.DepsErrors) |
347 packages.LoadMode(packagesinternal.ForTest),
348 Fset: s.FileSet(),
349 Overlay: s.buildOverlay(),
350 ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
351 panic("go/packages must not be used to parse files")
352 },
353 Logf: func(format string, args ...interface{}) {
354 if verboseOutput {
355 event.Log(ctx, fmt.Sprintf(format, args...))
356 }
357 },
358 Tests: true,
359 }
360 packagesinternal.SetModFile(cfg, inv.ModFile)
361 packagesinternal.SetModFlag(cfg, inv.ModFlag)
362 // We want to type check cgo code if go/types supports it.
363 if typesinternal.SetUsesCgo(&types.Config{}) {
364 cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo)
365 }
366 packagesinternal.SetGoCmdRunner(cfg, s.view.session.gocmdRunner)
367 return cfg
368}
369
370func (s *snapshot) RunGoCommandDirect(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) {
371 _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
372 if err != nil {
373 return nil, err
374 }
375 defer cleanup()
376
377 return s.view.session.gocmdRunner.Run(ctx, *inv)
378}
379
380func (s *snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error {
381 _, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
382 if err != nil {
383 return err
384 }
385 defer cleanup()
386 return s.view.session.gocmdRunner.RunPiped(ctx, *inv, stdout, stderr)
387}
388
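// A typical use of RunGoCommands is to run one or more go commands and, when
// the temporary -modfile mode is in effect, capture the go.mod and go.sum
// contents that result; for example (a sketch; ctx and dir are assumed to be
// in scope):
//
//	ok, modBytes, sumBytes, err := s.RunGoCommands(ctx, false, dir,
//		func(invoke func(...string) (*bytes.Buffer, error)) error {
//			_, err := invoke("mod", "tidy")
//			return err
//		})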
389func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) (bool, []byte, []byte, error) {
390 var flags source.InvocationFlags
391 if s.workspaceMode()&tempModfile != 0 {
392 flags = source.WriteTemporaryModFile
393 } else {
394 flags = source.Normal
395 }
396 if allowNetwork {
397 flags |= source.AllowNetwork
398 }
399 tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd})
400 if err != nil {
401 return false, nil, nil, err
402 }
403 defer cleanup()
404 invoke := func(args ...string) (*bytes.Buffer, error) {
405 inv.Verb = args[0]
406 inv.Args = args[1:]
407 return s.view.session.gocmdRunner.Run(ctx, *inv)
408 }
409 if err := run(invoke); err != nil {
410 return false, nil, nil, err
411 }
412 if flags.Mode() != source.WriteTemporaryModFile {
413 return false, nil, nil, nil
414 }
415 var modBytes, sumBytes []byte
416 modBytes, err = ioutil.ReadFile(tmpURI.Filename())
417 if err != nil && !os.IsNotExist(err) {
418 return false, nil, nil, err
419 }
420 sumBytes, err = ioutil.ReadFile(strings.TrimSuffix(tmpURI.Filename(), ".mod") + ".sum")
421 if err != nil && !os.IsNotExist(err) {
422 return false, nil, nil, err
423 }
424 return true, modBytes, sumBytes, nil
425}
426
427// goCommandInvocation populates inv with configuration for running go commands on the snapshot.
428//
429// TODO(rfindley): refactor this function to compose the required configuration
430// explicitly, rather than implicitly deriving it from flags and inv.
431//
432// TODO(adonovan): simplify cleanup mechanism. It's hard to see, but
433// it is used only after the call to tempModFile. Clarify that it is only
434// non-nil on success.
435func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
436 s.view.optionsMu.Lock()
437 allowModfileModificationOption := s.view.options.AllowModfileModifications
438 allowNetworkOption := s.view.options.AllowImplicitNetworkAccess
439
440 // TODO(rfindley): this is very hard to follow, and may not even be doing the
441 // right thing: should inv.Env really trample view.options? Do we ever invoke
442 // this with a non-empty inv.Env?
443 //
444 // We should refactor to make it clearer that the correct env is being used.
445 inv.Env = append(append(append(os.Environ(), s.view.options.EnvSlice()...), inv.Env...), "GO111MODULE="+s.view.effectiveGo111Module)
446 inv.BuildFlags = append([]string{}, s.view.options.BuildFlags...)
447 s.view.optionsMu.Unlock()
448 cleanup = func() {} // fallback
449
450 // All logic below is for module mode.
451 if s.workspaceMode()&moduleMode == 0 {
452 return "", inv, cleanup, nil
453 }
454
455 mode, allowNetwork := flags.Mode(), flags.AllowNetwork()
456 if !allowNetwork && !allowNetworkOption {
457 inv.Env = append(inv.Env, "GOPROXY=off")
458 }
459
460 // What follows is rather complicated logic for how to actually run the go
461 // command. A word of warning: this is the result of various incremental
462 // features added to gopls, and varying behavior of the Go command across Go
463 // versions. It can surely be cleaned up significantly, but tread carefully.
464 //
 465 // Roughly speaking we need to resolve three things:
466 // - the working directory.
467 // - the -mod flag
468 // - the -modfile flag
469 //
470 // These are dependent on a number of factors: whether we need to run in a
471 // synthetic workspace, whether flags are supported at the current go
472 // version, and what we're actually trying to achieve (the
473 // source.InvocationFlags).
474
475 var modURI span.URI
476 // Select the module context to use.
477 // If we're type checking, we need to use the workspace context, meaning
478 // the main (workspace) module. Otherwise, we should use the module for
479 // the passed-in working dir.
480 if mode == source.LoadWorkspace {
481 switch s.workspace.moduleSource {
482 case legacyWorkspace:
483 for m := range s.workspace.getActiveModFiles() { // range to access the only element
484 modURI = m
485 }
486 case goWorkWorkspace:
487 if s.view.goversion >= 18 {
488 break
489 }
490 // Before go 1.18, the Go command did not natively support go.work files,
491 // so we 'fake' them with a workspace module.
492 fallthrough
493 case fileSystemWorkspace, goplsModWorkspace:
494 var tmpDir span.URI
495 var err error
496 tmpDir, err = s.getWorkspaceDir(ctx)
497 if err != nil {
498 return "", nil, cleanup, err
499 }
500 inv.WorkingDir = tmpDir.Filename()
501 modURI = span.URIFromPath(filepath.Join(tmpDir.Filename(), "go.mod"))
502 }
503 } else {
504 modURI = s.GoModForFile(span.URIFromPath(inv.WorkingDir))
505 }
506
507 var modContent []byte
508 if modURI != "" {
509 modFH, err := s.GetFile(ctx, modURI)
510 if err != nil {
511 return "", nil, cleanup, err
512 }
513 modContent, err = modFH.Read()
514 if err != nil {
515 return "", nil, cleanup, err
516 }
517 }
518
519 // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall
520 // back on the default behavior of vendorEnabled with an empty modURI. Figure
521 // out what is correct here and implement it explicitly.
522 vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent)
523 if err != nil {
524 return "", nil, cleanup, err
525 }
526
527 mutableModFlag := ""
528 // If the mod flag isn't set, populate it based on the mode and workspace.
529 if inv.ModFlag == "" {
530 if s.view.goversion >= 16 {
531 mutableModFlag = "mod"
532 }
533
534 switch mode {
535 case source.LoadWorkspace, source.Normal:
536 if vendorEnabled {
537 inv.ModFlag = "vendor"
538 } else if !allowModfileModificationOption {
539 inv.ModFlag = "readonly"
540 } else {
541 inv.ModFlag = mutableModFlag
542 }
543 case source.WriteTemporaryModFile:
544 inv.ModFlag = mutableModFlag
545 // -mod must be readonly when using go.work files - see issue #48941
546 inv.Env = append(inv.Env, "GOWORK=off")
547 }
548 }
549
550 // Only use a temp mod file if the modfile can actually be mutated.
551 needTempMod := inv.ModFlag == mutableModFlag
552 useTempMod := s.workspaceMode()&tempModfile != 0
553 if needTempMod && !useTempMod {
554 return "", nil, cleanup, source.ErrTmpModfileUnsupported
555 }
556
557 // We should use -modfile if:
558 // - the workspace mode supports it
559 // - we're using a go.work file on go1.18+, or we need a temp mod file (for
560 // example, if running go mod tidy in a go.work workspace)
561 //
562 // TODO(rfindley): this is very hard to follow. Refactor.
563 useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18
564 if useWorkFile {
565 // Since we're running in the workspace root, the go command will resolve GOWORK automatically.
566 } else if useTempMod {
567 if modURI == "" {
568 return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir)
569 }
570 modFH, err := s.GetFile(ctx, modURI)
571 if err != nil {
572 return "", nil, cleanup, err
573 }
574 // Use the go.sum if it happens to be available.
575 gosum := s.goSum(ctx, modURI)
576 tmpURI, cleanup, err = tempModFile(modFH, gosum)
577 if err != nil {
578 return "", nil, cleanup, err
579 }
580 inv.ModFile = tmpURI.Filename()
581 }
582
583 return tmpURI, inv, cleanup, nil
584}
585
586// usesWorkspaceDir reports whether the snapshot should use a synthetic
587// workspace directory for running workspace go commands such as go list.
588//
589// TODO(rfindley): this logic is duplicated with goCommandInvocation. Clean up
590// the latter, and deduplicate.
591func (s *snapshot) usesWorkspaceDir() bool {
592 switch s.workspace.moduleSource {
593 case legacyWorkspace:
594 return false
595 case goWorkWorkspace:
596 if s.view.goversion >= 18 {
597 return false
598 }
599 // Before go 1.18, the Go command did not natively support go.work files,
600 // so we 'fake' them with a workspace module.
601 }
602 return true
603}
604
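// buildOverlay collects the contents of open files with unsaved edits, keyed
// by file path, in the form expected by the Overlay field of packages.Config
// (see config above).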
605func (s *snapshot) buildOverlay() map[string][]byte {
606 s.mu.Lock()
607 defer s.mu.Unlock()
608
609 overlays := make(map[string][]byte)
610 s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
611 overlay, ok := fh.(*overlay)
612 if !ok {
613 return
614 }
615 if overlay.saved {
616 return
617 }
618 // TODO(rstambler): Make sure not to send overlays outside of the current view.
619 overlays[uri.Filename()] = overlay.text
620 })
621 return overlays
622}
623
624func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) {
625 ctx = event.Label(ctx, tag.URI.Of(uri))
626
627 phs, err := s.packageHandlesForFile(ctx, uri, mode, includeTestVariants)
628 if err != nil {
629 return nil, err
630 }
631 var pkgs []source.Package
632 for _, ph := range phs {
633 pkg, err := ph.await(ctx, s)
634 if err != nil {
635 return nil, err
636 }
637 pkgs = append(pkgs, pkg)
638 }
639 return pkgs, nil
640}
641
642func (s *snapshot) PackageForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, pkgPolicy source.PackageFilter) (source.Package, error) {
643 ctx = event.Label(ctx, tag.URI.Of(uri))
644
645 phs, err := s.packageHandlesForFile(ctx, uri, mode, false)
646 if err != nil {
647 return nil, err
648 }
649
650 if len(phs) < 1 {
651 return nil, fmt.Errorf("no packages")
652 }
653
654 ph := phs[0]
655 for _, handle := range phs[1:] {
656 switch pkgPolicy {
657 case source.WidestPackage:
658 if ph == nil || len(handle.CompiledGoFiles()) > len(ph.CompiledGoFiles()) {
659 ph = handle
660 }
661 case source.NarrowestPackage:
662 if ph == nil || len(handle.CompiledGoFiles()) < len(ph.CompiledGoFiles()) {
663 ph = handle
664 }
665 }
666 }
667 if ph == nil {
668 return nil, fmt.Errorf("no packages in input")
669 }
670
671 return ph.await(ctx, s)
672}
673
674func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, withIntermediateTestVariants bool) ([]*packageHandle, error) {
675 // TODO(rfindley): why can't/shouldn't we awaitLoaded here? It seems that if
676 // we ask for package handles for a file, we should wait for pending loads.
677 // Else we will reload orphaned files before the initial load completes.
678
679 // Check if we should reload metadata for the file. We don't invalidate IDs
680 // (though we should), so the IDs will be a better source of truth than the
681 // metadata. If there are no IDs for the file, then we should also reload.
682 fh, err := s.GetFile(ctx, uri)
683 if err != nil {
684 return nil, err
685 }
686 if kind := s.view.FileKind(fh); kind != source.Go {
687 return nil, fmt.Errorf("no packages for non-Go file %s (%v)", uri, kind)
688 }
689 knownIDs, err := s.getOrLoadIDsForURI(ctx, uri)
690 if err != nil {
691 return nil, err
692 }
693
694 var phs []*packageHandle
695 for _, id := range knownIDs {
696 // Filter out any intermediate test variants. We typically aren't
697 // interested in these packages for file= style queries.
698 if m := s.getMetadata(id); m != nil && m.IsIntermediateTestVariant() && !withIntermediateTestVariants {
699 continue
700 }
701 parseMode := source.ParseFull
702 if mode == source.TypecheckWorkspace {
703 parseMode = s.workspaceParseMode(id)
704 }
705
706 ph, err := s.buildPackageHandle(ctx, id, parseMode)
707 if err != nil {
708 return nil, err
709 }
710 phs = append(phs, ph)
711 }
712 return phs, nil
713}
714
715// getOrLoadIDsForURI returns package IDs associated with the file uri. If no
716// such packages exist or if they are known to be stale, it reloads the file.
717//
718// If experimentalUseInvalidMetadata is set, this function may return package
719// IDs with invalid metadata.
720func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) {
721 useInvalidMetadata := s.useInvalidMetadata()
722
723 s.mu.Lock()
724
725 // Start with the set of package associations derived from the last load.
726 ids := s.meta.ids[uri]
727
728 hasValidID := false // whether we have any valid package metadata containing uri
729 shouldLoad := false // whether any packages containing uri are marked 'shouldLoad'
730 for _, id := range ids {
731 // TODO(rfindley): remove the defensiveness here. s.meta.metadata[id] must
732 // exist.
733 if m, ok := s.meta.metadata[id]; ok && m.Valid {
734 hasValidID = true
735 }
736 if len(s.shouldLoad[id]) > 0 {
737 shouldLoad = true
738 }
739 }
740
741 // Check if uri is known to be unloadable.
742 //
743 // TODO(rfindley): shouldn't we also mark uri as unloadable if the load below
744 // fails? Otherwise we endlessly load files with no packages.
745 _, unloadable := s.unloadableFiles[uri]
746
747 s.mu.Unlock()
748
749 // Special case: if experimentalUseInvalidMetadata is set and we have any
750 // ids, just return them.
751 //
752 // This is arguably wrong: if the metadata is invalid we should try reloading
753 // it. However, this was the pre-existing behavior, and
754 // experimentalUseInvalidMetadata will be removed in a future release.
755 if !shouldLoad && useInvalidMetadata && len(ids) > 0 {
756 return ids, nil
757 }
758
759 // Reload if loading is likely to improve the package associations for uri:
760 // - uri is not contained in any valid packages
761 // - ...or one of the packages containing uri is marked 'shouldLoad'
762 // - ...but uri is not unloadable
763 if (shouldLoad || !hasValidID) && !unloadable {
764 scope := fileLoadScope(uri)
765 err := s.load(ctx, false, scope)
766
767 // Guard against failed loads due to context cancellation.
768 //
769 // Return the context error here as the current operation is no longer
770 // valid.
771 if ctxErr := ctx.Err(); ctxErr != nil {
772 return nil, ctxErr
773 }
774
775 // We must clear scopes after loading.
776 //
777 // TODO(rfindley): unlike reloadWorkspace, this is simply marking loaded
778 // packages as loaded. We could do this from snapshot.load and avoid
779 // raciness.
780 s.clearShouldLoad(scope)
781
782 // Don't return an error here, as we may still return stale IDs.
783 // Furthermore, the result of getOrLoadIDsForURI should be consistent upon
784 // subsequent calls, even if the file is marked as unloadable.
785 if err != nil && !errors.Is(err, errNoPackages) {
786 event.Error(ctx, "getOrLoadIDsForURI", err)
787 }
788 }
789
790 s.mu.Lock()
791 ids = s.meta.ids[uri]
792 if !useInvalidMetadata {
793 var validIDs []PackageID
794 for _, id := range ids {
795 // TODO(rfindley): remove the defensiveness here as well.
796 if m, ok := s.meta.metadata[id]; ok && m.Valid {
797 validIDs = append(validIDs, id)
798 }
799 }
800 ids = validIDs
801 }
802 s.mu.Unlock()
803
804 return ids, nil
805}
806
807// Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has
808// issues with overlays that will cause confusing error messages if we reuse
809// old metadata.
810func (s *snapshot) useInvalidMetadata() bool {
811 return s.view.goversion >= 13 && s.view.Options().ExperimentalUseInvalidMetadata
812}
813
814func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]source.Package, error) {
815 if err := s.awaitLoaded(ctx); err != nil {
816 return nil, err
817 }
818 s.mu.Lock()
819 meta := s.meta
820 s.mu.Unlock()
821 ids := meta.reverseTransitiveClosure(s.useInvalidMetadata(), PackageID(id))
822
823 // Make sure to delete the original package ID from the map.
824 delete(ids, PackageID(id))
825
826 var pkgs []source.Package
827 for id := range ids {
828 pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id))
829 if err != nil {
830 return nil, err
831 }
832 pkgs = append(pkgs, pkg)
833 }
834 return pkgs, nil
835}
836
837func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source.ParseMode) (*pkg, error) {
838 ph, err := s.buildPackageHandle(ctx, id, mode)
839 if err != nil {
840 return nil, err
841 }
842 return ph.await(ctx, s)
843}
844
845func (s *snapshot) getImportedBy(id PackageID) []PackageID {
846 s.mu.Lock()
847 defer s.mu.Unlock()
848 return s.meta.importedBy[id]
849}
850
851func (s *snapshot) workspacePackageIDs() (ids []PackageID) {
852 s.mu.Lock()
853 defer s.mu.Unlock()
854
855 for id := range s.workspacePackages {
856 ids = append(ids, id)
857 }
858 return ids
859}
860
861func (s *snapshot) activePackageIDs() (ids []PackageID) {
862 if s.view.Options().MemoryMode == source.ModeNormal {
863 return s.workspacePackageIDs()
864 }
865
866 s.mu.Lock()
867 defer s.mu.Unlock()
868
869 for id := range s.workspacePackages {
870 if s.isActiveLocked(id) {
871 ids = append(ids, id)
872 }
873 }
874 return ids
875}
876
877func (s *snapshot) isActiveLocked(id PackageID) (active bool) {
878 if seen, ok := s.isActivePackageCache.Get(id); ok {
879 return seen
880 }
881 defer func() {
882 s.isActivePackageCache.Set(id, active)
883 }()
884 m, ok := s.meta.metadata[id]
885 if !ok {
886 return false
887 }
888 for _, cgf := range m.CompiledGoFiles {
889 if s.isOpenLocked(cgf) {
890 return true
891 }
892 }
893 // TODO(rfindley): it looks incorrect that we don't also check GoFiles here.
894 // If a CGo file is open, we want to consider the package active.
895 for _, dep := range m.Imports {
896 if s.isActiveLocked(dep) {
897 return true
898 }
899 }
900 return false
901}
902
903func (s *snapshot) resetIsActivePackageLocked() {
904 s.isActivePackageCache.Destroy()
905 s.isActivePackageCache = newIsActivePackageCacheMap()
906}
907
908const fileExtensions = "go,mod,sum,work"
909
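// fileWatchingGlobPatterns returns the glob patterns that the editor is asked
// to watch on behalf of gopls. For example, with the default extensions above
// the workspace-wide pattern is (illustrative; the full set also depends on
// template extensions and on modules outside the view's folder):
//
//	**/*.{go,mod,sum,work}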
910func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} {
911 extensions := fileExtensions
912 for _, ext := range s.View().Options().TemplateExtensions {
913 extensions += "," + ext
914 }
915 // Work-around microsoft/vscode#100870 by making sure that we are,
916 // at least, watching the user's entire workspace. This will still be
917 // applied to every folder in the workspace.
918 patterns := map[string]struct{}{
919 fmt.Sprintf("**/*.{%s}", extensions): {},
920 }
921
922 if s.view.explicitGowork != "" {
923 patterns[s.view.explicitGowork.Filename()] = struct{}{}
924 }
925
926 // Add a pattern for each Go module in the workspace that is not within the view.
927 dirs := s.workspace.dirs(ctx, s)
928 for _, dir := range dirs {
929 dirName := dir.Filename()
930
931 // If the directory is within the view's folder, we're already watching
932 // it with the pattern above.
933 if source.InDir(s.view.folder.Filename(), dirName) {
934 continue
935 }
936 // TODO(rstambler): If microsoft/vscode#3025 is resolved before
937 // microsoft/vscode#101042, we will need a work-around for Windows
938 // drive letter casing.
939 patterns[fmt.Sprintf("%s/**/*.{%s}", dirName, extensions)] = struct{}{}
940 }
941
942 // Some clients do not send notifications for changes to directories that
943 // contain Go code (golang/go#42348). To handle this, explicitly watch all
944 // of the directories in the workspace. We find them by adding the
945 // directories of every file in the snapshot's workspace directories.
946 // There may be thousands.
947 if pattern := s.getKnownSubdirsPattern(dirs); pattern != "" {
948 patterns[pattern] = struct{}{}
949 }
950
951 return patterns
952}
953
954func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string {
955 s.mu.Lock()
956 defer s.mu.Unlock()
957
958 // First, process any pending changes and update the set of known
959 // subdirectories.
 960 // It may change the list of known subdirs and therefore invalidate the cache.
961 s.applyKnownSubdirsChangesLocked(wsDirs)
962
963 if s.knownSubdirsPatternCache == "" {
964 var builder strings.Builder
965 s.knownSubdirs.Range(func(uri span.URI) {
966 if builder.Len() == 0 {
967 builder.WriteString("{")
968 } else {
969 builder.WriteString(",")
970 }
971 builder.WriteString(uri.Filename())
972 })
973 if builder.Len() > 0 {
974 builder.WriteString("}")
975 s.knownSubdirsPatternCache = builder.String()
976 }
977 }
978
979 return s.knownSubdirsPatternCache
980}
981
982// collectAllKnownSubdirs collects all of the subdirectories within the
983// snapshot's workspace directories. None of the workspace directories are
984// included.
985func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) {
986 dirs := s.workspace.dirs(ctx, s)
987
988 s.mu.Lock()
989 defer s.mu.Unlock()
990
991 s.knownSubdirs.Destroy()
992 s.knownSubdirs = newKnownDirsSet()
993 s.knownSubdirsPatternCache = ""
994 s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
995 s.addKnownSubdirLocked(uri, dirs)
996 })
997}
998
999func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet {
1000 s.mu.Lock()
1001 defer s.mu.Unlock()
1002
1003 // First, process any pending changes and update the set of known
1004 // subdirectories.
1005 s.applyKnownSubdirsChangesLocked(wsDirs)
1006
1007 return s.knownSubdirs.Clone()
1008}
1009
1010func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) {
1011 for _, c := range s.unprocessedSubdirChanges {
1012 if c.isUnchanged {
1013 continue
1014 }
1015 if !c.exists {
1016 s.removeKnownSubdirLocked(c.fileHandle.URI())
1017 } else {
1018 s.addKnownSubdirLocked(c.fileHandle.URI(), wsDirs)
1019 }
1020 }
1021 s.unprocessedSubdirChanges = nil
1022}
1023
1024func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) {
1025 dir := filepath.Dir(uri.Filename())
1026 // First check if the directory is already known, because then we can
1027 // return early.
1028 if s.knownSubdirs.Contains(span.URIFromPath(dir)) {
1029 return
1030 }
1031 var matched span.URI
1032 for _, wsDir := range dirs {
1033 if source.InDir(wsDir.Filename(), dir) {
1034 matched = wsDir
1035 break
1036 }
1037 }
1038 // Don't watch any directory outside of the workspace directories.
1039 if matched == "" {
1040 return
1041 }
1042 for {
1043 if dir == "" || dir == matched.Filename() {
1044 break
1045 }
1046 uri := span.URIFromPath(dir)
1047 if s.knownSubdirs.Contains(uri) {
1048 break
1049 }
1050 s.knownSubdirs.Insert(uri)
1051 dir = filepath.Dir(dir)
1052 s.knownSubdirsPatternCache = ""
1053 }
1054}
1055
1056func (s *snapshot) removeKnownSubdirLocked(uri span.URI) {
1057 dir := filepath.Dir(uri.Filename())
1058 for dir != "" {
1059 uri := span.URIFromPath(dir)
1060 if !s.knownSubdirs.Contains(uri) {
1061 break
1062 }
1063 if info, _ := os.Stat(dir); info == nil {
1064 s.knownSubdirs.Remove(uri)
1065 s.knownSubdirsPatternCache = ""
1066 }
1067 dir = filepath.Dir(dir)
1068 }
1069}
1070
1071// knownFilesInDir returns the files known to the given snapshot that are in
1072// the given directory. It does not respect symlinks.
1073func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI {
1074 var files []span.URI
1075 s.mu.Lock()
1076 defer s.mu.Unlock()
1077
1078 s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
1079 if source.InDir(dir.Filename(), uri.Filename()) {
1080 files = append(files, uri)
1081 }
1082 })
1083 return files
1084}
1085
1086func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) {
1087 phs, err := s.activePackageHandles(ctx)
1088 if err != nil {
1089 return nil, err
1090 }
1091 var pkgs []source.Package
1092 for _, ph := range phs {
1093 pkg, err := ph.await(ctx, s)
1094 if err != nil {
1095 return nil, err
1096 }
1097 pkgs = append(pkgs, pkg)
1098 }
1099 return pkgs, nil
1100}
1101
1102func (s *snapshot) activePackageHandles(ctx context.Context) ([]*packageHandle, error) {
1103 if err := s.awaitLoaded(ctx); err != nil {
1104 return nil, err
1105 }
1106 var phs []*packageHandle
1107 for _, pkgID := range s.activePackageIDs() {
1108 ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID))
1109 if err != nil {
1110 return nil, err
1111 }
1112 phs = append(phs, ph)
1113 }
1114 return phs, nil
1115}
1116
1117// Symbols extracts and returns the symbols for each file in all the snapshot's views.
1118func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol {
1119 var (
1120 group errgroup.Group
1121 nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU
1122 iolimit = make(chan struct{}, nprocs) // I/O limiting counting semaphore
1123 resultMu sync.Mutex
1124 result = make(map[span.URI][]source.Symbol)
1125 )
1126 s.files.Range(func(uri span.URI, f source.VersionedFileHandle) {
1127 if s.View().FileKind(f) != source.Go {
1128 return // workspace symbols currently supports only Go files.
1129 }
1130
1131 // TODO(adonovan): upgrade errgroup and use group.SetLimit(nprocs).
1132 iolimit <- struct{}{} // acquire token
1133 group.Go(func() error {
1134 defer func() { <-iolimit }() // release token
1135 symbols, err := s.symbolize(ctx, f)
1136 if err != nil {
1137 return err
1138 }
1139 resultMu.Lock()
1140 result[uri] = symbols
1141 resultMu.Unlock()
1142 return nil
1143 })
1144 })
1145 // Keep going on errors, but log the first failure.
1146 // Partial results are better than no symbol results.
1147 if err := group.Wait(); err != nil {
1148 event.Error(ctx, "getting snapshot symbols", err)
1149 }
1150 return result
1151}
1152
1153func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]source.Metadata, error) {
1154 knownIDs, err := s.getOrLoadIDsForURI(ctx, uri)
1155 if err != nil {
1156 return nil, err
1157 }
1158 var mds []source.Metadata
1159 for _, id := range knownIDs {
1160 md := s.getMetadata(id)
1161 // TODO(rfindley): knownIDs and metadata should be in sync, but existing
1162 // code is defensive of nil metadata.
1163 if md != nil {
1164 mds = append(mds, md)
1165 }
1166 }
1167 return mds, nil
1168}
1169
1170func (s *snapshot) KnownPackages(ctx context.Context) ([]source.Package, error) {
1171 if err := s.awaitLoaded(ctx); err != nil {
1172 return nil, err
1173 }
1174
1175 // The WorkspaceSymbols implementation relies on this function returning
1176 // workspace packages first.
1177 ids := s.workspacePackageIDs()
1178 s.mu.Lock()
1179 for id := range s.meta.metadata {
1180 if _, ok := s.workspacePackages[id]; ok {
1181 continue
1182 }
1183 ids = append(ids, id)
1184 }
1185 s.mu.Unlock()
1186
1187 var pkgs []source.Package
1188 for _, id := range ids {
1189 pkg, err := s.checkedPackage(ctx, id, s.workspaceParseMode(id))
1190 if err != nil {
1191 return nil, err
1192 }
1193 pkgs = append(pkgs, pkg)
1194 }
1195 return pkgs, nil
1196}
1197
1198func (s *snapshot) AllValidMetadata(ctx context.Context) ([]source.Metadata, error) {
1199 if err := s.awaitLoaded(ctx); err != nil {
1200 return nil, err
1201 }
1202
1203 s.mu.Lock()
1204 defer s.mu.Unlock()
1205
1206 var meta []source.Metadata
1207 for _, m := range s.meta.metadata {
1208 if m.Valid {
1209 meta = append(meta, m)
1210 }
1211 }
1212 return meta, nil
1213}
1214
1215func (s *snapshot) WorkspacePackageByID(ctx context.Context, id string) (source.Package, error) {
1216 packageID := PackageID(id)
1217 return s.checkedPackage(ctx, packageID, s.workspaceParseMode(packageID))
1218}
1219
1220func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Package, error) {
1221 // Don't reload workspace package metadata.
1222 // This function is meant to only return currently cached information.
1223 s.AwaitInitialized(ctx)
1224
1225 s.mu.Lock()
1226 defer s.mu.Unlock()
1227
1228 results := map[string]source.Package{}
1229 s.packages.Range(func(_, v interface{}) {
1230 cachedPkg, err := v.(*packageHandle).cached()
1231 if err != nil {
1232 return
1233 }
1234 for pkgPath, newPkg := range cachedPkg.depsByPkgPath {
1235 if oldPkg, ok := results[string(pkgPath)]; ok {
1236 // Using the same trick as NarrowestPackage, prefer non-variants.
1237 if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) {
1238 results[string(pkgPath)] = newPkg
1239 }
1240 } else {
1241 results[string(pkgPath)] = newPkg
1242 }
1243 }
1244 })
1245 return results, nil
1246}
1247
1248func (s *snapshot) GoModForFile(uri span.URI) span.URI {
1249 return moduleForURI(s.workspace.activeModFiles, uri)
1250}
1251
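// moduleForURI returns the go.mod URI whose directory most closely encloses
// uri, preferring the deepest (longest) match. For example, with active
// modules at /repo/go.mod and /repo/sub/go.mod, a file under /repo/sub/pkg
// resolves to /repo/sub/go.mod (illustrative paths only).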
1252func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI {
1253 var match span.URI
1254 for modURI := range modFiles {
1255 if !source.InDir(dirURI(modURI).Filename(), uri.Filename()) {
1256 continue
1257 }
1258 if len(modURI) > len(match) {
1259 match = modURI
1260 }
1261 }
1262 return match
1263}
1264
1265func (s *snapshot) getMetadata(id PackageID) *KnownMetadata {
1266 s.mu.Lock()
1267 defer s.mu.Unlock()
1268
1269 return s.meta.metadata[id]
1270}
1271
1272// clearShouldLoad clears package IDs that no longer need to be reloaded after
1273// the given scopes have been loaded.
1274func (s *snapshot) clearShouldLoad(scopes ...loadScope) {
1275 s.mu.Lock()
1276 defer s.mu.Unlock()
1277
1278 for _, scope := range scopes {
1279 switch scope := scope.(type) {
1280 case packageLoadScope:
1281 scopePath := PackagePath(scope)
1282 var toDelete []PackageID
1283 for id, pkgPaths := range s.shouldLoad {
1284 for _, pkgPath := range pkgPaths {
1285 if pkgPath == scopePath {
1286 toDelete = append(toDelete, id)
1287 }
1288 }
1289 }
1290 for _, id := range toDelete {
1291 delete(s.shouldLoad, id)
1292 }
1293 case fileLoadScope:
1294 uri := span.URI(scope)
1295 ids := s.meta.ids[uri]
1296 for _, id := range ids {
1297 delete(s.shouldLoad, id)
1298 }
1299 }
1300 }
1301}
1302
1303// noValidMetadataForURILocked reports whether there is no valid metadata for
1304// the given URI.
1305func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool {
1306 ids, ok := s.meta.ids[uri]
1307 if !ok {
1308 return true
1309 }
1310 for _, id := range ids {
1311 if m, ok := s.meta.metadata[id]; ok && m.Valid {
1312 return false
1313 }
1314 }
1315 return true
1316}
1317
1318func (s *snapshot) isWorkspacePackage(id PackageID) bool {
1319 s.mu.Lock()
1320 defer s.mu.Unlock()
1321
1322 _, ok := s.workspacePackages[id]
1323 return ok
1324}
1325
1326func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle {
1327 f := s.view.getFile(uri)
1328
1329 s.mu.Lock()
1330 defer s.mu.Unlock()
1331
1332 result, _ := s.files.Get(f.URI())
1333 return result
1334}
1335
1336// GetVersionedFile returns a File for the given URI. If the file is unknown it
1337// is added to the managed set.
1338//
1339// GetVersionedFile succeeds even if the file does not exist. A non-nil error return
1340// indicates some type of internal error, for example if ctx is cancelled.
1341func (s *snapshot) GetVersionedFile(ctx context.Context, uri span.URI) (source.VersionedFileHandle, error) {
1342 f := s.view.getFile(uri)
1343
1344 s.mu.Lock()
1345 defer s.mu.Unlock()
1346 return s.getFileLocked(ctx, f)
1347}
1348
1349// GetFile implements the fileSource interface by wrapping GetVersionedFile.
1350func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
1351 return s.GetVersionedFile(ctx, uri)
1352}
1353
1354func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.VersionedFileHandle, error) {
1355 if fh, ok := s.files.Get(f.URI()); ok {
1356 return fh, nil
1357 }
1358
1359 fh, err := s.view.session.cache.getFile(ctx, f.URI()) // read the file
1360 if err != nil {
1361 return nil, err
1362 }
1363 closed := &closedFile{fh}
1364 s.files.Set(f.URI(), closed)
1365 return closed, nil
1366}
1367
1368func (s *snapshot) IsOpen(uri span.URI) bool {
1369 s.mu.Lock()
1370 defer s.mu.Unlock()
1371 return s.isOpenLocked(uri)
1372
1373}
1374
1375func (s *snapshot) openFiles() []source.VersionedFileHandle {
1376 s.mu.Lock()
1377 defer s.mu.Unlock()
1378
1379 var open []source.VersionedFileHandle
1380 s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
1381 if isFileOpen(fh) {
1382 open = append(open, fh)
1383 }
1384 })
1385 return open
1386}
1387
1388func (s *snapshot) isOpenLocked(uri span.URI) bool {
1389 fh, _ := s.files.Get(uri)
1390 return isFileOpen(fh)
1391}
1392
1393func isFileOpen(fh source.VersionedFileHandle) bool {
1394 _, open := fh.(*overlay)
1395 return open
1396}
1397
1398func (s *snapshot) awaitLoaded(ctx context.Context) error {
1399 loadErr := s.awaitLoadedAllErrors(ctx)
1400
1401 s.mu.Lock()
1402 defer s.mu.Unlock()
1403
1404 // If we still have absolutely no metadata, check if the view failed to
1405 // initialize and return any errors.
1406 if s.useInvalidMetadata() && len(s.meta.metadata) > 0 {
1407 return nil
1408 }
1409 for _, m := range s.meta.metadata {
1410 if m.Valid {
1411 return nil
1412 }
1413 }
1414 if loadErr != nil {
1415 return loadErr.MainError
1416 }
1417 return nil
1418}
1419
1420func (s *snapshot) GetCriticalError(ctx context.Context) *source.CriticalError {
1421 if wsErr := s.workspace.criticalError(ctx, s); wsErr != nil {
1422 return wsErr
1423 }
1424
1425 loadErr := s.awaitLoadedAllErrors(ctx)
1426 if loadErr != nil && errors.Is(loadErr.MainError, context.Canceled) {
1427 return nil
1428 }
1429
1430 // Even if packages didn't fail to load, we still may want to show
1431 // additional warnings.
1432 if loadErr == nil {
1433 wsPkgs, _ := s.ActivePackages(ctx)
1434 if msg := shouldShowAdHocPackagesWarning(s, wsPkgs); msg != "" {
1435 return &source.CriticalError{
1436 MainError: errors.New(msg),
1437 }
1438 }
1439 // Even if workspace packages were returned, there still may be an error
1440 // with the user's workspace layout. Workspace packages that only have the
1441 // ID "command-line-arguments" are usually a symptom of a bad workspace
1442 // configuration.
1443 //
1444 // TODO(rfindley): re-evaluate this heuristic.
1445 if containsCommandLineArguments(wsPkgs) {
1446 return s.workspaceLayoutError(ctx)
1447 }
1448 return nil
1449 }
1450
1451 if errMsg := loadErr.MainError.Error(); strings.Contains(errMsg, "cannot find main module") || strings.Contains(errMsg, "go.mod file not found") {
1452 return s.workspaceLayoutError(ctx)
1453 }
1454 return loadErr
1455}
1456
1457const adHocPackagesWarning = `You are outside of a module and outside of $GOPATH/src.
1458If you are using modules, please open your editor to a directory in your module.
1459If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.`
1460
1461func shouldShowAdHocPackagesWarning(snapshot source.Snapshot, pkgs []source.Package) string {
1462 if snapshot.ValidBuildConfiguration() {
1463 return ""
1464 }
1465 for _, pkg := range pkgs {
1466 if len(pkg.MissingDependencies()) > 0 {
1467 return adHocPackagesWarning
1468 }
1469 }
1470 return ""
1471}
1472
1473func containsCommandLineArguments(pkgs []source.Package) bool {
1474 for _, pkg := range pkgs {
1475 if source.IsCommandLineArguments(pkg.ID()) {
1476 return true
1477 }
1478 }
1479 return false
1480}
1481
1482func (s *snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalError {
1483 // Do not return results until the snapshot's view has been initialized.
1484 s.AwaitInitialized(ctx)
1485
1486 // TODO(rfindley): Should we be more careful about returning the
1487 // initialization error? Is it possible for the initialization error to be
1488 // corrected without a successful reinitialization?
1489 s.mu.Lock()
1490 initializedErr := s.initializedErr
1491 s.mu.Unlock()
1492
1493 if initializedErr != nil {
1494 return initializedErr
1495 }
1496
1497 // TODO(rfindley): revisit this handling. Calling reloadWorkspace with a
1498 // cancelled context should have the same effect, so this preemptive handling
1499 // should not be necessary.
1500 //
1501 // Also: GetCriticalError ignores context cancellation errors. Should we be
1502 // returning nil here?
1503 if ctx.Err() != nil {
1504 return &source.CriticalError{MainError: ctx.Err()}
1505 }
1506
1507 // TODO(rfindley): reloading is not idempotent: if we try to reload or load
1508 // orphaned files below and fail, we won't try again. For that reason, we
1509 // could get different results from subsequent calls to this function, which
1510 // may cause critical errors to be suppressed.
1511
1512 if err := s.reloadWorkspace(ctx); err != nil {
1513 diags := s.extractGoCommandErrors(ctx, err)
1514 return &source.CriticalError{
1515 MainError: err,
1516 Diagnostics: diags,
1517 }
1518 }
1519
1520 if err := s.reloadOrphanedFiles(ctx); err != nil {
1521 diags := s.extractGoCommandErrors(ctx, err)
1522 return &source.CriticalError{
1523 MainError: err,
1524 Diagnostics: diags,
1525 }
1526 }
1527 return nil
1528}
1529
1530func (s *snapshot) getInitializationError(ctx context.Context) *source.CriticalError {
1531 s.mu.Lock()
1532 defer s.mu.Unlock()
1533
1534 return s.initializedErr
1535}
1536
1537func (s *snapshot) AwaitInitialized(ctx context.Context) {
1538 select {
1539 case <-ctx.Done():
1540 return
1541 case <-s.view.initialWorkspaceLoad:
1542 }
1543 // We typically prefer to run something as intensive as the IWL without
1544 // blocking. I'm not sure if there is a way to do that here.
1545 s.initialize(ctx, false)
1546}
1547
1548// reloadWorkspace reloads the metadata for all invalidated workspace packages.
1549func (s *snapshot) reloadWorkspace(ctx context.Context) error {
1550 var scopes []loadScope
1551 var seen map[PackagePath]bool
1552 s.mu.Lock()
1553 for _, pkgPaths := range s.shouldLoad {
1554 for _, pkgPath := range pkgPaths {
1555 if seen == nil {
1556 seen = make(map[PackagePath]bool)
1557 }
1558 if seen[pkgPath] {
1559 continue
1560 }
1561 seen[pkgPath] = true
1562 scopes = append(scopes, packageLoadScope(pkgPath))
1563 }
1564 }
1565 s.mu.Unlock()
1566
1567 if len(scopes) == 0 {
1568 return nil
1569 }
1570
1571 // If the view's build configuration is invalid, we cannot reload by
1572 // package path. Just reload the directory instead.
1573 if !s.ValidBuildConfiguration() {
1574 scopes = []loadScope{viewLoadScope("LOAD_INVALID_VIEW")}
1575 }
1576
1577 err := s.load(ctx, false, scopes...)
1578
1579 // Unless the context was canceled, set "shouldLoad" to false for all
1580 // of the metadata we attempted to load.
1581 if !errors.Is(err, context.Canceled) {
1582 s.clearShouldLoad(scopes...)
1583 }
1584
1585 return err
1586}
1587
1588func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
1589 // When we load ./... or a package path directly, we may not get packages
1590 // that exist only in overlays. As a workaround, we search all of the files
1591 // available in the snapshot and reload their metadata individually using a
1592 // file= query if the metadata is unavailable.
1593 files := s.orphanedFiles()
1594
1595 // Files without a valid package declaration can't be loaded. Don't try.
1596 var scopes []loadScope
1597 for _, file := range files {
1598 pgf, err := s.ParseGo(ctx, file, source.ParseHeader)
1599 if err != nil {
1600 continue
1601 }
1602 if !pgf.File.Package.IsValid() {
1603 continue
1604 }
1605
1606 scopes = append(scopes, fileLoadScope(file.URI()))
1607 }
1608
1609 if len(scopes) == 0 {
1610 return nil
1611 }
1612
1613 // The regtests match this exact log message, keep them in sync.
1614 event.Log(ctx, "reloadOrphanedFiles reloading", tag.Query.Of(scopes))
1615 err := s.load(ctx, false, scopes...)
1616
1617 // If we failed to load some files, i.e. they have no metadata,
1618 // mark the failures so we don't bother retrying until the file's
1619 // content changes.
1620 //
1621 // TODO(rstambler): This may be an overestimate if the load stopped
 1622 // early for an unrelated error. Add a fallback?
1623 //
1624 // Check for context cancellation so that we don't incorrectly mark files
1625 // as unloadable, but don't return before setting all workspace packages.
1626 if ctx.Err() == nil && err != nil {
1627 event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
1628 s.mu.Lock()
1629 for _, scope := range scopes {
1630 uri := span.URI(scope.(fileLoadScope))
1631 if s.noValidMetadataForURILocked(uri) {
1632 s.unloadableFiles[uri] = struct{}{}
1633 }
1634 }
1635 s.mu.Unlock()
1636 }
1637 return nil
1638}
1639
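// orphanedFiles returns the Go files in the workspace folder for which the
// snapshot has no valid package metadata, skipping unopened vendored files
// and files already marked as unloadable.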
1640func (s *snapshot) orphanedFiles() []source.VersionedFileHandle {
1641 s.mu.Lock()
1642 defer s.mu.Unlock()
1643
1644 var files []source.VersionedFileHandle
1645 s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) {
1646 // Don't try to reload metadata for go.mod files.
1647 if s.view.FileKind(fh) != source.Go {
1648 return
1649 }
1650 // If the URI doesn't belong to this view, then it's not in a workspace
1651 // package and should not be reloaded directly.
1652 if !source.InDir(s.view.folder.Filename(), uri.Filename()) {
1653 return
1654 }
1655 // If the file is not open and is in a vendor directory, don't treat it
1656 // like a workspace package.
1657 if _, ok := fh.(*overlay); !ok && inVendor(uri) {
1658 return
1659 }
1660 // Don't reload metadata for files we've already deemed unloadable.
1661 if _, ok := s.unloadableFiles[uri]; ok {
1662 return
1663 }
1664 if s.noValidMetadataForURILocked(uri) {
1665 files = append(files, fh)
1666 }
1667 })
1668 return files
1669}
1670
1671// TODO(golang/go#53756): this function needs to consider more than just the
1672// absolute URI, for example:
1673// - the position of /vendor/ with respect to the relevant module root
1674// - whether or not go.work is in use (as vendoring isn't supported in workspace mode)
1675//
1676// Most likely, each call site of inVendor needs to be reconsidered to
1677// understand and correctly implement the desired behavior.
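// inVendor reports whether uri refers to a file inside a vendored package,
// i.e. in a subdirectory of a /vendor/ directory.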
1678func inVendor(uri span.URI) bool {
1679 if !strings.Contains(string(uri), "/vendor/") {
1680 return false
1681 }
1682 // Only packages in _subdirectories_ of /vendor/ are considered vendored
1683 // (/vendor/a/foo.go is vendored, /vendor/foo.go is not).
1684 split := strings.Split(string(uri), "/vendor/")
1685 if len(split) < 2 {
1686 return false
1687 }
1688 return strings.Contains(split[1], "/")
1689}
1690
1691// unappliedChanges is a file source that overlays not-yet-applied changes onto an existing snapshot.
1692type unappliedChanges struct {
1693 originalSnapshot *snapshot
1694 changes map[span.URI]*fileChange
1695}
1696
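// GetFile returns the changed file handle for uri if a change is pending,
// and otherwise falls back to the original snapshot.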
1697func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) {
1698 if c, ok := ac.changes[uri]; ok {
1699 return c.fileHandle, nil
1700 }
1701 return ac.originalSnapshot.GetFile(ctx, uri)
1702}
1703
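// clone creates a new snapshot reflecting the given file changes, copying
// cached data from s and invalidating anything affected by the changes. It
// returns the new snapshot together with a function that releases the lease
// acquired on it. If forceReloadMetadata is set, all package metadata is
// invalidated.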
1704func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) {
1705 ctx, done := event.Start(ctx, "snapshot.clone")
1706 defer done()
1707
1708 newWorkspace, reinit := s.workspace.Clone(ctx, changes, &unappliedChanges{
1709 originalSnapshot: s,
1710 changes: changes,
1711 })
1712
1713 s.mu.Lock()
1714 defer s.mu.Unlock()
1715
1716 // If there is an initialization error and a vendor directory changed, try to
1717 // reinit.
1718 if s.initializedErr != nil {
1719 for uri := range changes {
1720 if inVendor(uri) {
1721 reinit = true
1722 break
1723 }
1724 }
1725 }
1726
1727 bgCtx, cancel := context.WithCancel(bgCtx)
1728 result := &snapshot{
1729 id: s.id + 1,
1730 store: s.store,
1731 view: s.view,
1732 backgroundCtx: bgCtx,
1733 cancel: cancel,
1734 builtin: s.builtin,
1735 initialized: s.initialized,
1736 initializedErr: s.initializedErr,
1737 packages: s.packages.Clone(),
1738 isActivePackageCache: s.isActivePackageCache.Clone(),
1739 actions: s.actions.Clone(),
1740 files: s.files.Clone(),
1741 parsedGoFiles: s.parsedGoFiles.Clone(),
1742 parseKeysByURI: s.parseKeysByURI.Clone(),
1743 symbolizeHandles: s.symbolizeHandles.Clone(),
1744 workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
1745 unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
1746 parseModHandles: s.parseModHandles.Clone(),
1747 parseWorkHandles: s.parseWorkHandles.Clone(),
1748 modTidyHandles: s.modTidyHandles.Clone(),
1749 modWhyHandles: s.modWhyHandles.Clone(),
1750 knownSubdirs: s.knownSubdirs.Clone(),
1751 workspace: newWorkspace,
1752 }
1753
1754 // The new snapshot needs initialization if either s was uninitialized, or
1755 // we've detected a change that triggers reinitialization.
1756 if reinit {
1757 result.initialized = false
1758 }
1759
1760 // Create a lease on the new snapshot.
1761 // (Best to do this early in case the code below hides an
1762 // incref/decref operation that might destroy it prematurely.)
1763 release := result.Acquire()
1764
1765 // Copy the set of unloadable files.
1766 //
1767 // TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on
1768 // changes to environment or workspace layout, or more generally on any
1769 // metadata change?
1770 for k, v := range s.unloadableFiles {
1771 result.unloadableFiles[k] = v
1772 }
1773
1774 // TODO(adonovan): merge loops over "changes".
1775 for uri := range changes {
1776 keys, ok := result.parseKeysByURI.Get(uri)
1777 if ok {
1778 for _, key := range keys {
1779 result.parsedGoFiles.Delete(key)
1780 }
1781 result.parseKeysByURI.Delete(uri)
1782 }
1783
1784 // Invalidate go.mod-related handles.
1785 result.modTidyHandles.Delete(uri)
1786 result.modWhyHandles.Delete(uri)
1787
1788 // Invalidate handles for cached symbols.
1789 result.symbolizeHandles.Delete(uri)
1790 }
1791
1792 // Add all of the known subdirectories, but don't update them for the
1793 // changed files. We need to rebuild the workspace module to know the
1794 // true set of known subdirectories, but we don't want to do that in clone.
1795 result.knownSubdirs = s.knownSubdirs.Clone()
1796 result.knownSubdirsPatternCache = s.knownSubdirsPatternCache
1797 for _, c := range changes {
1798 result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c)
1799 }
1800
1801 // directIDs keeps track of package IDs that have directly changed.
1802 // It maps id->invalidateMetadata.
1803 directIDs := map[PackageID]bool{}
1804
1805 // Invalidate all package metadata if the workspace module has changed.
1806 if reinit {
1807 for k := range s.meta.metadata {
1808 directIDs[k] = true
1809 }
1810 }
1811
1812 // Compute invalidations based on file changes.
1813 anyImportDeleted := false // import deletions can resolve cycles
1814 anyFileOpenedOrClosed := false // opened files affect workspace packages
1815 anyFileAdded := false // adding a file can resolve missing dependencies
1816
1817 for uri, change := range changes {
1818 // The original FileHandle for this URI is cached on the snapshot.
1819 originalFH, _ := s.files.Get(uri)
1820 var originalOpen, newOpen bool
1821 _, originalOpen = originalFH.(*overlay)
1822 _, newOpen = change.fileHandle.(*overlay)
1823 anyFileOpenedOrClosed = anyFileOpenedOrClosed || (originalOpen != newOpen)
1824 anyFileAdded = anyFileAdded || (originalFH == nil && change.fileHandle != nil)
1825
1826 // If uri is a Go file, check if it has changed in a way that would
1827 // invalidate metadata. Note that we can't use s.view.FileKind here,
1828 // because the file type that matters is not what the *client* tells us,
1829 // but what the Go command sees.
1830 var invalidateMetadata, pkgFileChanged, importDeleted bool
1831 if strings.HasSuffix(uri.Filename(), ".go") {
1832 invalidateMetadata, pkgFileChanged, importDeleted = metadataChanges(ctx, s, originalFH, change.fileHandle)
1833 }
1834
1835 invalidateMetadata = invalidateMetadata || forceReloadMetadata || reinit
1836 anyImportDeleted = anyImportDeleted || importDeleted
1837
1838 // Mark all of the package IDs containing the given file.
1839 filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged)
1840 for id := range filePackageIDs {
1841 directIDs[id] = directIDs[id] || invalidateMetadata
1842 }
1843
1844 // Invalidate the previous modTidyHandle if any of the files have been
1845 // saved or if any of the metadata has been invalidated.
1846 if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) {
1847 // TODO(maybe): Only delete mod handles for
1848 // which the withoutURI is relevant.
1849 // Requires reverse-engineering the go command. (!)
1850
1851 result.modTidyHandles.Clear()
1852 result.modWhyHandles.Clear()
1853 }
1854
1855 result.parseModHandles.Delete(uri)
1856 result.parseWorkHandles.Delete(uri)
1857 // Handle the invalidated file; it may have new contents or not exist.
1858 if !change.exists {
1859 result.files.Delete(uri)
1860 } else {
1861 result.files.Set(uri, change.fileHandle)
1862 }
1863
1864 // Make sure to remove the changed file from the unloadable set.
1865 delete(result.unloadableFiles, uri)
1866 }
1867
1868 // Deleting an import can cause list errors due to import cycles to be
1869 // resolved. The best we can do without parsing the list error message is to
1870 // hope that list errors may have been resolved by a deleted import.
1871 //
1872 // We could do better by parsing the list error message. We already do this
1873 // to assign a better range to the list error, but for such critical
1874 // functionality as metadata, it's better to be conservative until it proves
1875 // impractical.
1876 //
1877 // We could also do better by looking at which imports were deleted and
1878 // trying to find cycles they are involved in. This fails when the file goes
1879 // from an unparseable state to a parseable state, as we don't have a
1880 // starting point to compare with.
1881 if anyImportDeleted {
1882 for id, metadata := range s.meta.metadata {
1883 if len(metadata.Errors) > 0 {
1884 directIDs[id] = true
1885 }
1886 }
1887 }
1888
1889 // Adding a file can resolve missing dependencies from existing packages.
1890 //
1891 // We could be smart here and try to guess which packages may have been
1892 // fixed, but until that proves necessary, just invalidate metadata for any
1893 // package with missing dependencies.
1894 if anyFileAdded {
1895 for id, metadata := range s.meta.metadata {
1896 if len(metadata.MissingDeps) > 0 {
1897 directIDs[id] = true
1898 }
1899 }
1900 }
1901
1902 // Invalidate reverse dependencies too.
1903 // idsToInvalidate keeps track of transitive reverse dependencies.
1904 // If an ID is present in the map, invalidate its types.
1905 // If an ID's value is true, invalidate its metadata too.
1906 idsToInvalidate := map[PackageID]bool{}
1907 var addRevDeps func(PackageID, bool)
1908 addRevDeps = func(id PackageID, invalidateMetadata bool) {
1909 current, seen := idsToInvalidate[id]
1910 newInvalidateMetadata := current || invalidateMetadata
1911
1912 // If we've already seen this ID, and the value of invalidate
1913 // metadata has not changed, we can return early.
1914 if seen && current == newInvalidateMetadata {
1915 return
1916 }
1917 idsToInvalidate[id] = newInvalidateMetadata
1918 for _, rid := range s.meta.importedBy[id] {
1919 addRevDeps(rid, invalidateMetadata)
1920 }
1921 }
1922 for id, invalidateMetadata := range directIDs {
1923 addRevDeps(id, invalidateMetadata)
1924 }
1925
1926 result.invalidatePackagesLocked(idsToInvalidate)
1927
1928 // If a file has been deleted, we must delete metadata for all packages
1929 // containing that file.
1930 //
1931 // TODO(rfindley): why not keep invalid metadata in this case? If we
1932 // otherwise allow operating on invalid metadata, why not continue to do so,
1933 // skipping the missing file?
1934 skipID := map[PackageID]bool{}
1935 for _, c := range changes {
1936 if c.exists {
1937 continue
1938 }
1939 // The file has been deleted.
1940 if ids, ok := s.meta.ids[c.fileHandle.URI()]; ok {
1941 for _, id := range ids {
1942 skipID[id] = true
1943 }
1944 }
1945 }
1946
1947 // Any packages that need loading in s still need loading in the new
1948 // snapshot.
1949 for k, v := range s.shouldLoad {
1950 if result.shouldLoad == nil {
1951 result.shouldLoad = make(map[PackageID][]PackagePath)
1952 }
1953 result.shouldLoad[k] = v
1954 }
1955
1956 // TODO(rfindley): consolidate this workspace mode detection with
1957 // workspace invalidation.
1958 workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
1959
1960 // We delete invalid metadata in the following cases:
1961 // - If we are forcing a reload of metadata.
1962 // - If the workspace mode has changed, as stale metadata may produce
1963 // confusing or incorrect diagnostics.
1964 //
1965 // TODO(rfindley): we should probably also clear metadata if we are
1966 // reinitializing the workspace, as otherwise we could leave around a bunch
1967 // of irrelevant and duplicate metadata (for example, if the module path
1968 // changed). However, this breaks the "experimentalUseInvalidMetadata"
1969 // feature, which relies on stale metadata when, for example, a go.mod file
1970 // is broken via invalid syntax.
1971 deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
1972
1973 // Compute which metadata updates are required. We only need to invalidate
1974 // packages directly containing the affected file, and only if it changed in
1975 // a relevant way.
1976 metadataUpdates := make(map[PackageID]*KnownMetadata)
1977 for k, v := range s.meta.metadata {
1978 invalidateMetadata := idsToInvalidate[k]
1979
1980 // For metadata that has been newly invalidated, capture package paths
1981 // requiring reloading in the shouldLoad map.
1982 if invalidateMetadata && !source.IsCommandLineArguments(string(v.ID)) {
1983 if result.shouldLoad == nil {
1984 result.shouldLoad = make(map[PackageID][]PackagePath)
1985 }
1986 needsReload := []PackagePath{v.PkgPath}
1987 if v.ForTest != "" && v.ForTest != v.PkgPath {
1988 // When reloading test variants, always reload their ForTest package as
1989 // well. Otherwise, we may miss test variants in the resulting load.
1990 //
1991 // TODO(rfindley): is this actually sufficient? Is it possible that
1992 // other test variants may be invalidated? Either way, we should
1993 // determine exactly what needs to be reloaded here.
1994 needsReload = append(needsReload, v.ForTest)
1995 }
1996 result.shouldLoad[k] = needsReload
1997 }
1998
1999 // Check whether the metadata should be deleted.
2000 if skipID[k] || (invalidateMetadata && deleteInvalidMetadata) {
2001 metadataUpdates[k] = nil
2002 continue
2003 }
2004
2005 // Check if the metadata has changed.
2006 valid := v.Valid && !invalidateMetadata
2007 if valid != v.Valid {
2008 // Mark invalidated metadata rather than deleting it outright.
2009 metadataUpdates[k] = &KnownMetadata{
2010 Metadata: v.Metadata,
2011 Valid: valid,
2012 }
2013 }
2014 }
2015
2016 // Update metadata, if necessary.
2017 result.meta = s.meta.Clone(metadataUpdates)
2018
2019 // Update workspace and active packages, if necessary.
2020 if result.meta != s.meta || anyFileOpenedOrClosed {
2021 result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta)
2022 result.resetIsActivePackageLocked()
2023 } else {
2024 result.workspacePackages = s.workspacePackages
2025 }
2026
2027 // Don't bother copying the importedBy graph,
2028 // as it changes each time we update metadata.
2029
2030 // If the snapshot's workspace mode has changed, the packages loaded using
2031 // the previous mode are no longer relevant, so clear them out.
2032 if workspaceModeChanged {
2033 result.workspacePackages = map[PackageID]PackagePath{}
2034 }
2035 result.dumpWorkspace("clone")
2036 return result, release
2037}
2038
2039// invalidatedPackageIDs returns all packages invalidated by a change to uri.
2040// If we haven't seen this URI before, we guess based on files in the same
2041// directory. This is of course incorrect in build systems where packages are
2042// not organized by directory.
2043//
2044// If packageFileChanged is set, the file is either a new file, or has a new
2045// package name. In this case, all known packages in the directory will be
2046// invalidated.
2047func invalidatedPackageIDs(uri span.URI, known map[span.URI][]PackageID, packageFileChanged bool) map[PackageID]struct{} {
2048 invalidated := make(map[PackageID]struct{})
2049
2050 // At a minimum, we invalidate packages known to contain uri.
2051 for _, id := range known[uri] {
2052 invalidated[id] = struct{}{}
2053 }
2054
2055 // If the file didn't move to a new package, we should only invalidate the
2056 // packages it is currently contained inside.
2057 if !packageFileChanged && len(invalidated) > 0 {
2058 return invalidated
2059 }
2060
2061 // This is a file we don't yet know about, or which has moved packages. Guess
2062 // relevant packages by considering files in the same directory.
2063
2064 // Cache of FileInfo to avoid unnecessary stats for multiple files in the
2065 // same directory.
2066 stats := make(map[string]struct {
2067 os.FileInfo
2068 error
2069 })
2070 getInfo := func(dir string) (os.FileInfo, error) {
2071 if res, ok := stats[dir]; ok {
2072 return res.FileInfo, res.error
2073 }
2074 fi, err := os.Stat(dir)
2075 stats[dir] = struct {
2076 os.FileInfo
2077 error
2078 }{fi, err}
2079 return fi, err
2080 }
2081 dir := filepath.Dir(uri.Filename())
2082 fi, err := getInfo(dir)
2083 if err == nil {
2084 // Aggregate all possibly relevant package IDs.
2085 for knownURI, ids := range known {
2086 knownDir := filepath.Dir(knownURI.Filename())
2087 knownFI, err := getInfo(knownDir)
2088 if err != nil {
2089 continue
2090 }
2091 if os.SameFile(fi, knownFI) {
2092 for _, id := range ids {
2093 invalidated[id] = struct{}{}
2094 }
2095 }
2096 }
2097 }
2098 return invalidated
2099}
2100
2101// invalidatePackagesLocked deletes data associated with the given package IDs.
2102//
2103// Note: all keys in the ids map are invalidated, regardless of the
2104// corresponding value.
2105//
2106// s.mu must be held while calling this function.
2107func (s *snapshot) invalidatePackagesLocked(ids map[PackageID]bool) {
2108 // Delete invalidated package type information.
2109 for id := range ids {
2110 for _, mode := range source.AllParseModes {
2111 key := packageKey{mode, id}
2112 s.packages.Delete(key)
2113 }
2114 }
2115
2116 // Copy actions.
2117 // TODO(adonovan): opt: avoid iteration over s.actions.
2118 var actionsToDelete []actionKey
2119 s.actions.Range(func(k, _ interface{}) {
2120 key := k.(actionKey)
2121 if _, ok := ids[key.pkgid]; ok {
2122 actionsToDelete = append(actionsToDelete, key)
2123 }
2124 })
2125 for _, key := range actionsToDelete {
2126 s.actions.Delete(key)
2127 }
2128}
2129
2130// fileWasSaved reports whether the FileHandle passed in has been saved. It
2131// accomplishes this by checking to see if the original and current FileHandles
2132// are both overlays, and if the current FileHandle is saved while the original
2133// FileHandle was not saved.
2134func fileWasSaved(originalFH, currentFH source.FileHandle) bool {
2135 c, ok := currentFH.(*overlay)
2136 if !ok || c == nil {
2137 return true
2138 }
2139 o, ok := originalFH.(*overlay)
2140 if !ok || o == nil {
2141 return c.saved
2142 }
2143 return !o.saved && c.saved
2144}
2145
2146// metadataChanges detects features of the change from oldFH->newFH that may
2147// affect package metadata.
2148//
2149// It uses lockedSnapshot to access cached parse information. lockedSnapshot
2150// must be locked.
2151//
2152// The result parameters have the following meaning:
2153// - invalidate means that package metadata for packages containing the file
2154// should be invalidated.
2155 // - pkgFileChanged means that the file->package associations for the file have
2156// changed (possibly because the file is new, or because its package name has
2157// changed).
2158// - importDeleted means that an import has been deleted, or we can't
2159// determine if an import was deleted due to errors.
2160func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH source.FileHandle) (invalidate, pkgFileChanged, importDeleted bool) {
2161 if oldFH == nil || newFH == nil { // existential changes
2162 changed := (oldFH == nil) != (newFH == nil)
2163 return changed, changed, (newFH == nil) // we don't know if an import was deleted
2164 }
2165
2166 // If the file hasn't changed, there's no need to reload.
2167 if oldFH.FileIdentity() == newFH.FileIdentity() {
2168 return false, false, false
2169 }
2170
2171 // Parse headers to compare package names and imports.
2172 oldHead, oldErr := peekOrParse(ctx, lockedSnapshot, oldFH, source.ParseHeader)
2173 newHead, newErr := peekOrParse(ctx, lockedSnapshot, newFH, source.ParseHeader)
2174
2175 if oldErr != nil || newErr != nil {
2176 // TODO(rfindley): we can get here if the file for newFH does not exist. There is
2177 // asymmetry here, in that newFH may be non-nil even if the underlying file
2178 // does not exist.
2179 //
2180 // We should not produce a non-nil filehandle for a file that does not exist.
2181 errChanged := (oldErr == nil) != (newErr == nil)
2182 return errChanged, errChanged, (newErr != nil) // we don't know if an import was deleted
2183 }
2184
2185 // `go list` fails completely if the file header cannot be parsed. If we go
2186 // from a non-parsing state to a parsing state, we should reload.
2187 if oldHead.ParseErr != nil && newHead.ParseErr == nil {
2188 return true, true, true // We don't know what changed, so fall back on full invalidation.
2189 }
2190
2191 // If a package name has changed, the set of package imports may have changed
2192 // in ways we can't detect here. Assume an import has been deleted.
2193 if oldHead.File.Name.Name != newHead.File.Name.Name {
2194 return true, true, true
2195 }
2196
2197 // Check whether package imports have changed. Only consider potentially
2198 // valid import paths.
2199 oldImports := validImports(oldHead.File.Imports)
2200 newImports := validImports(newHead.File.Imports)
2201
2202 for path := range newImports {
2203 if _, ok := oldImports[path]; ok {
2204 delete(oldImports, path)
2205 } else {
2206 invalidate = true // a new, potentially valid import was added
2207 }
2208 }
2209
2210 if len(oldImports) > 0 {
2211 invalidate = true
2212 importDeleted = true
2213 }
2214
2215 // If the change does not otherwise invalidate metadata, get the full ASTs in
2216 // order to check magic comments.
2217 //
2218 // Note: if this affects performance we can probably avoid parsing in the
2219 // common case by first scanning the source for potential comments.
2220 if !invalidate {
2221 origFull, oldErr := peekOrParse(ctx, lockedSnapshot, oldFH, source.ParseFull)
2222 currFull, newErr := peekOrParse(ctx, lockedSnapshot, newFH, source.ParseFull)
2223 if oldErr == nil && newErr == nil {
2224 invalidate = magicCommentsChanged(origFull.File, currFull.File)
2225 } else {
2226 // At this point, we shouldn't ever fail to produce a ParsedGoFile, as
2227 // we're already past header parsing.
2228 bug.Reportf("metadataChanges: unparseable file %v (old error: %v, new error: %v)", oldFH.URI(), oldErr, newErr)
2229 }
2230 }
2231
2232 return invalidate, pkgFileChanged, importDeleted
2233}
2234
2235// peekOrParse returns the cached ParsedGoFile if it exists,
2236// otherwise parses without populating the cache.
2237//
2238// It returns an error if the file could not be read (note that parsing errors
2239// are stored in ParsedGoFile.ParseErr).
2240//
2241// lockedSnapshot must be locked.
2242func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) {
2243 // Peek in the cache without populating it.
2244 // We do this to reduce retained heap, not work.
2245 if parsed, _ := lockedSnapshot.peekParseGoLocked(fh, mode); parsed != nil {
2246 return parsed, nil // cache hit
2247 }
2248 return parseGoImpl(ctx, token.NewFileSet(), fh, mode)
2249}
2250
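// magicCommentsChanged reports whether the magic comments (build constraints
// and //go:embed directives) of the original and current files differ.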
2251func magicCommentsChanged(original *ast.File, current *ast.File) bool {
2252 oldComments := extractMagicComments(original)
2253 newComments := extractMagicComments(current)
2254 if len(oldComments) != len(newComments) {
2255 return true
2256 }
2257 for i := range oldComments {
2258 if oldComments[i] != newComments[i] {
2259 return true
2260 }
2261 }
2262 return false
2263}
2264
2265// validImports extracts the set of valid import paths from imports.
2266func validImports(imports []*ast.ImportSpec) map[string]struct{} {
2267 m := make(map[string]struct{})
2268 for _, spec := range imports {
2269 if path := spec.Path.Value; validImportPath(path) {
2270 m[path] = struct{}{}
2271 }
2272 }
2273 return m
2274}
2275
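// validImportPath reports whether path is a plausible quoted import path:
// it must unquote successfully, be non-empty, and not end in a slash.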
2276func validImportPath(path string) bool {
2277 path, err := strconv.Unquote(path)
2278 if err != nil {
2279 return false
2280 }
2281 if path == "" {
2282 return false
2283 }
2284 if path[len(path)-1] == '/' {
2285 return false
2286 }
2287 return true
2288}
2289
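// buildConstraintOrEmbedRe matches comments that affect package metadata:
// build constraints in either syntax (//go:build or // +build) and
// //go:embed directives.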
2290var buildConstraintOrEmbedRe = regexp.MustCompile(`^//(go:embed|go:build|\s*\+build).*`)
2291
2292// extractMagicComments finds magic comments that affect metadata in f.
2293func extractMagicComments(f *ast.File) []string {
2294 var results []string
2295 for _, cg := range f.Comments {
2296 for _, c := range cg.List {
2297 if buildConstraintOrEmbedRe.MatchString(c.Text) {
2298 results = append(results, c.Text)
2299 }
2300 }
2301 }
2302 return results
2303}
2304
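// BuiltinFile returns the parsed builtin.go file pinned by this snapshot,
// waiting for the view's initialization to complete first.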
2305func (s *snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error) {
2306 s.AwaitInitialized(ctx)
2307
2308 s.mu.Lock()
2309 builtin := s.builtin
2310 s.mu.Unlock()
2311
2312 if builtin == "" {
2313 return nil, fmt.Errorf("no builtin package for view %s", s.view.name)
2314 }
2315
2316 fh, err := s.GetFile(ctx, builtin)
2317 if err != nil {
2318 return nil, err
2319 }
2320 return s.ParseGo(ctx, fh, source.ParseFull)
2321}
2322
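// IsBuiltin reports whether uri is the URI of the pinned builtin.go file.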
2323func (s *snapshot) IsBuiltin(ctx context.Context, uri span.URI) bool {
2324 s.mu.Lock()
2325 defer s.mu.Unlock()
2326 // We should always get the builtin URI in a canonical form, so use simple
2327 // string comparison here. span.CompareURI is too expensive.
2328 return uri == s.builtin
2329}
2330
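// setBuiltin records path as the location of the builtin.go file.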
2331func (s *snapshot) setBuiltin(path string) {
2332 s.mu.Lock()
2333 defer s.mu.Unlock()
2334
2335 s.builtin = span.URIFromPath(path)
2336}
2337
2338// BuildGoplsMod generates a go.mod file for all modules in the workspace. It
2339// bypasses any existing gopls.mod.
2340func (s *snapshot) BuildGoplsMod(ctx context.Context) (*modfile.File, error) {
2341 allModules, err := findModules(s.view.folder, pathExcludedByFilterFunc(s.view.rootURI.Filename(), s.view.gomodcache, s.View().Options()), 0)
2342 if err != nil {
2343 return nil, err
2344 }
2345 return buildWorkspaceModFile(ctx, allModules, s)
2346}
2347
2348// TODO(rfindley): move this to workspace.go
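// buildWorkspaceModFile synthesizes a go.mod file for the workspace module.
// It requires each module in modFiles, replaces it with its on-disk
// directory, carries over the modules' replace and exclude directives, and
// records the highest go version found.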
2349func buildWorkspaceModFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) (*modfile.File, error) {
2350 file := &modfile.File{}
2351 file.AddModuleStmt("gopls-workspace")
2352 // Track the highest Go version, to be set on the workspace module.
2353 // Fall back to 1.12 -- old versions insist on having some version.
2354 goVersion := "1.12"
2355
2356 paths := map[string]span.URI{}
2357 excludes := map[string][]string{}
2358 var sortedModURIs []span.URI
2359 for uri := range modFiles {
2360 sortedModURIs = append(sortedModURIs, uri)
2361 }
2362 sort.Slice(sortedModURIs, func(i, j int) bool {
2363 return sortedModURIs[i] < sortedModURIs[j]
2364 })
2365 for _, modURI := range sortedModURIs {
2366 fh, err := fs.GetFile(ctx, modURI)
2367 if err != nil {
2368 return nil, err
2369 }
2370 content, err := fh.Read()
2371 if err != nil {
2372 return nil, err
2373 }
2374 parsed, err := modfile.Parse(fh.URI().Filename(), content, nil)
2375 if err != nil {
2376 return nil, err
2377 }
2378 if file == nil || parsed.Module == nil {
2379 return nil, fmt.Errorf("no module declaration for %s", modURI)
2380 }
2381 // Prepend "v" to go versions to make them valid semver.
2382 if parsed.Go != nil && semver.Compare("v"+goVersion, "v"+parsed.Go.Version) < 0 {
2383 goVersion = parsed.Go.Version
2384 }
2385 path := parsed.Module.Mod.Path
2386 if seen, ok := paths[path]; ok {
2387 return nil, fmt.Errorf("found module %q multiple times in the workspace, at:\n\t%q\n\t%q", path, seen, modURI)
2388 }
2389 paths[path] = modURI
2390 // If the module's path includes a major version, we expect it to have
2391 // a matching major version.
2392 _, majorVersion, _ := module.SplitPathVersion(path)
2393 if majorVersion == "" {
2394 majorVersion = "/v0"
2395 }
2396 majorVersion = strings.TrimLeft(majorVersion, "/.") // handle gopkg.in versions
2397 file.AddNewRequire(path, source.WorkspaceModuleVersion(majorVersion), false)
2398 if err := file.AddReplace(path, "", dirURI(modURI).Filename(), ""); err != nil {
2399 return nil, err
2400 }
2401 for _, exclude := range parsed.Exclude {
2402 excludes[exclude.Mod.Path] = append(excludes[exclude.Mod.Path], exclude.Mod.Version)
2403 }
2404 }
2405 if goVersion != "" {
2406 file.AddGoStmt(goVersion)
2407 }
2408 // Go back through all of the modules to handle any of their replace
2409 // statements.
2410 for _, modURI := range sortedModURIs {
2411 fh, err := fs.GetFile(ctx, modURI)
2412 if err != nil {
2413 return nil, err
2414 }
2415 content, err := fh.Read()
2416 if err != nil {
2417 return nil, err
2418 }
2419 parsed, err := modfile.Parse(fh.URI().Filename(), content, nil)
2420 if err != nil {
2421 return nil, err
2422 }
2423 // If any of the workspace modules have replace directives, they need
2424 // to be reflected in the workspace module.
2425 for _, rep := range parsed.Replace {
2426 // Don't replace any modules that are in our workspace--we should
2427 // always use the version in the workspace.
2428 if _, ok := paths[rep.Old.Path]; ok {
2429 continue
2430 }
2431 newPath := rep.New.Path
2432 newVersion := rep.New.Version
2433 // If a replace points to a module in the workspace, make sure we
2434 // direct it to the version of the module in the workspace.
2435 if m, ok := paths[rep.New.Path]; ok {
2436 newPath = dirURI(m).Filename()
2437 newVersion = ""
2438 } else if rep.New.Version == "" && !filepath.IsAbs(rep.New.Path) {
2439 // Make any relative paths absolute.
2440 newPath = filepath.Join(dirURI(modURI).Filename(), rep.New.Path)
2441 }
2442 if err := file.AddReplace(rep.Old.Path, rep.Old.Version, newPath, newVersion); err != nil {
2443 return nil, err
2444 }
2445 }
2446 }
2447 for path, versions := range excludes {
2448 for _, version := range versions {
2449 file.AddExclude(path, version)
2450 }
2451 }
2452 file.SortBlocks()
2453 return file, nil
2454}
2455
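// buildWorkspaceSumFile aggregates the go.sum files adjacent to each module
// in modFiles into the contents of a single go.sum for the workspace module.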
2456func buildWorkspaceSumFile(ctx context.Context, modFiles map[span.URI]struct{}, fs source.FileSource) ([]byte, error) {
2457 allSums := map[module.Version][]string{}
2458 for modURI := range modFiles {
2459 // TODO(rfindley): factor out this pattern into a uripath package.
2460 sumURI := span.URIFromPath(filepath.Join(filepath.Dir(modURI.Filename()), "go.sum"))
2461 fh, err := fs.GetFile(ctx, sumURI)
2462 if err != nil {
2463 continue
2464 }
2465 data, err := fh.Read()
2466 if os.IsNotExist(err) {
2467 continue
2468 }
2469 if err != nil {
2470 return nil, fmt.Errorf("reading go sum: %w", err)
2471 }
2472 if err := readGoSum(allSums, sumURI.Filename(), data); err != nil {
2473 return nil, err
2474 }
2475 }
2476 // This logic to write go.sum is copied (with minor modifications) from
2477 // https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/modfetch/fetch.go;l=631;drc=762eda346a9f4062feaa8a9fc0d17d72b11586f0
2478 var mods []module.Version
2479 for m := range allSums {
2480 mods = append(mods, m)
2481 }
2482 module.Sort(mods)
2483
2484 var buf bytes.Buffer
2485 for _, m := range mods {
2486 list := allSums[m]
2487 sort.Strings(list)
2488 // Note (rfindley): here we add all sum lines without verification, because
2489 // the assumption is that if they come from a go.sum file, they are
2490 // trusted.
2491 for _, h := range list {
2492 fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h)
2493 }
2494 }
2495 return buf.Bytes(), nil
2496}
2497
2498// readGoSum is copied (with minor modifications) from
2499// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/modfetch/fetch.go;l=398;drc=762eda346a9f4062feaa8a9fc0d17d72b11586f0
2500func readGoSum(dst map[module.Version][]string, file string, data []byte) error {
2501 lineno := 0
2502 for len(data) > 0 {
2503 var line []byte
2504 lineno++
2505 i := bytes.IndexByte(data, '\n')
2506 if i < 0 {
2507 line, data = data, nil
2508 } else {
2509 line, data = data[:i], data[i+1:]
2510 }
2511 f := strings.Fields(string(line))
2512 if len(f) == 0 {
2513 // blank line; skip it
2514 continue
2515 }
2516 if len(f) != 3 {
2517 return fmt.Errorf("malformed go.sum:\n%s:%d: wrong number of fields %v", file, lineno, len(f))
2518 }
2519 mod := module.Version{Path: f[0], Version: f[1]}
2520 dst[mod] = append(dst[mod], f[2])
2521 }
2522 return nil
2523}