internal/lsp: export and move some objects related to metadata
In preparation for moving metadata related functionality to a separate
package, move around some types and export some symbols. This is purely
to reduce diffs in subsequent CLs, and contains no functional changes.
Change-Id: I24d4fbd71df78e4d7a84f6598cdf820b41d542a2
Reviewed-on: https://go-review.googlesource.com/c/tools/+/340729
Trust: Robert Findley <rfindley@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go
index faf0306..baaad5a 100644
--- a/internal/lsp/cache/analysis.go
+++ b/internal/lsp/cache/analysis.go
@@ -30,7 +30,7 @@
if !a.IsEnabled(s.view) {
continue
}
- ah, err := s.actionHandle(ctx, packageID(id), a.Analyzer)
+ ah, err := s.actionHandle(ctx, PackageID(id), a.Analyzer)
if err != nil {
return nil, err
}
@@ -84,7 +84,7 @@
typ reflect.Type
}
-func (s *snapshot) actionHandle(ctx context.Context, id packageID, a *analysis.Analyzer) (*actionHandle, error) {
+func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.Analyzer) (*actionHandle, error) {
ph, err := s.buildPackageHandle(ctx, id, source.ParseFull)
if err != nil {
return nil, err
@@ -121,13 +121,13 @@
// An analysis that consumes/produces facts
// must run on the package's dependencies too.
if len(a.FactTypes) > 0 {
- importIDs := make([]string, 0, len(ph.m.deps))
- for _, importID := range ph.m.deps {
+ importIDs := make([]string, 0, len(ph.m.Deps))
+ for _, importID := range ph.m.Deps {
importIDs = append(importIDs, string(importID))
}
sort.Strings(importIDs) // for determinism
for _, importID := range importIDs {
- depActionHandle, err := s.actionHandle(ctx, packageID(importID), a)
+ depActionHandle, err := s.actionHandle(ctx, PackageID(importID), a)
if err != nil {
return nil, err
}
diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go
index 7221874..be03e63 100644
--- a/internal/lsp/cache/cache.go
+++ b/internal/lsp/cache/cache.go
@@ -198,7 +198,7 @@
func (c *Cache) MemStats() map[reflect.Type]int { return c.store.Stats() }
type packageStat struct {
- id packageID
+ id PackageID
mode source.ParseMode
file int64
ast int64
@@ -224,7 +224,7 @@
typInfoCost = typesInfoCost(v.pkg.typesInfo)
}
stat := packageStat{
- id: v.pkg.m.id,
+ id: v.pkg.m.ID,
mode: v.pkg.mode,
types: typsCost,
typesInfo: typInfoCost,
diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go
index 287451f..2eb2d1e 100644
--- a/internal/lsp/cache/check.go
+++ b/internal/lsp/cache/check.go
@@ -42,7 +42,7 @@
mode source.ParseMode
// m is the metadata associated with the package.
- m *knownMetadata
+ m *KnownMetadata
// key is the hashed key for the package.
key packageHandleKey
@@ -50,7 +50,7 @@
func (ph *packageHandle) packageKey() packageKey {
return packageKey{
- id: ph.m.id,
+ id: ph.m.ID,
mode: ph.mode,
}
}
@@ -85,7 +85,7 @@
// It assumes that the given ID already has metadata available, so it does not
// attempt to reload missing or invalid metadata. The caller must reload
// metadata if needed.
-func (s *snapshot) buildPackageHandle(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, error) {
+func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) {
if ph := s.getPackage(id, mode); ph != nil {
return ph, nil
}
@@ -121,7 +121,7 @@
}
data := &packageData{}
- data.pkg, data.err = typeCheck(ctx, snapshot, m.metadata, mode, deps)
+ data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
// Make sure that the workers above have finished before we return,
// especially in case of cancellation.
wg.Wait()
@@ -140,16 +140,16 @@
}
// buildKey computes the key for a given packageHandle.
-func (s *snapshot) buildKey(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, map[packagePath]*packageHandle, error) {
+func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, map[PackagePath]*packageHandle, error) {
m := s.getMetadata(id)
if m == nil {
return nil, nil, errors.Errorf("no metadata for %s", id)
}
- goFiles, err := s.parseGoHandles(ctx, m.goFiles, mode)
+ goFiles, err := s.parseGoHandles(ctx, m.GoFiles, mode)
if err != nil {
return nil, nil, err
}
- compiledGoFiles, err := s.parseGoHandles(ctx, m.compiledGoFiles, mode)
+ compiledGoFiles, err := s.parseGoHandles(ctx, m.CompiledGoFiles, mode)
if err != nil {
return nil, nil, err
}
@@ -160,12 +160,12 @@
mode: mode,
}
// Make sure all of the depList are sorted.
- depList := append([]packageID{}, m.deps...)
+ depList := append([]PackageID{}, m.Deps...)
sort.Slice(depList, func(i, j int) bool {
return depList[i] < depList[j]
})
- deps := make(map[packagePath]*packageHandle)
+ deps := make(map[PackagePath]*packageHandle)
// Begin computing the key by getting the depKeys for all dependencies.
var depKeys []packageHandleKey
@@ -174,7 +174,7 @@
// Don't use invalid metadata for dependencies if the top-level
// metadata is valid. We only load top-level packages, so if the
// top-level is valid, all of its dependencies should be as well.
- if err != nil || m.valid && !depHandle.m.valid {
+ if err != nil || m.Valid && !depHandle.m.Valid {
if err != nil {
event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
} else {
@@ -189,15 +189,15 @@
depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID)))
continue
}
- deps[depHandle.m.pkgPath] = depHandle
+ deps[depHandle.m.PkgPath] = depHandle
depKeys = append(depKeys, depHandle.key)
}
experimentalKey := s.View().Options().ExperimentalPackageCacheKey
- ph.key = checkPackageKey(ph.m.id, compiledGoFiles, m.config, depKeys, mode, experimentalKey)
+ ph.key = checkPackageKey(ph.m.ID, compiledGoFiles, m.Config, depKeys, mode, experimentalKey)
return ph, deps, nil
}
-func (s *snapshot) workspaceParseMode(id packageID) source.ParseMode {
+func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode {
s.mu.Lock()
defer s.mu.Unlock()
_, ws := s.workspacePackages[id]
@@ -213,7 +213,7 @@
return source.ParseExported
}
-func checkPackageKey(id packageID, pghs []*parseGoHandle, cfg *packages.Config, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
+func checkPackageKey(id PackageID, pghs []*parseGoHandle, cfg *packages.Config, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
b := bytes.NewBuffer(nil)
b.WriteString(string(id))
if !experimentalKey {
@@ -277,17 +277,17 @@
}
func (ph *packageHandle) CompiledGoFiles() []span.URI {
- return ph.m.compiledGoFiles
+ return ph.m.CompiledGoFiles
}
func (ph *packageHandle) ID() string {
- return string(ph.m.id)
+ return string(ph.m.ID)
}
func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) {
v := ph.handle.Cached(g)
if v == nil {
- return nil, errors.Errorf("no cached type information for %s", ph.m.pkgPath)
+ return nil, errors.Errorf("no cached type information for %s", ph.m.PkgPath)
}
data := v.(*packageData)
return data.pkg, data.err
@@ -305,7 +305,7 @@
return pghs, nil
}
-func typeCheck(ctx context.Context, snapshot *snapshot, m *metadata, mode source.ParseMode, deps map[packagePath]*packageHandle) (*pkg, error) {
+func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) {
var filter *unexportedFilter
if mode == source.ParseExported {
filter = &unexportedFilter{uses: map[string]bool{}}
@@ -321,7 +321,7 @@
// time keeping those names.
missing, unexpected := filter.ProcessErrors(pkg.typeErrors)
if len(unexpected) == 0 && len(missing) != 0 {
- event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.id)))
+ event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.ID)))
pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter)
if err != nil {
return nil, err
@@ -329,7 +329,7 @@
missing, unexpected = filter.ProcessErrors(pkg.typeErrors)
}
if len(unexpected) != 0 || len(missing) != 0 {
- event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.id)))
+ event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.ID)))
pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil)
if err != nil {
return nil, err
@@ -338,13 +338,13 @@
}
// If this is a replaced module in the workspace, the version is
// meaningless, and we don't want clients to access it.
- if m.module != nil {
- version := m.module.Version
+ if m.Module != nil {
+ version := m.Module.Version
if source.IsWorkspaceModuleVersion(version) {
version = ""
}
pkg.version = &module.Version{
- Path: m.module.Path,
+ Path: m.Module.Path,
Version: version,
}
}
@@ -354,7 +354,7 @@
return pkg, nil
}
- for _, e := range m.errors {
+ for _, e := range m.Errors {
diags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
if err != nil {
event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(pkg.ID()))
@@ -420,15 +420,15 @@
return pkg, nil
}
-func doTypeCheck(ctx context.Context, snapshot *snapshot, m *metadata, mode source.ParseMode, deps map[packagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
- ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.id)))
+func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
+ ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID)))
defer done()
pkg := &pkg{
m: m,
mode: mode,
- imports: make(map[packagePath]*pkg),
- types: types.NewPackage(string(m.pkgPath), string(m.name)),
+ imports: make(map[PackagePath]*pkg),
+ types: types.NewPackage(string(m.PkgPath), string(m.Name)),
typesInfo: &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
@@ -437,11 +437,11 @@
Selections: make(map[*ast.SelectorExpr]*types.Selection),
Scopes: make(map[ast.Node]*types.Scope),
},
- typesSizes: m.typesSizes,
+ typesSizes: m.TypesSizes,
}
typeparams.InitInferred(pkg.typesInfo)
- for _, gf := range pkg.m.goFiles {
+ for _, gf := range pkg.m.GoFiles {
// In the presence of line directives, we may need to report errors in
// non-compiled Go files, so we need to register them on the package.
// However, we only need to really parse them in ParseFull mode, when
@@ -466,18 +466,18 @@
}
// Use the default type information for the unsafe package.
- if m.pkgPath == "unsafe" {
+ if m.PkgPath == "unsafe" {
// Don't type check Unsafe: it's unnecessary, and doing so exposes a data
// race to Unsafe.completed.
pkg.types = types.Unsafe
return pkg, nil
}
- if len(m.compiledGoFiles) == 0 {
+ if len(m.CompiledGoFiles) == 0 {
// No files most likely means go/packages failed. Try to attach error
// messages to the file as much as possible.
var found bool
- for _, e := range m.errors {
+ for _, e := range m.Errors {
srcDiags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
if err != nil {
continue
@@ -488,7 +488,7 @@
if found {
return pkg, nil
}
- return nil, errors.Errorf("no parsed files for package %s, expected: %v, errors: %v", pkg.m.pkgPath, pkg.compiledGoFiles, m.errors)
+ return nil, errors.Errorf("no parsed files for package %s, expected: %v, errors: %v", pkg.m.PkgPath, pkg.compiledGoFiles, m.Errors)
}
cfg := &types.Config{
@@ -504,14 +504,14 @@
if dep == nil {
return nil, snapshot.missingPkgError(ctx, pkgPath)
}
- if !source.IsValidImport(string(m.pkgPath), string(dep.m.pkgPath)) {
+ if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) {
return nil, errors.Errorf("invalid use of internal package %s", pkgPath)
}
depPkg, err := dep.check(ctx, snapshot)
if err != nil {
return nil, err
}
- pkg.imports[depPkg.m.pkgPath] = depPkg
+ pkg.imports[depPkg.m.PkgPath] = depPkg
return depPkg.types, nil
}),
}
@@ -544,7 +544,7 @@
}
func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
- for _, cgf := range pkg.m.compiledGoFiles {
+ for _, cgf := range pkg.m.CompiledGoFiles {
fh, err := snapshot.GetFile(ctx, cgf)
if err != nil {
return err
@@ -605,7 +605,7 @@
}
directImporter := depsError.ImportStack[directImporterIdx]
- if s.isWorkspacePackage(packageID(directImporter)) {
+ if s.isWorkspacePackage(PackageID(directImporter)) {
continue
}
relevantErrors = append(relevantErrors, depsError)
@@ -640,7 +640,7 @@
for _, depErr := range relevantErrors {
for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
item := depErr.ImportStack[i]
- if s.isWorkspacePackage(packageID(item)) {
+ if s.isWorkspacePackage(PackageID(item)) {
break
}
@@ -686,11 +686,11 @@
for _, depErr := range relevantErrors {
for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
item := depErr.ImportStack[i]
- m := s.getMetadata(packageID(item))
- if m == nil || m.module == nil {
+ m := s.getMetadata(PackageID(item))
+ if m == nil || m.Module == nil {
continue
}
- modVer := module.Version{Path: m.module.Path, Version: m.module.Version}
+ modVer := module.Version{Path: m.Module.Path, Version: m.Module.Version}
reference := findModuleReference(pm.File, modVer)
if reference == nil {
continue
@@ -809,14 +809,14 @@
// resolveImportPath resolves an import path in pkg to a package from deps.
// It should produce the same results as resolveImportPath:
// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;drc=641918ee09cb44d282a30ee8b66f99a0b63eaef9;l=990.
-func resolveImportPath(importPath string, pkg *pkg, deps map[packagePath]*packageHandle) *packageHandle {
- if dep := deps[packagePath(importPath)]; dep != nil {
+func resolveImportPath(importPath string, pkg *pkg, deps map[PackagePath]*packageHandle) *packageHandle {
+ if dep := deps[PackagePath(importPath)]; dep != nil {
return dep
}
// We may be in GOPATH mode, in which case we need to check vendor dirs.
searchDir := path.Dir(pkg.PkgPath())
for {
- vdir := packagePath(path.Join(searchDir, "vendor", importPath))
+ vdir := PackagePath(path.Join(searchDir, "vendor", importPath))
if vdep := deps[vdir]; vdep != nil {
return vdep
}
diff --git a/internal/lsp/cache/errors.go b/internal/lsp/cache/errors.go
index b866646..3f58d67 100644
--- a/internal/lsp/cache/errors.go
+++ b/internal/lsp/cache/errors.go
@@ -41,7 +41,7 @@
var spn span.Span
if e.Pos == "" {
- spn = parseGoListError(e.Msg, pkg.m.config.Dir)
+ spn = parseGoListError(e.Msg, pkg.m.Config.Dir)
// We may not have been able to parse a valid span. Apply the errors to all files.
if _, err := spanToRange(pkg, spn); err != nil {
var diags []*source.Diagnostic
@@ -56,7 +56,7 @@
return diags, nil
}
} else {
- spn = span.ParseInDir(e.Pos, pkg.m.config.Dir)
+ spn = span.ParseInDir(e.Pos, pkg.m.Config.Dir)
}
rng, err := spanToRange(pkg, spn)
diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go
index 5c027e7..2886c86 100644
--- a/internal/lsp/cache/load.go
+++ b/internal/lsp/cache/load.go
@@ -8,7 +8,6 @@
"context"
"crypto/sha256"
"fmt"
- "go/types"
"io/ioutil"
"os"
"path/filepath"
@@ -28,40 +27,6 @@
errors "golang.org/x/xerrors"
)
-// metadata holds package metadata extracted from a call to packages.Load.
-type metadata struct {
- id packageID
- pkgPath packagePath
- name packageName
- goFiles []span.URI
- compiledGoFiles []span.URI
- forTest packagePath
- typesSizes types.Sizes
- errors []packages.Error
- deps []packageID
- missingDeps map[packagePath]struct{}
- module *packages.Module
- depsErrors []*packagesinternal.PackageError
-
- // config is the *packages.Config associated with the loaded package.
- config *packages.Config
-
- // isIntermediateTestVariant reports whether the given package is an
- // intermediate test variant, e.g.
- // "golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test]".
- isIntermediateTestVariant bool
-}
-
-// Name implements the source.Metadata interface.
-func (m *metadata) Name() string {
- return string(m.name)
-}
-
-// PkgPath implements the source.Metadata interface.
-func (m *metadata) PkgPath() string {
- return string(m.pkgPath)
-}
-
// load calls packages.Load for the given scopes, updating package metadata,
// import graph, and mapped files with the result.
func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interface{}) (err error) {
@@ -80,7 +45,7 @@
s.clearShouldLoad(scope)
}()
switch scope := scope.(type) {
- case packagePath:
+ case PackagePath:
if source.IsCommandLineArguments(string(scope)) {
panic("attempted to load command-line-arguments")
}
@@ -205,12 +170,12 @@
}
// Set the metadata for this package.
s.mu.Lock()
- m, err := s.setMetadataLocked(ctx, packagePath(pkg.PkgPath), pkg, cfg, query, map[packageID]struct{}{})
+ m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, map[PackageID]struct{}{})
s.mu.Unlock()
if err != nil {
return err
}
- if _, err := s.buildPackageHandle(ctx, m.id, s.workspaceParseMode(m.id)); err != nil {
+ if _, err := s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)); err != nil {
return err
}
}
@@ -403,25 +368,25 @@
// setMetadataLocked extracts metadata from pkg and records it in s. It
// recurses through pkg.Imports to ensure that metadata exists for all
// dependencies.
-func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath packagePath, pkg *packages.Package, cfg *packages.Config, query []string, seen map[packageID]struct{}) (*metadata, error) {
- id := packageID(pkg.ID)
+func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, seen map[PackageID]struct{}) (*Metadata, error) {
+ id := PackageID(pkg.ID)
if source.IsCommandLineArguments(pkg.ID) {
suffix := ":" + strings.Join(query, ",")
- id = packageID(string(id) + suffix)
- pkgPath = packagePath(string(pkgPath) + suffix)
+ id = PackageID(string(id) + suffix)
+ pkgPath = PackagePath(string(pkgPath) + suffix)
}
if _, ok := seen[id]; ok {
return nil, errors.Errorf("import cycle detected: %q", id)
}
// Recreate the metadata rather than reusing it to avoid locking.
- m := &metadata{
- id: id,
- pkgPath: pkgPath,
- name: packageName(pkg.Name),
- forTest: packagePath(packagesinternal.GetForTest(pkg)),
- typesSizes: pkg.TypesSizes,
- config: cfg,
- module: pkg.Module,
+ m := &Metadata{
+ ID: id,
+ PkgPath: pkgPath,
+ Name: PackageName(pkg.Name),
+ ForTest: PackagePath(packagesinternal.GetForTest(pkg)),
+ TypesSizes: pkg.TypesSizes,
+ Config: cfg,
+ Module: pkg.Module,
depsErrors: packagesinternal.GetDepsErrors(pkg),
}
@@ -432,41 +397,41 @@
if strings.Contains(err.Msg, "expected '") {
continue
}
- m.errors = append(m.errors, err)
+ m.Errors = append(m.Errors, err)
}
uris := map[span.URI]struct{}{}
for _, filename := range pkg.CompiledGoFiles {
uri := span.URIFromPath(filename)
- m.compiledGoFiles = append(m.compiledGoFiles, uri)
+ m.CompiledGoFiles = append(m.CompiledGoFiles, uri)
uris[uri] = struct{}{}
}
for _, filename := range pkg.GoFiles {
uri := span.URIFromPath(filename)
- m.goFiles = append(m.goFiles, uri)
+ m.GoFiles = append(m.GoFiles, uri)
uris[uri] = struct{}{}
}
s.updateIDForURIsLocked(id, uris)
// TODO(rstambler): is this still necessary?
- copied := map[packageID]struct{}{
+ copied := map[PackageID]struct{}{
id: {},
}
for k, v := range seen {
copied[k] = v
}
for importPath, importPkg := range pkg.Imports {
- importPkgPath := packagePath(importPath)
- importID := packageID(importPkg.ID)
+ importPkgPath := PackagePath(importPath)
+ importID := PackageID(importPkg.ID)
- m.deps = append(m.deps, importID)
+ m.Deps = append(m.Deps, importID)
// Don't remember any imports with significant errors.
if importPkgPath != "unsafe" && len(importPkg.CompiledGoFiles) == 0 {
- if m.missingDeps == nil {
- m.missingDeps = make(map[packagePath]struct{})
+ if m.MissingDeps == nil {
+ m.MissingDeps = make(map[PackagePath]struct{})
}
- m.missingDeps[importPkgPath] = struct{}{}
+ m.MissingDeps[importPkgPath] = struct{}{}
continue
}
if s.noValidMetadataForIDLocked(importID) {
@@ -479,25 +444,25 @@
// Add the metadata to the cache.
// If we've already set the metadata for this snapshot, reuse it.
- if original, ok := s.metadata[m.id]; ok && original.valid {
+ if original, ok := s.metadata[m.ID]; ok && original.Valid {
// Since we've just reloaded, clear out shouldLoad.
- original.shouldLoad = false
- m = original.metadata
+ original.ShouldLoad = false
+ m = original.Metadata
} else {
- s.metadata[m.id] = &knownMetadata{
- metadata: m,
- valid: true,
+ s.metadata[m.ID] = &KnownMetadata{
+ Metadata: m,
+ Valid: true,
}
// Invalidate any packages we may have associated with this metadata.
for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} {
- key := packageKey{mode, m.id}
+ key := packageKey{mode, m.ID}
delete(s.packages, key)
}
}
// Set the workspace packages. If any of the package's files belong to the
// view, then the package may be a workspace package.
- for _, uri := range append(m.compiledGoFiles, m.goFiles...) {
+ for _, uri := range append(m.CompiledGoFiles, m.GoFiles...) {
if !s.view.contains(uri) {
continue
}
@@ -509,16 +474,16 @@
}
switch {
- case m.forTest == "":
+ case m.ForTest == "":
// A normal package.
- s.workspacePackages[m.id] = pkgPath
- case m.forTest == m.pkgPath, m.forTest+"_test" == m.pkgPath:
+ s.workspacePackages[m.ID] = pkgPath
+ case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath:
// The test variant of some workspace package or its x_test.
// To load it, we need to load the non-test variant with -test.
- s.workspacePackages[m.id] = m.forTest
+ s.workspacePackages[m.ID] = m.ForTest
default:
// A test variant of some intermediate package. We don't care about it.
- m.isIntermediateTestVariant = true
+ m.IsIntermediateTestVariant = true
}
}
return m, nil
diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go
new file mode 100644
index 0000000..bef7bf8
--- /dev/null
+++ b/internal/lsp/cache/metadata.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/go/packages"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/tools/internal/span"
+)
+
+// Declare explicit types for package paths, names, and IDs to ensure that we
+// never use an ID where a path belongs, and vice versa. If we confused these,
+// it would result in confusing errors because package IDs often look like
+// package paths.
+type (
+ PackageID string
+ PackagePath string
+ PackageName string
+)
+
+// Metadata holds package metadata extracted from a call to packages.Load.
+type Metadata struct {
+ ID PackageID
+ PkgPath PackagePath
+ Name PackageName
+ GoFiles []span.URI
+ CompiledGoFiles []span.URI
+ ForTest PackagePath
+ TypesSizes types.Sizes
+ Errors []packages.Error
+ Deps []PackageID
+ MissingDeps map[PackagePath]struct{}
+ Module *packages.Module
+ depsErrors []*packagesinternal.PackageError
+
+ // Config is the *packages.Config associated with the loaded package.
+ Config *packages.Config
+
+ // IsIntermediateTestVariant reports whether the given package is an
+ // intermediate test variant, e.g.
+ // "golang.org/x/tools/internal/lsp/cache [golang.org/x/tools/internal/lsp/source.test]".
+ IsIntermediateTestVariant bool
+}
+
+// PackageName implements the source.Metadata interface.
+func (m *Metadata) PackageName() string {
+ return string(m.Name)
+}
+
+// PackagePath implements the source.Metadata interface.
+func (m *Metadata) PackagePath() string {
+ return string(m.PkgPath)
+}
+
+// KnownMetadata is a wrapper around metadata that tracks its validity.
+type KnownMetadata struct {
+ *Metadata
+
+ // Valid is true if the given metadata is valid.
+ // Invalid metadata can still be used if a metadata reload fails.
+ Valid bool
+
+ // ShouldLoad is true if the given metadata should be reloaded.
+ ShouldLoad bool
+}
diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go
index e2c7244..742f48f 100644
--- a/internal/lsp/cache/parse.go
+++ b/internal/lsp/cache/parse.go
@@ -107,9 +107,9 @@
func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos token.Pos) (*astCacheData, error) {
pkg := spkg.(*pkg)
- pkgHandle := s.getPackage(pkg.m.id, pkg.mode)
+ pkgHandle := s.getPackage(pkg.m.ID, pkg.mode)
if pkgHandle == nil {
- return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.id)
+ return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.ID)
}
tok := s.FileSet().File(pos)
if tok == nil {
diff --git a/internal/lsp/cache/pkg.go b/internal/lsp/cache/pkg.go
index 5a87a14..88ea886 100644
--- a/internal/lsp/cache/pkg.go
+++ b/internal/lsp/cache/pkg.go
@@ -17,12 +17,12 @@
// pkg contains the type information needed by the source package.
type pkg struct {
- m *metadata
+ m *Metadata
mode source.ParseMode
goFiles []*source.ParsedGoFile
compiledGoFiles []*source.ParsedGoFile
diagnostics []*source.Diagnostic
- imports map[packagePath]*pkg
+ imports map[PackagePath]*pkg
version *module.Version
parseErrors []scanner.ErrorList
typeErrors []types.Error
@@ -32,16 +32,6 @@
hasFixedFiles bool
}
-// Declare explicit types for package paths, names, and IDs to ensure that we
-// never use an ID where a path belongs, and vice versa. If we confused these,
-// it would result in confusing errors because package IDs often look like
-// package paths.
-type (
- packageID string
- packagePath string
- packageName string
-)
-
// Declare explicit types for files and directories to distinguish between the two.
type (
fileURI span.URI
@@ -50,15 +40,15 @@
)
func (p *pkg) ID() string {
- return string(p.m.id)
+ return string(p.m.ID)
}
func (p *pkg) Name() string {
- return string(p.m.name)
+ return string(p.m.Name)
}
func (p *pkg) PkgPath() string {
- return string(p.m.pkgPath)
+ return string(p.m.PkgPath)
}
func (p *pkg) ParseMode() source.ParseMode {
@@ -80,7 +70,7 @@
return gf, nil
}
}
- return nil, errors.Errorf("no parsed file for %s in %v", uri, p.m.id)
+ return nil, errors.Errorf("no parsed file for %s in %v", uri, p.m.ID)
}
func (p *pkg) GetSyntax() []*ast.File {
@@ -108,11 +98,11 @@
}
func (p *pkg) ForTest() string {
- return string(p.m.forTest)
+ return string(p.m.ForTest)
}
func (p *pkg) GetImport(pkgPath string) (source.Package, error) {
- if imp := p.imports[packagePath(pkgPath)]; imp != nil {
+ if imp := p.imports[PackagePath(pkgPath)]; imp != nil {
return imp, nil
}
// Don't return a nil pointer because that still satisfies the interface.
@@ -124,14 +114,14 @@
// imports via the *types.Package. Only use metadata if p.types is nil.
if p.types == nil {
var md []string
- for i := range p.m.missingDeps {
+ for i := range p.m.MissingDeps {
md = append(md, string(i))
}
return md
}
var md []string
for _, pkg := range p.types.Imports() {
- if _, ok := p.m.missingDeps[packagePath(pkg.Path())]; ok {
+ if _, ok := p.m.MissingDeps[PackagePath(pkg.Path())]; ok {
md = append(md, pkg.Path())
}
}
@@ -151,7 +141,7 @@
}
func (p *pkg) HasListOrParseErrors() bool {
- return len(p.m.errors) != 0 || len(p.parseErrors) != 0
+ return len(p.m.Errors) != 0 || len(p.parseErrors) != 0
}
func (p *pkg) HasTypeErrors() bool {
diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go
index 487270b..bcb799a 100644
--- a/internal/lsp/cache/session.go
+++ b/internal/lsp/cache/session.go
@@ -230,14 +230,14 @@
initializeOnce: &sync.Once{},
generation: s.cache.store.Generation(generationName(v, 0)),
packages: make(map[packageKey]*packageHandle),
- ids: make(map[span.URI][]packageID),
- metadata: make(map[packageID]*knownMetadata),
+ ids: make(map[span.URI][]PackageID),
+ metadata: make(map[PackageID]*KnownMetadata),
files: make(map[span.URI]source.VersionedFileHandle),
goFiles: make(map[parseKey]*parseGoHandle),
symbols: make(map[span.URI]*symbolHandle),
- importedBy: make(map[packageID][]packageID),
+ importedBy: make(map[PackageID][]PackageID),
actions: make(map[actionKey]*actionHandle),
- workspacePackages: make(map[packageID]packagePath),
+ workspacePackages: make(map[PackageID]PackagePath),
unloadableFiles: make(map[span.URI]struct{}),
parseModHandles: make(map[span.URI]*parseModHandle),
modTidyHandles: make(map[span.URI]*modTidyHandle),
diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go
index dc235d6..d5f230a 100644
--- a/internal/lsp/cache/snapshot.go
+++ b/internal/lsp/cache/snapshot.go
@@ -69,14 +69,14 @@
// ids maps file URIs to package IDs.
// It may be invalidated on calls to go/packages.
- ids map[span.URI][]packageID
+ ids map[span.URI][]PackageID
// metadata maps file IDs to their associated metadata.
// It may invalidated on calls to go/packages.
- metadata map[packageID]*knownMetadata
+ metadata map[PackageID]*KnownMetadata
// importedBy maps package IDs to the list of packages that import them.
- importedBy map[packageID][]packageID
+ importedBy map[PackageID][]PackageID
// files maps file URIs to their corresponding FileHandles.
// It may invalidated when a file's content changes.
@@ -97,7 +97,7 @@
// workspacePackages contains the workspace's packages, which are loaded
// when the view is created.
- workspacePackages map[packageID]packagePath
+ workspacePackages map[PackageID]PackagePath
// unloadableFiles keeps track of files that we've failed to load.
unloadableFiles map[span.URI]struct{}
@@ -126,7 +126,7 @@
type packageKey struct {
mode source.ParseMode
- id packageID
+ id PackageID
}
type actionKey struct {
@@ -134,18 +134,6 @@
analyzer *analysis.Analyzer
}
-// knownMetadata is a wrapper around metadata that tracks its validity.
-type knownMetadata struct {
- *metadata
-
- // valid is true if the given metadata is valid.
- // Invalid metadata can still be used if a metadata reload fails.
- valid bool
-
- // shouldLoad is true if the given metadata should be reloaded.
- shouldLoad bool
-}
-
func (s *snapshot) ID() uint64 {
return s.id
}
@@ -535,7 +523,7 @@
for _, id := range knownIDs {
// Filter out any intermediate test variants. We typically aren't
// interested in these packages for file= style queries.
- if m := s.getMetadata(id); m != nil && m.isIntermediateTestVariant {
+ if m := s.getMetadata(id); m != nil && m.IsIntermediateTestVariant {
continue
}
var parseModes []source.ParseMode
@@ -563,7 +551,7 @@
return phs, nil
}
-func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]packageID, error) {
+func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) {
knownIDs := s.getIDsForURI(uri)
reload := len(knownIDs) == 0
for _, id := range knownIDs {
@@ -605,11 +593,11 @@
if err := s.awaitLoaded(ctx); err != nil {
return nil, err
}
- ids := make(map[packageID]struct{})
- s.transitiveReverseDependencies(packageID(id), ids)
+ ids := make(map[PackageID]struct{})
+ s.transitiveReverseDependencies(PackageID(id), ids)
// Make sure to delete the original package ID from the map.
- delete(ids, packageID(id))
+ delete(ids, PackageID(id))
var pkgs []source.Package
for id := range ids {
@@ -622,7 +610,7 @@
return pkgs, nil
}
-func (s *snapshot) checkedPackage(ctx context.Context, id packageID, mode source.ParseMode) (*pkg, error) {
+func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source.ParseMode) (*pkg, error) {
ph, err := s.buildPackageHandle(ctx, id, mode)
if err != nil {
return nil, err
@@ -632,13 +620,13 @@
// transitiveReverseDependencies populates the ids map with package IDs
// belonging to the provided package and its transitive reverse dependencies.
-func (s *snapshot) transitiveReverseDependencies(id packageID, ids map[packageID]struct{}) {
+func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID]struct{}) {
if _, ok := ids[id]; ok {
return
}
m := s.getMetadata(id)
// Only use invalid metadata if we support it.
- if m == nil || !(m.valid || s.useInvalidMetadata()) {
+ if m == nil || !(m.Valid || s.useInvalidMetadata()) {
return
}
ids[id] = struct{}{}
@@ -682,13 +670,13 @@
return s.modTidyHandles[uri]
}
-func (s *snapshot) getImportedBy(id packageID) []packageID {
+func (s *snapshot) getImportedBy(id PackageID) []PackageID {
s.mu.Lock()
defer s.mu.Unlock()
return s.getImportedByLocked(id)
}
-func (s *snapshot) getImportedByLocked(id packageID) []packageID {
+func (s *snapshot) getImportedByLocked(id PackageID) []PackageID {
// If we haven't rebuilt the import graph since creating the snapshot.
if len(s.importedBy) == 0 {
s.rebuildImportGraph()
@@ -701,13 +689,13 @@
defer s.mu.Unlock()
// Completely invalidate the original map.
- s.importedBy = make(map[packageID][]packageID)
+ s.importedBy = make(map[PackageID][]PackageID)
s.rebuildImportGraph()
}
func (s *snapshot) rebuildImportGraph() {
for id, m := range s.metadata {
- for _, importID := range m.deps {
+ for _, importID := range m.Deps {
s.importedBy[importID] = append(s.importedBy[importID], id)
}
}
@@ -726,7 +714,7 @@
return ph
}
-func (s *snapshot) workspacePackageIDs() (ids []packageID) {
+func (s *snapshot) workspacePackageIDs() (ids []PackageID) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -736,7 +724,7 @@
return ids
}
-func (s *snapshot) activePackageIDs() (ids []packageID) {
+func (s *snapshot) activePackageIDs() (ids []PackageID) {
if s.view.Options().MemoryMode == source.ModeNormal {
return s.workspacePackageIDs()
}
@@ -744,7 +732,7 @@
s.mu.Lock()
defer s.mu.Unlock()
- seen := make(map[packageID]bool)
+ seen := make(map[PackageID]bool)
for id := range s.workspacePackages {
if s.isActiveLocked(id, seen) {
ids = append(ids, id)
@@ -753,9 +741,9 @@
return ids
}
-func (s *snapshot) isActiveLocked(id packageID, seen map[packageID]bool) (active bool) {
+func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active bool) {
if seen == nil {
- seen = make(map[packageID]bool)
+ seen = make(map[PackageID]bool)
}
if seen, ok := seen[id]; ok {
return seen
@@ -767,12 +755,12 @@
if !ok {
return false
}
- for _, cgf := range m.compiledGoFiles {
+ for _, cgf := range m.CompiledGoFiles {
if s.isOpenLocked(cgf) {
return true
}
}
- for _, dep := range m.deps {
+ for _, dep := range m.Deps {
if s.isActiveLocked(dep, seen) {
return true
}
@@ -780,7 +768,7 @@
return false
}
-func (s *snapshot) getWorkspacePkgPath(id packageID) packagePath {
+func (s *snapshot) getWorkspacePkgPath(id PackageID) PackagePath {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1073,7 +1061,7 @@
return match
}
-func (s *snapshot) getPackage(id packageID, mode source.ParseMode) *packageHandle {
+func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1104,7 +1092,8 @@
s.symbols[uri] = sh
return sh
}
-func (s *snapshot) getActionHandle(id packageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
+
+func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1125,7 +1114,7 @@
key := actionKey{
analyzer: ah.analyzer,
pkg: packageKey{
- id: ah.pkg.m.id,
+ id: ah.pkg.m.ID,
mode: ah.pkg.mode,
},
}
@@ -1136,14 +1125,14 @@
return ah
}
-func (s *snapshot) getIDsForURI(uri span.URI) []packageID {
+func (s *snapshot) getIDsForURI(uri span.URI) []PackageID {
s.mu.Lock()
defer s.mu.Unlock()
return s.ids[uri]
}
-func (s *snapshot) getMetadata(id packageID) *knownMetadata {
+func (s *snapshot) getMetadata(id PackageID) *KnownMetadata {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1155,15 +1144,15 @@
defer s.mu.Unlock()
switch scope := scope.(type) {
- case packagePath:
- var meta *knownMetadata
+ case PackagePath:
+ var meta *KnownMetadata
for _, m := range s.metadata {
- if m.pkgPath != scope {
+ if m.PkgPath != scope {
continue
}
meta = m
}
- if meta == nil || meta.shouldLoad {
+ if meta == nil || meta.ShouldLoad {
return true
}
return false
@@ -1175,7 +1164,7 @@
}
for _, id := range ids {
m, ok := s.metadata[id]
- if !ok || m.shouldLoad {
+ if !ok || m.ShouldLoad {
return true
}
}
@@ -1190,17 +1179,17 @@
defer s.mu.Unlock()
switch scope := scope.(type) {
- case packagePath:
- var meta *knownMetadata
+ case PackagePath:
+ var meta *KnownMetadata
for _, m := range s.metadata {
- if m.pkgPath == scope {
+ if m.PkgPath == scope {
meta = m
}
}
if meta == nil {
return
}
- meta.shouldLoad = false
+ meta.ShouldLoad = false
case fileURI:
uri := span.URI(scope)
ids := s.ids[uri]
@@ -1209,7 +1198,7 @@
}
for _, id := range ids {
if m, ok := s.metadata[id]; ok {
- m.shouldLoad = false
+ m.ShouldLoad = false
}
}
}
@@ -1223,7 +1212,7 @@
return true
}
for _, id := range ids {
- if m, ok := s.metadata[id]; ok && m.valid {
+ if m, ok := s.metadata[id]; ok && m.Valid {
return false
}
}
@@ -1232,15 +1221,15 @@
// noValidMetadataForID reports whether there is no valid metadata for the
// given ID.
-func (s *snapshot) noValidMetadataForID(id packageID) bool {
+func (s *snapshot) noValidMetadataForID(id PackageID) bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.noValidMetadataForIDLocked(id)
}
-func (s *snapshot) noValidMetadataForIDLocked(id packageID) bool {
+func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool {
m := s.metadata[id]
- return m == nil || !m.valid
+ return m == nil || !m.Valid
}
// updateIDForURIsLocked adds the given ID to the set of known IDs for the given URI.
@@ -1248,10 +1237,10 @@
// not "command-line-arguments" are preferred, so if a new ID comes in for a
// URI that previously only had "command-line-arguments", the new ID will
// replace the "command-line-arguments" ID.
-func (s *snapshot) updateIDForURIsLocked(id packageID, uris map[span.URI]struct{}) {
+func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{}) {
for uri := range uris {
// Collect the new set of IDs, preserving any valid existing IDs.
- newIDs := []packageID{id}
+ newIDs := []PackageID{id}
for _, existingID := range s.ids[uri] {
// Don't set duplicates of the same ID.
if existingID == id {
@@ -1265,7 +1254,7 @@
}
// If the metadata for an existing ID is invalid, and we are
// setting metadata for a new, valid ID--don't preserve the old ID.
- if m, ok := s.metadata[existingID]; !ok || !m.valid {
+ if m, ok := s.metadata[existingID]; !ok || !m.Valid {
continue
}
newIDs = append(newIDs, existingID)
@@ -1277,7 +1266,7 @@
}
}
-func (s *snapshot) isWorkspacePackage(id packageID) bool {
+func (s *snapshot) isWorkspacePackage(id PackageID) bool {
s.mu.Lock()
defer s.mu.Unlock()
@@ -1363,7 +1352,7 @@
return nil
}
for _, m := range s.metadata {
- if m.valid {
+ if m.Valid {
return nil
}
}
@@ -1487,9 +1476,9 @@
// See which of the workspace packages are missing metadata.
s.mu.Lock()
missingMetadata := len(s.workspacePackages) == 0 || len(s.metadata) == 0
- pkgPathSet := map[packagePath]struct{}{}
+ pkgPathSet := map[PackagePath]struct{}{}
for id, pkgPath := range s.workspacePackages {
- if m, ok := s.metadata[id]; ok && m.valid {
+ if m, ok := s.metadata[id]; ok && m.Valid {
continue
}
missingMetadata = true
@@ -1634,7 +1623,7 @@
// belonging to that workspace package.
for wsID := range s.workspacePackages {
if m, ok := s.metadata[wsID]; ok {
- for _, uri := range m.goFiles {
+ for _, uri := range m.GoFiles {
found := false
for _, id := range s.ids[uri] {
if id == wsID {
@@ -1686,15 +1675,15 @@
builtin: s.builtin,
initializeOnce: s.initializeOnce,
initializedErr: s.initializedErr,
- ids: make(map[span.URI][]packageID, len(s.ids)),
- importedBy: make(map[packageID][]packageID, len(s.importedBy)),
- metadata: make(map[packageID]*knownMetadata, len(s.metadata)),
+ ids: make(map[span.URI][]PackageID, len(s.ids)),
+ importedBy: make(map[PackageID][]PackageID, len(s.importedBy)),
+ metadata: make(map[PackageID]*KnownMetadata, len(s.metadata)),
packages: make(map[packageKey]*packageHandle, len(s.packages)),
actions: make(map[actionKey]*actionHandle, len(s.actions)),
files: make(map[span.URI]source.VersionedFileHandle, len(s.files)),
goFiles: make(map[parseKey]*parseGoHandle, len(s.goFiles)),
symbols: make(map[span.URI]*symbolHandle, len(s.symbols)),
- workspacePackages: make(map[packageID]packagePath, len(s.workspacePackages)),
+ workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)),
unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)),
parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)),
modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)),
@@ -1767,7 +1756,7 @@
// directIDs keeps track of package IDs that have directly changed.
// It maps id->invalidateMetadata.
- directIDs := map[packageID]bool{}
+ directIDs := map[PackageID]bool{}
// Invalidate all package metadata if the workspace module has changed.
if workspaceReload {
@@ -1776,7 +1765,7 @@
}
}
- changedPkgNames := map[packageID]struct{}{}
+ changedPkgNames := map[PackageID]struct{}{}
anyImportDeleted := false
for uri, change := range changes {
// Maybe reinitialize the view if we see a change in the vendor
@@ -1850,7 +1839,7 @@
// starting point to compare with.
if anyImportDeleted {
for id, metadata := range s.metadata {
- if len(metadata.errors) > 0 {
+ if len(metadata.Errors) > 0 {
directIDs[id] = true
}
}
@@ -1861,9 +1850,9 @@
// idsToInvalidate keeps track of transitive reverse dependencies.
// If an ID is present in the map, invalidate its types.
// If an ID's value is true, invalidate its metadata too.
- idsToInvalidate := map[packageID]bool{}
- var addRevDeps func(packageID, bool)
- addRevDeps = func(id packageID, invalidateMetadata bool) {
+ idsToInvalidate := map[PackageID]bool{}
+ var addRevDeps func(PackageID, bool)
+ addRevDeps = func(id PackageID, invalidateMetadata bool) {
current, seen := idsToInvalidate[id]
newInvalidateMetadata := current || invalidateMetadata
@@ -1903,7 +1892,7 @@
// If a file has been deleted, we must delete metadata all packages
// containing that file.
workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
- skipID := map[packageID]bool{}
+ skipID := map[PackageID]bool{}
for _, c := range changes {
if c.exists {
continue
@@ -1918,9 +1907,9 @@
// Collect all of the IDs that are reachable from the workspace packages.
// Any unreachable IDs will have their metadata deleted outright.
- reachableID := map[packageID]bool{}
- var addForwardDeps func(packageID)
- addForwardDeps = func(id packageID) {
+ reachableID := map[PackageID]bool{}
+ var addForwardDeps func(PackageID)
+ addForwardDeps = func(id PackageID) {
if reachableID[id] {
return
}
@@ -1929,7 +1918,7 @@
if !ok {
return
}
- for _, depID := range m.deps {
+ for _, depID := range m.Deps {
addForwardDeps(depID)
}
}
@@ -1940,7 +1929,7 @@
// Copy the URI to package ID mappings, skipping only those URIs whose
// metadata will be reloaded in future calls to load.
deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged
- idsInSnapshot := map[packageID]bool{} // track all known IDs
+ idsInSnapshot := map[PackageID]bool{} // track all known IDs
for uri, ids := range s.ids {
for _, id := range ids {
invalidateMetadata := idsToInvalidate[id]
@@ -1967,10 +1956,10 @@
}
invalidateMetadata := idsToInvalidate[k]
// Mark invalidated metadata rather than deleting it outright.
- result.metadata[k] = &knownMetadata{
- metadata: v.metadata,
- valid: v.valid && !invalidateMetadata,
- shouldLoad: v.shouldLoad || invalidateMetadata,
+ result.metadata[k] = &KnownMetadata{
+ Metadata: v.Metadata,
+ Valid: v.Valid && !invalidateMetadata,
+ ShouldLoad: v.ShouldLoad || invalidateMetadata,
}
}
@@ -1990,11 +1979,11 @@
// the package is gone and we should no longer try to load it.
if m := s.metadata[id]; m != nil {
hasFiles := false
- for _, uri := range s.metadata[id].goFiles {
+ for _, uri := range s.metadata[id].GoFiles {
// For internal tests, we need _test files, not just the normal
// ones. External tests only have _test files, but we can check
// them anyway.
- if m.forTest != "" && !strings.HasSuffix(string(uri), "_test.go") {
+ if m.ForTest != "" && !strings.HasSuffix(string(uri), "_test.go") {
continue
}
if _, ok := result.files[uri]; ok {
@@ -2034,7 +2023,7 @@
// If the snapshot's workspace mode has changed, the packages loaded using
// the previous mode are no longer relevant, so clear them out.
if workspaceModeChanged {
- result.workspacePackages = map[packageID]packagePath{}
+ result.workspacePackages = map[PackageID]PackagePath{}
}
// The snapshot may need to be reinitialized.
@@ -2050,7 +2039,7 @@
// seen this URI before, we guess based on files in the same directory. This
// is of course incorrect in build systems where packages are not organized by
// directory.
-func guessPackageIDsForURI(uri span.URI, known map[span.URI][]packageID) []packageID {
+func guessPackageIDsForURI(uri span.URI, known map[span.URI][]PackageID) []PackageID {
packages := known[uri]
if len(packages) > 0 {
// We've seen this file before.
@@ -2083,7 +2072,7 @@
}
// Aggregate all possibly relevant package IDs.
- var found []packageID
+ var found []PackageID
for knownURI, ids := range known {
knownDir := filepath.Dir(knownURI.Filename())
knownFI, err := getInfo(knownDir)
diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go
index 1989a62..b54210e 100644
--- a/internal/lsp/cache/view.go
+++ b/internal/lsp/cache/view.go
@@ -611,7 +611,7 @@
// If we're loading anything, ensure we also load builtin.
// TODO(rstambler): explain the rationale for this.
if len(scopes) > 0 {
- scopes = append(scopes, packagePath("builtin"))
+ scopes = append(scopes, PackagePath("builtin"))
}
err := s.load(ctx, firstAttempt, scopes...)
diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go
index e5acf49..19fca6e 100644
--- a/internal/lsp/source/view.go
+++ b/internal/lsp/source/view.go
@@ -307,11 +307,11 @@
// Metadata represents package metadata retrieved from go/packages.
type Metadata interface {
- // Name is the package name.
- Name() string
+ // PackageName is the package name.
+ PackageName() string
- // PkgPath is the package path.
- PkgPath() string
+ // PackagePath is the package path.
+ PackagePath() string
}
// Session represents a single connection from a client.
diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go
index ecea2b4..1f6fd20 100644
--- a/internal/lsp/source/workspace_symbol.go
+++ b/internal/lsp/source/workspace_symbol.go
@@ -75,7 +75,7 @@
func fullyQualifiedSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
_, score := dynamicSymbolMatch(name, pkg, matcher)
if score > 0 {
- return []string{pkg.PkgPath(), ".", name}, score
+ return []string{pkg.PackagePath(), ".", name}, score
}
return nil, 0
}
@@ -83,14 +83,14 @@
func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
var score float64
- endsInPkgName := strings.HasSuffix(pkg.PkgPath(), pkg.Name())
+ endsInPkgName := strings.HasSuffix(pkg.PackagePath(), pkg.PackageName())
// If the package path does not end in the package name, we need to check the
// package-qualified symbol as an extra pass first.
if !endsInPkgName {
- pkgQualified := []string{pkg.Name(), ".", name}
+ pkgQualified := []string{pkg.PackageName(), ".", name}
idx, score := matcher(pkgQualified)
- nameStart := len(pkg.Name()) + 1
+ nameStart := len(pkg.PackageName()) + 1
if score > 0 {
// If our match is contained entirely within the unqualified portion,
// just return that.
@@ -103,11 +103,11 @@
}
// Now try matching the fully qualified symbol.
- fullyQualified := []string{pkg.PkgPath(), ".", name}
+ fullyQualified := []string{pkg.PackagePath(), ".", name}
idx, score := matcher(fullyQualified)
// As above, check if we matched just the unqualified symbol name.
- nameStart := len(pkg.PkgPath()) + 1
+ nameStart := len(pkg.PackagePath()) + 1
if idx >= nameStart {
return []string{name}, score
}
@@ -116,9 +116,9 @@
// initial pass above, so check if we matched just the package-qualified
// name.
if endsInPkgName && idx >= 0 {
- pkgStart := len(pkg.PkgPath()) - len(pkg.Name())
+ pkgStart := len(pkg.PackagePath()) - len(pkg.PackageName())
if idx >= pkgStart {
- return []string{pkg.Name(), ".", name}, score
+ return []string{pkg.PackageName(), ".", name}, score
}
}
@@ -128,7 +128,7 @@
}
func packageSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) {
- qualified := []string{pkg.Name(), ".", name}
+ qualified := []string{pkg.PackageName(), ".", name}
if _, s := matcher(qualified); s > 0 {
return qualified, s
}
@@ -458,7 +458,7 @@
kind: sym.Kind,
uri: i.uri,
rng: sym.Range,
- container: i.md.PkgPath(),
+ container: i.md.PackagePath(),
}
w.ss.store(si)
}