gopls/internal/lsp/cache: use local aliases for all source objects

As a step toward inverting the import between cache and source, create
additional local aliases in the cache package for all referenced objects
in the source package.

Subsequent CLs will clean these up, and reverse the import.

Change-Id: I914e0b8d54aa15d3d5e9ee20fae2e64bc1e48553
Reviewed-on: https://go-review.googlesource.com/c/tools/+/543717
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Alan Donovan <adonovan@google.com>
diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go
index 3977e5f..79ebdaa 100644
--- a/gopls/internal/lsp/cache/analysis.go
+++ b/gopls/internal/lsp/cache/analysis.go
@@ -37,7 +37,6 @@
 	"golang.org/x/tools/gopls/internal/lsp/frob"
 	"golang.org/x/tools/gopls/internal/lsp/progress"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/gopls/internal/settings"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
@@ -155,7 +154,7 @@
 //   to the driver package.
 //   Steps:
 //   - define a narrow driver.Snapshot interface with only these methods:
-//        Metadata(PackageID) source.Metadata
+//        Metadata(PackageID) Metadata
 //        ReadFile(Context, URI) (file.Handle, error)
 //        View() *View // for Options
 //   - share cache.{goVersionRx,parseGoImpl}
@@ -171,7 +170,7 @@
 // The analyzers list must be duplicate free; order does not matter.
 //
 // Notifications of progress may be sent to the optional reporter.
-func (snapshot *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]unit, analyzers []*settings.Analyzer, reporter *progress.Tracker) ([]*source.Diagnostic, error) {
+func (snapshot *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]unit, analyzers []*settings.Analyzer, reporter *progress.Tracker) ([]*Diagnostic, error) {
 	start := time.Now() // for progress reporting
 
 	var tagStr string // sorted comma-separated list of PackageIDs
@@ -436,7 +435,7 @@
 	// begin the analysis you asked for".
 	// Even if current callers choose to discard the
 	// results, we should propagate the per-action errors.
-	var results []*source.Diagnostic
+	var results []*Diagnostic
 	for _, root := range roots {
 		for _, a := range enabled {
 			// Skip analyzers that were added only to
@@ -504,7 +503,7 @@
 // type-checking and analyzing syntax (miss).
 type analysisNode struct {
 	fset            *token.FileSet              // file set shared by entire batch (DAG)
-	m               *source.Metadata            // metadata for this package
+	m               *Metadata                   // metadata for this package
 	files           []file.Handle               // contents of CompiledGoFiles
 	analyzers       []*analysis.Analyzer        // set of analyzers to run
 	preds           []*analysisNode             // graph edges:
@@ -784,7 +783,7 @@
 func (an *analysisNode) run(ctx context.Context) (*analyzeSummary, error) {
 	// Parse only the "compiled" Go files.
 	// Do the computation in parallel.
-	parsed := make([]*source.ParsedGoFile, len(an.files))
+	parsed := make([]*ParsedGoFile, len(an.files))
 	{
 		var group errgroup.Group
 		group.SetLimit(4) // not too much: run itself is already called in parallel
@@ -795,7 +794,7 @@
 				// as cached ASTs require the global FileSet.
 				// ast.Object resolution is unfortunately an implied part of the
 				// go/analysis contract.
-				pgf, err := parseGoImpl(ctx, an.fset, fh, source.ParseFull&^source.SkipObjectResolution, false)
+				pgf, err := parseGoImpl(ctx, an.fset, fh, ParseFull&^SkipObjectResolution, false)
 				parsed[i] = pgf
 				return err
 			})
@@ -909,7 +908,7 @@
 }
 
 // Postcondition: analysisPackage.types and an.exportDeps are populated.
-func (an *analysisNode) typeCheck(parsed []*source.ParsedGoFile) *analysisPackage {
+func (an *analysisNode) typeCheck(parsed []*ParsedGoFile) *analysisPackage {
 	m := an.m
 
 	if false { // debugging
@@ -964,7 +963,7 @@
 			// as parser recovery can be quite lossy (#59888).
 			typeError := e.(types.Error)
 			for _, p := range parsed {
-				if p.ParseErr != nil && source.NodeContains(p.File, typeError.Pos) {
+				if p.ParseErr != nil && NodeContains(p.File, typeError.Pos) {
 					return
 				}
 			}
@@ -1000,7 +999,7 @@
 			}
 
 			// (Duplicates logic from check.go.)
-			if !source.IsValidImport(an.m.PkgPath, dep.m.PkgPath) {
+			if !IsValidImport(an.m.PkgPath, dep.m.PkgPath) {
 				return nil, fmt.Errorf("invalid use of internal package %s", importPath)
 			}
 
@@ -1098,9 +1097,9 @@
 // analysisPackage contains information about a package, including
 // syntax trees, used transiently during its type-checking and analysis.
 type analysisPackage struct {
-	m              *source.Metadata
+	m              *Metadata
 	fset           *token.FileSet // local to this package
-	parsed         []*source.ParsedGoFile
+	parsed         []*ParsedGoFile
 	files          []*ast.File // same as parsed[i].File
 	types          *types.Package
 	compiles       bool // package is transitively free of list/parse/type errors
diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go
index 9a590a6..dea637a 100644
--- a/gopls/internal/lsp/cache/check.go
+++ b/gopls/internal/lsp/cache/check.go
@@ -27,7 +27,6 @@
 	"golang.org/x/tools/gopls/internal/immutable"
 	"golang.org/x/tools/gopls/internal/lsp/filecache"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/gopls/internal/lsp/source/typerefs"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
@@ -93,8 +92,8 @@
 // This is different from having type-checking errors: a failure to type-check
 // indicates context cancellation or otherwise significant failure to perform
 // the type-checking operation.
-func (s *Snapshot) TypeCheck(ctx context.Context, ids ...PackageID) ([]source.Package, error) {
-	pkgs := make([]source.Package, len(ids))
+func (s *Snapshot) TypeCheck(ctx context.Context, ids ...PackageID) ([]Package_, error) {
+	pkgs := make([]Package_, len(ids))
 
 	var (
 		needIDs []PackageID // ids to type-check
@@ -200,13 +199,13 @@
 		if err != nil {
 			return nil, err
 		}
-		source.RemoveIntermediateTestVariants(&meta)
+		RemoveIntermediateTestVariants(&meta)
 		for _, m := range meta {
 			openPackages[m.ID] = true
 		}
 	}
 
-	var openPackageIDs []source.PackageID
+	var openPackageIDs []PackageID
 	for id := range openPackages {
 		openPackageIDs = append(openPackageIDs, id)
 	}
@@ -543,7 +542,7 @@
 
 // importPackage loads the given package from its export data in p.exportData
 // (which must already be populated).
-func (b *typeCheckBatch) importPackage(ctx context.Context, m *source.Metadata, data []byte) (*types.Package, error) {
+func (b *typeCheckBatch) importPackage(ctx context.Context, m *Metadata, data []byte) (*types.Package, error) {
 	ctx, done := event.Start(ctx, "cache.typeCheckBatch.importPackage", tag.Package.Of(string(m.ID)))
 	defer done()
 
@@ -606,7 +605,7 @@
 	// Parse the compiled go files, bypassing the parse cache as packages checked
 	// for import are unlikely to get cache hits. Additionally, we can optimize
 	// parsing slightly by not passing parser.ParseComments.
-	pgfs := make([]*source.ParsedGoFile, len(ph.localInputs.compiledGoFiles))
+	pgfs := make([]*ParsedGoFile, len(ph.localInputs.compiledGoFiles))
 	{
 		var group errgroup.Group
 		// Set an arbitrary concurrency limit; we want some parallelism but don't
@@ -713,7 +712,7 @@
 // unrecoverable error loading export data.
 //
 // TODO(rfindley): inline, now that this is only called in one place.
-func (b *typeCheckBatch) awaitPredecessors(ctx context.Context, m *source.Metadata) error {
+func (b *typeCheckBatch) awaitPredecessors(ctx context.Context, m *Metadata) error {
 	// await predecessors concurrently, as some of them may be non-syntax
 	// packages, and therefore will not have been started by the type-checking
 	// batch.
@@ -730,10 +729,10 @@
 
 // importMap returns the map of package path -> package ID relative to the
 // specified ID.
-func (b *typeCheckBatch) importMap(id PackageID) map[string]source.PackageID {
-	impMap := make(map[string]source.PackageID)
-	var populateDeps func(m *source.Metadata)
-	populateDeps = func(parent *source.Metadata) {
+func (b *typeCheckBatch) importMap(id PackageID) map[string]PackageID {
+	impMap := make(map[string]PackageID)
+	var populateDeps func(m *Metadata)
+	populateDeps = func(parent *Metadata) {
 		for _, id := range parent.DepsByPkgPath {
 			m := b.handles[id].m
 			if _, ok := impMap[string(m.PkgPath)]; ok {
@@ -779,7 +778,7 @@
 // changed (as detected by the depkeys field), then the packageHandle in
 // question must also not have changed, and we need not re-evaluate its key.
 type packageHandle struct {
-	m *source.Metadata
+	m *Metadata
 
 	// Local data:
 
@@ -853,7 +852,7 @@
 			if n.unfinishedSuccs == 0 {
 				leaves = append(leaves, n)
 			} else {
-				n.succs = make(map[source.PackageID]*handleNode, n.unfinishedSuccs)
+				n.succs = make(map[PackageID]*handleNode, n.unfinishedSuccs)
 			}
 			b.nodes[idxID] = n
 			for _, depID := range m.DepsByPkgPath {
@@ -938,7 +937,7 @@
 //
 // It is used to implement a bottom-up construction of packageHandles.
 type handleNode struct {
-	m               *source.Metadata
+	m               *Metadata
 	idxID           typerefs.IndexID
 	ph              *packageHandle
 	err             error
@@ -1210,8 +1209,8 @@
 
 // typerefs returns typerefs for the package described by m and cgfs, after
 // either computing it or loading it from the file cache.
-func (s *Snapshot) typerefs(ctx context.Context, m *source.Metadata, cgfs []file.Handle) (map[string][]typerefs.Symbol, error) {
-	imports := make(map[ImportPath]*source.Metadata)
+func (s *Snapshot) typerefs(ctx context.Context, m *Metadata, cgfs []file.Handle) (map[string][]typerefs.Symbol, error) {
+	imports := make(map[ImportPath]*Metadata)
 	for impPath, id := range m.DepsByImpPath {
 		if id != "" {
 			imports[impPath] = s.Metadata(id)
@@ -1234,7 +1233,7 @@
 
 // typerefData retrieves encoded typeref data from the filecache, or computes it on
 // a cache miss.
-func (s *Snapshot) typerefData(ctx context.Context, id PackageID, imports map[ImportPath]*source.Metadata, cgfs []file.Handle) ([]byte, error) {
+func (s *Snapshot) typerefData(ctx context.Context, id PackageID, imports map[ImportPath]*Metadata, cgfs []file.Handle) ([]byte, error) {
 	key := typerefsKey(id, imports, cgfs)
 	if data, err := filecache.Get(typerefsKind, key); err == nil {
 		return data, nil
@@ -1242,7 +1241,7 @@
 		bug.Reportf("internal error reading typerefs data: %v", err)
 	}
 
-	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), source.ParseFull&^parser.ParseComments, true, cgfs...)
+	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), ParseFull&^parser.ParseComments, true, cgfs...)
 	if err != nil {
 		return nil, err
 	}
@@ -1260,7 +1259,7 @@
 
 // typerefsKey produces a key for the reference information produced by the
 // typerefs package.
-func typerefsKey(id PackageID, imports map[ImportPath]*source.Metadata, compiledGoFiles []file.Handle) file.Hash {
+func typerefsKey(id PackageID, imports map[ImportPath]*Metadata, compiledGoFiles []file.Handle) file.Hash {
 	hasher := sha256.New()
 
 	fmt.Fprintf(hasher, "typerefs: %s\n", id)
@@ -1309,7 +1308,7 @@
 	moduleMode         bool
 }
 
-func (s *Snapshot) typeCheckInputs(ctx context.Context, m *source.Metadata) (typeCheckInputs, error) {
+func (s *Snapshot) typeCheckInputs(ctx context.Context, m *Metadata) (typeCheckInputs, error) {
 	// Read both lists of files of this package.
 	//
 	// Parallelism is not necessary here as the files will have already been
@@ -1426,7 +1425,7 @@
 
 	// Our heuristic for whether to show type checking errors is:
 	//  + If any file was 'fixed', don't show type checking errors as we
-	//    can't guarantee that they reference accurate locations in the source.
+//    can't guarantee that they reference accurate locations in the source.
 	//  + If there is a parse error _in the current file_, suppress type
 	//    errors in that file.
 	//  + Otherwise, show type errors even in the presence of parse errors in
@@ -1512,11 +1511,11 @@
 	// Collect parsed files from the type check pass, capturing parse errors from
 	// compiled files.
 	var err error
-	pkg.goFiles, err = b.parseCache.parseFiles(ctx, b.fset, source.ParseFull, false, inputs.goFiles...)
+	pkg.goFiles, err = b.parseCache.parseFiles(ctx, b.fset, ParseFull, false, inputs.goFiles...)
 	if err != nil {
 		return nil, err
 	}
-	pkg.compiledGoFiles, err = b.parseCache.parseFiles(ctx, b.fset, source.ParseFull, false, inputs.compiledGoFiles...)
+	pkg.compiledGoFiles, err = b.parseCache.parseFiles(ctx, b.fset, ParseFull, false, inputs.compiledGoFiles...)
 	if err != nil {
 		return nil, err
 	}
@@ -1609,7 +1608,7 @@
 				// e.g. missing metadata for dependencies in buildPackageHandle
 				return nil, missingPkgError(inputs.id, path, inputs.moduleMode)
 			}
-			if !source.IsValidImport(inputs.pkgPath, depPH.m.PkgPath) {
+			if !IsValidImport(inputs.pkgPath, depPH.m.PkgPath) {
 				return nil, fmt.Errorf("invalid use of internal package %q", path)
 			}
 			return b.getImportPackage(ctx, id)
@@ -1637,7 +1636,7 @@
 // of pkg, or to 'requires' declarations in the package's go.mod file.
 //
 // TODO(rfindley): move this to load.go
-func depsErrors(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs file.Source, workspacePackages immutable.Map[PackageID, PackagePath]) ([]*source.Diagnostic, error) {
+func depsErrors(ctx context.Context, m *Metadata, meta *metadataGraph, fs file.Source, workspacePackages immutable.Map[PackageID, PackagePath]) ([]*Diagnostic, error) {
 	// Select packages that can't be found, and were imported in non-workspace packages.
 	// Workspace packages already show their own errors.
 	var relevantErrors []*packagesinternal.PackageError
@@ -1668,12 +1667,12 @@
 
 	// Build an index of all imports in the package.
 	type fileImport struct {
-		cgf *source.ParsedGoFile
+		cgf *ParsedGoFile
 		imp *ast.ImportSpec
 	}
 	allImports := map[string][]fileImport{}
 	for _, uri := range m.CompiledGoFiles {
-		pgf, err := parseGoURI(ctx, fs, uri, source.ParseHeader)
+		pgf, err := parseGoURI(ctx, fs, uri, ParseHeader)
 		if err != nil {
 			return nil, err
 		}
@@ -1692,7 +1691,7 @@
 
 	// Apply a diagnostic to any import involved in the error, stopping once
 	// we reach the workspace.
-	var errors []*source.Diagnostic
+	var errors []*Diagnostic
 	for _, depErr := range relevantErrors {
 		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
 			item := depErr.ImportStack[i]
@@ -1709,15 +1708,15 @@
 				if err != nil {
 					return nil, err
 				}
-				diag := &source.Diagnostic{
+				diag := &Diagnostic{
 					URI:            imp.cgf.URI,
 					Range:          rng,
 					Severity:       protocol.SeverityError,
-					Source:         source.TypeError,
+					Source:         TypeError,
 					Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
 					SuggestedFixes: fixes,
 				}
-				if !source.BundleQuickFixes(diag) {
+				if !BundleQuickFixes(diag) {
 					bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
 				}
 				errors = append(errors, diag)
@@ -1756,15 +1755,15 @@
 			if err != nil {
 				return nil, err
 			}
-			diag := &source.Diagnostic{
+			diag := &Diagnostic{
 				URI:            pm.URI,
 				Range:          rng,
 				Severity:       protocol.SeverityError,
-				Source:         source.TypeError,
+				Source:         TypeError,
 				Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
 				SuggestedFixes: fixes,
 			}
-			if !source.BundleQuickFixes(diag) {
+			if !BundleQuickFixes(diag) {
 				bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
 			}
 			errors = append(errors, diag)
@@ -1781,7 +1780,7 @@
 	// access to the full snapshot, and could provide more information (such as
 	// the initialization error).
 	if moduleMode {
-		if source.IsCommandLineArguments(from) {
+		if IsCommandLineArguments(from) {
 			return fmt.Errorf("current file is not included in a workspace module")
 		} else {
 			// Previously, we would present the initialization error here.
diff --git a/gopls/internal/lsp/cache/cycle_test.go b/gopls/internal/lsp/cache/cycle_test.go
index 25edbbf..732c31d 100644
--- a/gopls/internal/lsp/cache/cycle_test.go
+++ b/gopls/internal/lsp/cache/cycle_test.go
@@ -8,14 +8,12 @@
 	"sort"
 	"strings"
 	"testing"
-
-	"golang.org/x/tools/gopls/internal/lsp/source"
 )
 
 // This is an internal test of the breakImportCycles logic.
 func TestBreakImportCycles(t *testing.T) {
 
-	type Graph = map[PackageID]*source.Metadata
+	type Graph = map[PackageID]*Metadata
 
 	// cyclic returns a description of a cycle,
 	// if the graph is cyclic, otherwise "".
@@ -62,11 +60,11 @@
 	// and the set of edges {a->b, b->c, b->d}.
 	parse := func(s string) Graph {
 		m := make(Graph)
-		makeNode := func(name string) *source.Metadata {
+		makeNode := func(name string) *Metadata {
 			id := PackageID(name)
 			n, ok := m[id]
 			if !ok {
-				n = &source.Metadata{
+				n = &Metadata{
 					ID:            id,
 					DepsByPkgPath: make(map[PackagePath]PackageID),
 				}
diff --git a/gopls/internal/lsp/cache/errors.go b/gopls/internal/lsp/cache/errors.go
index c63276b..5620b05 100644
--- a/gopls/internal/lsp/cache/errors.go
+++ b/gopls/internal/lsp/cache/errors.go
@@ -37,7 +37,7 @@
 // diagnostic, using the provided metadata and filesource.
 //
 // The slice of diagnostics may be empty.
-func goPackagesErrorDiagnostics(ctx context.Context, e packages.Error, m *source.Metadata, fs file.Source) ([]*source.Diagnostic, error) {
+func goPackagesErrorDiagnostics(ctx context.Context, e packages.Error, m *Metadata, fs file.Source) ([]*source.Diagnostic, error) {
 	if diag, err := parseGoListImportCycleError(ctx, e, m, fs); err != nil {
 		return nil, err
 	} else if diag != nil {
@@ -512,7 +512,7 @@
 // an import cycle, returning a diagnostic if successful.
 //
 // If the error is not detected as an import cycle error, it returns nil, nil.
-func parseGoListImportCycleError(ctx context.Context, e packages.Error, m *source.Metadata, fs file.Source) (*source.Diagnostic, error) {
+func parseGoListImportCycleError(ctx context.Context, e packages.Error, m *Metadata, fs file.Source) (*source.Diagnostic, error) {
 	re := regexp.MustCompile(`(.*): import stack: \[(.+)\]`)
 	matches := re.FindStringSubmatch(strings.TrimSpace(e.Msg))
 	if len(matches) < 3 {
diff --git a/gopls/internal/lsp/cache/errors_test.go b/gopls/internal/lsp/cache/errors_test.go
index fa7bf3b..38bd652 100644
--- a/gopls/internal/lsp/cache/errors_test.go
+++ b/gopls/internal/lsp/cache/errors_test.go
@@ -12,7 +12,6 @@
 	"github.com/google/go-cmp/cmp"
 	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 )
 
 func TestParseErrorMessage(t *testing.T) {
@@ -57,7 +56,7 @@
 }
 
 func TestDiagnosticEncoding(t *testing.T) {
-	diags := []*source.Diagnostic{
+	diags := []*Diagnostic{
 		{}, // empty
 		{
 			URI: "file///foo",
@@ -86,7 +85,7 @@
 
 			// Fields below are used internally to generate quick fixes. They aren't
 			// part of the LSP spec and don't leave the server.
-			SuggestedFixes: []source.SuggestedFix{
+			SuggestedFixes: []SuggestedFix{
 				{
 					Title: "fix it!",
 					Edits: map[protocol.DocumentURI][]protocol.TextEdit{
diff --git a/gopls/internal/lsp/cache/graph.go b/gopls/internal/lsp/cache/graph.go
index e4432e8..f043a8a 100644
--- a/gopls/internal/lsp/cache/graph.go
+++ b/gopls/internal/lsp/cache/graph.go
@@ -10,14 +10,13 @@
 	"golang.org/x/tools/go/packages"
 	"golang.org/x/tools/gopls/internal/bug"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 )
 
 // A metadataGraph is an immutable and transitively closed import
 // graph of Go packages, as obtained from go/packages.
 type metadataGraph struct {
 	// metadata maps package IDs to their associated metadata.
-	metadata map[PackageID]*source.Metadata
+	metadata map[PackageID]*Metadata
 
 	// importedBy maps package IDs to the list of packages that import them.
 	importedBy map[PackageID][]PackageID
@@ -29,21 +28,21 @@
 	ids map[protocol.DocumentURI][]PackageID
 }
 
-// Metadata implements the source.MetadataSource interface.
-func (g *metadataGraph) Metadata(id PackageID) *source.Metadata {
+// Metadata implements the MetadataSource interface.
+func (g *metadataGraph) Metadata(id PackageID) *Metadata {
 	return g.metadata[id]
 }
 
 // Clone creates a new metadataGraph, applying the given updates to the
 // receiver. A nil map value represents a deletion.
-func (g *metadataGraph) Clone(updates map[PackageID]*source.Metadata) *metadataGraph {
+func (g *metadataGraph) Clone(updates map[PackageID]*Metadata) *metadataGraph {
 	if len(updates) == 0 {
 		// Optimization: since the graph is immutable, we can return the receiver.
 		return g
 	}
 
 	// Copy metadata map then apply updates.
-	metadata := make(map[PackageID]*source.Metadata, len(g.metadata))
+	metadata := make(map[PackageID]*Metadata, len(g.metadata))
 	for id, m := range g.metadata {
 		metadata[id] = m
 	}
@@ -63,7 +62,7 @@
 
 // newMetadataGraph returns a new metadataGraph,
 // deriving relations from the specified metadata.
-func newMetadataGraph(metadata map[PackageID]*source.Metadata) *metadataGraph {
+func newMetadataGraph(metadata map[PackageID]*Metadata) *metadataGraph {
 	// Build the import graph.
 	importedBy := make(map[PackageID][]PackageID)
 	for id, m := range metadata {
@@ -90,8 +89,8 @@
 	// Sort and filter file associations.
 	for uri, ids := range uriIDs {
 		sort.Slice(ids, func(i, j int) bool {
-			cli := source.IsCommandLineArguments(ids[i])
-			clj := source.IsCommandLineArguments(ids[j])
+			cli := IsCommandLineArguments(ids[i])
+			clj := IsCommandLineArguments(ids[j])
 			if cli != clj {
 				return clj
 			}
@@ -109,7 +108,7 @@
 		for i, id := range ids {
 			// If we've seen *anything* prior to command-line arguments package, take
 			// it. Note that ids[0] may itself be command-line-arguments.
-			if i > 0 && source.IsCommandLineArguments(id) {
+			if i > 0 && IsCommandLineArguments(id) {
 				uriIDs[uri] = ids[:i]
 				break
 			}
@@ -126,8 +125,8 @@
 // reverseReflexiveTransitiveClosure returns a new mapping containing the
 // metadata for the specified packages along with any package that
 // transitively imports one of them, keyed by ID, including all the initial packages.
-func (g *metadataGraph) reverseReflexiveTransitiveClosure(ids ...PackageID) map[PackageID]*source.Metadata {
-	seen := make(map[PackageID]*source.Metadata)
+func (g *metadataGraph) reverseReflexiveTransitiveClosure(ids ...PackageID) map[PackageID]*Metadata {
+	seen := make(map[PackageID]*Metadata)
 	var visitAll func([]PackageID)
 	visitAll = func(ids []PackageID) {
 		for _, id := range ids {
@@ -146,7 +145,7 @@
 // breakImportCycles breaks import cycles in the metadata by deleting
 // Deps* edges. It modifies only metadata present in the 'updates'
 // subset. This function has an internal test.
-func breakImportCycles(metadata, updates map[PackageID]*source.Metadata) {
+func breakImportCycles(metadata, updates map[PackageID]*Metadata) {
 	// 'go list' should never report a cycle without flagging it
 	// as such, but we're extra cautious since we're combining
 	// information from multiple runs of 'go list'. Also, Bazel
@@ -230,7 +229,7 @@
 // detectImportCycles reports cycles in the metadata graph. It returns a new
 // unordered array of all cycles (nontrivial strong components) in the
 // metadata graph reachable from a non-nil 'updates' value.
-func detectImportCycles(metadata, updates map[PackageID]*source.Metadata) [][]*source.Metadata {
+func detectImportCycles(metadata, updates map[PackageID]*Metadata) [][]*Metadata {
 	// We use the depth-first algorithm of Tarjan.
 	// https://doi.org/10.1137/0201010
 	//
@@ -242,7 +241,7 @@
 	// (Unfortunately we can't intrude on shared Metadata.)
 	type node struct {
 		rep            *node
-		m              *source.Metadata
+		m              *Metadata
 		index, lowlink int32
 		scc            int8 // TODO(adonovan): opt: cram these 1.5 bits into previous word
 	}
@@ -256,7 +255,7 @@
 				// Not sure whether a go/packages driver ever
 				// emits this, but create a dummy node in case.
 				// Obviously it won't be part of any cycle.
-				m = &source.Metadata{ID: id}
+				m = &Metadata{ID: id}
 			}
 			n = &node{m: m}
 			n.rep = n
@@ -281,7 +280,7 @@
 	var (
 		index int32 = 1
 		stack []*node
-		sccs  [][]*source.Metadata // set of nontrivial strongly connected components
+		sccs  [][]*Metadata // set of nontrivial strongly connected components
 	)
 
 	// visit implements the depth-first search of Tarjan's SCC algorithm
@@ -327,7 +326,7 @@
 		// Is x the root of an SCC?
 		if x.lowlink == x.index {
 			// Gather all metadata in the SCC (if nontrivial).
-			var scc []*source.Metadata
+			var scc []*Metadata
 			for {
 				// Pop y from stack.
 				i := len(stack) - 1
diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go
index c5ede95..09892f4 100644
--- a/gopls/internal/lsp/cache/load.go
+++ b/gopls/internal/lsp/cache/load.go
@@ -20,7 +20,6 @@
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/immutable"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
 	"golang.org/x/tools/internal/gocommand"
@@ -113,9 +112,9 @@
 	ctx, done := event.Start(ctx, "cache.snapshot.load", tag.Query.Of(query))
 	defer done()
 
-	flags := source.LoadWorkspace
+	flags := LoadWorkspace
 	if allowNetwork {
-		flags |= source.AllowNetwork
+		flags |= AllowNetwork
 	}
 	_, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{
 		WorkingDir: s.view.goCommandDir.Path(),
@@ -162,7 +161,7 @@
 
 	moduleErrs := make(map[string][]packages.Error) // module path -> errors
 	filterFunc := s.view.filterFunc()
-	newMetadata := make(map[PackageID]*source.Metadata)
+	newMetadata := make(map[PackageID]*Metadata)
 	for _, pkg := range pkgs {
 		// The Go command returns synthetic list results for module queries that
 		// encountered module errors.
@@ -228,7 +227,7 @@
 	// required to preserve the above invariant.
 	var files []protocol.DocumentURI // files to preload
 	seenFiles := make(map[protocol.DocumentURI]bool)
-	updates := make(map[PackageID]*source.Metadata)
+	updates := make(map[PackageID]*Metadata)
 	for _, m := range newMetadata {
 		if existing := s.meta.metadata[m.ID]; existing == nil {
 			// Record any new files we should pre-load.
@@ -311,7 +310,7 @@
 //
 // TODO(rfindley): separate workspace diagnostics from critical workspace
 // errors.
-func (s *Snapshot) workspaceLayoutError(ctx context.Context) (error, []*source.Diagnostic) {
+func (s *Snapshot) workspaceLayoutError(ctx context.Context) (error, []*Diagnostic) {
 	// TODO(rfindley): both of the checks below should be delegated to the workspace.
 
 	if s.view.effectiveGO111MODULE() == off {
@@ -355,14 +354,14 @@
 	return nil, nil
 }
 
-func (s *Snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []*Overlay) []*source.Diagnostic {
-	var srcDiags []*source.Diagnostic
+func (s *Snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, files []*Overlay) []*Diagnostic {
+	var srcDiags []*Diagnostic
 	for _, fh := range files {
 		// Place the diagnostics on the package or module declarations.
 		var rng protocol.Range
 		switch s.FileKind(fh) {
 		case file.Go:
-			if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil {
+			if pgf, err := s.ParseGo(ctx, fh, ParseHeader); err == nil {
 				// Check that we have a valid `package foo` range to use for positioning the error.
 				if pgf.File.Package.IsValid() && pgf.File.Name != nil && pgf.File.Name.End().IsValid() {
 					rng, _ = pgf.PosRange(pgf.File.Package, pgf.File.Name.End())
@@ -375,11 +374,11 @@
 				}
 			}
 		}
-		srcDiags = append(srcDiags, &source.Diagnostic{
+		srcDiags = append(srcDiags, &Diagnostic{
 			URI:      fh.URI(),
 			Range:    rng,
 			Severity: protocol.SeverityError,
-			Source:   source.ListError,
+			Source:   ListError,
 			Message:  msg,
 		})
 	}
@@ -389,12 +388,12 @@
 // buildMetadata populates the updates map with metadata updates to
 // apply, based on the given pkg. It recurs through pkg.Imports to ensure that
 // metadata exists for all dependencies.
-func buildMetadata(updates map[PackageID]*source.Metadata, pkg *packages.Package, loadDir string, standalone bool) {
+func buildMetadata(updates map[PackageID]*Metadata, pkg *packages.Package, loadDir string, standalone bool) {
 	// Allow for multiple ad-hoc packages in the workspace (see #47584).
 	pkgPath := PackagePath(pkg.PkgPath)
 	id := PackageID(pkg.ID)
 
-	if source.IsCommandLineArguments(id) {
+	if IsCommandLineArguments(id) {
 		if len(pkg.CompiledGoFiles) != 1 {
 			bug.Reportf("unexpected files in command-line-arguments package: %v", pkg.CompiledGoFiles)
 			return
@@ -419,7 +418,7 @@
 	}
 
 	// Recreate the metadata rather than reusing it to avoid locking.
-	m := &source.Metadata{
+	m := &Metadata{
 		ID:         id,
 		PkgPath:    pkgPath,
 		Name:       PackageName(pkg.Name),
@@ -558,7 +557,7 @@
 // computeLoadDiagnostics computes and sets m.Diagnostics for the given metadata m.
 //
 // It should only be called during metadata construction in snapshot.load.
-func computeLoadDiagnostics(ctx context.Context, m *source.Metadata, meta *metadataGraph, fs file.Source, workspacePackages immutable.Map[PackageID, PackagePath]) {
+func computeLoadDiagnostics(ctx context.Context, m *Metadata, meta *metadataGraph, fs file.Source, workspacePackages immutable.Map[PackageID, PackagePath]) {
 	for _, packagesErr := range m.Errors {
 		// Filter out parse errors from go list. We'll get them when we
 		// actually parse, and buggy overlay support may generate spurious
@@ -594,7 +593,7 @@
 // snapshot s.
 //
 // s.mu must be held while calling this function.
-func containsPackageLocked(s *Snapshot, m *source.Metadata) bool {
+func containsPackageLocked(s *Snapshot, m *Metadata) bool {
 	// In legacy workspace mode, or if a package does not have an associated
 	// module, a package is considered inside the workspace if any of its files
 	// are under the workspace root (and not excluded).
@@ -639,7 +638,7 @@
 // the snapshot s.
 //
 // s.mu must be held while calling this function.
-func containsOpenFileLocked(s *Snapshot, m *source.Metadata) bool {
+func containsOpenFileLocked(s *Snapshot, m *Metadata) bool {
 	uris := map[protocol.DocumentURI]struct{}{}
 	for _, uri := range m.CompiledGoFiles {
 		uris[uri] = struct{}{}
@@ -661,7 +660,7 @@
 // workspace of the snapshot s.
 //
 // s.mu must be held while calling this function.
-func containsFileInWorkspaceLocked(v *View, m *source.Metadata) bool {
+func containsFileInWorkspaceLocked(v *View, m *Metadata) bool {
 	uris := map[protocol.DocumentURI]struct{}{}
 	for _, uri := range m.CompiledGoFiles {
 		uris[uri] = struct{}{}
@@ -695,7 +694,7 @@
 			continue
 		}
 
-		if source.IsCommandLineArguments(m.ID) {
+		if IsCommandLineArguments(m.ID) {
 			// If all the files contained in m have a real package, we don't need to
 			// keep m as a workspace package.
 			if allFilesHaveRealPackages(meta, m) {
@@ -733,12 +732,12 @@
 // function returns false.
 //
 // If m is not a command-line-arguments package, this is trivially true.
-func allFilesHaveRealPackages(g *metadataGraph, m *source.Metadata) bool {
+func allFilesHaveRealPackages(g *metadataGraph, m *Metadata) bool {
 	n := len(m.CompiledGoFiles)
 checkURIs:
 	for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) {
 		for _, id := range g.ids[uri] {
-			if !source.IsCommandLineArguments(id) {
+			if !IsCommandLineArguments(id) {
 				continue checkURIs
 			}
 		}
@@ -760,7 +759,7 @@
 	if len(pkg.GoFiles) > 1 {
 		return false
 	}
-	if !source.InDir(gocache, pkg.GoFiles[0]) {
+	if !InDir(gocache, pkg.GoFiles[0]) {
 		return false
 	}
 	return true
diff --git a/gopls/internal/lsp/cache/mod.go b/gopls/internal/lsp/cache/mod.go
index 0330a3b..10bb1ff 100644
--- a/gopls/internal/lsp/cache/mod.go
+++ b/gopls/internal/lsp/cache/mod.go
@@ -17,7 +17,6 @@
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/command"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
 	"golang.org/x/tools/internal/gocommand"
@@ -25,7 +24,7 @@
 )
 
 // ParseMod parses a go.mod file, using a cache. It may return partial results and an error.
-func (s *Snapshot) ParseMod(ctx context.Context, fh file.Handle) (*source.ParsedModule, error) {
+func (s *Snapshot) ParseMod(ctx context.Context, fh file.Handle) (*ParsedModule, error) {
 	uri := fh.URI()
 
 	s.mu.Lock()
@@ -34,7 +33,7 @@
 
 	type parseModKey file.Identity
 	type parseModResult struct {
-		parsed *source.ParsedModule
+		parsed *ParsedModule
 		err    error
 	}
 
@@ -62,7 +61,7 @@
 
 // parseModImpl parses the go.mod file whose name and contents are in fh.
 // It may return partial results and an error.
-func parseModImpl(ctx context.Context, fh file.Handle) (*source.ParsedModule, error) {
+func parseModImpl(ctx context.Context, fh file.Handle) (*ParsedModule, error) {
 	_, done := event.Start(ctx, "cache.ParseMod", tag.URI.Of(fh.URI()))
 	defer done()
 
@@ -73,7 +72,7 @@
 	m := protocol.NewMapper(fh.URI(), contents)
 	file, parseErr := modfile.Parse(fh.URI().Path(), contents, nil)
 	// Attempt to convert the error to a standardized parse error.
-	var parseErrors []*source.Diagnostic
+	var parseErrors []*Diagnostic
 	if parseErr != nil {
 		mfErrList, ok := parseErr.(modfile.ErrorList)
 		if !ok {
@@ -84,16 +83,16 @@
 			if err != nil {
 				return nil, err
 			}
-			parseErrors = append(parseErrors, &source.Diagnostic{
+			parseErrors = append(parseErrors, &Diagnostic{
 				URI:      fh.URI(),
 				Range:    rng,
 				Severity: protocol.SeverityError,
-				Source:   source.ParseError,
+				Source:   ParseError,
 				Message:  mfErr.Err.Error(),
 			})
 		}
 	}
-	return &source.ParsedModule{
+	return &ParsedModule{
 		URI:         fh.URI(),
 		Mapper:      m,
 		File:        file,
@@ -103,7 +102,7 @@
 
 // ParseWork parses a go.work file, using a cache. It may return partial results and an error.
 // TODO(adonovan): move to new work.go file.
-func (s *Snapshot) ParseWork(ctx context.Context, fh file.Handle) (*source.ParsedWorkFile, error) {
+func (s *Snapshot) ParseWork(ctx context.Context, fh file.Handle) (*ParsedWorkFile, error) {
 	uri := fh.URI()
 
 	s.mu.Lock()
@@ -112,7 +111,7 @@
 
 	type parseWorkKey file.Identity
 	type parseWorkResult struct {
-		parsed *source.ParsedWorkFile
+		parsed *ParsedWorkFile
 		err    error
 	}
 
@@ -139,7 +138,7 @@
 }
 
 // parseWorkImpl parses a go.work file. It may return partial results and an error.
-func parseWorkImpl(ctx context.Context, fh file.Handle) (*source.ParsedWorkFile, error) {
+func parseWorkImpl(ctx context.Context, fh file.Handle) (*ParsedWorkFile, error) {
 	_, done := event.Start(ctx, "cache.ParseWork", tag.URI.Of(fh.URI()))
 	defer done()
 
@@ -150,7 +149,7 @@
 	m := protocol.NewMapper(fh.URI(), content)
 	file, parseErr := modfile.ParseWork(fh.URI().Path(), content, nil)
 	// Attempt to convert the error to a standardized parse error.
-	var parseErrors []*source.Diagnostic
+	var parseErrors []*Diagnostic
 	if parseErr != nil {
 		mfErrList, ok := parseErr.(modfile.ErrorList)
 		if !ok {
@@ -161,16 +160,16 @@
 			if err != nil {
 				return nil, err
 			}
-			parseErrors = append(parseErrors, &source.Diagnostic{
+			parseErrors = append(parseErrors, &Diagnostic{
 				URI:      fh.URI(),
 				Range:    rng,
 				Severity: protocol.SeverityError,
-				Source:   source.ParseError,
+				Source:   ParseError,
 				Message:  mfErr.Err.Error(),
 			})
 		}
 	}
-	return &source.ParsedWorkFile{
+	return &ParsedWorkFile{
 		URI:         fh.URI(),
 		Mapper:      m,
 		File:        file,
@@ -270,7 +269,7 @@
 	for _, req := range pm.File.Require {
 		inv.Args = append(inv.Args, req.Mod.Path)
 	}
-	stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv)
+	stdout, err := snapshot.RunGoCommandDirect(ctx, Normal, inv)
 	if err != nil {
 		return nil, err
 	}
@@ -288,7 +287,7 @@
 // extractGoCommandErrors tries to parse errors that come from the go command
 // and shape them into go.mod diagnostics.
 // TODO: rename this to 'load errors'
-func (s *Snapshot) extractGoCommandErrors(ctx context.Context, goCmdError error) []*source.Diagnostic {
+func (s *Snapshot) extractGoCommandErrors(ctx context.Context, goCmdError error) []*Diagnostic {
 	if goCmdError == nil {
 		return nil
 	}
@@ -297,8 +296,8 @@
 		loc protocol.Location
 		msg string
 	}
-	diagLocations := map[*source.ParsedModule]locatedErr{}
-	backupDiagLocations := map[*source.ParsedModule]locatedErr{}
+	diagLocations := map[*ParsedModule]locatedErr{}
+	backupDiagLocations := map[*ParsedModule]locatedErr{}
 
 	// If moduleErrs is non-nil, go command errors are scoped to specific
 	// modules.
@@ -357,7 +356,7 @@
 		diagLocations = backupDiagLocations
 	}
 
-	var srcErrs []*source.Diagnostic
+	var srcErrs []*Diagnostic
 	for pm, le := range diagLocations {
 		diag, err := s.goCommandDiagnostic(pm, le.loc, le.msg)
 		if err != nil {
@@ -380,7 +379,7 @@
 //
 // It returns the location of a reference to the one of the modules and true
 // if one exists. If none is found it returns a fallback location and false.
-func (s *Snapshot) matchErrorToModule(ctx context.Context, pm *source.ParsedModule, goCmdError string) (protocol.Location, bool, error) {
+func (s *Snapshot) matchErrorToModule(ctx context.Context, pm *ParsedModule, goCmdError string) (protocol.Location, bool, error) {
 	var reference *modfile.Line
 	matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1)
 
@@ -413,7 +412,7 @@
 }
 
 // goCommandDiagnostic creates a diagnostic for a given go command error.
-func (s *Snapshot) goCommandDiagnostic(pm *source.ParsedModule, loc protocol.Location, goCmdError string) (*source.Diagnostic, error) {
+func (s *Snapshot) goCommandDiagnostic(pm *ParsedModule, loc protocol.Location, goCmdError string) (*Diagnostic, error) {
 	matches := moduleVersionInErrorRe.FindAllStringSubmatch(goCmdError, -1)
 	var innermost *module.Version
 	for i := len(matches) - 1; i >= 0; i-- {
@@ -431,14 +430,14 @@
 		if err != nil {
 			return nil, err
 		}
-		return &source.Diagnostic{
+		return &Diagnostic{
 			URI:      pm.URI,
 			Range:    loc.Range,
 			Severity: protocol.SeverityError,
-			Source:   source.ListError,
+			Source:   ListError,
 			Message: `Inconsistent vendoring detected. Please re-run "go mod vendor".
 See https://github.com/golang/go/issues/39164 for more detail on this issue.`,
-			SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+			SuggestedFixes: []SuggestedFix{SuggestedFixFromCommand(cmd, protocol.QuickFix)},
 		}, nil
 
 	case strings.Contains(goCmdError, "updates to go.sum needed"), strings.Contains(goCmdError, "missing go.sum entry"):
@@ -458,15 +457,15 @@
 		if innermost != nil {
 			msg = fmt.Sprintf("go.sum is out of sync with go.mod: entry for %v is missing. Please updating it by applying the quick fix.", innermost)
 		}
-		return &source.Diagnostic{
+		return &Diagnostic{
 			URI:      pm.URI,
 			Range:    loc.Range,
 			Severity: protocol.SeverityError,
-			Source:   source.ListError,
+			Source:   ListError,
 			Message:  msg,
-			SuggestedFixes: []source.SuggestedFix{
-				source.SuggestedFixFromCommand(tidyCmd, protocol.QuickFix),
-				source.SuggestedFixFromCommand(updateCmd, protocol.QuickFix),
+			SuggestedFixes: []SuggestedFix{
+				SuggestedFixFromCommand(tidyCmd, protocol.QuickFix),
+				SuggestedFixFromCommand(updateCmd, protocol.QuickFix),
 			},
 		}, nil
 	case strings.Contains(goCmdError, "disabled by GOPROXY=off") && innermost != nil:
@@ -479,20 +478,20 @@
 		if err != nil {
 			return nil, err
 		}
-		return &source.Diagnostic{
+		return &Diagnostic{
 			URI:            pm.URI,
 			Range:          loc.Range,
 			Severity:       protocol.SeverityError,
 			Message:        fmt.Sprintf("%v@%v has not been downloaded", innermost.Path, innermost.Version),
-			Source:         source.ListError,
-			SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+			Source:         ListError,
+			SuggestedFixes: []SuggestedFix{SuggestedFixFromCommand(cmd, protocol.QuickFix)},
 		}, nil
 	default:
-		return &source.Diagnostic{
+		return &Diagnostic{
 			URI:      pm.URI,
 			Range:    loc.Range,
 			Severity: protocol.SeverityError,
-			Source:   source.ListError,
+			Source:   ListError,
 			Message:  goCmdError,
 		}, nil
 	}
diff --git a/gopls/internal/lsp/cache/mod_tidy.go b/gopls/internal/lsp/cache/mod_tidy.go
index 1917071..c96da3d 100644
--- a/gopls/internal/lsp/cache/mod_tidy.go
+++ b/gopls/internal/lsp/cache/mod_tidy.go
@@ -18,7 +18,6 @@
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/command"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/gopls/internal/settings"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
@@ -28,7 +27,7 @@
 
 // ModTidy returns the go.mod file that would be obtained by running
 // "go mod tidy". Concurrent requests are combined into a single command.
-func (s *Snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) {
+func (s *Snapshot) ModTidy(ctx context.Context, pm *ParsedModule) (*TidiedModule, error) {
 	ctx, done := event.Start(ctx, "cache.snapshot.ModTidy")
 	defer done()
 
@@ -42,7 +41,7 @@
 	s.mu.Unlock()
 
 	type modTidyResult struct {
-		tidied *source.TidiedModule
+		tidied *TidiedModule
 		err    error
 	}
 
@@ -57,12 +56,12 @@
 		}
 		if _, ok := fh.(*Overlay); ok {
 			if info, _ := os.Stat(uri.Path()); info == nil {
-				return nil, source.ErrNoModOnDisk
+				return nil, ErrNoModOnDisk
 			}
 		}
 
 		if criticalErr := s.CriticalError(ctx); criticalErr != nil {
-			return &source.TidiedModule{
+			return &TidiedModule{
 				Diagnostics: criticalErr.Diagnostics,
 			}, nil
 		}
@@ -95,7 +94,7 @@
 }
 
 // modTidyImpl runs "go mod tidy" on a go.mod file.
-func modTidyImpl(ctx context.Context, snapshot *Snapshot, filename string, pm *source.ParsedModule) (*source.TidiedModule, error) {
+func modTidyImpl(ctx context.Context, snapshot *Snapshot, filename string, pm *ParsedModule) (*TidiedModule, error) {
 	ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(filename))
 	defer done()
 
@@ -105,7 +104,7 @@
 		WorkingDir: filepath.Dir(filename),
 	}
 	// TODO(adonovan): ensure that unsaved overlays are passed through to 'go'.
-	tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv)
+	tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, WriteTemporaryModFile, inv)
 	if err != nil {
 		return nil, err
 	}
@@ -136,7 +135,7 @@
 		return nil, err
 	}
 
-	return &source.TidiedModule{
+	return &TidiedModule{
 		Diagnostics:   diagnostics,
 		TidiedContent: tempContents,
 	}, nil
@@ -145,7 +144,7 @@
 // modTidyDiagnostics computes the differences between the original and tidied
 // go.mod files to produce diagnostic and suggested fixes. Some diagnostics
 // may appear on the Go files that import packages from missing modules.
-func modTidyDiagnostics(ctx context.Context, snapshot *Snapshot, pm *source.ParsedModule, ideal *modfile.File) (diagnostics []*source.Diagnostic, err error) {
+func modTidyDiagnostics(ctx context.Context, snapshot *Snapshot, pm *ParsedModule, ideal *modfile.File) (diagnostics []*Diagnostic, err error) {
 	// First, determine which modules are unused and which are missing from the
 	// original go.mod file.
 	var (
@@ -207,9 +206,9 @@
 	return diagnostics, nil
 }
 
-func missingModuleDiagnostics(ctx context.Context, snapshot *Snapshot, pm *source.ParsedModule, ideal *modfile.File, missing map[string]*modfile.Require) ([]*source.Diagnostic, error) {
-	missingModuleFixes := map[*modfile.Require][]source.SuggestedFix{}
-	var diagnostics []*source.Diagnostic
+func missingModuleDiagnostics(ctx context.Context, snapshot *Snapshot, pm *ParsedModule, ideal *modfile.File, missing map[string]*modfile.Require) ([]*Diagnostic, error) {
+	missingModuleFixes := map[*modfile.Require][]SuggestedFix{}
+	var diagnostics []*Diagnostic
 	for _, req := range missing {
 		srcDiag, err := missingModuleDiagnostic(pm, req)
 		if err != nil {
@@ -279,7 +278,7 @@
 			continue
 		}
 		for _, goFile := range compiledGoFiles {
-			pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader)
+			pgf, err := snapshot.ParseGo(ctx, goFile, ParseHeader)
 			if err != nil {
 				continue
 			}
@@ -319,8 +318,8 @@
 	return diagnostics, nil
 }
 
-// unusedDiagnostic returns a source.Diagnostic for an unused require.
-func unusedDiagnostic(m *protocol.Mapper, req *modfile.Require, onlyDiagnostic bool) (*source.Diagnostic, error) {
+// unusedDiagnostic returns a Diagnostic for an unused require.
+func unusedDiagnostic(m *protocol.Mapper, req *modfile.Require, onlyDiagnostic bool) (*Diagnostic, error) {
 	rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
 	if err != nil {
 		return nil, err
@@ -334,19 +333,19 @@
 	if err != nil {
 		return nil, err
 	}
-	return &source.Diagnostic{
+	return &Diagnostic{
 		URI:            m.URI,
 		Range:          rng,
 		Severity:       protocol.SeverityWarning,
-		Source:         source.ModTidyError,
+		Source:         ModTidyError,
 		Message:        fmt.Sprintf("%s is not used in this module", req.Mod.Path),
-		SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+		SuggestedFixes: []SuggestedFix{SuggestedFixFromCommand(cmd, protocol.QuickFix)},
 	}, nil
 }
 
 // directnessDiagnostic extracts errors when a dependency is labeled indirect when
 // it should be direct and vice versa.
-func directnessDiagnostic(m *protocol.Mapper, req *modfile.Require, computeEdits settings.DiffFunction) (*source.Diagnostic, error) {
+func directnessDiagnostic(m *protocol.Mapper, req *modfile.Require, computeEdits settings.DiffFunction) (*Diagnostic, error) {
 	rng, err := m.OffsetRange(req.Syntax.Start.Byte, req.Syntax.End.Byte)
 	if err != nil {
 		return nil, err
@@ -371,13 +370,13 @@
 	if err != nil {
 		return nil, err
 	}
-	return &source.Diagnostic{
+	return &Diagnostic{
 		URI:      m.URI,
 		Range:    rng,
 		Severity: protocol.SeverityWarning,
-		Source:   source.ModTidyError,
+		Source:   ModTidyError,
 		Message:  fmt.Sprintf("%s should be %s", req.Mod.Path, direction),
-		SuggestedFixes: []source.SuggestedFix{{
+		SuggestedFixes: []SuggestedFix{{
 			Title: fmt.Sprintf("Change %s to %s", req.Mod.Path, direction),
 			Edits: map[protocol.DocumentURI][]protocol.TextEdit{
 				m.URI: edits,
@@ -387,7 +386,7 @@
 	}, nil
 }
 
-func missingModuleDiagnostic(pm *source.ParsedModule, req *modfile.Require) (*source.Diagnostic, error) {
+func missingModuleDiagnostic(pm *ParsedModule, req *modfile.Require) (*Diagnostic, error) {
 	var rng protocol.Range
 	// Default to the start of the file if there is no module declaration.
 	if pm.File != nil && pm.File.Module != nil && pm.File.Module.Syntax != nil {
@@ -407,13 +406,13 @@
 	if err != nil {
 		return nil, err
 	}
-	return &source.Diagnostic{
+	return &Diagnostic{
 		URI:            pm.Mapper.URI,
 		Range:          rng,
 		Severity:       protocol.SeverityError,
-		Source:         source.ModTidyError,
+		Source:         ModTidyError,
 		Message:        fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path),
-		SuggestedFixes: []source.SuggestedFix{source.SuggestedFixFromCommand(cmd, protocol.QuickFix)},
+		SuggestedFixes: []SuggestedFix{SuggestedFixFromCommand(cmd, protocol.QuickFix)},
 	}, nil
 }
 
@@ -454,12 +453,12 @@
 	}
 	// Calculate the edits to be made due to the change.
 	edits := computeEdits(string(m.Content), string(newContent))
-	return source.ToProtocolEdits(m, edits)
+	return ToProtocolEdits(m, edits)
 }
 
 // missingModuleForImport creates an error for a given import path that comes
 // from a missing module.
-func missingModuleForImport(pgf *source.ParsedGoFile, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) {
+func missingModuleForImport(pgf *ParsedGoFile, imp *ast.ImportSpec, req *modfile.Require, fixes []SuggestedFix) (*Diagnostic, error) {
 	if req.Syntax == nil {
 		return nil, fmt.Errorf("no syntax for %v", req)
 	}
@@ -467,11 +466,11 @@
 	if err != nil {
 		return nil, err
 	}
-	return &source.Diagnostic{
+	return &Diagnostic{
 		URI:            pgf.URI,
 		Range:          rng,
 		Severity:       protocol.SeverityError,
-		Source:         source.ModTidyError,
+		Source:         ModTidyError,
 		Message:        fmt.Sprintf("%s is not in your go.mod file", req.Mod.Path),
 		SuggestedFixes: fixes,
 	}, nil
@@ -484,9 +483,9 @@
 // (We can't simply use Metadata.Imports because it is based on
 // CompiledGoFiles, after cgo processing.)
 //
-// TODO(rfindley): this should key off source.ImportPath.
+// TODO(rfindley): this should key off ImportPath.
 func parseImports(ctx context.Context, s *Snapshot, files []file.Handle) (map[string]bool, error) {
-	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), source.ParseHeader, false, files...)
+	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), ParseHeader, false, files...)
 	if err != nil { // e.g. context cancellation
 		return nil, err
 	}
diff --git a/gopls/internal/lsp/cache/parse.go b/gopls/internal/lsp/cache/parse.go
index 27c21e4..854647e 100644
--- a/gopls/internal/lsp/cache/parse.go
+++ b/gopls/internal/lsp/cache/parse.go
@@ -19,7 +19,6 @@
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
 	"golang.org/x/tools/gopls/internal/lsp/safetoken"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/internal/diff"
 	"golang.org/x/tools/internal/event"
 	"golang.org/x/tools/internal/event/tag"
@@ -27,7 +26,7 @@
 
 // ParseGo parses the file whose contents are provided by fh, using a cache.
 // The resulting tree may have been fixed up.
-func (s *Snapshot) ParseGo(ctx context.Context, fh file.Handle, mode parser.Mode) (*source.ParsedGoFile, error) {
+func (s *Snapshot) ParseGo(ctx context.Context, fh file.Handle, mode parser.Mode) (*ParsedGoFile, error) {
 	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), mode, false, fh)
 	if err != nil {
 		return nil, err
@@ -36,7 +35,7 @@
 }
 
 // parseGoImpl parses the Go source file whose content is provided by fh.
-func parseGoImpl(ctx context.Context, fset *token.FileSet, fh file.Handle, mode parser.Mode, purgeFuncBodies bool) (*source.ParsedGoFile, error) {
+func parseGoImpl(ctx context.Context, fset *token.FileSet, fh file.Handle, mode parser.Mode, purgeFuncBodies bool) (*ParsedGoFile, error) {
 	ext := filepath.Ext(fh.URI().Path())
 	if ext != ".go" && ext != "" { // files generated by cgo have no extension
 		return nil, fmt.Errorf("cannot parse non-Go file %s", fh.URI())
@@ -56,7 +55,7 @@
 // ParseGoSrc parses a buffer of Go source, repairing the tree if necessary.
 //
 // The provided ctx is used only for logging.
-func ParseGoSrc(ctx context.Context, fset *token.FileSet, uri protocol.DocumentURI, src []byte, mode parser.Mode, purgeFuncBodies bool) (res *source.ParsedGoFile, fixes []fixType) {
+func ParseGoSrc(ctx context.Context, fset *token.FileSet, uri protocol.DocumentURI, src []byte, mode parser.Mode, purgeFuncBodies bool) (res *ParsedGoFile, fixes []fixType) {
 	if purgeFuncBodies {
 		src = goplsastutil.PurgeFuncBodies(src)
 	}
@@ -133,7 +132,7 @@
 		}
 	}
 
-	return &source.ParsedGoFile{
+	return &ParsedGoFile{
 		URI:      uri,
 		Mode:     mode,
 		Src:      src,
diff --git a/gopls/internal/lsp/cache/parse_cache.go b/gopls/internal/lsp/cache/parse_cache.go
index 669de65..d037cff 100644
--- a/gopls/internal/lsp/cache/parse_cache.go
+++ b/gopls/internal/lsp/cache/parse_cache.go
@@ -19,7 +19,6 @@
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/tokeninternal"
 )
@@ -135,7 +134,7 @@
 type parseCacheEntry struct {
 	key      parseKey
 	hash     file.Hash
-	promise  *memoize.Promise // memoize.Promise[*source.ParsedGoFile]
+	promise  *memoize.Promise // memoize.Promise[*ParsedGoFile]
 	atime    uint64           // clock time of last access, for use in LRU sorting
 	walltime time.Time        // actual time of last access, for use in time-based eviction; too coarse for LRU on some systems
 	lruIndex int              // owned by the queue implementation
@@ -317,8 +316,8 @@
 //
 // If parseFiles returns an error, it still returns a slice,
 // but with a nil entry for each file that could not be parsed.
-func (c *parseCache) parseFiles(ctx context.Context, fset *token.FileSet, mode parser.Mode, purgeFuncBodies bool, fhs ...file.Handle) ([]*source.ParsedGoFile, error) {
-	pgfs := make([]*source.ParsedGoFile, len(fhs))
+func (c *parseCache) parseFiles(ctx context.Context, fset *token.FileSet, mode parser.Mode, purgeFuncBodies bool, fhs ...file.Handle) ([]*ParsedGoFile, error) {
+	pgfs := make([]*ParsedGoFile, len(fhs))
 
 	// Temporary fall-back for 32-bit systems, where reservedForParsing is too
 	// small to be viable. We don't actually support 32-bit systems, so this
@@ -351,7 +350,7 @@
 			if err != nil {
 				return err
 			}
-			pgfs[i] = result.(*source.ParsedGoFile)
+			pgfs[i] = result.(*ParsedGoFile)
 			return nil
 		})
 	}
diff --git a/gopls/internal/lsp/cache/parse_cache_test.go b/gopls/internal/lsp/cache/parse_cache_test.go
index 5693972..61a204d 100644
--- a/gopls/internal/lsp/cache/parse_cache_test.go
+++ b/gopls/internal/lsp/cache/parse_cache_test.go
@@ -14,7 +14,6 @@
 
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 )
 
 func skipIfNoParseCache(t *testing.T) {
@@ -32,12 +31,12 @@
 	fset := token.NewFileSet()
 
 	cache := newParseCache(0)
-	pgfs1, err := cache.parseFiles(ctx, fset, source.ParseFull, false, fh)
+	pgfs1, err := cache.parseFiles(ctx, fset, ParseFull, false, fh)
 	if err != nil {
 		t.Fatal(err)
 	}
 	pgf1 := pgfs1[0]
-	pgfs2, err := cache.parseFiles(ctx, fset, source.ParseFull, false, fh)
+	pgfs2, err := cache.parseFiles(ctx, fset, ParseFull, false, fh)
 	pgf2 := pgfs2[0]
 	if err != nil {
 		t.Fatal(err)
@@ -51,7 +50,7 @@
 	files := []file.Handle{fh}
 	files = append(files, dummyFileHandles(parseCacheMinFiles-1)...)
 
-	pgfs3, err := cache.parseFiles(ctx, fset, source.ParseFull, false, files...)
+	pgfs3, err := cache.parseFiles(ctx, fset, ParseFull, false, files...)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -69,13 +68,13 @@
 	// Now overwrite the cache, after which we should get new results.
 	cache.gcOnce()
 	files = dummyFileHandles(parseCacheMinFiles)
-	_, err = cache.parseFiles(ctx, fset, source.ParseFull, false, files...)
+	_, err = cache.parseFiles(ctx, fset, ParseFull, false, files...)
 	if err != nil {
 		t.Fatal(err)
 	}
 	// force a GC, which should collect the recently parsed files
 	cache.gcOnce()
-	pgfs4, err := cache.parseFiles(ctx, fset, source.ParseFull, false, fh)
+	pgfs4, err := cache.parseFiles(ctx, fset, ParseFull, false, fh)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -99,7 +98,7 @@
 
 	// Parsing should succeed even though we overflow the padding.
 	cache := newParseCache(0)
-	_, err := cache.parseFiles(context.Background(), token.NewFileSet(), source.ParseFull, false, files...)
+	_, err := cache.parseFiles(context.Background(), token.NewFileSet(), ParseFull, false, files...)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -119,7 +118,7 @@
 
 	// Parsing should succeed even though we overflow the padding.
 	cache := newParseCache(0)
-	_, err := cache.parseFiles(context.Background(), token.NewFileSet(), source.ParseFull, false, files...)
+	_, err := cache.parseFiles(context.Background(), token.NewFileSet(), ParseFull, false, files...)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -137,19 +136,19 @@
 	cache := newParseCache(gcDuration)
 	cache.stop() // we'll manage GC manually, for testing.
 
-	pgfs0, err := cache.parseFiles(ctx, fset, source.ParseFull, false, fh, fh)
+	pgfs0, err := cache.parseFiles(ctx, fset, ParseFull, false, fh, fh)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	files := dummyFileHandles(parseCacheMinFiles)
-	_, err = cache.parseFiles(ctx, fset, source.ParseFull, false, files...)
+	_, err = cache.parseFiles(ctx, fset, ParseFull, false, files...)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	// Even after filling up the 'min' files, we get a cache hit for our original file.
-	pgfs1, err := cache.parseFiles(ctx, fset, source.ParseFull, false, fh, fh)
+	pgfs1, err := cache.parseFiles(ctx, fset, ParseFull, false, fh, fh)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -159,14 +158,14 @@
 	}
 
 	// But after GC, we get a cache miss.
-	_, err = cache.parseFiles(ctx, fset, source.ParseFull, false, files...) // mark dummy files as newer
+	_, err = cache.parseFiles(ctx, fset, ParseFull, false, files...) // mark dummy files as newer
 	if err != nil {
 		t.Fatal(err)
 	}
 	time.Sleep(gcDuration)
 	cache.gcOnce()
 
-	pgfs2, err := cache.parseFiles(ctx, fset, source.ParseFull, false, fh, fh)
+	pgfs2, err := cache.parseFiles(ctx, fset, ParseFull, false, fh, fh)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -184,7 +183,7 @@
 	fh := makeFakeFileHandle(uri, []byte("package p\n\nconst _ = \"foo\""))
 
 	cache := newParseCache(0)
-	pgfs, err := cache.parseFiles(ctx, token.NewFileSet(), source.ParseFull, false, fh, fh)
+	pgfs, err := cache.parseFiles(ctx, token.NewFileSet(), ParseFull, false, fh, fh)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/gopls/internal/lsp/cache/pkg.go b/gopls/internal/lsp/cache/pkg.go
index 626c120..6f26aa5 100644
--- a/gopls/internal/lsp/cache/pkg.go
+++ b/gopls/internal/lsp/cache/pkg.go
@@ -19,12 +19,70 @@
 	"golang.org/x/tools/gopls/internal/lsp/source/xrefs"
 )
 
-// Convenient local aliases for typed strings.
+// Temporary refactoring, reversing the source import:
+// Types
 type (
+	// Metadata.
 	PackageID   = source.PackageID
 	PackagePath = source.PackagePath
 	PackageName = source.PackageName
 	ImportPath  = source.ImportPath
+	Metadata    = source.Metadata
+
+	// Diagnostics.
+	Diagnostic   = source.Diagnostic
+	SuggestedFix = source.SuggestedFix
+
+	// Computed objects.
+	TidiedModule   = source.TidiedModule
+	ParsedGoFile   = source.ParsedGoFile
+	ParsedModule   = source.ParsedModule
+	ParsedWorkFile = source.ParsedWorkFile
+	Package_       = source.Package // renamed to avoid conflict
+	Symbol         = source.Symbol
+
+	XrefIndex_       = source.XrefIndex // renamed to avoid conflict
+	GlobalSnapshotID = source.GlobalSnapshotID
+	InvocationFlags  = source.InvocationFlags
+	CriticalError    = source.CriticalError
+	Filterer         = source.Filterer
+)
+
+// Values
+var (
+	// Parse Modes
+	ParseFull            = source.ParseFull
+	SkipObjectResolution = source.SkipObjectResolution
+	ParseHeader          = source.ParseHeader
+
+	// Diagnostic sources.
+	ModTidyError = source.ModTidyError
+	ListError    = source.ListError
+	ParseError   = source.ParseError
+	TypeError    = source.TypeError
+
+	// Invocation flags.
+	Normal                = source.Normal
+	AllowNetwork          = source.AllowNetwork
+	LoadWorkspace         = source.LoadWorkspace
+	WriteTemporaryModFile = source.WriteTemporaryModFile
+
+	// Errors
+	ErrNoModOnDisk = source.ErrNoModOnDisk
+	ErrViewExists  = source.ErrViewExists
+)
+
+// Functions
+var (
+	NodeContains                   = source.NodeContains
+	IsValidImport                  = source.IsValidImport
+	RemoveIntermediateTestVariants = source.RemoveIntermediateTestVariants
+	IsCommandLineArguments         = source.IsCommandLineArguments
+	BundleQuickFixes               = source.BundleQuickFixes
+	InDir                          = source.InDir
+	SuggestedFixFromCommand        = source.SuggestedFixFromCommand
+	ToProtocolEdits                = source.ToProtocolEdits
+	NewFilterer                    = source.NewFilterer
 )
 
 // A Package is the union of package metadata and type checking results.
@@ -33,7 +91,7 @@
 // loadDiagnostics, because the value of the snapshot.packages map is just the
 // package handle. Fix this.
 type Package struct {
-	m   *source.Metadata
+	m   *Metadata
 	pkg *syntaxPackage
 }
 
@@ -77,7 +135,7 @@
 
 func (p *Package) String() string { return string(p.m.ID) }
 
-func (p *Package) Metadata() *source.Metadata { return p.m }
+func (p *Package) Metadata() *Metadata { return p.m }
 
 // A loadScope defines a package loading scope for use with go/packages.
 //
diff --git a/gopls/internal/lsp/cache/session.go b/gopls/internal/lsp/cache/session.go
index 4fbffe8..f60105c 100644
--- a/gopls/internal/lsp/cache/session.go
+++ b/gopls/internal/lsp/cache/session.go
@@ -17,7 +17,6 @@
 	"golang.org/x/tools/gopls/internal/bug"
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/gopls/internal/lsp/source/typerefs"
 	"golang.org/x/tools/gopls/internal/persistent"
 	"golang.org/x/tools/gopls/internal/vulncheck"
@@ -89,7 +88,7 @@
 		for _, view := range s.views {
 			inode2, err := os.Stat(filepath.FromSlash(view.folder.Dir.Path()))
 			if err == nil && os.SameFile(inode1, inode2) {
-				return nil, nil, nil, source.ErrViewExists
+				return nil, nil, nil, ErrViewExists
 			}
 		}
 	}
@@ -140,7 +139,7 @@
 				if !strings.HasPrefix(uri+"/", prefix) {
 					return false
 				}
-				filterer := source.NewFilterer(folder.Options.DirectoryFilters)
+				filterer := NewFilterer(folder.Options.DirectoryFilters)
 				rel := strings.TrimPrefix(uri, prefix)
 				disallow := filterer.Disallow(rel)
 				return disallow
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index 4b32407..91e57ee 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -35,7 +35,6 @@
 	"golang.org/x/tools/gopls/internal/lsp/command"
 	"golang.org/x/tools/gopls/internal/lsp/filecache"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/gopls/internal/lsp/source/methodsets"
 	"golang.org/x/tools/gopls/internal/lsp/source/typerefs"
 	"golang.org/x/tools/gopls/internal/lsp/source/xrefs"
@@ -54,7 +53,7 @@
 
 type Snapshot struct {
 	sequenceID uint64
-	globalID   source.GlobalSnapshotID
+	globalID   GlobalSnapshotID
 
 	// TODO(rfindley): the snapshot holding a reference to the view poses
 	// lifecycle problems: a view may be shut down and waiting for work
@@ -82,7 +81,7 @@
 	// initializedErr holds the last error resulting from initialization. If
 	// initialization fails, we only retry when the workspace modules change,
 	// to avoid too many go/packages calls.
-	initializedErr *source.CriticalError
+	initializedErr *CriticalError
 
 	// mu guards all of the maps in the snapshot, as well as the builtin URI.
 	mu sync.Mutex
@@ -181,8 +180,8 @@
 
 var globalSnapshotID uint64
 
-func nextSnapshotID() source.GlobalSnapshotID {
-	return source.GlobalSnapshotID(atomic.AddUint64(&globalSnapshotID, 1))
+func nextSnapshotID() GlobalSnapshotID {
+	return GlobalSnapshotID(atomic.AddUint64(&globalSnapshotID, 1))
 }
 
 var _ memoize.RefCounted = (*Snapshot)(nil) // snapshots are reference-counted
@@ -269,7 +268,7 @@
 // GlobalID is a globally unique identifier for this snapshot. Global IDs are
 // monotonic: subsequent snapshots will have higher global ID, though
 // subsequent snapshots in a view may not have adjacent global IDs.
-func (s *Snapshot) GlobalID() source.GlobalSnapshotID {
+func (s *Snapshot) GlobalID() GlobalSnapshotID {
 	return s.globalID
 }
 
@@ -423,7 +422,7 @@
 	return cfg
 }
 
-func (s *Snapshot) RunGoCommandDirect(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) {
+func (s *Snapshot) RunGoCommandDirect(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation) (*bytes.Buffer, error) {
 	_, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
 	if err != nil {
 		return nil, err
@@ -439,7 +438,7 @@
 // RunGoCommandPiped runs the command serially using gocommand.RunPiped,
 // enforcing that this command executes exclusively to other commands on the
 // server.
-func (s *Snapshot) RunGoCommandPiped(ctx context.Context, mode source.InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error {
+func (s *Snapshot) RunGoCommandPiped(ctx context.Context, mode InvocationFlags, inv *gocommand.Invocation, stdout, stderr io.Writer) error {
 	_, inv, cleanup, err := s.goCommandInvocation(ctx, mode, inv)
 	if err != nil {
 		return err
@@ -454,7 +453,7 @@
 // TODO(rfindley): the signature of RunGoModUpdateCommands is very confusing.
 // Simplify it.
 func (s *Snapshot) RunGoModUpdateCommands(ctx context.Context, wd string, run func(invoke func(...string) (*bytes.Buffer, error)) error) ([]byte, []byte, error) {
-	flags := source.WriteTemporaryModFile | source.AllowNetwork
+	flags := WriteTemporaryModFile | AllowNetwork
 	tmpURI, inv, cleanup, err := s.goCommandInvocation(ctx, flags, &gocommand.Invocation{WorkingDir: wd})
 	if err != nil {
 		return nil, nil, err
@@ -468,7 +467,7 @@
 	if err := run(invoke); err != nil {
 		return nil, nil, err
 	}
-	if flags.Mode() != source.WriteTemporaryModFile {
+	if flags.Mode() != WriteTemporaryModFile {
 		return nil, nil, nil
 	}
 	var modBytes, sumBytes []byte
@@ -490,7 +489,7 @@
 //
 // TODO(adonovan): simplify cleanup mechanism. It's hard to see, but
 // it used only after call to tempModFile.
-func (s *Snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI protocol.DocumentURI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
+func (s *Snapshot) goCommandInvocation(ctx context.Context, flags InvocationFlags, inv *gocommand.Invocation) (tmpURI protocol.DocumentURI, updatedInv *gocommand.Invocation, cleanup func(), err error) {
 	allowModfileModificationOption := s.Options().AllowModfileModifications
 	allowNetworkOption := s.Options().AllowImplicitNetworkAccess
 
@@ -526,7 +525,7 @@
 	// These are dependent on a number of factors: whether we need to run in a
 	// synthetic workspace, whether flags are supported at the current go
 	// version, and what we're actually trying to achieve (the
-	// source.InvocationFlags).
+	// InvocationFlags).
 	//
 	// TODO(rfindley): should we set -overlays here?
 
@@ -535,7 +534,7 @@
 	// If we're type checking, we need to use the workspace context, meaning
 	// the main (workspace) module. Otherwise, we should use the module for
 	// the passed-in working dir.
-	if mode == source.LoadWorkspace {
+	if mode == LoadWorkspace {
 		if gowork, _ := s.view.GOWORK(); gowork == "" && s.view.gomod != "" {
 			modURI = s.view.gomod
 		}
@@ -567,7 +566,7 @@
 	// If the mod flag isn't set, populate it based on the mode and workspace.
 	if inv.ModFlag == "" {
 		switch mode {
-		case source.LoadWorkspace, source.Normal:
+		case LoadWorkspace, Normal:
 			if vendorEnabled {
 				inv.ModFlag = "vendor"
 			} else if !allowModfileModificationOption {
@@ -575,7 +574,7 @@
 			} else {
 				inv.ModFlag = mutableModFlag
 			}
-		case source.WriteTemporaryModFile:
+		case WriteTemporaryModFile:
 			inv.ModFlag = mutableModFlag
 			// -mod must be readonly when using go.work files - see issue #48941
 			inv.Env = append(inv.Env, "GOWORK=off")
@@ -635,13 +634,13 @@
 	typerefsKind    = "typerefs"
 )
 
-func (s *Snapshot) PackageDiagnostics(ctx context.Context, ids ...PackageID) (map[protocol.DocumentURI][]*source.Diagnostic, error) {
+func (s *Snapshot) PackageDiagnostics(ctx context.Context, ids ...PackageID) (map[protocol.DocumentURI][]*Diagnostic, error) {
 	ctx, done := event.Start(ctx, "cache.snapshot.PackageDiagnostics")
 	defer done()
 
 	var mu sync.Mutex
-	perFile := make(map[protocol.DocumentURI][]*source.Diagnostic)
-	collect := func(diags []*source.Diagnostic) {
+	perFile := make(map[protocol.DocumentURI][]*Diagnostic)
+	collect := func(diags []*Diagnostic) {
 		mu.Lock()
 		defer mu.Unlock()
 		for _, diag := range diags {
@@ -666,11 +665,11 @@
 	return perFile, s.forEachPackage(ctx, ids, pre, post)
 }
 
-func (s *Snapshot) References(ctx context.Context, ids ...PackageID) ([]source.XrefIndex, error) {
+func (s *Snapshot) References(ctx context.Context, ids ...PackageID) ([]XrefIndex_, error) {
 	ctx, done := event.Start(ctx, "cache.snapshot.References")
 	defer done()
 
-	indexes := make([]source.XrefIndex, len(ids))
+	indexes := make([]XrefIndex_, len(ids))
 	pre := func(i int, ph *packageHandle) bool {
 		data, err := filecache.Get(xrefsKind, ph.key)
 		if err == nil { // hit
@@ -689,7 +688,7 @@
 
 // An XrefIndex is a helper for looking up a package in a given package.
 type XrefIndex struct {
-	m    *source.Metadata
+	m    *Metadata
 	data []byte
 }
 
@@ -718,7 +717,7 @@
 	return indexes, s.forEachPackage(ctx, ids, pre, post)
 }
 
-func (s *Snapshot) MetadataForFile(ctx context.Context, uri protocol.DocumentURI) ([]*source.Metadata, error) {
+func (s *Snapshot) MetadataForFile(ctx context.Context, uri protocol.DocumentURI) ([]*Metadata, error) {
 	if s.view.ViewType() == AdHocView {
 		// As described in golang/go#57209, in ad-hoc workspaces (where we load ./
 		// rather than ./...), preempting the directory load with file loads can
@@ -786,7 +785,7 @@
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	ids = s.meta.ids[uri]
-	metas := make([]*source.Metadata, len(ids))
+	metas := make([]*Metadata, len(ids))
 	for i, id := range ids {
 		metas[i] = s.meta.metadata[id]
 		if metas[i] == nil {
@@ -818,7 +817,7 @@
 
 func boolLess(x, y bool) bool { return !x && y } // false < true
 
-func (s *Snapshot) ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*source.Metadata, error) {
+func (s *Snapshot) ReverseDependencies(ctx context.Context, id PackageID, transitive bool) (map[PackageID]*Metadata, error) {
 	if err := s.awaitLoaded(ctx); err != nil {
 		return nil, err
 	}
@@ -826,7 +825,7 @@
 	meta := s.meta
 	s.mu.Unlock()
 
-	var rdeps map[PackageID]*source.Metadata
+	var rdeps map[PackageID]*Metadata
 	if transitive {
 		rdeps = meta.reverseReflexiveTransitiveClosure(id)
 
@@ -837,7 +836,7 @@
 
 	} else {
 		// direct reverse dependencies
-		rdeps = make(map[PackageID]*source.Metadata)
+		rdeps = make(map[PackageID]*Metadata)
 		for _, rdepID := range meta.importedBy[id] {
 			if rdep := meta.metadata[rdepID]; rdep != nil {
 				rdeps[rdepID] = rdep
@@ -906,7 +905,7 @@
 
 	// If GOWORK is outside the folder, ensure we are watching it.
 	gowork, _ := s.view.GOWORK()
-	if gowork != "" && !source.InDir(s.view.folder.Dir.Path(), gowork.Path()) {
+	if gowork != "" && !InDir(s.view.folder.Dir.Path(), gowork.Path()) {
 		patterns[gowork.Path()] = struct{}{}
 	}
 
@@ -915,7 +914,7 @@
 	for _, dir := range dirs {
 		// If the directory is within the view's folder, we're already watching
 		// it with the first pattern above.
-		if source.InDir(s.view.folder.Dir.Path(), dir) {
+		if InDir(s.view.folder.Dir.Path(), dir) {
 			continue
 		}
 		// TODO(rstambler): If microsoft/vscode#3025 is resolved before
@@ -956,7 +955,7 @@
 
 	s.files.Dirs().Range(func(dir string) {
 		for _, wsDir := range wsDirs {
-			if source.InDir(wsDir, dir) {
+			if InDir(wsDir, dir) {
 				patterns[dir] = unit{}
 			}
 		}
@@ -1030,14 +1029,14 @@
 	}
 	var files []protocol.DocumentURI
 	s.files.Range(func(uri protocol.DocumentURI, _ file.Handle) {
-		if source.InDir(dir, uri.Path()) {
+		if InDir(dir, uri.Path()) {
 			files = append(files, uri)
 		}
 	})
 	return files
 }
 
-func (s *Snapshot) WorkspaceMetadata(ctx context.Context) ([]*source.Metadata, error) {
+func (s *Snapshot) WorkspaceMetadata(ctx context.Context) ([]*Metadata, error) {
 	if err := s.awaitLoaded(ctx); err != nil {
 		return nil, err
 	}
@@ -1045,7 +1044,7 @@
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	meta := make([]*source.Metadata, 0, s.workspacePackages.Len())
+	meta := make([]*Metadata, 0, s.workspacePackages.Len())
 	s.workspacePackages.Range(func(id PackageID, _ PackagePath) {
 		meta = append(meta, s.meta.metadata[id])
 	})
@@ -1056,13 +1055,13 @@
 // a loaded package. It awaits snapshot loading.
 //
 // TODO(rfindley): move this to the top of cache/symbols.go
-func (s *Snapshot) Symbols(ctx context.Context, workspaceOnly bool) (map[protocol.DocumentURI][]source.Symbol, error) {
+func (s *Snapshot) Symbols(ctx context.Context, workspaceOnly bool) (map[protocol.DocumentURI][]Symbol, error) {
 	if err := s.awaitLoaded(ctx); err != nil {
 		return nil, err
 	}
 
 	var (
-		meta []*source.Metadata
+		meta []*Metadata
 		err  error
 	)
 	if workspaceOnly {
@@ -1089,7 +1088,7 @@
 		group    errgroup.Group
 		nprocs   = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU
 		resultMu sync.Mutex
-		result   = make(map[protocol.DocumentURI][]source.Symbol)
+		result   = make(map[protocol.DocumentURI][]Symbol)
 	)
 	group.SetLimit(nprocs)
 	for uri := range goFiles {
@@ -1113,7 +1112,7 @@
 	return result, nil
 }
 
-func (s *Snapshot) AllMetadata(ctx context.Context) ([]*source.Metadata, error) {
+func (s *Snapshot) AllMetadata(ctx context.Context) ([]*Metadata, error) {
 	if err := s.awaitLoaded(ctx); err != nil {
 		return nil, err
 	}
@@ -1122,7 +1121,7 @@
 	g := s.meta
 	s.mu.Unlock()
 
-	meta := make([]*source.Metadata, 0, len(g.metadata))
+	meta := make([]*Metadata, 0, len(g.metadata))
 	for _, m := range g.metadata {
 		meta = append(meta, m)
 	}
@@ -1140,7 +1139,7 @@
 func moduleForURI(modFiles map[protocol.DocumentURI]struct{}, uri protocol.DocumentURI) protocol.DocumentURI {
 	var match protocol.DocumentURI
 	for modURI := range modFiles {
-		if !source.InDir(filepath.Dir(modURI.Path()), uri.Path()) {
+		if !InDir(filepath.Dir(modURI.Path()), uri.Path()) {
 			continue
 		}
 		if len(modURI) > len(match) {
@@ -1163,7 +1162,7 @@
 	return protocol.URIFromPath(mod), nil
 }
 
-func (s *Snapshot) Metadata(id PackageID) *source.Metadata {
+func (s *Snapshot) Metadata(id PackageID) *Metadata {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	return s.meta.metadata[id]
@@ -1301,13 +1300,13 @@
 // CriticalError returns any critical errors in the workspace.
 //
 // A nil result may mean success, or context cancellation.
-func (s *Snapshot) CriticalError(ctx context.Context) *source.CriticalError {
+func (s *Snapshot) CriticalError(ctx context.Context) *CriticalError {
 	// If we couldn't compute workspace mod files, then the load below is
 	// invalid.
 	//
 	// TODO(rfindley): is this a clear error to present to the user?
 	if s.view.workspaceModFilesErr != nil {
-		return &source.CriticalError{MainError: s.view.workspaceModFilesErr}
+		return &CriticalError{MainError: s.view.workspaceModFilesErr}
 	}
 
 	loadErr := s.awaitLoadedAllErrors(ctx)
@@ -1320,7 +1319,7 @@
 	if loadErr == nil {
 		active, _ := s.WorkspaceMetadata(ctx)
 		if msg := shouldShowAdHocPackagesWarning(s, active); msg != "" {
-			return &source.CriticalError{
+			return &CriticalError{
 				MainError: errors.New(msg),
 			}
 		}
@@ -1338,9 +1337,9 @@
 			err, diags := s.workspaceLayoutError(ctx)
 			if err != nil {
 				if ctx.Err() != nil {
-					return nil // see the API documentation for source.Snapshot
+					return nil // see the API documentation for CriticalError
 				}
-				return &source.CriticalError{
+				return &CriticalError{
 					MainError:   err,
 					Diagnostics: diags,
 				}
@@ -1353,9 +1352,9 @@
 		err, diags := s.workspaceLayoutError(ctx)
 		if err != nil {
 			if ctx.Err() != nil {
-				return nil // see the API documentation for source.Snapshot
+				return nil // see the API documentation for CriticalError
 			}
-			return &source.CriticalError{
+			return &CriticalError{
 				MainError:   err,
 				Diagnostics: diags,
 			}
@@ -1369,7 +1368,7 @@
 If you are using modules, please open your editor to a directory in your module.
 If you believe this warning is incorrect, please file an issue: https://github.com/golang/go/issues/new.`
 
-func shouldShowAdHocPackagesWarning(snapshot *Snapshot, active []*source.Metadata) string {
+func shouldShowAdHocPackagesWarning(snapshot *Snapshot, active []*Metadata) string {
 	if !snapshot.validBuildConfiguration() {
 		for _, m := range active {
 			// A blank entry in DepsByImpPath
@@ -1384,16 +1383,16 @@
 	return ""
 }
 
-func containsCommandLineArguments(metas []*source.Metadata) bool {
+func containsCommandLineArguments(metas []*Metadata) bool {
 	for _, m := range metas {
-		if source.IsCommandLineArguments(m.ID) {
+		if IsCommandLineArguments(m.ID) {
 			return true
 		}
 	}
 	return false
 }
 
-func (s *Snapshot) awaitLoadedAllErrors(ctx context.Context) *source.CriticalError {
+func (s *Snapshot) awaitLoadedAllErrors(ctx context.Context) *CriticalError {
 	// Do not return results until the snapshot's view has been initialized.
 	s.AwaitInitialized(ctx)
 
@@ -1411,7 +1410,7 @@
 	// Also: GetCriticalError ignores context cancellation errors. Should we be
 	// returning nil here?
 	if ctx.Err() != nil {
-		return &source.CriticalError{MainError: ctx.Err()}
+		return &CriticalError{MainError: ctx.Err()}
 	}
 
 	// TODO(rfindley): reloading is not idempotent: if we try to reload or load
@@ -1421,7 +1420,7 @@
 
 	if err := s.reloadWorkspace(ctx); err != nil {
 		diags := s.extractGoCommandErrors(ctx, err)
-		return &source.CriticalError{
+		return &CriticalError{
 			MainError:   err,
 			Diagnostics: diags,
 		}
@@ -1429,7 +1428,7 @@
 
 	if err := s.reloadOrphanedOpenFiles(ctx); err != nil {
 		diags := s.extractGoCommandErrors(ctx, err)
-		return &source.CriticalError{
+		return &CriticalError{
 			MainError:   err,
 			Diagnostics: diags,
 		}
@@ -1437,7 +1436,7 @@
 	return nil
 }
 
-func (s *Snapshot) getInitializationError() *source.CriticalError {
+func (s *Snapshot) getInitializationError() *CriticalError {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -1605,7 +1604,7 @@
 // TODO(rfindley): reconcile the definition of "orphaned" here with
 // reloadOrphanedFiles. The latter does not include files with
 // command-line-arguments packages.
-func (s *Snapshot) OrphanedFileDiagnostics(ctx context.Context) (map[protocol.DocumentURI]*source.Diagnostic, error) {
+func (s *Snapshot) OrphanedFileDiagnostics(ctx context.Context) (map[protocol.DocumentURI]*Diagnostic, error) {
 	if err := s.awaitLoaded(ctx); err != nil {
 		return nil, err
 	}
@@ -1623,7 +1622,7 @@
 			return nil, err
 		}
 		for _, m := range md {
-			if !source.IsCommandLineArguments(m.ID) || m.Standalone {
+			if !IsCommandLineArguments(m.ID) || m.Standalone {
 				continue searchOverlays
 			}
 		}
@@ -1651,13 +1650,13 @@
 		}
 	}
 
-	diagnostics := make(map[protocol.DocumentURI]*source.Diagnostic)
+	diagnostics := make(map[protocol.DocumentURI]*Diagnostic)
 	for _, fh := range files {
 		// Only warn about orphaned files if the file is well-formed enough to
 		// actually be part of a package.
 		//
 		// Use ParseGo as for open files this is likely to be a cache hit (we'll have )
-		pgf, err := s.ParseGo(ctx, fh, source.ParseHeader)
+		pgf, err := s.ParseGo(ctx, fh, ParseHeader)
 		if err != nil {
 			continue
 		}
@@ -1670,8 +1669,8 @@
 		}
 
 		var (
-			msg            string                // if non-empty, report a diagnostic with this message
-			suggestedFixes []source.SuggestedFix // associated fixes, if any
+			msg            string         // if non-empty, report a diagnostic with this message
+			suggestedFixes []SuggestedFix // associated fixes, if any
 		)
 
 		// If we have a relevant go.mod file, check whether the file is orphaned
@@ -1685,7 +1684,7 @@
 
 				// When the module is underneath the view dir, we offer
 				// "use all modules" quick-fixes.
-				inDir := source.InDir(viewDir, modDir)
+				inDir := InDir(viewDir, modDir)
 
 				if rel, err := filepath.Rel(viewDir, modDir); err == nil {
 					modDir = rel
@@ -1699,7 +1698,7 @@
 							ViewID: s.view.ID(),
 							Args:   []string{"use", modDir},
 						}); err == nil {
-							suggestedFixes = append(suggestedFixes, source.SuggestedFix{
+							suggestedFixes = append(suggestedFixes, SuggestedFix{
 								Title:      "Use this module in your go.work file",
 								Command:    &cmd,
 								ActionKind: protocol.QuickFix,
@@ -1711,7 +1710,7 @@
 								ViewID: s.view.ID(),
 								Args:   []string{"use", "-r", "."},
 							}); err == nil {
-								suggestedFixes = append(suggestedFixes, source.SuggestedFix{
+								suggestedFixes = append(suggestedFixes, SuggestedFix{
 									Title:      "Use all modules in your workspace",
 									Command:    &cmd,
 									ActionKind: protocol.QuickFix,
@@ -1726,7 +1725,7 @@
 							InitFirst: true,
 							Args:      []string{"use", modDir},
 						}); err == nil {
-							suggestedFixes = []source.SuggestedFix{
+							suggestedFixes = []SuggestedFix{
 								{
 									Title:      "Add a go.work file using this module",
 									Command:    &cmd,
@@ -1741,7 +1740,7 @@
 								InitFirst: true,
 								Args:      []string{"use", "-r", "."},
 							}); err == nil {
-								suggestedFixes = append(suggestedFixes, source.SuggestedFix{
+								suggestedFixes = append(suggestedFixes, SuggestedFix{
 									Title:      "Add a go.work file using all modules in your workspace",
 									Command:    &cmd,
 									ActionKind: protocol.QuickFix,
@@ -1790,15 +1789,15 @@
 		}
 
 		if msg != "" {
-			d := &source.Diagnostic{
+			d := &Diagnostic{
 				URI:            fh.URI(),
 				Range:          rng,
 				Severity:       protocol.SeverityWarning,
-				Source:         source.ListError,
+				Source:         ListError,
 				Message:        msg,
 				SuggestedFixes: suggestedFixes,
 			}
-			if ok := source.BundleQuickFixes(d); !ok {
+			if ok := BundleQuickFixes(d); !ok {
 				bug.Reportf("failed to bundle quick fixes for %v", d)
 			}
 			// Only report diagnostics if we detect an actual exclusion.
@@ -2106,13 +2105,13 @@
 	// Compute which metadata updates are required. We only need to invalidate
 	// packages directly containing the affected file, and only if it changed in
 	// a relevant way.
-	metadataUpdates := make(map[PackageID]*source.Metadata)
+	metadataUpdates := make(map[PackageID]*Metadata)
 	for k, v := range s.meta.metadata {
 		invalidateMetadata := idsToInvalidate[k]
 
 		// For metadata that has been newly invalidated, capture package paths
 		// requiring reloading in the shouldLoad map.
-		if invalidateMetadata && !source.IsCommandLineArguments(v.ID) {
+		if invalidateMetadata && !IsCommandLineArguments(v.ID) {
 			needsReload := []PackagePath{v.PkgPath}
 			if v.ForTest != "" && v.ForTest != v.PkgPath {
 				// When reloading test variants, always reload their ForTest package as
@@ -2180,7 +2179,7 @@
 
 	m.Range(func(modURI protocol.DocumentURI, _ *memoize.Promise) {
 		if len(modURI) > len(mostRelevant) {
-			if source.InDir(filepath.Dir(modURI.Path()), changedFile) {
+			if InDir(filepath.Dir(modURI.Path()), changedFile) {
 				mostRelevant = modURI
 			}
 		}
@@ -2295,8 +2294,8 @@
 
 	fset := token.NewFileSet()
 	// Parse headers to compare package names and imports.
-	oldHeads, oldErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, source.ParseHeader, false, oldFH)
-	newHeads, newErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, source.ParseHeader, false, newFH)
+	oldHeads, oldErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseHeader, false, oldFH)
+	newHeads, newErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseHeader, false, newFH)
 
 	if oldErr != nil || newErr != nil {
 		errChanged := (oldErr == nil) != (newErr == nil)
@@ -2342,8 +2341,8 @@
 	// Note: if this affects performance we can probably avoid parsing in the
 	// common case by first scanning the source for potential comments.
 	if !invalidate {
-		origFulls, oldErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, source.ParseFull, false, oldFH)
-		newFulls, newErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, source.ParseFull, false, newFH)
+		origFulls, oldErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseFull, false, oldFH)
+		newFulls, newErr := lockedSnapshot.view.parseCache.parseFiles(ctx, fset, ParseFull, false, newFH)
 		if oldErr == nil && newErr == nil {
 			invalidate = magicCommentsChanged(origFulls[0].File, newFulls[0].File)
 		} else {
@@ -2410,7 +2409,7 @@
 	return results
 }
 
-func (s *Snapshot) BuiltinFile(ctx context.Context) (*source.ParsedGoFile, error) {
+func (s *Snapshot) BuiltinFile(ctx context.Context) (*ParsedGoFile, error) {
 	s.AwaitInitialized(ctx)
 
 	s.mu.Lock()
@@ -2427,7 +2426,7 @@
 	}
 	// For the builtin file only, we need syntactic object resolution
 	// (since we can't type check).
-	mode := source.ParseFull &^ source.SkipObjectResolution
+	mode := ParseFull &^ SkipObjectResolution
 	pgfs, err := s.view.parseCache.parseFiles(ctx, token.NewFileSet(), mode, false, fh)
 	if err != nil {
 		return nil, err
diff --git a/gopls/internal/lsp/cache/symbols.go b/gopls/internal/lsp/cache/symbols.go
index 0f3bfc6..d4bc929 100644
--- a/gopls/internal/lsp/cache/symbols.go
+++ b/gopls/internal/lsp/cache/symbols.go
@@ -14,18 +14,17 @@
 	"golang.org/x/tools/gopls/internal/astutil"
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 )
 
 // symbolize returns the result of symbolizing the file identified by uri, using a cache.
-func (s *Snapshot) symbolize(ctx context.Context, uri protocol.DocumentURI) ([]source.Symbol, error) {
+func (s *Snapshot) symbolize(ctx context.Context, uri protocol.DocumentURI) ([]Symbol, error) {
 
 	s.mu.Lock()
 	entry, hit := s.symbolizeHandles.Get(uri)
 	s.mu.Unlock()
 
 	type symbolizeResult struct {
-		symbols []source.Symbol
+		symbols []Symbol
 		err     error
 	}
 
@@ -59,8 +58,8 @@
 }
 
 // symbolizeImpl reads and parses a file and extracts symbols from it.
-func symbolizeImpl(ctx context.Context, snapshot *Snapshot, fh file.Handle) ([]source.Symbol, error) {
-	pgfs, err := snapshot.view.parseCache.parseFiles(ctx, token.NewFileSet(), source.ParseFull, false, fh)
+func symbolizeImpl(ctx context.Context, snapshot *Snapshot, fh file.Handle) ([]Symbol, error) {
+	pgfs, err := snapshot.view.parseCache.parseFiles(ctx, token.NewFileSet(), ParseFull, false, fh)
 	if err != nil {
 		return nil, err
 	}
@@ -79,7 +78,7 @@
 	tokFile *token.File
 	mapper  *protocol.Mapper
 
-	symbols    []source.Symbol
+	symbols    []Symbol
 	firstError error
 }
 
@@ -98,7 +97,7 @@
 		w.error(err)
 		return
 	}
-	sym := source.Symbol{
+	sym := Symbol{
 		Name:  b.String(),
 		Kind:  kind,
 		Range: rng,
diff --git a/gopls/internal/lsp/cache/view.go b/gopls/internal/lsp/cache/view.go
index 59b1c02..0869624 100644
--- a/gopls/internal/lsp/cache/view.go
+++ b/gopls/internal/lsp/cache/view.go
@@ -25,7 +25,6 @@
 	"golang.org/x/mod/semver"
 	"golang.org/x/tools/gopls/internal/file"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 	"golang.org/x/tools/gopls/internal/settings"
 	"golang.org/x/tools/gopls/internal/vulncheck"
 	"golang.org/x/tools/internal/event"
@@ -569,10 +568,10 @@
 	// user. It would be better to explicitly consider the set of active modules
 	// wherever relevant.
 	inGoDir := false
-	if source.InDir(v.goCommandDir.Path(), v.folder.Dir.Path()) {
-		inGoDir = source.InDir(v.goCommandDir.Path(), uri.Path())
+	if InDir(v.goCommandDir.Path(), v.folder.Dir.Path()) {
+		inGoDir = InDir(v.goCommandDir.Path(), uri.Path())
 	}
-	inFolder := source.InDir(v.folder.Dir.Path(), uri.Path())
+	inFolder := InDir(v.folder.Dir.Path(), uri.Path())
 
 	if !inGoDir && !inFolder {
 		return false
@@ -588,7 +587,7 @@
 	filterer := buildFilterer(folderDir, v.gomodcache, v.folder.Options)
 	return func(uri protocol.DocumentURI) bool {
 		// Only filter relative to the configured root directory.
-		if source.InDir(folderDir, uri.Path()) {
+		if InDir(folderDir, uri.Path()) {
 			return pathExcludedByFilter(strings.TrimPrefix(uri.Path(), folderDir), filterer)
 		}
 		return false
@@ -796,13 +795,13 @@
 
 	// Collect module paths to load by parsing go.mod files. If a module fails to
 	// parse, capture the parsing failure as a critical diagnostic.
-	var scopes []loadScope                  // scopes to load
-	var modDiagnostics []*source.Diagnostic // diagnostics for broken go.mod files
+	var scopes []loadScope           // scopes to load
+	var modDiagnostics []*Diagnostic // diagnostics for broken go.mod files
 	addError := func(uri protocol.DocumentURI, err error) {
-		modDiagnostics = append(modDiagnostics, &source.Diagnostic{
+		modDiagnostics = append(modDiagnostics, &Diagnostic{
 			URI:      uri,
 			Severity: protocol.SeverityError,
-			Source:   source.ListError,
+			Source:   ListError,
 			Message:  err.Error(),
 		})
 	}
@@ -861,27 +860,27 @@
 		return loadErr
 	}
 
-	var criticalErr *source.CriticalError
+	var criticalErr *CriticalError
 	switch {
 	case loadErr != nil && ctx.Err() != nil:
 		event.Error(ctx, fmt.Sprintf("initial workspace load: %v", loadErr), loadErr)
-		criticalErr = &source.CriticalError{
+		criticalErr = &CriticalError{
 			MainError: loadErr,
 		}
 	case loadErr != nil:
 		event.Error(ctx, "initial workspace load failed", loadErr)
 		extractedDiags := s.extractGoCommandErrors(ctx, loadErr)
-		criticalErr = &source.CriticalError{
+		criticalErr = &CriticalError{
 			MainError:   loadErr,
 			Diagnostics: append(modDiagnostics, extractedDiags...),
 		}
 	case len(modDiagnostics) == 1:
-		criticalErr = &source.CriticalError{
+		criticalErr = &CriticalError{
 			MainError:   fmt.Errorf(modDiagnostics[0].Message),
 			Diagnostics: modDiagnostics,
 		}
 	case len(modDiagnostics) > 1:
-		criticalErr = &source.CriticalError{
+		criticalErr = &CriticalError{
 			MainError:   fmt.Errorf("error loading module names"),
 			Diagnostics: modDiagnostics,
 		}
@@ -979,7 +978,7 @@
 
 	// Check if the workspace is within any GOPATH directory.
 	for _, gp := range filepath.SplitList(def.gopath) {
-		if source.InDir(filepath.Join(gp, "src"), folder.Dir.Path()) {
+		if InDir(filepath.Join(gp, "src"), folder.Dir.Path()) {
 			def.inGOPATH = true
 			break
 		}
@@ -1257,17 +1256,17 @@
 // TODO(rfindley): passing root and gomodcache here makes it confusing whether
 // path should be absolute or relative, and has already caused at least one
 // bug.
-func pathExcludedByFilter(path string, filterer *source.Filterer) bool {
+func pathExcludedByFilter(path string, filterer *Filterer) bool {
 	path = strings.TrimPrefix(filepath.ToSlash(path), "/")
 	return filterer.Disallow(path)
 }
 
-func buildFilterer(folder, gomodcache string, opts *settings.Options) *source.Filterer {
+func buildFilterer(folder, gomodcache string, opts *settings.Options) *Filterer {
 	filters := opts.DirectoryFilters
 
 	if pref := strings.TrimPrefix(gomodcache, folder); pref != gomodcache {
 		modcacheFilter := "-" + strings.TrimPrefix(filepath.ToSlash(pref), "/")
 		filters = append(filters, modcacheFilter)
 	}
-	return source.NewFilterer(filters)
+	return NewFilterer(filters)
 }
diff --git a/gopls/internal/lsp/cache/view_test.go b/gopls/internal/lsp/cache/view_test.go
index 199f7de..37b6f1d 100644
--- a/gopls/internal/lsp/cache/view_test.go
+++ b/gopls/internal/lsp/cache/view_test.go
@@ -12,7 +12,6 @@
 
 	"golang.org/x/tools/gopls/internal/lsp/fake"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
-	"golang.org/x/tools/gopls/internal/lsp/source"
 )
 
 func TestCaseInsensitiveFilesystem(t *testing.T) {
@@ -151,7 +150,7 @@
 	}
 
 	for _, tt := range tests {
-		filterer := source.NewFilterer(tt.filters)
+		filterer := NewFilterer(tt.filters)
 		for _, inc := range tt.included {
 			if pathExcludedByFilter(inc, filterer) {
 				t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc)