gopls/internal/lsp/cache: limit parallelism

Analysis has far too many threads running at once,
resulting in cache thrashing.
Reduce the parallelism of runCached, cache updates, and parsing.
(The first of these borrows its logic from CL 511215.)
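
For reference, the cache-update throttle added below uses Go's standard
buffered-channel semaphore idiom. A minimal, self-contained sketch of that
pattern (the names and the limit of 4 are illustrative, not taken from the
patch):

    package main

    import (
        "fmt"
        "sync"
    )

    // limit is a counting semaphore: its capacity bounds how many
    // goroutines may execute the guarded section at the same time.
    var limit = make(chan struct{}, 4)

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 16; i++ {
            i := i // capture loop variable (pre-Go 1.22 idiom, as in the patch)
            wg.Add(1)
            go func() {
                defer wg.Done()
                limit <- struct{}{}        // acquire token
                defer func() { <-limit }() // release token
                fmt.Println("task", i)     // at most 4 tasks run at once
            }()
        }
        wg.Wait()
    }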

Change-Id: I3ec654bd49965b3cb9e0f06cc7f49dcd01093778
Reviewed-on: https://go-review.googlesource.com/c/tools/+/511755
Reviewed-by: Robert Findley <rfindley@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Auto-Submit: Alan Donovan <adonovan@google.com>
Run-TryBot: Alan Donovan <adonovan@google.com>
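
The parsing change below relies on errgroup's SetLimit, which caps how many
goroutines started with Go are active at once; further calls to Go block until
a slot frees up. A minimal sketch of that API under illustrative values (the
limit of 4 and the squaring workload are stand-ins, not from this change):

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        var group errgroup.Group
        group.SetLimit(4) // at most 4 group.Go goroutines are active at once

        results := make([]int, 16)
        for i := range results {
            i := i // capture loop variable, as in the patch
            group.Go(func() error {
                results[i] = i * i // stand-in for parsing one file
                return nil
            })
        }
        if err := group.Wait(); err != nil {
            fmt.Println("parse error:", err)
        }
        fmt.Println(results)
    }
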
diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go
index 7efb4ed..c5cda14 100644
--- a/gopls/internal/lsp/cache/analysis.go
+++ b/gopls/internal/lsp/cache/analysis.go
@@ -643,6 +643,9 @@
 			return nil, err
 		}
 		go func() {
+			cacheLimit <- unit{}            // acquire token
+			defer func() { <-cacheLimit }() // release token
+
 			data := analyzeSummaryCodec.Encode(summary)
 			if false {
 				log.Printf("Set key=%d value=%d id=%s\n", len(key), len(data), an.m.ID)
@@ -656,6 +659,10 @@
 	return summary, nil
 }
 
+// cacheLimit reduces parallelism of cache updates.
+// We allow more than the typical GOMAXPROCS, as the work is a mix of CPU and I/O.
+var cacheLimit = make(chan unit, 32)
+
 // analysisCacheKey returns a cache key that is a cryptographic digest
 // of all the values that might affect type checking and analysis:
 // the analyzer names, package metadata, names and contents of
@@ -747,6 +754,7 @@
 	parsed := make([]*source.ParsedGoFile, len(an.files))
 	{
 		var group errgroup.Group
+		group.SetLimit(4) // not too much: run itself is already called in parallel
 		for i, fh := range an.files {
 			i, fh := i, fh
 			group.Go(func() error {