internal/lsp/cache: reduce critical sections

This change reduces the sizes of the critical sections
in traces.ProcessEvent and Generation.Bind, in particular
by moving allocations ahead of Lock. According to the
trace profiler, this reduces contention.

See https://go-review.googlesource.com/c/go/+/411909 for
another reduction in contention. The largest remaining
contention is Handle.Get, which thousands of goroutines
wait for because we initiate typechecking top down.

Also, add a couple of TODO comments noting possible
optimizations, and delete a stale comment about Bind.

Change-Id: I995a0bb46e8c9bf0c23492fb62b56f4539bc32f8
Reviewed-on: https://go-review.googlesource.com/c/tools/+/411910
Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Hyang-Ah Hana Kim <hyangah@gmail.com>
diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go
index f09fc29..51d7d1a 100644
--- a/internal/lsp/cache/check.go
+++ b/internal/lsp/cache/check.go
@@ -97,14 +97,6 @@
 		return nil, err
 	}
 
-	// Do not close over the packageHandle or the snapshot in the Bind function.
-	// This creates a cycle, which causes the finalizers to never run on the handles.
-	// The possible cycles are:
-	//
-	//     packageHandle.h.function -> packageHandle
-	//     packageHandle.h.function -> snapshot -> packageHandle
-	//
-
 	m := ph.m
 	key := ph.key
 
@@ -121,6 +113,13 @@
 			}(dep)
 		}
 
+		// TODO(adonovan): opt: consider moving the Wait here,
+		// so that dependencies complete before we start to
+		// read+parse+typecheck this package. Although the
+		// read+parse can proceed, typechecking will block
+		// almost immediately until the imports are done.
+		// The effect is to increase contention.
+
 		data := &packageData{}
 		data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
 		// Make sure that the workers above have finished before we return,
@@ -448,6 +447,7 @@
 	}
 	typeparams.InitInstanceInfo(pkg.typesInfo)
 
+	// TODO(adonovan): opt: execute this loop in parallel.
 	for _, gf := range pkg.m.GoFiles {
 		// In the presence of line directives, we may need to report errors in
 		// non-compiled Go files, so we need to register them on the package.
diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go
index 668c437..ab55743 100644
--- a/internal/lsp/cache/parse.go
+++ b/internal/lsp/cache/parse.go
@@ -278,7 +278,7 @@
 
 	tok := fset.File(file.Pos())
 	if tok == nil {
-		// file.Pos is the location of the package declaration. If there was
+		// file.Pos is the location of the package declaration (issue #53202). If there was
 		// none, we can't find the token.File that ParseFile created, and we
 		// have no choice but to recreate it.
 		tok = fset.AddFile(fh.URI().Filename(), -1, len(src))
diff --git a/internal/lsp/debug/trace.go b/internal/lsp/debug/trace.go
index ca61286..bb402cf 100644
--- a/internal/lsp/debug/trace.go
+++ b/internal/lsp/debug/trace.go
@@ -119,8 +119,6 @@
 }
 
 func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
-	t.mu.Lock()
-	defer t.mu.Unlock()
 	span := export.GetSpan(ctx)
 	if span == nil {
 		return ctx
@@ -128,11 +126,8 @@
 
 	switch {
 	case event.IsStart(ev):
-		if t.sets == nil {
-			t.sets = make(map[string]*traceSet)
-			t.unfinished = make(map[export.SpanContext]*traceData)
-		}
-		// just starting, add it to the unfinished map
+		// Just starting: add it to the unfinished map.
+		// Allocate before the critical section.
 		td := &traceData{
 			TraceID:  span.ID.TraceID,
 			SpanID:   span.ID.SpanID,
@@ -141,6 +136,13 @@
 			Start:    span.Start().At(),
 			Tags:     renderLabels(span.Start()),
 		}
+
+		t.mu.Lock()
+		defer t.mu.Unlock()
+		if t.sets == nil {
+			t.sets = make(map[string]*traceSet)
+			t.unfinished = make(map[export.SpanContext]*traceData)
+		}
 		t.unfinished[span.ID] = td
 		// and wire up parents if we have them
 		if !span.ParentID.IsValid() {
@@ -155,7 +157,19 @@
 		parent.Children = append(parent.Children, td)
 
 	case event.IsEnd(ev):
-		// finishing, must be already in the map
+		// Finishing: must be already in the map.
+		// Allocate events before the critical section.
+		events := span.Events()
+		tdEvents := make([]traceEvent, len(events))
+		for i, event := range events {
+			tdEvents[i] = traceEvent{
+				Time: event.At(),
+				Tags: renderLabels(event),
+			}
+		}
+
+		t.mu.Lock()
+		defer t.mu.Unlock()
 		td, found := t.unfinished[span.ID]
 		if !found {
 			return ctx // if this happens we are in a bad place
@@ -164,14 +178,7 @@
 
 		td.Finish = span.Finish().At()
 		td.Duration = span.Finish().At().Sub(span.Start().At())
-		events := span.Events()
-		td.Events = make([]traceEvent, len(events))
-		for i, event := range events {
-			td.Events[i] = traceEvent{
-				Time: event.At(),
-				Tags: renderLabels(event),
-			}
-		}
+		td.Events = tdEvents
 
 		set, ok := t.sets[span.Name]
 		if !ok {
diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go
index dec2fff..28d5e2c 100644
--- a/internal/memoize/memoize.go
+++ b/internal/memoize/memoize.go
@@ -181,8 +181,9 @@
 	if atomic.LoadUint32(&g.destroyed) != 0 {
 		panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy)
 	}
+
+	// Avoid 'defer Unlock' to reduce critical section.
 	g.store.mu.Lock()
-	defer g.store.mu.Unlock()
 	h, ok := g.store.handles[key]
 	if !ok {
 		h := &Handle{
@@ -192,8 +193,11 @@
 			cleanup:     cleanup,
 		}
 		g.store.handles[key] = h
+		g.store.mu.Unlock()
 		return h
 	}
+	g.store.mu.Unlock()
+
 	h.mu.Lock()
 	defer h.mu.Unlock()
 	if _, ok := h.generations[g]; !ok {