internal/lsp: more careful error handling for semantic tokens

The semantic tokens implementation now returns fewer errors to the
client. The LSP specification restricts errors to 'exceptions', so
gopls no longer returns an error when parsing or type checking fails.

Also, some internal routines that always returned a nil error no
longer return errors at all. Logging of the errors induced by //line
directives was too verbose and has been turned off. (Many LSP requests
will fail when //line directives are present.)
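
For example, the encoder's Data method and the data callback passed to
template.SemanticTokens drop their error results (a sketch of the
change, taken from the diff below):

    // before
    func (e *encoded) Data() ([]uint32, error)
    data := func() ([]uint32, error) { return e.Data() }

    // after
    func (e *encoded) Data() []uint32
    data := func() []uint32 { return e.Data() }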

Fixes golang/go#46176

Change-Id: I18b2cb164b55174f4edbc31e1376da7a8c505a1b
Reviewed-on: https://go-review.googlesource.com/c/tools/+/319249
Run-TryBot: Peter Weinberger <pjw@google.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
Trust: Peter Weinberger <pjw@google.com>
diff --git a/internal/lsp/semantic.go b/internal/lsp/semantic.go
index c6e9190..4bbe5ac 100644
--- a/internal/lsp/semantic.go
+++ b/internal/lsp/semantic.go
@@ -23,6 +23,10 @@
 	errors "golang.org/x/xerrors"
 )
 
+// The LSP says that errors for semantic token requests should be returned
+// only for exceptions (a word not otherwise defined). This code treats a
+// too-large file as an exception; on parse errors, it does what it can.
+
 // reject full semantic token requests for large files
 const maxFullFileSize int = 100000
 
@@ -72,7 +76,7 @@
 		add := func(line, start uint32, len uint32) {
 			e.add(line, start, len, tokMacro, nil)
 		}
-		data := func() ([]uint32, error) {
+		data := func() []uint32 {
 			return e.Data()
 		}
 		return template.SemanticTokens(ctx, snapshot, fh.URI(), add, data)
@@ -89,9 +93,7 @@
 	if err != nil {
 		return nil, err
 	}
-	if pgf.ParseErr != nil {
-		return nil, pgf.ParseErr
-	}
+	// don't return errors on pgf.ParseErr. Do what we can.
 	if rng == nil && len(pgf.Src) > maxFullFileSize {
 		err := fmt.Errorf("semantic tokens: file %s too large for full (%d>%d)",
 			fh.URI().Filename(), len(pgf.Src), maxFullFileSize)
@@ -107,16 +109,13 @@
 		tokMods:  s.session.Options().SemanticMods,
 	}
 	if err := e.init(); err != nil {
+		// e.init should never return an error, unless there's some
+		// seemingly impossible race condition
 		return nil, err
 	}
 	e.semantics()
-	ans.Data, err = e.Data()
-	if err != nil {
-		// this is an internal error, likely caused by a typo
-		// for a token or modifier
-		return nil, err
-	}
-	// for small cache, some day. for now, the client ignores this
+	ans.Data = e.Data()
+	// ResultID is for delta requests, but we've never seen any.
 	ans.ResultID = fmt.Sprintf("%v", time.Now())
 	return &ans, nil
 }
@@ -182,7 +181,8 @@
 		// "column mapper is for file...instead of..."
 		// "line is beyond end of file..."
 		// see line 116 of internal/span/token.go which uses Position not PositionFor
-		event.Error(e.ctx, "failed to convert to range", err)
+		// (it is too verbose to print the error on every token. some other RPC will fail)
+		// event.Error(e.ctx, "failed to convert to range", err)
 		return
 	}
 	if lspRange.End.Line != lspRange.Start.Line {
@@ -381,11 +381,13 @@
 	case *ast.UnaryExpr:
 		e.token(x.OpPos, len(x.Op.String()), tokOperator, nil)
 	case *ast.ValueSpec:
-	// things we won't see
-	case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt,
-		*ast.File, *ast.Package:
+	// things we only see with parsing or type errors, so we ignore them
+	case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
+		return true
+	// not going to see these
+	case *ast.File, *ast.Package:
 		log.Printf("implement %T %s", x, e.pgf.Tok.PositionFor(x.Pos(), false))
-	// things we knowingly ignore
+	// other things we knowingly ignore
 	case *ast.Comment, *ast.CommentGroup:
 		pop()
 		return false
@@ -578,14 +580,14 @@
 	}
 	span, err := e.pgf.Mapper.RangeSpan(*e.rng)
 	if err != nil {
-		return errors.Errorf("range span (%v) error for %s", err, e.pgf.File.Name)
+		return errors.Errorf("range span (%w) error for %s", err, e.pgf.File.Name)
 	}
 	e.end = e.start + token.Pos(span.End().Offset())
 	e.start += token.Pos(span.Start().Offset())
 	return nil
 }
 
-func (e *encoded) Data() ([]uint32, error) {
+func (e *encoded) Data() []uint32 {
 	// binary operators, at least, will be out of order
 	sort.Slice(e.items, func(i, j int) bool {
 		if e.items[i].line != e.items[j].line {
@@ -616,7 +618,7 @@
 		}
 		x[j+4] = uint32(mask)
 	}
-	return x, nil
+	return x
 }
 
 func (e *encoded) importSpec(d *ast.ImportSpec) {
diff --git a/internal/lsp/template/implementations.go b/internal/lsp/template/implementations.go
index 6cdab9e..6c57a68 100644
--- a/internal/lsp/template/implementations.go
+++ b/internal/lsp/template/implementations.go
@@ -150,7 +150,7 @@
 	return ans, nil
 }
 
-func SemanticTokens(ctx context.Context, snapshot source.Snapshot, spn span.URI, add func(line, start, len uint32), d func() ([]uint32, error)) (*protocol.SemanticTokens, error) {
+func SemanticTokens(ctx context.Context, snapshot source.Snapshot, spn span.URI, add func(line, start, len uint32), d func() []uint32) (*protocol.SemanticTokens, error) {
 	if skipTemplates(snapshot) {
 		return nil, nil
 	}
@@ -184,12 +184,7 @@
 		line, col := p.LineCol(t.Start)
 		add(line, col, uint32(sz))
 	}
-	data, err := d()
-	if err != nil {
-		// this is an internal error, likely caused by a typo
-		// for a token or modifier
-		return nil, err
-	}
+	data := d()
 	ans := &protocol.SemanticTokens{
 		Data: data,
 		// for small cache, some day. for now, the LSP client ignores this