internal/lsp/tests: factor normalization logic out of cmd tests

Some features (notably, workspace symbols) produce results containing
file paths that also need to be normalized in tests outside of the
command-line package. This normalization logic is useful for all tests,
so factor it out into the shared testing package.
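
A sketch of the intended usage from a runner outside of cmd (the
datum, got, and want names below are illustrative; only the tests
helpers are part of this change):

	normalizers := tests.CollectNormalizers(datum.Exported)
	got := tests.Normalize(stdout, normalizers)
	if got != want {
		t.Errorf("got %q, want %q", got, want)
	}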

Change-Id: I2e00ebc0174079660c2f07562c50fd9377088475
Reviewed-on: https://go-review.googlesource.com/c/tools/+/272210
Trust: Rebecca Stambler <rstambler@golang.org>
Run-TryBot: Rebecca Stambler <rstambler@golang.org>
Reviewed-by: Robert Findley <rfindley@google.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go
index 457fdbb..b8abe6d 100644
--- a/internal/lsp/cmd/test/cmdtest.go
+++ b/internal/lsp/cmd/test/cmdtest.go
@@ -11,9 +11,6 @@
 	"fmt"
 	"io"
 	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
 	"sync"
 	"testing"
 
@@ -33,17 +30,10 @@
 	data        *tests.Data
 	ctx         context.Context
 	options     func(*source.Options)
-	normalizers []normalizer
+	normalizers []tests.Normalizer
 	remote      string
 }
 
-type normalizer struct {
-	path     string
-	slashed  string
-	escaped  string
-	fragment string
-}
-
 func TestCommandLine(t *testing.T, testdata string, options func(*source.Options)) {
 	// On Android, the testdata directory is not copied to the runner.
 	if stat, err := os.Stat(testdata); err != nil || !stat.IsDir() {
@@ -65,31 +55,13 @@
 }
 
 func NewRunner(data *tests.Data, ctx context.Context, remote string, options func(*source.Options)) *runner {
-	r := &runner{
+	return &runner{
 		data:        data,
 		ctx:         ctx,
 		options:     options,
-		normalizers: make([]normalizer, 0, len(data.Exported.Modules)),
+		normalizers: tests.CollectNormalizers(data.Exported),
 		remote:      remote,
 	}
-	// build the path normalizing patterns
-	for _, m := range data.Exported.Modules {
-		for fragment := range m.Files {
-			n := normalizer{
-				path:     data.Exported.File(m.Name, fragment),
-				fragment: fragment,
-			}
-			if n.slashed = filepath.ToSlash(n.path); n.slashed == n.path {
-				n.slashed = ""
-			}
-			quoted := strconv.Quote(n.path)
-			if n.escaped = quoted[1 : len(quoted)-1]; n.escaped == n.path {
-				n.escaped = ""
-			}
-			r.normalizers = append(r.normalizers, n)
-		}
-	}
-	return r
 }
 
 func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {
@@ -174,80 +146,10 @@
 	return r.Normalize(stdout), r.Normalize(stderr)
 }
 
-// NormalizePrefix normalizes a single path at the front of the input string.
-func (r *runner) NormalizePrefix(s string) string {
-	for _, n := range r.normalizers {
-		if t := strings.TrimPrefix(s, n.path); t != s {
-			return n.fragment + t
-		}
-		if t := strings.TrimPrefix(s, n.slashed); t != s {
-			return n.fragment + t
-		}
-		if t := strings.TrimPrefix(s, n.escaped); t != s {
-			return n.fragment + t
-		}
-	}
-	return s
+func (r *runner) Normalize(s string) string {
+	return tests.Normalize(s, r.normalizers)
 }
 
-// Normalize replaces all paths present in s with just the fragment portion
-// this is used to make golden files not depend on the temporary paths of the files
-func (r *runner) Normalize(s string) string {
-	type entry struct {
-		path     string
-		index    int
-		fragment string
-	}
-	match := make([]entry, 0, len(r.normalizers))
-	// collect the initial state of all the matchers
-	for _, n := range r.normalizers {
-		index := strings.Index(s, n.path)
-		if index >= 0 {
-			match = append(match, entry{n.path, index, n.fragment})
-		}
-		if n.slashed != "" {
-			index := strings.Index(s, n.slashed)
-			if index >= 0 {
-				match = append(match, entry{n.slashed, index, n.fragment})
-			}
-		}
-		if n.escaped != "" {
-			index := strings.Index(s, n.escaped)
-			if index >= 0 {
-				match = append(match, entry{n.escaped, index, n.fragment})
-			}
-		}
-	}
-	// result should be the same or shorter than the input
-	buf := bytes.NewBuffer(make([]byte, 0, len(s)))
-	last := 0
-	for {
-		// find the nearest path match to the start of the buffer
-		next := -1
-		nearest := len(s)
-		for i, c := range match {
-			if c.index >= 0 && nearest > c.index {
-				nearest = c.index
-				next = i
-			}
-		}
-		// if there are no matches, we copy the rest of the string and are done
-		if next < 0 {
-			buf.WriteString(s[last:])
-			return buf.String()
-		}
-		// we have a match
-		n := &match[next]
-		// copy up to the start of the match
-		buf.WriteString(s[last:n.index])
-		// skip over the filename
-		last = n.index + len(n.path)
-		// add in the fragment instead
-		buf.WriteString(n.fragment)
-		// see what the next match for this path is
-		n.index = strings.Index(s[last:], n.path)
-		if n.index >= 0 {
-			n.index += last
-		}
-	}
+func (r *runner) NormalizePrefix(s string) string {
+	return tests.NormalizePrefix(s, r.normalizers)
 }
diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go
index 705e26a..2d3ecee 100644
--- a/internal/lsp/lsp_test.go
+++ b/internal/lsp/lsp_test.go
@@ -38,6 +38,7 @@
 	data        *tests.Data
 	diagnostics map[span.URI][]*source.Diagnostic
 	ctx         context.Context
+	normalizers []tests.Normalizer
 }
 
 func testLSP(t *testing.T, datum *tests.Data) {
@@ -83,9 +84,10 @@
 		t.Fatal(err)
 	}
 	r := &runner{
-		server: NewServer(session, testClient{}),
-		data:   datum,
-		ctx:    ctx,
+		server:      NewServer(session, testClient{}),
+		data:        datum,
+		ctx:         ctx,
+		normalizers: tests.CollectNormalizers(datum.Exported),
 	}
 	tests.Run(t, r, datum)
 }
diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go
index 932351e..2726b5e 100644
--- a/internal/lsp/source/source_test.go
+++ b/internal/lsp/source/source_test.go
@@ -37,10 +37,11 @@
 }
 
 type runner struct {
-	snapshot source.Snapshot
-	view     source.View
-	data     *tests.Data
-	ctx      context.Context
+	snapshot    source.Snapshot
+	view        source.View
+	data        *tests.Data
+	ctx         context.Context
+	normalizers []tests.Normalizer
 }
 
 func testSource(t *testing.T, datum *tests.Data) {
@@ -82,10 +83,11 @@
 	snapshot, release := view.Snapshot(ctx)
 	defer release()
 	r := &runner{
-		view:     view,
-		snapshot: snapshot,
-		data:     datum,
-		ctx:      ctx,
+		view:        view,
+		snapshot:    snapshot,
+		data:        datum,
+		ctx:         ctx,
+		normalizers: tests.CollectNormalizers(datum.Exported),
 	}
 	tests.Run(t, r, datum)
 }
diff --git a/internal/lsp/tests/normalizer.go b/internal/lsp/tests/normalizer.go
new file mode 100644
index 0000000..ebe0ef4
--- /dev/null
+++ b/internal/lsp/tests/normalizer.go
@@ -0,0 +1,120 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tests
+
+import (
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/go/packages/packagestest"
+)
+
+type Normalizer struct {
+	path     string
+	slashed  string
+	escaped  string
+	fragment string
+}
+
+func CollectNormalizers(exported *packagestest.Exported) []Normalizer {
+	// build the path normalizing patterns
+	var normalizers []Normalizer
+	for _, m := range exported.Modules {
+		for fragment := range m.Files {
+			n := Normalizer{
+				path:     exported.File(m.Name, fragment),
+				fragment: fragment,
+			}
+			if n.slashed = filepath.ToSlash(n.path); n.slashed == n.path {
+				n.slashed = ""
+			}
+			quoted := strconv.Quote(n.path)
+			if n.escaped = quoted[1 : len(quoted)-1]; n.escaped == n.path {
+				n.escaped = ""
+			}
+			normalizers = append(normalizers, n)
+		}
+	}
+	return normalizers
+}
+
+// NormalizePrefix normalizes a single path at the front of the input string.
+func NormalizePrefix(s string, normalizers []Normalizer) string {
+	for _, n := range normalizers {
+		if t := strings.TrimPrefix(s, n.path); t != s {
+			return n.fragment + t
+		}
+		if t := strings.TrimPrefix(s, n.slashed); t != s {
+			return n.fragment + t
+		}
+		if t := strings.TrimPrefix(s, n.escaped); t != s {
+			return n.fragment + t
+		}
+	}
+	return s
+}
+
+// Normalize replaces all paths present in s with just the fragment portion.
+// This is used to make golden files not depend on the temporary paths of the files.
+func Normalize(s string, normalizers []Normalizer) string {
+	type entry struct {
+		path     string
+		index    int
+		fragment string
+	}
+	var match []entry
+	// collect the initial state of all the matchers
+	for _, n := range normalizers {
+		index := strings.Index(s, n.path)
+		if index >= 0 {
+			match = append(match, entry{n.path, index, n.fragment})
+		}
+		if n.slashed != "" {
+			index := strings.Index(s, n.slashed)
+			if index >= 0 {
+				match = append(match, entry{n.slashed, index, n.fragment})
+			}
+		}
+		if n.escaped != "" {
+			index := strings.Index(s, n.escaped)
+			if index >= 0 {
+				match = append(match, entry{n.escaped, index, n.fragment})
+			}
+		}
+	}
+	// result should be the same or shorter than the input
+	var b strings.Builder
+	last := 0
+	for {
+		// find the nearest path match to the start of the buffer
+		next := -1
+		nearest := len(s)
+		for i, c := range match {
+			if c.index >= 0 && nearest > c.index {
+				nearest = c.index
+				next = i
+			}
+		}
+		// if there are no matches, we copy the rest of the string and are done
+		if next < 0 {
+			b.WriteString(s[last:])
+			return b.String()
+		}
+		// we have a match
+		n := &match[next]
+		// copy up to the start of the match
+		b.WriteString(s[last:n.index])
+		// skip over the filename
+		last = n.index + len(n.path)
+		// add in the fragment instead
+		b.WriteString(n.fragment)
+		// see what the next match for this path is
+		n.index = strings.Index(s[last:], n.path)
+		if n.index >= 0 {
+			n.index += last
+		}
+	}
+}