gopls/internal/regtest/bench: refactor and improve benchmarks
Significantly refactor the gopls benchmarks to turn them into proper Go
benchmarks, eliminate the need to pass flags, and allow running them
against external gopls processes so that they may be used to test older
gopls versions.

Doing this required decoupling the benchmarks from the regtest.Runner:
they now create their own regtest.Env for scripting operations. To
facilitate this, I redefined Env as a convenience wrapper around other
primitives (sandbox, editor, awaiter, and server connection).

By using a separate environment setup for benchmarks, I was able to
eliminate several regtest RunOptions that existed only to prevent the
regtest runner from adding instrumentation that would affect
benchmarking. This also helped clean up Runner.Run somewhat, though it
is still too complicated.

Also eliminate the unused AnyDiagnosticAtCurrentVersion expectation, and
add a few other TODOs about future cleanup.
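
For illustration, the new setup pattern used by the benchmarks (a sketch
assembled from the code in this change; dir, b, and *file are as in
bench_test.go below):

	sandbox, editor, awaiter, err := connectEditor(dir)
	if err != nil {
		b.Fatal(err)
	}
	env := &regtest.Env{
		T:       b,
		Ctx:     context.Background(),
		Sandbox: sandbox,
		Editor:  editor,
		Awaiter: awaiter,
	}
	env.OpenFile(*file)
	env.Await(env.DoneWithOpen())
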
For golang/go#53992
For golang/go#53538
Change-Id: Idbf923178d4256900c3c05bc8999c0c9839a3c07
Reviewed-on: https://go-review.googlesource.com/c/tools/+/419988
gopls-CI: kokoro <noreply+kokoro@google.com>
Reviewed-by: Peter Weinberger <pjw@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go
index fc1fa74..a3780f0 100644
--- a/gopls/internal/regtest/bench/bench_test.go
+++ b/gopls/internal/regtest/bench/bench_test.go
@@ -5,38 +5,209 @@
package bench
import (
+ "context"
+ "flag"
"fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "sync"
"testing"
+ "time"
"golang.org/x/tools/gopls/internal/hooks"
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/fakenet"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
"golang.org/x/tools/internal/lsp/bug"
+ "golang.org/x/tools/internal/lsp/cache"
+ "golang.org/x/tools/internal/lsp/fake"
+ "golang.org/x/tools/internal/lsp/lsprpc"
+ "golang.org/x/tools/internal/lsp/regtest"
. "golang.org/x/tools/internal/lsp/regtest"
)
+// This package implements benchmarks that share a common editor session.
+//
+// It is a work-in-progress.
+//
+// Remaining TODO(rfindley):
+// - add detailed documentation for how to write a benchmark, as a package doc
+// - add benchmarks for more features
+// - eliminate flags, and just run benchmarks with a predefined set of
+//   arguments
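+//
+// Example invocation, from x/tools/gopls (a sketch; flag defaults and
+// meanings are as defined below in this file):
+//
+//   go test ./internal/regtest/bench -bench=BenchmarkDidChange \
+//     -workdir=$HOME/w/kubernetes -file=pkg/util/hash/hash.go
+//
+// To benchmark an externally built gopls binary, additionally pass
+// -gopls=/path/to/gopls.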
+
func TestMain(m *testing.M) {
bug.PanicOnBugs = true
- Main(m, hooks.Options)
+ event.SetExporter(nil) // don't log to stderr
+ code := doMain(m)
+ os.Exit(code)
}
-func benchmarkOptions(dir string) []RunOption {
- return []RunOption{
- // Run in an existing directory, since we're trying to simulate known cases
- // that cause gopls memory problems.
- InExistingDir(dir),
- // Skip logs as they buffer up memory unnaturally.
- SkipLogs(),
- // The Debug server only makes sense if running in singleton mode.
- Modes(Default),
- // Remove the default timeout. Individual tests should control their
- // own graceful termination.
- NoDefaultTimeout(),
+func doMain(m *testing.M) (code int) {
+ defer func() {
+ if editor != nil {
+ if err := editor.Close(context.Background()); err != nil {
+ fmt.Fprintf(os.Stderr, "closing editor: %v", err)
+ if code == 0 {
+ code = 1
+ }
+ }
+ }
+ if tempDir != "" {
+ if err := os.RemoveAll(tempDir); err != nil {
+ fmt.Fprintf(os.Stderr, "cleaning temp dir: %v", err)
+ if code == 0 {
+ code = 1
+ }
+ }
+ }
+ }()
+ return m.Run()
+}
- // Use the actual proxy, since we want our builds to succeed.
- GOPROXY("https://proxy.golang.org"),
+var (
+ workdir = flag.String("workdir", "", "if set, working directory to use for benchmarks; overrides -repo and -commit")
+ repo = flag.String("repo", "https://go.googlesource.com/tools", "if set (and -workdir is unset), run benchmarks in this repo")
+ file = flag.String("file", "go/ast/astutil/util.go", "active file, for benchmarks that operate on a file")
+ commitish = flag.String("commit", "gopls/v0.9.0", "if set (and -workdir is unset), run benchmarks at this commit")
+
+ goplsPath = flag.String("gopls", "", "if set, use this gopls for testing")
+
+ // If non-empty, tempDir is a temporary working dir that was created by this
+ // test suite.
+ setupDirOnce sync.Once
+ tempDir string
+
+ setupEditorOnce sync.Once
+ sandbox *fake.Sandbox
+ editor *fake.Editor
+ awaiter *regtest.Awaiter
+)
+
+// benchmarkDir returns the directory to use for benchmarks.
+//
+// If -workdir is set, just use that directory. Otherwise, check out a shallow
+// copy of -repo at the given -commit, and clean up when the test suite exits.
+func benchmarkDir() string {
+ if *workdir != "" {
+ return *workdir
+ }
+ setupDirOnce.Do(func() {
+ if *repo == "" {
+ log.Fatal("-repo must be provided")
+ }
+
+ if *commitish == "" {
+ log.Fatal("-commit must be provided")
+ }
+
+ var err error
+ tempDir, err = ioutil.TempDir("", "gopls-bench")
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("checking out %s@%s to %s\n", *repo, *commitish, tempDir)
+
+ // Set a timeout for git fetch. If this proves flaky, it can be removed.
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+ defer cancel()
+
+ // Use a shallow fetch to download just the relevant commit.
+ shInit := fmt.Sprintf("git init && git fetch --depth=1 %q %q && git checkout FETCH_HEAD", *repo, *commitish)
+ initCmd := exec.CommandContext(ctx, "/bin/sh", "-c", shInit)
+ initCmd.Dir = tempDir
+ if err := initCmd.Run(); err != nil {
+ log.Fatalf("checking out %s: %v", *repo, err)
+ }
+ })
+ return tempDir
+}
+
+// benchmarkEnv returns a shared benchmark environment, initialized on first use.
+func benchmarkEnv(tb testing.TB) *Env {
+ setupEditorOnce.Do(func() {
+ dir := benchmarkDir()
+
+ var err error
+ sandbox, editor, awaiter, err = connectEditor(dir)
+ if err != nil {
+ log.Fatalf("connecting editor: %v", err)
+ }
+
+ if err := awaiter.Await(context.Background(), InitialWorkspaceLoad); err != nil {
+ panic(err)
+ }
+ })
+
+ return &Env{
+ T: tb,
+ Ctx: context.Background(),
+ Editor: editor,
+ Sandbox: sandbox,
+ Awaiter: awaiter,
}
}
-func printBenchmarkResults(result testing.BenchmarkResult) {
- fmt.Printf("BenchmarkStatistics\t%s\t%s\n", result.String(), result.MemString())
+// connectEditor connects a fake editor session in the given dir, using the
+// default editor config.
+func connectEditor(dir string) (*fake.Sandbox, *fake.Editor, *regtest.Awaiter, error) {
+ s, err := fake.NewSandbox(&fake.SandboxConfig{
+ Workdir: dir,
+ GOPROXY: "https://proxy.golang.org",
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ a := regtest.NewAwaiter(s.Workdir)
+ ts := getServer()
+ e, err := fake.NewEditor(s, fake.EditorConfig{}).Connect(context.Background(), ts, a.Hooks())
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return s, e, a, nil
+}
+
+// getServer returns a server connector that either starts a new in-process
+// server, or starts a separate gopls process.
+func getServer() servertest.Connector {
+ if *goplsPath != "" {
+ return &SidecarServer{*goplsPath}
+ }
+ server := lsprpc.NewStreamServer(cache.New(nil, nil, hooks.Options), false)
+ return servertest.NewPipeServer(server, jsonrpc2.NewRawStream)
+}
+
+// A SidecarServer starts (and connects to) a separate gopls process at the
+// given path.
+type SidecarServer struct {
+ goplsPath string
+}
+
+// Connect starts the sidecar gopls process and returns a client connection
+// over its stdin/stdout.
+func (s *SidecarServer) Connect(ctx context.Context) jsonrpc2.Conn {
+ cmd := exec.CommandContext(ctx, *goplsPath, "serve")
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ log.Fatal(err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ log.Fatal(err)
+ }
+ cmd.Stderr = os.Stdout
+ if err := cmd.Start(); err != nil {
+ log.Fatalf("starting gopls: %v", err)
+ }
+
+ go cmd.Wait() // to free resources; error is ignored
+
+ clientStream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", stdout, stdin))
+ clientConn := jsonrpc2.NewConn(clientStream)
+ return clientConn
}
diff --git a/gopls/internal/regtest/bench/completion_test.go b/gopls/internal/regtest/bench/completion_test.go
index f9b8445..cdafb08 100644
--- a/gopls/internal/regtest/bench/completion_test.go
+++ b/gopls/internal/regtest/bench/completion_test.go
@@ -5,7 +5,7 @@
package bench
import (
- "flag"
+ "context"
"fmt"
"strings"
"testing"
@@ -15,63 +15,63 @@
"golang.org/x/tools/internal/lsp/fake"
)
-// dummyCompletionFunction to test manually configured completion using CLI.
-func dummyCompletionFunction() { const s = "placeholder"; fmt.Printf("%s", s) }
-
type completionBenchOptions struct {
- workdir, file, locationRegexp string
- printResults bool
- // hook to run edits before initial completion, not supported for manually
- // configured completions.
+ file, locationRegexp string
+
+ // hook to run edits before initial completion
preCompletionEdits func(*Env)
}
-var completionOptions = completionBenchOptions{}
+func benchmarkCompletion(options completionBenchOptions, b *testing.B) {
+ dir := benchmarkDir()
-func init() {
- flag.StringVar(&completionOptions.workdir, "completion_workdir", "", "directory to run completion benchmarks in")
- flag.StringVar(&completionOptions.file, "completion_file", "", "relative path to the file to complete in")
- flag.StringVar(&completionOptions.locationRegexp, "completion_regexp", "", "regexp location to complete at")
- flag.BoolVar(&completionOptions.printResults, "completion_print_results", false, "whether to print completion results")
-}
+ // Use a new environment for each test, to avoid any existing state from the
+ // previous session.
+ sandbox, editor, awaiter, err := connectEditor(dir)
+ if err != nil {
+ b.Fatal(err)
+ }
+ ctx := context.Background()
+ defer func() {
+ if err := editor.Close(ctx); err != nil {
+ b.Errorf("closing editor: %v", err)
+ }
+ }()
-func benchmarkCompletion(options completionBenchOptions, t *testing.T) {
- if completionOptions.workdir == "" {
- t.Skip("-completion_workdir not configured, skipping benchmark")
+ env := &Env{
+ T: b,
+ Ctx: ctx,
+ Editor: editor,
+ Sandbox: sandbox,
+ Awaiter: awaiter,
+ }
+ env.OpenFile(options.file)
+
+ // Run edits required for this completion.
+ if options.preCompletionEdits != nil {
+ options.preCompletionEdits(env)
}
- opts := stressTestOptions(options.workdir)
+ // Run a completion to make sure the system is warm.
+ pos := env.RegexpSearch(options.file, options.locationRegexp)
+ completions := env.Completion(options.file, pos)
- // Completion gives bad results if IWL is not yet complete, so we must await
- // it first (and therefore need hooks).
- opts = append(opts, SkipHooks(false))
-
- WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
- env.OpenFile(options.file)
-
- // Run edits required for this completion.
- if options.preCompletionEdits != nil {
- options.preCompletionEdits(env)
+ if testing.Verbose() {
+ fmt.Println("Results:")
+ for i := 0; i < len(completions.Items); i++ {
+ fmt.Printf("\t%d. %v\n", i, completions.Items[i])
}
+ }
- // Run a completion to make sure the system is warm.
- pos := env.RegexpSearch(options.file, options.locationRegexp)
- completions := env.Completion(options.file, pos)
+ b.ResetTimer()
- if options.printResults {
- fmt.Println("Results:")
- for i := 0; i < len(completions.Items); i++ {
- fmt.Printf("\t%d. %v\n", i, completions.Items[i])
- }
+ // Use a subtest to ensure that benchmarkCompletion does not itself get
+ // executed multiple times (as it performs expensive environment
+ // initialization).
+ b.Run("completion", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ env.Completion(options.file, pos)
}
-
- results := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- env.Completion(options.file, pos)
- }
- })
-
- printBenchmarkResults(results)
})
}
@@ -88,26 +88,8 @@
}
}
-// Benchmark completion at a specified file and location. When no CLI options
-// are specified, this test is skipped.
-// To Run (from x/tools/gopls) against the dummy function above:
-//
-// go test -v ./internal/regtest/bench -run=TestBenchmarkConfiguredCompletion
-// -completion_workdir="$HOME/Developer/tools"
-// -completion_file="gopls/internal/regtest/completion_bench_test.go"
-// -completion_regexp="dummyCompletionFunction.*fmt\.Printf\(\"%s\", s(\))"
-func TestBenchmarkConfiguredCompletion(t *testing.T) {
- benchmarkCompletion(completionOptions, t)
-}
-
-// To run (from x/tools/gopls):
-// go test -v ./internal/regtest/bench -run TestBenchmark<>Completion
-// -completion_workdir="$HOME/Developer/tools"
-// where <> is one of the tests below. completion_workdir should be path to
-// x/tools on your system.
-
// Benchmark struct completion in tools codebase.
-func TestBenchmarkStructCompletion(t *testing.T) {
+func BenchmarkStructCompletion(b *testing.B) {
file := "internal/lsp/cache/session.go"
preCompletionEdits := func(env *Env) {
@@ -120,26 +102,22 @@
}
benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
file: file,
locationRegexp: `var testVariable map\[string\]bool = Session{}(\.)`,
preCompletionEdits: preCompletionEdits,
- printResults: completionOptions.printResults,
- }, t)
+ }, b)
}
// Benchmark import completion in tools codebase.
-func TestBenchmarkImportCompletion(t *testing.T) {
+func BenchmarkImportCompletion(b *testing.B) {
benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
file: "internal/lsp/source/completion/completion.go",
locationRegexp: `go\/()`,
- printResults: completionOptions.printResults,
- }, t)
+ }, b)
}
// Benchmark slice completion in tools codebase.
-func TestBenchmarkSliceCompletion(t *testing.T) {
+func BenchmarkSliceCompletion(b *testing.B) {
file := "internal/lsp/cache/session.go"
preCompletionEdits := func(env *Env) {
@@ -152,16 +130,14 @@
}
benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
file: file,
locationRegexp: `var testVariable \[\]byte (=)`,
preCompletionEdits: preCompletionEdits,
- printResults: completionOptions.printResults,
- }, t)
+ }, b)
}
// Benchmark deep completion in function call in tools codebase.
-func TestBenchmarkFuncDeepCompletion(t *testing.T) {
+func BenchmarkFuncDeepCompletion(b *testing.B) {
file := "internal/lsp/source/completion/completion.go"
fileContent := `
func (c *completer) _() {
@@ -178,10 +154,8 @@
}
benchmarkCompletion(completionBenchOptions{
- workdir: completionOptions.workdir,
file: file,
locationRegexp: `func \(c \*completer\) _\(\) {\n\tc\.inference\.kindMatches\((c)`,
preCompletionEdits: preCompletionEdits,
- printResults: completionOptions.printResults,
- }, t)
+ }, b)
}
diff --git a/gopls/internal/regtest/bench/didchange_test.go b/gopls/internal/regtest/bench/didchange_test.go
index 76f40be..8fcf253 100644
--- a/gopls/internal/regtest/bench/didchange_test.go
+++ b/gopls/internal/regtest/bench/didchange_test.go
@@ -5,10 +5,7 @@
package bench
import (
- "flag"
"fmt"
- "os"
- "runtime/pprof"
"testing"
"golang.org/x/tools/internal/lsp/fake"
@@ -16,64 +13,28 @@
. "golang.org/x/tools/internal/lsp/regtest"
)
-var (
- benchDir = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set didchange_file.")
- benchFile = flag.String("didchange_file", "", "The file to modify")
- benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
-)
-
-// TestBenchmarkDidChange benchmarks modifications of a single file by making
+// BenchmarkDidChange benchmarks modifications of a single file by making
// synthetic modifications in a comment. It controls pacing by waiting for the
// server to actually start processing the didChange notification before
// proceeding. Notably it does not wait for diagnostics to complete.
//
-// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir
-// is the path to a workspace root, and -didchange_file is the
-// workspace-relative path to a file to modify. e.g.:
-//
-// go test -run=TestBenchmarkDidChange \
-// -didchange_dir=path/to/kubernetes \
-// -didchange_file=pkg/util/hash/hash.go
-func TestBenchmarkDidChange(t *testing.T) {
- if *benchDir == "" {
- t.Skip("-didchange_dir is not set")
- }
- if *benchFile == "" {
- t.Fatal("-didchange_file must be set if -didchange_dir is set")
- }
+// Uses -workdir and -file to control where the edits occur.
+func BenchmarkDidChange(b *testing.B) {
+ env := benchmarkEnv(b)
+ env.OpenFile(*file)
+ env.Await(env.DoneWithOpen())
- opts := benchmarkOptions(*benchDir)
- WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
- env.OpenFile(*benchFile)
- env.Await(env.DoneWithOpen())
- // Insert the text we'll be modifying at the top of the file.
- env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
+ // Insert the text we'll be modifying at the top of the file.
+ env.EditBuffer(*file, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})
- // Run the profiler after the initial load,
- // across all benchmark iterations.
- if *benchProfile != "" {
- profile, err := os.Create(*benchProfile)
- if err != nil {
- t.Fatal(err)
- }
- defer profile.Close()
- if err := pprof.StartCPUProfile(profile); err != nil {
- t.Fatal(err)
- }
- defer pprof.StopCPUProfile()
- }
-
- result := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- env.EditBuffer(*benchFile, fake.Edit{
- Start: fake.Pos{Line: 0, Column: 0},
- End: fake.Pos{Line: 1, Column: 0},
- // Increment
- Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
- })
- env.Await(StartedChange(uint64(i + 1)))
- }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ env.EditBuffer(*file, fake.Edit{
+ Start: fake.Pos{Line: 0, Column: 0},
+ End: fake.Pos{Line: 1, Column: 0},
+ // Increment the placeholder text, to ensure cache misses.
+ Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
})
- printBenchmarkResults(result)
- })
+ env.Await(StartedChange(uint64(i + 1)))
+ }
}
diff --git a/gopls/internal/regtest/bench/iwl_test.go b/gopls/internal/regtest/bench/iwl_test.go
index 62faa34..e262a39 100644
--- a/gopls/internal/regtest/bench/iwl_test.go
+++ b/gopls/internal/regtest/bench/iwl_test.go
@@ -5,35 +5,31 @@
package bench
import (
- "flag"
+ "context"
"testing"
. "golang.org/x/tools/internal/lsp/regtest"
)
-var iwlOptions struct {
- workdir string
-}
+// BenchmarkIWL benchmarks the initial workspace load time for a new editing
+// session.
+func BenchmarkIWL(b *testing.B) {
+ dir := benchmarkDir()
+ b.ResetTimer()
-func init() {
- flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory")
-}
-
-func TestBenchmarkIWL(t *testing.T) {
- if iwlOptions.workdir == "" {
- t.Skip("-iwl_workdir not configured")
- }
-
- opts := stressTestOptions(iwlOptions.workdir)
- // Don't skip hooks, so that we can wait for IWL.
- opts = append(opts, SkipHooks(false))
-
- results := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {})
-
+ ctx := context.Background()
+ for i := 0; i < b.N; i++ {
+ _, editor, awaiter, err := connectEditor(dir)
+ if err != nil {
+ b.Fatal(err)
}
- })
-
- printBenchmarkResults(results)
+ if err := awaiter.Await(ctx, InitialWorkspaceLoad); err != nil {
+ b.Fatal(err)
+ }
+ b.StopTimer()
+ if err := editor.Close(ctx); err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+ }
}
diff --git a/gopls/internal/regtest/bench/mem_test.go b/gopls/internal/regtest/bench/mem_test.go
index f48b60b..1962678 100644
--- a/gopls/internal/regtest/bench/mem_test.go
+++ b/gopls/internal/regtest/bench/mem_test.go
@@ -7,8 +7,6 @@
import (
"runtime"
"testing"
-
- . "golang.org/x/tools/internal/lsp/regtest"
)
// TestPrintMemStats measures the memory usage of loading a project.
@@ -17,25 +15,25 @@
//
// Kubernetes example:
//
-// $ go test -v -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes
+// $ go test -v -run=TestPrintMemStats -workdir=$HOME/w/kubernetes
// TotalAlloc: 5766 MB
// HeapAlloc: 1984 MB
//
// Both figures exhibit variance of less than 1%.
func TestPrintMemStats(t *testing.T) {
- if *benchDir == "" {
- t.Skip("-didchange_dir is not set")
- }
+ // This test only makes sense when run in isolation, so for now it is
+ // manually skipped.
+ //
+ // TODO(rfindley): figure out a better way to capture memstats as a benchmark
+ // metric.
+ t.Skip("unskip to run this test manually")
- // Load the program...
- opts := benchmarkOptions(*benchDir)
- WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
- // ...and print the memory usage.
- runtime.GC()
- runtime.GC()
- var mem runtime.MemStats
- runtime.ReadMemStats(&mem)
- t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
- t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
- })
+ _ = benchmarkEnv(t)
+
+ runtime.GC()
+ runtime.GC()
+ var mem runtime.MemStats
+ runtime.ReadMemStats(&mem)
+ t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
+ t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
}
diff --git a/gopls/internal/regtest/bench/stress_test.go b/gopls/internal/regtest/bench/stress_test.go
index f7e59fa..a410c30 100644
--- a/gopls/internal/regtest/bench/stress_test.go
+++ b/gopls/internal/regtest/bench/stress_test.go
@@ -11,56 +11,83 @@
"testing"
"time"
- . "golang.org/x/tools/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/hooks"
+ "golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
+ "golang.org/x/tools/internal/lsp/cache"
+ "golang.org/x/tools/internal/lsp/fake"
+ "golang.org/x/tools/internal/lsp/lsprpc"
)
-// Pilosa is a repository that has historically caused significant memory
-// problems for Gopls. We use it for a simple stress test that types
-// arbitrarily in a file with lots of dependents.
+// github.com/pilosa/pilosa is a repository that has historically caused
+// significant memory problems for Gopls. We use it for a simple stress test
+// that types arbitrarily in a file with lots of dependents.
var pilosaPath = flag.String("pilosa_path", "", "Path to a directory containing "+
"github.com/pilosa/pilosa, for stress testing. Do not set this unless you "+
"know what you're doing!")
-func stressTestOptions(dir string) []RunOption {
- opts := benchmarkOptions(dir)
- opts = append(opts, SkipHooks(true), DebugAddress(":8087"))
- return opts
-}
-
func TestPilosaStress(t *testing.T) {
+ // TODO(rfindley): revisit this test and make it hermetic: it should check
+ // out pilosa into a directory.
+ //
+ // Note: This stress test has not been run recently, and may no longer
+ // function properly.
if *pilosaPath == "" {
t.Skip("-pilosa_path not configured")
}
- opts := stressTestOptions(*pilosaPath)
- WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
- files := []string{
- "cmd.go",
- "internal/private.pb.go",
- "roaring/roaring.go",
- "roaring/roaring_internal_test.go",
- "server/handler_test.go",
- }
- for _, file := range files {
- env.OpenFile(file)
- }
- ctx, cancel := context.WithTimeout(env.Ctx, 10*time.Minute)
- defer cancel()
-
- i := 1
- // MagicNumber is an identifier that occurs in roaring.go. Just change it
- // arbitrarily.
- env.RegexpReplace("roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1))
- for {
- select {
- case <-ctx.Done():
- return
- default:
- }
- env.RegexpReplace("roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1))
- time.Sleep(20 * time.Millisecond)
- i++
- }
+ sandbox, err := fake.NewSandbox(&fake.SandboxConfig{
+ Workdir: *pilosaPath,
+ GOPROXY: "https://proxy.golang.org",
})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ server := lsprpc.NewStreamServer(cache.New(nil, nil, hooks.Options), false)
+ ts := servertest.NewPipeServer(server, jsonrpc2.NewRawStream)
+ ctx := context.Background()
+
+ editor, err := fake.NewEditor(sandbox, fake.EditorConfig{}).Connect(ctx, ts, fake.ClientHooks{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ files := []string{
+ "cmd.go",
+ "internal/private.pb.go",
+ "roaring/roaring.go",
+ "roaring/roaring_internal_test.go",
+ "server/handler_test.go",
+ }
+ for _, file := range files {
+ if err := editor.OpenFile(ctx, file); err != nil {
+ t.Fatal(err)
+ }
+ }
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
+ defer cancel()
+
+ i := 1
+ // MagicNumber is an identifier that occurs in roaring.go. Just change it
+ // arbitrarily.
+ if err := editor.RegexpReplace(ctx, "roaring/roaring.go", "MagicNumber", fmt.Sprintf("MagicNumber%d", 1)); err != nil {
+ t.Fatal(err)
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ if err := editor.RegexpReplace(ctx, "roaring/roaring.go", fmt.Sprintf("MagicNumber%d", i), fmt.Sprintf("MagicNumber%d", i+1)); err != nil {
+ t.Fatal(err)
+ }
+ // Simulate (very fast) typing.
+ //
+ // Typing at ~80 wpm corresponds to roughly 150ms per keystroke.
+ time.Sleep(150 * time.Millisecond)
+ i++
+ }
}
diff --git a/gopls/internal/regtest/bench/workspace_symbols_test.go b/gopls/internal/regtest/bench/workspace_symbols_test.go
index ad258dc..fccc818 100644
--- a/gopls/internal/regtest/bench/workspace_symbols_test.go
+++ b/gopls/internal/regtest/bench/workspace_symbols_test.go
@@ -8,67 +8,28 @@
"flag"
"fmt"
"testing"
-
- "golang.org/x/tools/internal/lsp/protocol"
-
- . "golang.org/x/tools/internal/lsp/regtest"
)
-var symbolOptions struct {
- workdir, query, matcher, style string
- printResults bool
-}
+var symbolQuery = flag.String("symbol_query", "test", "symbol query to use in benchmark")
-func init() {
- flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory")
- flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark")
- flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark")
- flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark")
- flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results")
-}
+// BenchmarkWorkspaceSymbols benchmarks the time to execute a workspace symbols
+// request (controlled by the -symbol_query flag).
+func BenchmarkWorkspaceSymbols(b *testing.B) {
+ env := benchmarkEnv(b)
-func TestBenchmarkSymbols(t *testing.T) {
- if symbolOptions.workdir == "" {
- t.Skip("-symbol_workdir not configured")
- }
+ // Make an initial symbol query to warm the cache.
+ symbols := env.WorkspaceSymbol(*symbolQuery)
- opts := benchmarkOptions(symbolOptions.workdir)
- settings := make(Settings)
- if symbolOptions.matcher != "" {
- settings["symbolMatcher"] = symbolOptions.matcher
- }
- if symbolOptions.style != "" {
- settings["symbolStyle"] = symbolOptions.style
- }
- opts = append(opts, settings)
-
- WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
- // We can't Await in this test, since we have disabled hooks. Instead, run
- // one symbol request to completion to ensure all necessary cache entries
- // are populated.
- symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
- Query: symbolOptions.query,
- })
- if err != nil {
- t.Fatal(err)
+ if testing.Verbose() {
+ fmt.Println("Results:")
+ for i := 0; i < len(symbols); i++ {
+ fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
}
+ }
- if symbolOptions.printResults {
- fmt.Println("Results:")
- for i := 0; i < len(symbols); i++ {
- fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
- }
- }
+ b.ResetTimer()
- results := testing.Benchmark(func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
- Query: symbolOptions.query,
- }); err != nil {
- t.Fatal(err)
- }
- }
- })
- printBenchmarkResults(results)
- })
+ for i := 0; i < b.N; i++ {
+ env.WorkspaceSymbol(*symbolQuery)
+ }
}
diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go
index f915fcc..d7246ae 100644
--- a/gopls/internal/regtest/diagnostics/diagnostics_test.go
+++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go
@@ -1296,7 +1296,7 @@
Run(t, dir, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
env.OpenFile("other.go")
- x := env.DiagnosticsFor("main.go")
+ x := env.Awaiter.DiagnosticsFor("main.go")
if x == nil {
t.Fatalf("expected 1 diagnostic, got none")
}
@@ -1304,7 +1304,7 @@
t.Fatalf("main.go, got %d diagnostics, expected 1", len(x.Diagnostics))
}
keep := x.Diagnostics[0]
- y := env.DiagnosticsFor("other.go")
+ y := env.Awaiter.DiagnosticsFor("other.go")
if len(y.Diagnostics) != 1 {
t.Fatalf("other.go: got %d diagnostics, expected 1", len(y.Diagnostics))
}
diff --git a/gopls/internal/regtest/diagnostics/undeclared_test.go b/gopls/internal/regtest/diagnostics/undeclared_test.go
index 79f7d42..ed2b1d0 100644
--- a/gopls/internal/regtest/diagnostics/undeclared_test.go
+++ b/gopls/internal/regtest/diagnostics/undeclared_test.go
@@ -45,7 +45,7 @@
// 'x' is undeclared, but still necessary.
env.OpenFile("a/a.go")
env.Await(env.DiagnosticAtRegexp("a/a.go", "x"))
- diags := env.DiagnosticsFor("a/a.go")
+ diags := env.Awaiter.DiagnosticsFor("a/a.go")
if got := len(diags.Diagnostics); got != 1 {
t.Errorf("len(Diagnostics) = %d, want 1", got)
}
@@ -56,7 +56,7 @@
// 'y = y' is pointless, and should be detected as unnecessary.
env.OpenFile("b/b.go")
env.Await(env.DiagnosticAtRegexp("b/b.go", "y = y"))
- diags = env.DiagnosticsFor("b/b.go")
+ diags = env.Awaiter.DiagnosticsFor("b/b.go")
if got := len(diags.Diagnostics); got != 1 {
t.Errorf("len(Diagnostics) = %d, want 1", got)
}
diff --git a/gopls/internal/regtest/misc/failures_test.go b/gopls/internal/regtest/misc/failures_test.go
index 23fccfd..86c9b22 100644
--- a/gopls/internal/regtest/misc/failures_test.go
+++ b/gopls/internal/regtest/misc/failures_test.go
@@ -29,7 +29,7 @@
var err error
err.Error()
}`
- WithOptions(SkipLogs()).Run(t, mod, func(t *testing.T, env *Env) {
+ Run(t, mod, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
content, _ := env.Hover("main.go", env.RegexpSearch("main.go", "Error"))
// without the //line comment content would be non-nil
diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go
index 170e164..e433f4b 100644
--- a/gopls/internal/regtest/misc/shared_test.go
+++ b/gopls/internal/regtest/misc/shared_test.go
@@ -7,6 +7,7 @@
import (
"testing"
+ "golang.org/x/tools/internal/lsp/fake"
. "golang.org/x/tools/internal/lsp/regtest"
)
@@ -33,8 +34,19 @@
WithOptions(Modes(modes)).Run(t, sharedProgram, func(t *testing.T, env1 *Env) {
// Create a second test session connected to the same workspace and server
// as the first.
- env2, cleanup := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config(), true)
- defer cleanup()
+ awaiter := NewAwaiter(env1.Sandbox.Workdir)
+ editor, err := fake.NewEditor(env1.Sandbox, env1.Editor.Config()).Connect(env1.Ctx, env1.Server, awaiter.Hooks())
+ if err != nil {
+ t.Fatal(err)
+ }
+ env2 := &Env{
+ T: t,
+ Ctx: env1.Ctx,
+ Sandbox: env1.Sandbox,
+ Server: env1.Server,
+ Editor: editor,
+ Awaiter: awaiter,
+ }
env2.Await(InitialWorkspaceLoad)
testFunc(env1, env2)
})
diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go
index 91c8005..a32a06a 100644
--- a/gopls/internal/regtest/modfile/modfile_test.go
+++ b/gopls/internal/regtest/modfile/modfile_test.go
@@ -572,7 +572,7 @@
env.DiagnosticAtRegexpWithMessage("a/main.go", `"example.com/blah/v2"`, "cannot find module providing"),
env.DiagnosticAtRegexpWithMessage("a/go.mod", `require example.com/blah/v2`, "cannot find module providing"),
)
- env.ApplyQuickFixes("a/go.mod", env.DiagnosticsFor("a/go.mod").Diagnostics)
+ env.ApplyQuickFixes("a/go.mod", env.Awaiter.DiagnosticsFor("a/go.mod").Diagnostics)
const want = `module mod.com
go 1.12
diff --git a/gopls/internal/regtest/template/template_test.go b/gopls/internal/regtest/template/template_test.go
index 292088c..ade9ac9 100644
--- a/gopls/internal/regtest/template/template_test.go
+++ b/gopls/internal/regtest/template/template_test.go
@@ -71,7 +71,7 @@
).Run(t, files, func(t *testing.T, env *Env) {
// TODO: can we move this diagnostic onto {{}}?
env.Await(env.DiagnosticAtRegexp("hello.tmpl", "()Hello {{}}"))
- d := env.DiagnosticsFor("hello.tmpl").Diagnostics // issue 50786: check for Source
+ d := env.Awaiter.DiagnosticsFor("hello.tmpl").Diagnostics // issue 50786: check for Source
if len(d) != 1 {
t.Errorf("expected 1 diagnostic, got %d", len(d))
return
diff --git a/internal/jsonrpc2/servertest/servertest.go b/internal/jsonrpc2/servertest/servertest.go
index b879ebd..37f8475 100644
--- a/internal/jsonrpc2/servertest/servertest.go
+++ b/internal/jsonrpc2/servertest/servertest.go
@@ -50,7 +50,7 @@
// Connect dials the test server and returns a jsonrpc2 Connection that is
// ready for use.
-func (s *TCPServer) Connect(ctx context.Context) jsonrpc2.Conn {
+func (s *TCPServer) Connect(_ context.Context) jsonrpc2.Conn {
netConn, err := net.Dial("tcp", s.Addr)
if err != nil {
panic(fmt.Sprintf("servertest: failed to connect to test instance: %v", err))
diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go
index 642957c..1977614 100644
--- a/internal/lsp/diagnostics.go
+++ b/internal/lsp/diagnostics.go
@@ -545,6 +545,7 @@
s.diagnosticsMu.Lock()
defer s.diagnosticsMu.Unlock()
+ // TODO(rfindley): remove this noisy (and not useful) logging.
published := 0
defer func() {
log.Trace.Logf(ctx, "published %d diagnostics", published)
diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go
index a07e9e7..bc2cb2f 100644
--- a/internal/lsp/fake/editor.go
+++ b/internal/lsp/fake/editor.go
@@ -17,9 +17,11 @@
"sync"
"golang.org/x/tools/internal/jsonrpc2"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
"golang.org/x/tools/internal/lsp/command"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/span"
+ "golang.org/x/tools/internal/xcontext"
)
// Editor is a fake editor client. It keeps track of client state and can be
@@ -29,6 +31,7 @@
// Server, client, and sandbox are concurrency safe and written only
// at construction time, so do not require synchronization.
Server protocol.Server
+ cancelConn func()
serverConn jsonrpc2.Conn
client *Client
sandbox *Sandbox
@@ -119,14 +122,19 @@
// It returns the editor, so that it may be called as follows:
//
// editor, err := NewEditor(s).Connect(ctx, conn)
-func (e *Editor) Connect(ctx context.Context, conn jsonrpc2.Conn, hooks ClientHooks) (*Editor, error) {
+func (e *Editor) Connect(ctx context.Context, connector servertest.Connector, hooks ClientHooks) (*Editor, error) {
+ bgCtx, cancelConn := context.WithCancel(xcontext.Detach(ctx))
+ conn := connector.Connect(bgCtx)
+ e.cancelConn = cancelConn
+
e.serverConn = conn
e.Server = protocol.ServerDispatcher(conn)
e.client = &Client{editor: e, hooks: hooks}
- conn.Go(ctx,
+ conn.Go(bgCtx,
protocol.Handlers(
protocol.ClientHandler(e.client,
jsonrpc2.MethodNotFound)))
+
if err := e.initialize(ctx, e.config.WorkspaceFolders); err != nil {
return nil, err
}
@@ -170,6 +178,10 @@
if err := e.Exit(ctx); err != nil {
return err
}
+ defer func() {
+ e.cancelConn()
+ }()
+
// called close on the editor should result in the connection closing
select {
case <-e.serverConn.Done():
diff --git a/internal/lsp/fake/sandbox.go b/internal/lsp/fake/sandbox.go
index 7efbdb3..72b846c 100644
--- a/internal/lsp/fake/sandbox.go
+++ b/internal/lsp/fake/sandbox.go
@@ -70,6 +70,10 @@
// If rootDir is non-empty, it will be used as the root of temporary
// directories created for the sandbox. Otherwise, a new temporary directory
// will be used as root.
+//
+// TODO(rfindley): the sandbox abstraction doesn't seem to carry its weight.
+// Sandboxes should be composed out of their building-blocks, rather than via a
+// monolithic configuration.
func NewSandbox(config *SandboxConfig) (_ *Sandbox, err error) {
if config == nil {
config = new(SandboxConfig)
diff --git a/internal/lsp/lsprpc/lsprpc.go b/internal/lsp/lsprpc/lsprpc.go
index a85e791..7e37229 100644
--- a/internal/lsp/lsprpc/lsprpc.go
+++ b/internal/lsp/lsprpc/lsprpc.go
@@ -56,7 +56,9 @@
server := s.serverForTest
if server == nil {
server = lsp.NewServer(session, client)
- debug.GetInstance(ctx).AddService(server, session)
+ if instance := debug.GetInstance(ctx); instance != nil {
+ instance.AddService(server, session)
+ }
}
return server
}
@@ -71,7 +73,9 @@
server := s.serverForTest
if server == nil {
server = lsp.NewServer(session, client)
- debug.GetInstance(ctx).AddService(server, session)
+ if instance := debug.GetInstance(ctx); instance != nil {
+ instance.AddService(server, session)
+ }
}
// Clients may or may not send a shutdown message. Make sure the server is
// shut down.
diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/internal/lsp/lsprpc/lsprpc_test.go
index 498566d..b43629b 100644
--- a/internal/lsp/lsprpc/lsprpc_test.go
+++ b/internal/lsp/lsprpc/lsprpc_test.go
@@ -226,14 +226,12 @@
}
tsForwarder := servertest.NewPipeServer(forwarder, nil)
- conn1 := tsForwarder.Connect(clientCtx)
- ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1, fake.ClientHooks{})
+ ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, tsForwarder, fake.ClientHooks{})
if err != nil {
t.Fatal(err)
}
defer ed1.Close(clientCtx)
- conn2 := tsBackend.Connect(baseCtx)
- ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, conn2, fake.ClientHooks{})
+ ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, tsBackend, fake.ClientHooks{})
if err != nil {
t.Fatal(err)
}
diff --git a/internal/lsp/regtest/env.go b/internal/lsp/regtest/env.go
index 8960a0d..502636a 100644
--- a/internal/lsp/regtest/env.go
+++ b/internal/lsp/regtest/env.go
@@ -14,25 +14,36 @@
"golang.org/x/tools/internal/jsonrpc2/servertest"
"golang.org/x/tools/internal/lsp/fake"
"golang.org/x/tools/internal/lsp/protocol"
- "golang.org/x/tools/internal/xcontext"
)
-// Env holds an initialized fake Editor, Workspace, and Server, which may be
-// used for writing tests. It also provides adapter methods that call t.Fatal
-// on any error, so that tests for the happy path may be written without
-// checking errors.
+// Env holds the building blocks of an editor testing environment, providing
+// wrapper methods that hide the boilerplate of plumbing contexts and checking
+// errors.
type Env struct {
- T testing.TB
+ T testing.TB // TODO(rfindley): rename to TB
Ctx context.Context
// Most tests should not need to access the scratch area, editor, server, or
// connection, but they are available if needed.
Sandbox *fake.Sandbox
- Editor *fake.Editor
Server servertest.Connector
- // mu guards the fields below, for the purpose of checking conditions on
- // every change to diagnostics.
+ // Editor is owned by the Env, and is shut down when the test exits.
+ Editor *fake.Editor
+
+ Awaiter *Awaiter
+}
+
+// An Awaiter keeps track of relevant LSP state, so that it may be asserted
+// upon with Expectations.
+//
+// Wire it into a fake.Editor using Awaiter.Hooks().
+//
+// TODO(rfindley): consider simply merging Awaiter with the fake.Editor. It
+// probably is not worth its own abstraction.
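+//
+// Typical wiring (a sketch; sandbox, config, and connector are placeholders,
+// mirroring the usage in runner.go):
+//
+//	awaiter := NewAwaiter(sandbox.Workdir)
+//	editor, err := fake.NewEditor(sandbox, config).Connect(ctx, connector, awaiter.Hooks())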
+type Awaiter struct {
+ workdir *fake.Workdir
+
mu sync.Mutex
// For simplicity, each waiter gets a unique ID.
nextWaiterID int
@@ -40,6 +51,32 @@
waiters map[int]*condition
}
+func NewAwaiter(workdir *fake.Workdir) *Awaiter {
+ return &Awaiter{
+ workdir: workdir,
+ state: State{
+ diagnostics: make(map[string]*protocol.PublishDiagnosticsParams),
+ outstandingWork: make(map[protocol.ProgressToken]*workProgress),
+ startedWork: make(map[string]uint64),
+ completedWork: make(map[string]uint64),
+ },
+ waiters: make(map[int]*condition),
+ }
+}
+
+func (a *Awaiter) Hooks() fake.ClientHooks {
+ return fake.ClientHooks{
+ OnDiagnostics: a.onDiagnostics,
+ OnLogMessage: a.onLogMessage,
+ OnWorkDoneProgressCreate: a.onWorkDoneProgressCreate,
+ OnProgress: a.onProgress,
+ OnShowMessage: a.onShowMessage,
+ OnShowMessageRequest: a.onShowMessageRequest,
+ OnRegistration: a.onRegistration,
+ OnUnregistration: a.onUnregistration,
+ }
+}
+
// State encapsulates the server state TODO: explain more
type State struct {
// diagnostics are a map of relative path->diagnostics params
@@ -108,103 +145,55 @@
verdict chan Verdict
}
-// NewEnv creates a new test environment using the given scratch environment
-// and gopls server.
-//
-// The resulting cleanup func must be called to close the jsonrpc2 connection.
-//
-// TODO(rfindley): this function provides questionable value. Consider
-// refactoring to move things like creating the server outside of this
-// constructor.
-func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) (_ *Env, cleanup func()) {
- tb.Helper()
+func (a *Awaiter) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- bgCtx, cleanupConn := context.WithCancel(xcontext.Detach(ctx))
- conn := ts.Connect(bgCtx)
-
- env := &Env{
- T: tb,
- Ctx: ctx,
- Sandbox: sandbox,
- Server: ts,
- state: State{
- diagnostics: make(map[string]*protocol.PublishDiagnosticsParams),
- outstandingWork: make(map[protocol.ProgressToken]*workProgress),
- startedWork: make(map[string]uint64),
- completedWork: make(map[string]uint64),
- },
- waiters: make(map[int]*condition),
- }
- var hooks fake.ClientHooks
- if withHooks {
- hooks = fake.ClientHooks{
- OnDiagnostics: env.onDiagnostics,
- OnLogMessage: env.onLogMessage,
- OnWorkDoneProgressCreate: env.onWorkDoneProgressCreate,
- OnProgress: env.onProgress,
- OnShowMessage: env.onShowMessage,
- OnShowMessageRequest: env.onShowMessageRequest,
- OnRegistration: env.onRegistration,
- OnUnregistration: env.onUnregistration,
- }
- }
- editor, err := fake.NewEditor(sandbox, editorConfig).Connect(bgCtx, conn, hooks)
- if err != nil {
- tb.Fatal(err)
- }
- env.Editor = editor
- return env, cleanupConn
-}
-
-func (e *Env) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- pth := e.Sandbox.Workdir.URIToPath(d.URI)
- e.state.diagnostics[pth] = d
- e.checkConditionsLocked()
+ pth := a.workdir.URIToPath(d.URI)
+ a.state.diagnostics[pth] = d
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
+func (a *Awaiter) onShowMessage(_ context.Context, m *protocol.ShowMessageParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- e.state.showMessage = append(e.state.showMessage, m)
- e.checkConditionsLocked()
+ a.state.showMessage = append(a.state.showMessage, m)
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
+func (a *Awaiter) onShowMessageRequest(_ context.Context, m *protocol.ShowMessageRequestParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- e.state.showMessageRequest = append(e.state.showMessageRequest, m)
- e.checkConditionsLocked()
+ a.state.showMessageRequest = append(a.state.showMessageRequest, m)
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
+func (a *Awaiter) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- e.state.logs = append(e.state.logs, m)
- e.checkConditionsLocked()
+ a.state.logs = append(a.state.logs, m)
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
+func (a *Awaiter) onWorkDoneProgressCreate(_ context.Context, m *protocol.WorkDoneProgressCreateParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- e.state.outstandingWork[m.Token] = &workProgress{}
+ a.state.outstandingWork[m.Token] = &workProgress{}
return nil
}
-func (e *Env) onProgress(_ context.Context, m *protocol.ProgressParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
- work, ok := e.state.outstandingWork[m.Token]
+func (a *Awaiter) onProgress(_ context.Context, m *protocol.ProgressParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ work, ok := a.state.outstandingWork[m.Token]
if !ok {
panic(fmt.Sprintf("got progress report for unknown report %v: %v", m.Token, m))
}
@@ -212,7 +201,7 @@
switch kind := v["kind"]; kind {
case "begin":
work.title = v["title"].(string)
- e.state.startedWork[work.title] = e.state.startedWork[work.title] + 1
+ a.state.startedWork[work.title] = a.state.startedWork[work.title] + 1
if msg, ok := v["message"]; ok {
work.msg = msg.(string)
}
@@ -224,36 +213,36 @@
work.msg = msg.(string)
}
case "end":
- title := e.state.outstandingWork[m.Token].title
- e.state.completedWork[title] = e.state.completedWork[title] + 1
- delete(e.state.outstandingWork, m.Token)
+ title := a.state.outstandingWork[m.Token].title
+ a.state.completedWork[title] = a.state.completedWork[title] + 1
+ delete(a.state.outstandingWork, m.Token)
}
- e.checkConditionsLocked()
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) onRegistration(_ context.Context, m *protocol.RegistrationParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
+func (a *Awaiter) onRegistration(_ context.Context, m *protocol.RegistrationParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- e.state.registrations = append(e.state.registrations, m)
- e.checkConditionsLocked()
+ a.state.registrations = append(a.state.registrations, m)
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error {
- e.mu.Lock()
- defer e.mu.Unlock()
+func (a *Awaiter) onUnregistration(_ context.Context, m *protocol.UnregistrationParams) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
- e.state.unregistrations = append(e.state.unregistrations, m)
- e.checkConditionsLocked()
+ a.state.unregistrations = append(a.state.unregistrations, m)
+ a.checkConditionsLocked()
return nil
}
-func (e *Env) checkConditionsLocked() {
- for id, condition := range e.waiters {
- if v, _ := checkExpectations(e.state, condition.expectations); v != Unmet {
- delete(e.waiters, id)
+func (a *Awaiter) checkConditionsLocked() {
+ for id, condition := range a.waiters {
+ if v, _ := checkExpectations(a.state, condition.expectations); v != Unmet {
+ delete(a.waiters, id)
condition.verdict <- v
}
}
@@ -276,53 +265,62 @@
// DiagnosticsFor returns the current diagnostics for the file. It is useful
// after waiting on AnyDiagnosticAtCurrentVersion, when the desired diagnostic
// is not simply described by DiagnosticAt.
-func (e *Env) DiagnosticsFor(name string) *protocol.PublishDiagnosticsParams {
- e.mu.Lock()
- defer e.mu.Unlock()
- return e.state.diagnostics[name]
+//
+// TODO(rfindley): this method is inherently racy. Replace usages of this
+// method with the atomic OnceMet(..., ReadDiagnostics) pattern.
+func (a *Awaiter) DiagnosticsFor(name string) *protocol.PublishDiagnosticsParams {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.state.diagnostics[name]
+}
+
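+// Await waits for the given expectations to be met, calling t.Fatal if they
+// are not. It is a convenience wrapper around Awaiter.Await.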
+func (e *Env) Await(expectations ...Expectation) {
+ if err := e.Awaiter.Await(e.Ctx, expectations...); err != nil {
+ e.T.Fatal(err)
+ }
}
// Await waits for all expectations to simultaneously be met. It should only be
// called from the main test goroutine.
-func (e *Env) Await(expectations ...Expectation) {
- e.T.Helper()
- e.mu.Lock()
+func (a *Awaiter) Await(ctx context.Context, expectations ...Expectation) error {
+ a.mu.Lock()
// Before adding the waiter, we check if the condition is currently met or
// failed to avoid a race where the condition was realized before Await was
// called.
- switch verdict, summary := checkExpectations(e.state, expectations); verdict {
+ switch verdict, summary := checkExpectations(a.state, expectations); verdict {
case Met:
- e.mu.Unlock()
- return
+ a.mu.Unlock()
+ return nil
case Unmeetable:
- failure := fmt.Sprintf("unmeetable expectations:\n%s\nstate:\n%v", summary, e.state)
- e.mu.Unlock()
- e.T.Fatal(failure)
+ err := fmt.Errorf("unmeetable expectations:\n%s\nstate:\n%v", summary, a.state)
+ a.mu.Unlock()
+ return err
}
cond := &condition{
expectations: expectations,
verdict: make(chan Verdict),
}
- e.waiters[e.nextWaiterID] = cond
- e.nextWaiterID++
- e.mu.Unlock()
+ a.waiters[a.nextWaiterID] = cond
+ a.nextWaiterID++
+ a.mu.Unlock()
var err error
select {
- case <-e.Ctx.Done():
- err = e.Ctx.Err()
+ case <-ctx.Done():
+ err = ctx.Err()
case v := <-cond.verdict:
if v != Met {
err = fmt.Errorf("condition has final verdict %v", v)
}
}
- e.mu.Lock()
- defer e.mu.Unlock()
- _, summary := checkExpectations(e.state, expectations)
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ _, summary := checkExpectations(a.state, expectations)
// Debugging an unmet expectation can be tricky, so we put some effort into
// nicely formatting the failure.
if err != nil {
- e.T.Fatalf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, e.state)
+ return fmt.Errorf("waiting on:\n%s\nerr:%v\n\nstate:\n%v", summary, err, a.state)
}
+ return nil
}
diff --git a/internal/lsp/regtest/env_test.go b/internal/lsp/regtest/env_test.go
index fe5864c..f54f7f2 100644
--- a/internal/lsp/regtest/env_test.go
+++ b/internal/lsp/regtest/env_test.go
@@ -13,7 +13,7 @@
)
func TestProgressUpdating(t *testing.T) {
- e := &Env{
+ a := &Awaiter{
state: State{
outstandingWork: make(map[protocol.ProgressToken]*workProgress),
startedWork: make(map[string]uint64),
@@ -21,12 +21,12 @@
},
}
ctx := context.Background()
- if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+ if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
Token: "foo",
}); err != nil {
t.Fatal(err)
}
- if err := e.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
+ if err := a.onWorkDoneProgressCreate(ctx, &protocol.WorkDoneProgressCreateParams{
Token: "bar",
}); err != nil {
t.Fatal(err)
@@ -53,14 +53,14 @@
if err := json.Unmarshal(data, &unmarshaled); err != nil {
t.Fatal(err)
}
- if err := e.onProgress(ctx, &unmarshaled); err != nil {
+ if err := a.onProgress(ctx, &unmarshaled); err != nil {
t.Fatal(err)
}
}
- if _, ok := e.state.outstandingWork["foo"]; ok {
+ if _, ok := a.state.outstandingWork["foo"]; ok {
t.Error("got work entry for \"foo\", want none")
}
- got := *e.state.outstandingWork["bar"]
+ got := *a.state.outstandingWork["bar"]
want := workProgress{title: "bar work", percent: 42}
if got != want {
t.Errorf("work progress for \"bar\": %v, want %v", got, want)
diff --git a/internal/lsp/regtest/expectation.go b/internal/lsp/regtest/expectation.go
index 3253885..a0a7d52 100644
--- a/internal/lsp/regtest/expectation.go
+++ b/internal/lsp/regtest/expectation.go
@@ -617,24 +617,6 @@
}
}
-// AnyDiagnosticAtCurrentVersion asserts that there is a diagnostic report for
-// the current edited version of the buffer corresponding to the given
-// workdir-relative pathname.
-func (e *Env) AnyDiagnosticAtCurrentVersion(name string) Expectation {
- version := e.Editor.BufferVersion(name)
- check := func(s State) Verdict {
- diags, ok := s.diagnostics[name]
- if ok && diags.Version == int32(version) {
- return Met
- }
- return Unmet
- }
- return SimpleExpectation{
- check: check,
- description: fmt.Sprintf("any diagnostics at version %d", version),
- }
-}
-
// DiagnosticAtRegexp expects that there is a diagnostic entry at the start
// position matching the regexp search string re in the buffer specified by
// name. Note that this currently ignores the end position.
diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go
index 12fd323..93bb139 100644
--- a/internal/lsp/regtest/runner.go
+++ b/internal/lsp/regtest/runner.go
@@ -132,13 +132,10 @@
}
type runConfig struct {
- editor fake.EditorConfig
- sandbox fake.SandboxConfig
- modes Mode
- noDefaultTimeout bool
- debugAddr string
- skipLogs bool
- skipHooks bool
+ editor fake.EditorConfig
+ sandbox fake.SandboxConfig
+ modes Mode
+ skipHooks bool
}
// A RunOption augments the behavior of the test runner.
@@ -152,15 +149,6 @@
f(opts)
}
-// NoDefaultTimeout removes the timeout set by the -regtest_timeout flag, for
-// individual tests that are expected to run longer than is reasonable for
-// ordinary regression tests.
-func NoDefaultTimeout() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.noDefaultTimeout = true
- })
-}
-
// ProxyFiles configures a file proxy using the given txtar-encoded string.
func ProxyFiles(txt string) RunOption {
return optionSetter(func(opts *runConfig) {
@@ -241,50 +229,6 @@
})
}
-// DebugAddress configures a debug server bound to addr. This option is
-// currently only supported when executing in Default mode. It is intended to
-// be used for long-running stress tests.
-func DebugAddress(addr string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.debugAddr = addr
- })
-}
-
-// SkipLogs skips the buffering of logs during test execution. It is intended
-// for long-running stress tests.
-func SkipLogs() RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.skipLogs = true
- })
-}
-
-// InExistingDir runs the test in a pre-existing directory. If set, no initial
-// files may be passed to the runner. It is intended for long-running stress
-// tests.
-func InExistingDir(dir string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.sandbox.Workdir = dir
- })
-}
-
-// SkipHooks allows for disabling the test runner's client hooks that are used
-// for instrumenting expectations (tracking diagnostics, logs, work done,
-// etc.). It is intended for performance-sensitive stress tests or benchmarks.
-func SkipHooks(skip bool) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.skipHooks = skip
- })
-}
-
-// GOPROXY configures the test environment to have an explicit proxy value.
-// This is intended for stress tests -- to ensure their isolation, regtests
-// should instead use WithProxyFiles.
-func GOPROXY(goproxy string) RunOption {
- return optionSetter(func(opts *runConfig) {
- opts.sandbox.GOPROXY = goproxy
- })
-}
-
type TestFunc func(t *testing.T, env *Env)
// Run executes the test function in the default configured gopls execution
@@ -321,20 +265,13 @@
continue
}
- if config.debugAddr != "" && tc.mode != Default {
- // Debugging is useful for running stress tests, but since the daemon has
- // likely already been started, it would be too late to debug.
- t.Fatalf("debugging regtest servers only works in Default mode, "+
- "got debug addr %q and mode %v", config.debugAddr, tc.mode)
- }
-
t.Run(tc.name, func(t *testing.T) {
// TODO(rfindley): once jsonrpc2 shutdown is fixed, we should not leak
// goroutines in this test function.
// stacktest.NoLeak(t)
ctx := context.Background()
- if r.Timeout != 0 && !config.noDefaultTimeout {
+ if r.Timeout != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.Timeout)
defer cancel()
@@ -345,12 +282,8 @@
defer cancel()
}
+ // TODO(rfindley): do we need an instance at all? Can it be removed?
ctx = debug.WithInstance(ctx, "", "off")
- if config.debugAddr != "" {
- di := debug.GetInstance(ctx)
- di.Serve(ctx, config.debugAddr)
- di.MonitorMemory(ctx)
- }
rootDir := filepath.Join(r.tempDir, filepath.FromSlash(t.Name()))
if err := os.MkdirAll(rootDir, 0755); err != nil {
@@ -382,12 +315,22 @@
framer := jsonrpc2.NewRawStream
ls := &loggingFramer{}
- if !config.skipLogs {
- framer = ls.framer(jsonrpc2.NewRawStream)
- }
+ framer = ls.framer(jsonrpc2.NewRawStream)
ts := servertest.NewPipeServer(ss, framer)
- env, cleanup := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks)
- defer cleanup()
+
+ awaiter := NewAwaiter(sandbox.Workdir)
+ editor, err := fake.NewEditor(sandbox, config.editor).Connect(ctx, ts, awaiter.Hooks())
+ if err != nil {
+ t.Fatal(err)
+ }
+ env := &Env{
+ T: t,
+ Ctx: ctx,
+ Sandbox: sandbox,
+ Editor: editor,
+ Server: ts,
+ Awaiter: awaiter,
+ }
defer func() {
if t.Failed() && r.PrintGoroutinesOnFailure {
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
@@ -402,7 +345,7 @@
// the editor: in general we want to clean up before proceeding to the
// next test, and if there is a deadlock preventing closing it will
// eventually be handled by the `go test` timeout.
- if err := env.Editor.Close(xcontext.Detach(ctx)); err != nil {
+ if err := editor.Close(xcontext.Detach(ctx)); err != nil {
t.Errorf("closing editor: %v", err)
}
}()