all: merge master (f112c43) into gopls-release-branch.0.10
Also update the go.mod to put back the replace directive.
Conflicts:
- gopls/go.mod
- gopls/go.sum
Merge List:
+ 2022-10-20 f112c4332 go.mod: update golang.org/x dependencies
+ 2022-10-20 207f456f2 go/internal/gcimporter: bump version number in skew check
+ 2022-10-20 65196caee gopls/README.md: fix wording around supported Go versions
+ 2022-10-20 61280309a gopls/internal: support renaming packages with int. test variants
+ 2022-10-20 649df2ea1 go.mod: mark as requiring -compat 1.16
+ 2022-10-19 91311ab3b gopls/internal/lsp/cache: better import path hygiene
+ 2022-10-19 9eda97bc2 go/analysis: enable a test that applies after go list behavior change
+ 2022-10-19 b50d7ba6e gopls: minor cleanup of standalone package support
+ 2022-10-19 502b93c33 gopls/internal/lsp: tolerate missing end position in RelatedInformation
+ 2022-10-19 d67c3ada0 internal/imports: repair warnings from default analyzers
+ 2022-10-18 bc2e3aeab internal/jsonrpc2_v2: add Func convenience wrappers for the Binder and Preempter interfaces
+ 2022-10-18 a9b653b41 cmd/compilebench: use -a instead of -i to ensure dependencies are built
+ 2022-10-18 4818d9eec Revert "gopls/internal/lsp/cache: disable strict analysis while we fix panics"
+ 2022-10-17 b2efd4d15 internal/jsonrpc2_v2: eliminate most arbitrary timeouts in tests
+ 2022-10-17 371ef162f internal/jsonrpc2_v2: rework concurrency in idleListener
+ 2022-10-17 593553125 internal/jsonrpc2_v2: remove “Box” suffix from channel field names
+ 2022-10-17 fd32990e0 internal/jsonrpc2_v2: error out in-flight client calls when the reader breaks
+ 2022-10-17 0e222f5c6 internal/jsonrpc2_v2: close the underlying connection if Wait is called instead of Close
+ 2022-10-17 bc4e384f8 gopls/internal/lsp/cache: fix crash in analysis
+ 2022-10-17 b93a56f28 refactor/satisfy: fix visiting functions in the unsafe package
+ 2022-10-15 9b5e55b1a gopls/internal/lsp/cache: disable strict analysis while we fix panics
+ 2022-10-13 9ffaf69c2 gopls/internal/lsp/cache: minor analysis cleanups
Change-Id: I2337730a702e26b41fa6167c4aea4f8dc4b9a48f
diff --git a/cmd/compilebench/main.go b/cmd/compilebench/main.go
index 28dc450..abdf28a 100644
--- a/cmd/compilebench/main.go
+++ b/cmd/compilebench/main.go
@@ -336,9 +336,9 @@
func (c compile) run(name string, count int) error {
// Make sure dependencies needed by go tool compile are installed to GOROOT/pkg.
- out, err := exec.Command(*flagGoCmd, "build", "-i", c.dir).CombinedOutput()
+ out, err := exec.Command(*flagGoCmd, "build", "-a", c.dir).CombinedOutput()
if err != nil {
- return fmt.Errorf("go build -i %s: %v\n%s", c.dir, err, out)
+ return fmt.Errorf("go build -a %s: %v\n%s", c.dir, err, out)
}
// Find dir and source file list.
@@ -406,9 +406,9 @@
}
// Build dependencies.
- out, err := exec.Command(*flagGoCmd, "build", "-i", "-o", "/dev/null", r.dir).CombinedOutput()
+ out, err := exec.Command(*flagGoCmd, "build", "-a", "-o", "/dev/null", r.dir).CombinedOutput()
if err != nil {
- return fmt.Errorf("go build -i %s: %v\n%s", r.dir, err, out)
+ return fmt.Errorf("go build -a %s: %v\n%s", r.dir, err, out)
}
// Build the main package.
diff --git a/go.mod b/go.mod
index 492efe9..cfc184e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,10 @@
module golang.org/x/tools
-go 1.18
+go 1.18 // tagx:compat 1.16
require (
github.com/yuin/goldmark v1.4.13
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
- golang.org/x/net v0.0.0-20220722155237-a158d28d115b
- golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
+ golang.org/x/mod v0.6.0
+ golang.org/x/net v0.1.0
+ golang.org/x/sys v0.1.0
)
diff --git a/go.sum b/go.sum
index 20a9d41..da2cda7 100644
--- a/go.sum
+++ b/go.sum
@@ -2,24 +2,32 @@
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/go/analysis/diagnostic.go b/go/analysis/diagnostic.go
index cd462a0..5cdcf46 100644
--- a/go/analysis/diagnostic.go
+++ b/go/analysis/diagnostic.go
@@ -37,7 +37,7 @@
// declaration.
type RelatedInformation struct {
Pos token.Pos
- End token.Pos
+ End token.Pos // optional
Message string
}
diff --git a/go/analysis/internal/checker/checker_test.go b/go/analysis/internal/checker/checker_test.go
index d91425e..f07963f 100644
--- a/go/analysis/internal/checker/checker_test.go
+++ b/go/analysis/internal/checker/checker_test.go
@@ -172,7 +172,8 @@
// no errors
{name: "no-errors", pattern: []string{"sort"}, analyzers: []*analysis.Analyzer{analyzer, noop}, code: 0},
} {
- if test.name == "despite-error" { // TODO(matloob): once CL 437298 is submitted, add the condition testenv.Go1Point() < 20
+ if test.name == "despite-error" && testenv.Go1Point() < 20 {
+ // The behavior in the comment on the despite-error test only occurs for Go 1.20+.
continue
}
if got := checker.Run(test.pattern, test.analyzers); got != test.code {
diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go
index 2ed25a7..42adb8f 100644
--- a/go/gcexportdata/gcexportdata.go
+++ b/go/gcexportdata/gcexportdata.go
@@ -87,7 +87,11 @@
// Read reads export data from in, decodes it, and returns type
// information for the package.
-// The package name is specified by path.
+//
+// The package path (effectively its linker symbol prefix) is
+// specified by path, since unlike the package name, this information
+// may not be recorded in the export data.
+//
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
diff --git a/go/internal/gcimporter/iimport.go b/go/internal/gcimporter/iimport.go
index 4caa0f5..6e4c066 100644
--- a/go/internal/gcimporter/iimport.go
+++ b/go/internal/gcimporter/iimport.go
@@ -51,6 +51,8 @@
iexportVersionPosCol = 1
iexportVersionGo1_18 = 2
iexportVersionGenerics = 2
+
+ iexportVersionCurrent = 2
)
type ident struct {
@@ -96,7 +98,7 @@
}
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) {
- const currentVersion = 1
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
if !debug {
defer func() {
diff --git a/gopls/README.md b/gopls/README.md
index 9afc2e4..646419b 100644
--- a/gopls/README.md
+++ b/gopls/README.md
@@ -76,9 +76,9 @@
support for the last 4 major Go releases, but this support extends only to not
breaking the build and avoiding easily fixable regressions.
-The following table shows the final gopls version that supports being built at
-a given Go Version. Any more recent Go versions missing from this table can
-still be built with the latest version of gopls.
+The following table shows the final gopls version that supports being built
+with a given Go version. Go releases more recent than any in the table can
+build any version of gopls.
| Go Version | Final gopls Version With Support |
| ----------- | -------------------------------- |
diff --git a/gopls/go.mod b/gopls/go.mod
index 9768f03..efb9be1 100644
--- a/gopls/go.mod
+++ b/gopls/go.mod
@@ -7,11 +7,11 @@
github.com/jba/printsrc v0.2.2
github.com/jba/templatecheck v0.6.0
github.com/sergi/go-diff v1.1.0
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
+ golang.org/x/mod v0.6.0
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
- golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664
- golang.org/x/text v0.3.7
- golang.org/x/tools v0.1.13-0.20221013174914-7b95c71b8d4d
+ golang.org/x/sys v0.1.0
+ golang.org/x/text v0.4.0
+ golang.org/x/tools v0.1.13-0.20220928184430-f80e98464e27
golang.org/x/vuln v0.0.0-20221010193109-563322be2ea9
gopkg.in/yaml.v3 v3.0.1
honnef.co/go/tools v0.3.3
@@ -26,3 +26,5 @@
github.com/google/safehtml v0.0.2 // indirect
golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e // indirect
)
+
+replace golang.org/x/tools => ../
diff --git a/gopls/go.sum b/gopls/go.sum
index 1050989..78ff483 100644
--- a/gopls/go.sum
+++ b/gopls/go.sum
@@ -40,67 +40,38 @@
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e h1:7Xs2YCOpMlNqSQSmrrnhlzBXIE/bpMecZplbLePTJvE=
golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs=
-golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
-golang.org/x/tools v0.1.13-0.20220928184430-f80e98464e27/go.mod h1:VsjNM1dMo+Ofkp5d7y7fOdQZD8MTXSQ4w3EPk65AvKU=
-golang.org/x/tools v0.1.13-0.20221013174914-7b95c71b8d4d h1:XNTt4f6Y+5Zy40ZyJv5GKPlzKa2+UBdyrNwi7PQhkR4=
-golang.org/x/tools v0.1.13-0.20221013174914-7b95c71b8d4d/go.mod h1:VsjNM1dMo+Ofkp5d7y7fOdQZD8MTXSQ4w3EPk65AvKU=
+golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/vuln v0.0.0-20221010193109-563322be2ea9 h1:KaYZQUtEEaV8aVADIHAuYBTjo77aUcCvC7KTGKM3J1I=
golang.org/x/vuln v0.0.0-20221010193109-563322be2ea9/go.mod h1:F12iebNzxRMpJsm4W7ape+r/KdnXiSy3VC94WsyCG68=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/gopls/internal/lsp/cache/analysis.go b/gopls/internal/lsp/cache/analysis.go
index d41116c..fa2f7ea 100644
--- a/gopls/internal/lsp/cache/analysis.go
+++ b/gopls/internal/lsp/cache/analysis.go
@@ -66,21 +66,19 @@
analyzer *analysis.Analyzer
}
-type actionHandleKey source.Hash
-
// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type actionHandle struct {
+ key actionKey // just for String()
promise *memoize.Promise // [actionResult]
-
- analyzer *analysis.Analyzer
- pkg *pkg
}
// actionData is the successful result of analyzing a package.
type actionData struct {
+ analyzer *analysis.Analyzer
+ pkgTypes *types.Package // types only; don't keep syntax live
diagnostics []*source.Diagnostic
result interface{}
objectFacts map[objectFactKey]analysis.Fact
@@ -134,7 +132,7 @@
}
// Add a dependency on each required analyzer.
- var deps []*actionHandle
+ var deps []*actionHandle // unordered
for _, req := range a.Requires {
// TODO(adonovan): opt: there's no need to repeat the package-handle
// portion of the recursion here, since we have the pkg already.
@@ -152,7 +150,7 @@
// An analysis that consumes/produces facts
// must run on the package's dependencies too.
if len(a.FactTypes) > 0 {
- for _, importID := range ph.m.Deps {
+ for _, importID := range ph.m.Imports {
depActionHandle, err := s.actionHandle(ctx, importID, a)
if err != nil {
return nil, err
@@ -162,15 +160,23 @@
}
}
- promise, release := s.store.Promise(buildActionKey(a, ph), func(ctx context.Context, arg interface{}) interface{} {
+ // The promises are kept in a store on the package,
+ // so the key need only include the analyzer name.
+ //
+ // (Since type-checking and analysis depend on the identity
+ // of packages--distinct packages produced by the same
+ // recipe are not fungible--we must in effect use the package
+ // itself as part of the key. Rather than actually use a pointer
+ // in the key, we get a simpler object graph if we shard the
+ // store by packages.)
+ promise, release := pkg.analyses.Promise(a.Name, func(ctx context.Context, arg interface{}) interface{} {
res, err := actionImpl(ctx, arg.(*snapshot), deps, a, pkg)
return actionResult{res, err}
})
ah := &actionHandle{
- analyzer: a,
- pkg: pkg,
- promise: promise,
+ key: key,
+ promise: promise,
}
s.mu.Lock()
@@ -187,12 +193,12 @@
return ah, nil
}
-func buildActionKey(a *analysis.Analyzer, ph *packageHandle) actionHandleKey {
- return actionHandleKey(source.Hashf("%p%s", a, ph.key[:]))
+func (key actionKey) String() string {
+ return fmt.Sprintf("%s@%s", key.analyzer, key.pkgid)
}
func (act *actionHandle) String() string {
- return fmt.Sprintf("%s@%s", act.analyzer, act.pkg.PkgPath())
+ return act.key.String()
}
// actionImpl runs the analysis for action node (analyzer, pkg),
@@ -222,29 +228,20 @@
mu.Lock()
defer mu.Unlock()
- if dep.pkg == pkg {
+ if data.pkgTypes == pkg.types {
// Same package, different analysis (horizontal edge):
// in-memory outputs of prerequisite analyzers
// become inputs to this analysis pass.
- inputs[dep.analyzer] = data.result
+ inputs[data.analyzer] = data.result
- } else if dep.analyzer == analyzer {
+ } else if data.analyzer == analyzer {
// Same analysis, different package (vertical edge):
// serialized facts produced by prerequisite analysis
// become available to this analysis pass.
for key, fact := range data.objectFacts {
- // Filter out facts related to objects
- // that are irrelevant downstream
- // (equivalently: not in the compiler export data).
- if !exportedFrom(key.obj, dep.pkg.types) {
- continue
- }
objectFacts[key] = fact
}
for key, fact := range data.packageFacts {
- // TODO: filter out facts that belong to
- // packages not mentioned in the export data
- // to prevent side channels.
packageFacts[key] = fact
}
@@ -268,7 +265,11 @@
if source.IsCommandLineArguments(pkg.ID()) {
errorf = fmt.Errorf // suppress reporting
}
- return errorf("internal error: unexpected analysis dependency %s@%s -> %s", analyzer.Name, pkg.ID(), dep)
+ err := errorf("internal error: unexpected analysis dependency %s@%s -> %s", analyzer.Name, pkg.ID(), dep)
+ // Log the event in any case, as the ultimate
+ // consumer of actionResult ignores errors.
+ event.Error(ctx, "analysis", err)
+ return err
}
return nil
})
@@ -401,6 +402,16 @@
panic(fmt.Sprintf("%s:%s: Pass.ExportPackageFact(%T) called after Run", analyzer.Name, pkg.PkgPath(), fact))
}
+ // Filter out facts related to objects that are irrelevant downstream
+ // (equivalently: not in the compiler export data).
+ for key := range objectFacts {
+ if !exportedFrom(key.obj, pkg.types) {
+ delete(objectFacts, key)
+ }
+ }
+ // TODO: filter out facts that belong to packages not
+ // mentioned in the export data to prevent side channels.
+
var diagnostics []*source.Diagnostic
for _, diag := range rawDiagnostics {
srcDiags, err := analysisDiagnosticDiagnostics(snapshot, pkg, analyzer, &diag)
@@ -411,6 +422,8 @@
diagnostics = append(diagnostics, srcDiags...)
}
return &actionData{
+ analyzer: analyzer,
+ pkgTypes: pkg.types,
diagnostics: diagnostics,
result: result,
objectFacts: objectFacts,
@@ -434,8 +447,10 @@
case *types.Var:
return obj.Exported() && obj.Pkg() == pkg ||
obj.IsField()
- case *types.TypeName, *types.Const:
+ case *types.TypeName:
return true
+ case *types.Const:
+ return obj.Exported() && obj.Pkg() == pkg
}
return false // Nil, Builtin, Label, or PkgName
}
diff --git a/gopls/internal/lsp/cache/check.go b/gopls/internal/lsp/cache/check.go
index a186a81..70eaed0 100644
--- a/gopls/internal/lsp/cache/check.go
+++ b/gopls/internal/lsp/cache/check.go
@@ -11,7 +11,6 @@
"fmt"
"go/ast"
"go/types"
- "path"
"path/filepath"
"regexp"
"strings"
@@ -99,9 +98,13 @@
// TODO(adonovan): use a promise cache to ensure that the key
// for each package is computed by at most one thread, then do
// the recursive key building of dependencies in parallel.
- deps := make(map[PackagePath]*packageHandle)
- depKeys := make([]packageHandleKey, len(m.Deps))
- for i, depID := range m.Deps {
+ deps := make(map[PackageID]*packageHandle)
+ var depKey source.Hash // XOR of all unique deps
+ for _, depID := range m.Imports {
+ depHandle, ok := deps[depID]
+ if ok {
+ continue // e.g. duplicate import
+ }
depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
// Don't use invalid metadata for dependencies if the top-level
// metadata is valid. We only load top-level packages, so if the
@@ -125,8 +128,9 @@
continue
}
- deps[depHandle.m.PkgPath] = depHandle
- depKeys[i] = depHandle.key
+ depKey.XORWith(source.Hash(depHandle.key))
+
+ deps[depID] = depHandle
}
// Read both lists of files of this package, in parallel.
@@ -144,9 +148,8 @@
// All the file reading has now been done.
// Create a handle for the result of type checking.
experimentalKey := s.View().Options().ExperimentalPackageCacheKey
- phKey := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey)
+ phKey := computePackageKey(m.ID, compiledGoFiles, m, depKey, mode, experimentalKey)
promise, release := s.store.Promise(phKey, func(ctx context.Context, arg interface{}) interface{} {
-
pkg, err := typeCheckImpl(ctx, arg.(*snapshot), goFiles, compiledGoFiles, m.Metadata, mode, deps)
return typeCheckResult{pkg, err}
})
@@ -225,8 +228,8 @@
// computePackageKey returns a key representing the act of type checking
// a package named id containing the specified files, metadata, and
-// dependency hashes.
-func computePackageKey(id PackageID, files []source.FileHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
+// combined dependency hash.
+func computePackageKey(id PackageID, files []source.FileHandle, m *KnownMetadata, depsKey source.Hash, mode source.ParseMode, experimentalKey bool) packageHandleKey {
// TODO(adonovan): opt: no need to materalize the bytes; hash them directly.
// Also, use field separators to avoid spurious collisions.
b := bytes.NewBuffer(nil)
@@ -243,9 +246,7 @@
b.Write(hc[:])
}
b.WriteByte(byte(mode))
- for _, dep := range deps {
- b.Write(dep[:])
- }
+ b.Write(depsKey[:])
for _, file := range files {
b.WriteString(file.FileIdentity().String())
}
@@ -311,7 +312,7 @@
// typeCheckImpl type checks the parsed source files in compiledGoFiles.
// (The resulting pkg also holds the parsed but not type-checked goFiles.)
// deps holds the future results of type-checking the direct dependencies.
-func typeCheckImpl(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) {
+func typeCheckImpl(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackageID]*packageHandle) (*pkg, error) {
// Start type checking of direct dependencies,
// in parallel and asynchronously.
// As the type checker imports each of these
@@ -446,15 +447,15 @@
var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
-func doTypeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
+func doTypeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackageID]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID)))
defer done()
pkg := &pkg{
- m: m,
- mode: mode,
- imports: make(map[PackagePath]*pkg),
- types: types.NewPackage(string(m.PkgPath), string(m.Name)),
+ m: m,
+ mode: mode,
+ depsByPkgPath: make(map[PackagePath]*pkg),
+ types: types.NewPackage(string(m.PkgPath), string(m.Name)),
typesInfo: &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
@@ -522,23 +523,30 @@
Error: func(e error) {
pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
},
- Importer: importerFunc(func(pkgPath string) (*types.Package, error) {
- // If the context was cancelled, we should abort.
- if ctx.Err() != nil {
- return nil, ctx.Err()
+ Importer: importerFunc(func(path string) (*types.Package, error) {
+ // While all of the import errors could be reported
+ // based on the metadata before we start type checking,
+ // reporting them via types.Importer places the errors
+ // at the correct source location.
+ id, ok := pkg.m.Imports[ImportPath(path)]
+ if !ok {
+ // If the import declaration is broken,
+ // go list may fail to report metadata about it.
+ // See TestFixImportDecl for an example.
+ return nil, fmt.Errorf("missing metadata for import of %q", path)
}
- dep := resolveImportPath(pkgPath, pkg, deps)
- if dep == nil {
- return nil, snapshot.missingPkgError(ctx, pkgPath)
+ dep, ok := deps[id]
+ if !ok {
+ return nil, snapshot.missingPkgError(ctx, path)
}
if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) {
- return nil, fmt.Errorf("invalid use of internal package %s", pkgPath)
+ return nil, fmt.Errorf("invalid use of internal package %s", path)
}
depPkg, err := dep.await(ctx, snapshot)
if err != nil {
return nil, err
}
- pkg.imports[depPkg.m.PkgPath] = depPkg
+ pkg.depsByPkgPath[depPkg.m.PkgPath] = depPkg
return depPkg.types, nil
}),
}
@@ -840,41 +848,6 @@
return result
}
-// resolveImportPath resolves an import path in pkg to a package from deps.
-// It should produce the same results as resolveImportPath:
-// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;drc=641918ee09cb44d282a30ee8b66f99a0b63eaef9;l=990.
-func resolveImportPath(importPath string, pkg *pkg, deps map[PackagePath]*packageHandle) *packageHandle {
- if dep := deps[PackagePath(importPath)]; dep != nil {
- return dep
- }
- // We may be in GOPATH mode, in which case we need to check vendor dirs.
- searchDir := path.Dir(pkg.PkgPath())
- for {
- vdir := PackagePath(path.Join(searchDir, "vendor", importPath))
- if vdep := deps[vdir]; vdep != nil {
- return vdep
- }
-
- // Search until Dir doesn't take us anywhere new, e.g. "." or "/".
- next := path.Dir(searchDir)
- if searchDir == next {
- break
- }
- searchDir = next
- }
-
- // Vendor didn't work. Let's try minimal module compatibility mode.
- // In MMC, the packagePath is the canonical (.../vN/...) path, which
- // is hard to calculate. But the go command has already resolved the ID
- // to the non-versioned path, and we can take advantage of that.
- for _, dep := range deps {
- if dep.ID() == importPath {
- return dep
- }
- }
- return nil
-}
-
// An importFunc is an implementation of the single-method
// types.Importer interface based on a function value.
type importerFunc func(path string) (*types.Package, error)
diff --git a/gopls/internal/lsp/cache/errors.go b/gopls/internal/lsp/cache/errors.go
index 1c38e20..bca68d5 100644
--- a/gopls/internal/lsp/cache/errors.go
+++ b/gopls/internal/lsp/cache/errors.go
@@ -334,7 +334,11 @@
if tokFile == nil {
return nil, bug.Errorf("no file for %q diagnostic position", diag.Category)
}
- spn, err := span.NewRange(tokFile, related.Pos, related.End).Span()
+ end := related.End
+ if !end.IsValid() {
+ end = related.Pos
+ }
+ spn, err := span.NewRange(tokFile, related.Pos, end).Span()
if err != nil {
return nil, err
}
diff --git a/gopls/internal/lsp/cache/graph.go b/gopls/internal/lsp/cache/graph.go
index 3d530c4..047a55c 100644
--- a/gopls/internal/lsp/cache/graph.go
+++ b/gopls/internal/lsp/cache/graph.go
@@ -53,7 +53,7 @@
// Build the import graph.
g.importedBy = make(map[PackageID][]PackageID)
for id, m := range g.metadata {
- for _, importID := range m.Deps {
+ for _, importID := range uniqueDeps(m.Imports) {
g.importedBy[importID] = append(g.importedBy[importID], id)
}
}
@@ -129,8 +129,27 @@
}
}
+// uniqueDeps returns a new sorted and duplicate-free slice containing the
+// IDs of the package's direct dependencies.
+func uniqueDeps(imports map[ImportPath]PackageID) []PackageID {
+ // TODO(adonovan): use generic maps.SortedUniqueValues(m.Imports) when available.
+ ids := make([]PackageID, 0, len(imports))
+ for _, id := range imports {
+ ids = append(ids, id)
+ }
+ sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
+ // de-duplicate in place
+ out := ids[:0]
+ for _, id := range ids {
+ if len(out) == 0 || id != out[len(out)-1] {
+ out = append(out, id)
+ }
+ }
+ return out
+}
+
// reverseTransitiveClosure calculates the set of packages that transitively
-// reach an id in ids via their Deps. The result also includes given ids.
+// import an id in ids. The result also includes given ids.
//
// If includeInvalid is false, the algorithm ignores packages with invalid
// metadata (including those in the given list of ids).
diff --git a/gopls/internal/lsp/cache/load.go b/gopls/internal/lsp/cache/load.go
index e930eb4..83ea2ca 100644
--- a/gopls/internal/lsp/cache/load.go
+++ b/gopls/internal/lsp/cache/load.go
@@ -207,7 +207,7 @@
if s.view.allFilesExcluded(pkg, filterer) {
continue
}
- if err := buildMetadata(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, newMetadata, nil); err != nil {
+ if err := buildMetadata(ctx, pkg, cfg, query, newMetadata, nil); err != nil {
return err
}
}
@@ -476,7 +476,9 @@
// buildMetadata populates the updates map with metadata updates to
// apply, based on the given pkg. It recurs through pkg.Imports to ensure that
// metadata exists for all dependencies.
-func buildMetadata(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error {
+func buildMetadata(ctx context.Context, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error {
+ // Allow for multiple ad-hoc packages in the workspace (see #47584).
+ pkgPath := PackagePath(pkg.PkgPath)
id := PackageID(pkg.ID)
if source.IsCommandLineArguments(pkg.ID) {
suffix := ":" + strings.Join(query, ",")
@@ -540,33 +542,66 @@
m.GoFiles = append(m.GoFiles, uri)
}
- for importPath, importPkg := range pkg.Imports {
- // TODO(rfindley): in rare cases it is possible that the import package
- // path is not the same as the package path of the import. That is to say
- // (quoting adonovan):
- // "The importPath string is the path by which one package is imported from
- // another, but that needn't be the same as its internal name (sometimes
- // called the "package path") used to prefix its linker symbols"
- //
- // We should not set this package path on the metadata of the dep.
- importPkgPath := PackagePath(importPath)
- importID := PackageID(importPkg.ID)
+ imports := make(map[ImportPath]PackageID)
+ for importPath, imported := range pkg.Imports {
+ importPath := ImportPath(importPath)
+ imports[importPath] = PackageID(imported.ID)
- m.Deps = append(m.Deps, importID)
+ // It is not an invariant that importPath == imported.PkgPath.
+ // For example, package "net" imports "golang.org/x/net/dns/dnsmessage"
+ // which refers to the package whose ID and PkgPath are both
+ // "vendor/golang.org/x/net/dns/dnsmessage". Notice the ImportMap,
+ // which maps ImportPaths to PackagePaths:
+ //
+ // $ go list -json net vendor/golang.org/x/net/dns/dnsmessage
+ // {
+ // "ImportPath": "net",
+ // "Name": "net",
+ // "Imports": [
+ // "C",
+ // "vendor/golang.org/x/net/dns/dnsmessage",
+ // "vendor/golang.org/x/net/route",
+ // ...
+ // ],
+ // "ImportMap": {
+ // "golang.org/x/net/dns/dnsmessage": "vendor/golang.org/x/net/dns/dnsmessage",
+ // "golang.org/x/net/route": "vendor/golang.org/x/net/route"
+ // },
+ // ...
+ // }
+ // {
+ // "ImportPath": "vendor/golang.org/x/net/dns/dnsmessage",
+ // "Name": "dnsmessage",
+ // ...
+ // }
+ //
+ // (Beware that, for historical reasons, go list uses
+ // the JSON field "ImportPath" for the package's
+ // path--effectively the linker symbol prefix.)
// Don't remember any imports with significant errors.
- if importPkgPath != "unsafe" && len(importPkg.CompiledGoFiles) == 0 {
+ //
+ // The len=0 condition is a heuristic check for imports of
+ // non-existent packages (for which go/packages will create
+ // an edge to a synthesized node). The heuristic is unsound
+ // because some valid packages have zero files, for example,
+ // a directory containing only the file p_test.go defines an
+ // empty package p.
+ // TODO(adonovan): clarify this. Perhaps go/packages should
+ // report which nodes were synthesized.
+ if importPath != "unsafe" && len(imported.CompiledGoFiles) == 0 {
if m.MissingDeps == nil {
- m.MissingDeps = make(map[PackagePath]struct{})
+ m.MissingDeps = make(map[ImportPath]struct{})
}
- m.MissingDeps[importPkgPath] = struct{}{}
+ m.MissingDeps[importPath] = struct{}{}
continue
}
- if err := buildMetadata(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil {
+
+ if err := buildMetadata(ctx, imported, cfg, query, updates, append(path, id)); err != nil {
event.Error(ctx, "error in dependency", err)
}
}
- sort.Slice(m.Deps, func(i, j int) bool { return m.Deps[i] < m.Deps[j] }) // for determinism
+ m.Imports = imports
return nil
}
diff --git a/gopls/internal/lsp/cache/metadata.go b/gopls/internal/lsp/cache/metadata.go
index 2fa87eb..5ac741a 100644
--- a/gopls/internal/lsp/cache/metadata.go
+++ b/gopls/internal/lsp/cache/metadata.go
@@ -17,9 +17,10 @@
// it would result in confusing errors because package IDs often look like
// package paths.
type (
- PackageID string
- PackagePath string
- PackageName string
+ PackageID string // go list's unique identifier for a package (e.g. "vendor/example.com/foo [vendor/example.com/bar.test]")
+ PackagePath string // name used to prefix linker symbols (e.g. "vendor/example.com/foo")
+ PackageName string // identifier in 'package' declaration (e.g. "foo")
+ ImportPath string // path that appears in an import declaration (e.g. "example.com/foo")
)
// Metadata holds package Metadata extracted from a call to packages.Load.
@@ -32,8 +33,8 @@
ForTest PackagePath // package path under test, or ""
TypesSizes types.Sizes
Errors []packages.Error
- Deps []PackageID // direct dependencies, in string order
- MissingDeps map[PackagePath]struct{}
+ Imports map[ImportPath]PackageID // may contain duplicate IDs
+ MissingDeps map[ImportPath]struct{}
Module *packages.Module
depsErrors []*packagesinternal.PackageError
diff --git a/gopls/internal/lsp/cache/mod_tidy.go b/gopls/internal/lsp/cache/mod_tidy.go
index 8e4e060..cc604c1 100644
--- a/gopls/internal/lsp/cache/mod_tidy.go
+++ b/gopls/internal/lsp/cache/mod_tidy.go
@@ -466,9 +466,8 @@
// the set of strings that appear in import declarations within
// GoFiles. Errors are ignored.
//
-// (We can't simply use ph.m.Metadata.Deps because it contains
-// PackageIDs--not import paths--and is based on CompiledGoFiles,
-// after cgo processing.)
+// (We can't simply use Metadata.Imports because it is based on
+// CompiledGoFiles, after cgo processing.)
func parseImports(ctx context.Context, s *snapshot, files []source.FileHandle) map[string]bool {
s.mu.Lock() // peekOrParse requires a locked snapshot (!)
defer s.mu.Unlock()
diff --git a/gopls/internal/lsp/cache/pkg.go b/gopls/internal/lsp/cache/pkg.go
index 44fe855..2f7389d 100644
--- a/gopls/internal/lsp/cache/pkg.go
+++ b/gopls/internal/lsp/cache/pkg.go
@@ -13,6 +13,7 @@
"golang.org/x/mod/module"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/span"
+ "golang.org/x/tools/internal/memoize"
)
// pkg contains the type information needed by the source package.
@@ -22,7 +23,7 @@
goFiles []*source.ParsedGoFile
compiledGoFiles []*source.ParsedGoFile
diagnostics []*source.Diagnostic
- imports map[PackagePath]*pkg
+ depsByPkgPath map[PackagePath]*pkg
version *module.Version
parseErrors []scanner.ErrorList
typeErrors []types.Error
@@ -30,6 +31,8 @@
typesInfo *types.Info
typesSizes types.Sizes
hasFixedFiles bool // if true, AST was sufficiently mangled that we should hide type errors
+
+ analyses memoize.Store // maps analyzer.Name to Promise[actionResult]
}
// A loadScope defines a package loading scope for use with go/packages.
@@ -108,27 +111,57 @@
return string(p.m.ForTest)
}
-func (p *pkg) GetImport(pkgPath string) (source.Package, error) {
- if imp := p.imports[PackagePath(pkgPath)]; imp != nil {
+// DirectDep returns the directly imported dependency of this package,
+// given its PackagePath. (If you have an ImportPath, e.g. a string
+// from an import declaration, use ResolveImportPath instead.
+// They may differ in case of vendoring.)
+func (p *pkg) DirectDep(pkgPath string) (source.Package, error) {
+ if imp := p.depsByPkgPath[PackagePath(pkgPath)]; imp != nil {
return imp, nil
}
// Don't return a nil pointer because that still satisfies the interface.
return nil, fmt.Errorf("no imported package for %s", pkgPath)
}
+// ResolveImportPath returns the directly imported dependency of this package,
+// given its ImportPath. See also DirectDep.
+func (p *pkg) ResolveImportPath(importPath string) (source.Package, error) {
+ if id, ok := p.m.Imports[ImportPath(importPath)]; ok {
+ for _, imported := range p.depsByPkgPath {
+ if PackageID(imported.ID()) == id {
+ return imported, nil
+ }
+ }
+ }
+ return nil, fmt.Errorf("package does not import %s", importPath)
+}
+
func (p *pkg) MissingDependencies() []string {
// We don't invalidate metadata for import deletions, so check the package
// imports via the *types.Package. Only use metadata if p.types is nil.
if p.types == nil {
var md []string
- for i := range p.m.MissingDeps {
- md = append(md, string(i))
+ for importPath := range p.m.MissingDeps {
+ md = append(md, string(importPath))
}
return md
}
+
+ // This looks wrong.
+ //
+ // rfindley says: it looks like this is intending to implement
+ // a heuristic "if go list couldn't resolve import paths to
+ // packages, then probably you're not in GOPATH or a module".
+ // This is used to determine if we need to show a warning diagnostic.
+ // It looks like this logic is implementing the heuristic that
+ // "even if the metadata has a MissingDep, if the types.Package
+ // doesn't need that dep anymore we shouldn't show the warning".
+ // But either we're outside of GOPATH/Module, or we're not...
+ //
+ // TODO(adonovan): figure out what it is trying to do.
var md []string
for _, pkg := range p.types.Imports() {
- if _, ok := p.m.MissingDeps[PackagePath(pkg.Path())]; ok {
+ if _, ok := p.m.MissingDeps[ImportPath(pkg.Path())]; ok {
md = append(md, pkg.Path())
}
}
@@ -137,7 +170,7 @@
func (p *pkg) Imports() []source.Package {
var result []source.Package
- for _, imp := range p.imports {
+ for _, imp := range p.depsByPkgPath {
result = append(result, imp)
}
return result
diff --git a/gopls/internal/lsp/cache/snapshot.go b/gopls/internal/lsp/cache/snapshot.go
index f070fe3..c7e5e8a 100644
--- a/gopls/internal/lsp/cache/snapshot.go
+++ b/gopls/internal/lsp/cache/snapshot.go
@@ -892,7 +892,7 @@
}
// TODO(rfindley): it looks incorrect that we don't also check GoFiles here.
// If a CGo file is open, we want to consider the package active.
- for _, dep := range m.Deps {
+ for _, dep := range m.Imports {
if s.isActiveLocked(dep) {
return true
}
@@ -1195,6 +1195,28 @@
return pkgs, nil
}
+func (s *snapshot) AllValidMetadata(ctx context.Context) ([]source.Metadata, error) {
+ if err := s.awaitLoaded(ctx); err != nil {
+ return nil, err
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var meta []source.Metadata
+ for _, m := range s.meta.metadata {
+ if m.Valid {
+ meta = append(meta, m)
+ }
+ }
+ return meta, nil
+}
+
+func (s *snapshot) WorkspacePackageByID(ctx context.Context, id string) (source.Package, error) {
+ packageID := PackageID(id)
+ return s.checkedPackage(ctx, packageID, s.workspaceParseMode(packageID))
+}
+
func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Package, error) {
// Don't reload workspace package metadata.
// This function is meant to only return currently cached information.
@@ -1209,14 +1231,14 @@
if err != nil {
return
}
- for importPath, newPkg := range cachedPkg.imports {
- if oldPkg, ok := results[string(importPath)]; ok {
+ for pkgPath, newPkg := range cachedPkg.depsByPkgPath {
+ if oldPkg, ok := results[string(pkgPath)]; ok {
// Using the same trick as NarrowestPackage, prefer non-variants.
if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) {
- results[string(importPath)] = newPkg
+ results[string(pkgPath)] = newPkg
}
} else {
- results[string(importPath)] = newPkg
+ results[string(pkgPath)] = newPkg
}
}
})
diff --git a/gopls/internal/lsp/cache/standalone_go116_test.go b/gopls/internal/lsp/cache/standalone_go116_test.go
index 5eb7ff0..9adf01e 100644
--- a/gopls/internal/lsp/cache/standalone_go116_test.go
+++ b/gopls/internal/lsp/cache/standalone_go116_test.go
@@ -31,6 +31,12 @@
true,
},
{
+ "multiple tags",
+ "//go:build ignore\n\npackage main\n",
+ []string{"exclude", "ignore"},
+ true,
+ },
+ {
"invalid tag",
"// +build ignore\n\npackage main\n",
[]string{"script"},
diff --git a/gopls/internal/lsp/cmd/test/rename.go b/gopls/internal/lsp/cmd/test/rename.go
index 0750d70..a9eb31e 100644
--- a/gopls/internal/lsp/cmd/test/rename.go
+++ b/gopls/internal/lsp/cmd/test/rename.go
@@ -8,6 +8,7 @@
"fmt"
"testing"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
"golang.org/x/tools/gopls/internal/span"
)
@@ -17,13 +18,13 @@
loc := fmt.Sprintf("%v", spn)
got, err := r.NormalizeGoplsCmd(t, "rename", loc, newText)
got += err
- expect := string(r.data.Golden(t, goldenTag, filename, func() ([]byte, error) {
+ want := string(r.data.Golden(t, goldenTag, filename, func() ([]byte, error) {
return []byte(got), nil
}))
- if expect != got {
- t.Errorf("rename failed with %v %v\nexpected:\n%s\ngot:\n%s", loc, newText, expect, got)
+ if diff := compare.Text(want, got); diff != "" {
+ t.Errorf("rename failed with %v %v (-want +got):\n%s", loc, newText, diff)
}
// now check we can build a valid unified diff
unified, _ := r.NormalizeGoplsCmd(t, "rename", "-d", loc, newText)
- checkUnified(t, filename, expect, unified)
+ checkUnified(t, filename, want, unified)
}
diff --git a/gopls/internal/lsp/diagnostics.go b/gopls/internal/lsp/diagnostics.go
index 558556e..ff3e4b2 100644
--- a/gopls/internal/lsp/diagnostics.go
+++ b/gopls/internal/lsp/diagnostics.go
@@ -534,7 +534,7 @@
if snapshot.IsBuiltin(ctx, fh.URI()) {
return nil
}
- pkgs, err := snapshot.PackagesForFile(ctx, fh.URI(), source.TypecheckWorkspace, false)
+ pkgs, _ := snapshot.PackagesForFile(ctx, fh.URI(), source.TypecheckWorkspace, false)
if len(pkgs) > 0 {
return nil
}
diff --git a/gopls/internal/lsp/fake/editor.go b/gopls/internal/lsp/fake/editor.go
index b9f62f3..dfd17c7 100644
--- a/gopls/internal/lsp/fake/editor.go
+++ b/gopls/internal/lsp/fake/editor.go
@@ -366,7 +366,12 @@
}
// OpenFile creates a buffer for the given workdir-relative file.
+//
+// If the file is already open, it is a no-op.
func (e *Editor) OpenFile(ctx context.Context, path string) error {
+ if e.HasBuffer(path) {
+ return nil
+ }
content, err := e.sandbox.Workdir.ReadFile(path)
if err != nil {
return err
@@ -383,6 +388,11 @@
func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content string) error {
e.mu.Lock()
+ if _, ok := e.buffers[path]; ok {
+ e.mu.Unlock()
+ return fmt.Errorf("buffer %q already exists", path)
+ }
+
buf := buffer{
windowsLineEndings: e.config.WindowsLineEndings,
version: 1,
@@ -712,7 +722,7 @@
}
content, err := applyEdits(buf.lines, edits)
if err != nil {
- return err
+ return fmt.Errorf("editing %q: %v; edits:\n%v", path, err, edits)
}
return e.setBufferContentLocked(ctx, path, true, content, edits)
}
@@ -1171,6 +1181,15 @@
if e.Server == nil {
return nil
}
+
+ // Verify that PrepareRename succeeds.
+ prepareParams := &protocol.PrepareRenameParams{}
+ prepareParams.TextDocument = e.TextDocumentIdentifier(path)
+ prepareParams.Position = pos.ToProtocolPosition()
+ if _, err := e.Server.PrepareRename(ctx, prepareParams); err != nil {
+ return fmt.Errorf("preparing rename: %v", err)
+ }
+
params := &protocol.RenameParams{
TextDocument: e.TextDocumentIdentifier(path),
Position: pos.ToProtocolPosition(),
diff --git a/gopls/internal/lsp/fake/workdir.go b/gopls/internal/lsp/fake/workdir.go
index f194e26..2b426e4 100644
--- a/gopls/internal/lsp/fake/workdir.go
+++ b/gopls/internal/lsp/fake/workdir.go
@@ -13,6 +13,7 @@
"os"
"path/filepath"
"runtime"
+ "sort"
"strings"
"sync"
"time"
@@ -349,6 +350,22 @@
return nil
}
+// ListFiles returns a new sorted list of the relative paths of files in dir,
+// recursively.
+func (w *Workdir) ListFiles(dir string) ([]string, error) {
+ m, err := w.listFiles(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ var paths []string
+ for p := range m {
+ paths = append(paths, p)
+ }
+ sort.Strings(paths)
+ return paths, nil
+}
+
// listFiles lists files in the given directory, returning a map of relative
// path to contents and modification time.
func (w *Workdir) listFiles(dir string) (map[string]fileID, error) {
diff --git a/gopls/internal/lsp/link.go b/gopls/internal/lsp/link.go
index 929fe28..b26bfb3 100644
--- a/gopls/internal/lsp/link.go
+++ b/gopls/internal/lsp/link.go
@@ -174,8 +174,8 @@
return links, nil
}
-func moduleAtVersion(target string, pkg source.Package) (string, string, bool) {
- impPkg, err := pkg.GetImport(target)
+func moduleAtVersion(targetImportPath string, pkg source.Package) (string, string, bool) {
+ impPkg, err := pkg.ResolveImportPath(targetImportPath)
if err != nil {
return "", "", false
}
diff --git a/gopls/internal/lsp/regtest/env.go b/gopls/internal/lsp/regtest/env.go
index 23be64e..2374dab 100644
--- a/gopls/internal/lsp/regtest/env.go
+++ b/gopls/internal/lsp/regtest/env.go
@@ -11,9 +11,9 @@
"sync"
"testing"
- "golang.org/x/tools/internal/jsonrpc2/servertest"
"golang.org/x/tools/gopls/internal/lsp/fake"
"golang.org/x/tools/gopls/internal/lsp/protocol"
+ "golang.org/x/tools/internal/jsonrpc2/servertest"
)
// Env holds the building blocks of an editor testing environment, providing
@@ -119,7 +119,7 @@
for name, params := range s.diagnostics {
fmt.Fprintf(&b, "\t%s (version %d):\n", name, int(params.Version))
for _, d := range params.Diagnostics {
- fmt.Fprintf(&b, "\t\t(%d, %d): %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Message)
+ fmt.Fprintf(&b, "\t\t(%d, %d) [%s]: %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Source, d.Message)
}
}
b.WriteString("\n")
diff --git a/gopls/internal/lsp/regtest/wrappers.go b/gopls/internal/lsp/regtest/wrappers.go
index 63b5991..13e5f7b 100644
--- a/gopls/internal/lsp/regtest/wrappers.go
+++ b/gopls/internal/lsp/regtest/wrappers.go
@@ -58,6 +58,17 @@
}
}
+// ListFiles lists relative paths to files in the given directory.
+// It calls t.Fatal on any error.
+func (e *Env) ListFiles(dir string) []string {
+ e.T.Helper()
+ paths, err := e.Sandbox.Workdir.ListFiles(dir)
+ if err != nil {
+ e.T.Fatal(err)
+ }
+ return paths
+}
+
// OpenFile opens a file in the editor, calling t.Fatal on any error.
func (e *Env) OpenFile(name string) {
e.T.Helper()
diff --git a/gopls/internal/lsp/semantic.go b/gopls/internal/lsp/semantic.go
index 641c2c4..713c3ac 100644
--- a/gopls/internal/lsp/semantic.go
+++ b/gopls/internal/lsp/semantic.go
@@ -15,14 +15,15 @@
"log"
"path/filepath"
"sort"
+ "strconv"
"strings"
"time"
- "golang.org/x/tools/internal/event"
"golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/gopls/internal/lsp/safetoken"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/lsp/template"
+ "golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/typeparams"
)
@@ -875,31 +876,25 @@
}
return // don't mark anything for . or _
}
- val := d.Path.Value
- if len(val) < 2 || val[0] != '"' || val[len(val)-1] != '"' {
- // avoid panics on imports without a properly quoted string
+ importPath, err := strconv.Unquote(d.Path.Value)
+ if err != nil {
return
}
- nm := val[1 : len(val)-1] // remove surrounding "s
// Import strings are implementation defined. Try to match with parse information.
- x, err := e.pkg.GetImport(nm)
+ imported, err := e.pkg.ResolveImportPath(importPath)
if err != nil {
// unexpected, but impact is that maybe some import is not colored
return
}
- // expect that nm is x.PkgPath and that x.Name() is a component of it
- if x.PkgPath() != nm {
- // don't know how or what to color (if this can happen at all)
- return
- }
- // this is not a precise test: imagine "github.com/nasty/v/v2"
- j := strings.LastIndex(nm, x.Name())
+ // Check whether the original literal contains the package's declared name.
+ j := strings.LastIndex(d.Path.Value, imported.Name())
if j == -1 {
// name doesn't show up, for whatever reason, so nothing to report
return
}
- start := d.Path.Pos() + 1 + token.Pos(j) // skip the initial quote
- e.token(start, len(x.Name()), tokNamespace, nil)
+ // Report virtual declaration at the position of the substring.
+ start := d.Path.Pos() + token.Pos(j)
+ e.token(start, len(imported.Name()), tokNamespace, nil)
}
// log unexpected state
diff --git a/gopls/internal/lsp/source/completion/completion.go b/gopls/internal/lsp/source/completion/completion.go
index 1e5dd1c..7d98205 100644
--- a/gopls/internal/lsp/source/completion/completion.go
+++ b/gopls/internal/lsp/source/completion/completion.go
@@ -264,7 +264,6 @@
type importInfo struct {
importPath string
name string
- pkg source.Package
}
type methodSetKey struct {
@@ -1165,7 +1164,6 @@
}
imp := &importInfo{
importPath: path,
- pkg: pkg,
}
if imports.ImportPathToAssumedName(path) != pkg.GetTypes().Name() {
imp.name = pkg.GetTypes().Name()
@@ -1517,7 +1515,6 @@
}
imp := &importInfo{
importPath: path,
- pkg: pkg,
}
if imports.ImportPathToAssumedName(path) != pkg.GetTypes().Name() {
imp.name = pkg.GetTypes().Name()
diff --git a/gopls/internal/lsp/source/completion/statements.go b/gopls/internal/lsp/source/completion/statements.go
index b43629c..1f80193 100644
--- a/gopls/internal/lsp/source/completion/statements.go
+++ b/gopls/internal/lsp/source/completion/statements.go
@@ -336,7 +336,7 @@
if param.Name() == "_" {
continue
}
- testingPkg, err := pkg.GetImport("testing")
+ testingPkg, err := pkg.DirectDep("testing")
if err != nil {
continue
}
diff --git a/gopls/internal/lsp/source/hover.go b/gopls/internal/lsp/source/hover.go
index 0dc8d8a..c758d68 100644
--- a/gopls/internal/lsp/source/hover.go
+++ b/gopls/internal/lsp/source/hover.go
@@ -342,7 +342,7 @@
// See golang/go#36998: don't link to modules matching GOPRIVATE.
//
- // The path returned by linkData is an import path.
+ // The path returned by linkData is a package path.
if i.Snapshot.View().IsGoPrivatePath(h.LinkPath) {
h.LinkPath = ""
} else if mod, version, ok := moduleAtVersion(h.LinkPath, i); ok {
@@ -352,11 +352,11 @@
return h, nil
}
-// linkData returns the name, import path, and anchor to use in building links
+// linkData returns the name, package path, and anchor to use in building links
// to obj.
//
// If obj is not visible in documentation, the returned name will be empty.
-func linkData(obj types.Object, enclosing *types.TypeName) (name, importPath, anchor string) {
+func linkData(obj types.Object, enclosing *types.TypeName) (name, packagePath, anchor string) {
// Package names simply link to the package.
if obj, ok := obj.(*types.PkgName); ok {
return obj.Name(), obj.Imported().Path(), ""
@@ -430,7 +430,7 @@
return "", "", ""
}
- importPath = obj.Pkg().Path()
+ packagePath = obj.Pkg().Path()
if recv != nil {
anchor = fmt.Sprintf("%s.%s", recv.Name(), obj.Name())
name = fmt.Sprintf("(%s.%s).%s", obj.Pkg().Name(), recv.Name(), obj.Name())
@@ -439,7 +439,7 @@
anchor = obj.Name()
name = fmt.Sprintf("%s.%s", obj.Pkg().Name(), obj.Name())
}
- return name, importPath, anchor
+ return name, packagePath, anchor
}
func moduleAtVersion(path string, i *IdentifierInfo) (string, string, bool) {
@@ -448,7 +448,7 @@
if strings.ToLower(i.Snapshot.View().Options().LinkTarget) != "pkg.go.dev" {
return "", "", false
}
- impPkg, err := i.pkg.GetImport(path)
+ impPkg, err := i.pkg.DirectDep(path)
if err != nil {
return "", "", false
}
@@ -535,11 +535,11 @@
}
case *ast.ImportSpec:
// Try to find the package documentation for an imported package.
- pkgPath, err := strconv.Unquote(node.Path.Value)
+ importPath, err := strconv.Unquote(node.Path.Value)
if err != nil {
return nil, err
}
- imp, err := pkg.GetImport(pkgPath)
+ imp, err := pkg.ResolveImportPath(importPath)
if err != nil {
return nil, err
}
diff --git a/gopls/internal/lsp/source/identifier.go b/gopls/internal/lsp/source/identifier.go
index 7bf1e12..f11817f 100644
--- a/gopls/internal/lsp/source/identifier.go
+++ b/gopls/internal/lsp/source/identifier.go
@@ -456,21 +456,21 @@
if err != nil {
return nil, fmt.Errorf("import path not quoted: %s (%v)", imp.Path.Value, err)
}
+ imported, err := pkg.ResolveImportPath(importPath)
+ if err != nil {
+ return nil, err
+ }
result := &IdentifierInfo{
Snapshot: snapshot,
- Name: importPath,
+ Name: importPath, // should this perhaps be imported.PkgPath()?
pkg: pkg,
}
if result.MappedRange, err = posToMappedRange(snapshot.FileSet(), pkg, imp.Path.Pos(), imp.Path.End()); err != nil {
return nil, err
}
// Consider the "declaration" of an import spec to be the imported package.
- importedPkg, err := pkg.GetImport(importPath)
- if err != nil {
- return nil, err
- }
// Return all of the files in the package as the definition of the import spec.
- for _, dst := range importedPkg.GetSyntax() {
+ for _, dst := range imported.GetSyntax() {
rng, err := posToMappedRange(snapshot.FileSet(), pkg, dst.Pos(), dst.End())
if err != nil {
return nil, err
diff --git a/gopls/internal/lsp/source/known_packages.go b/gopls/internal/lsp/source/known_packages.go
index d7f229e..e2d950b 100644
--- a/gopls/internal/lsp/source/known_packages.go
+++ b/gopls/internal/lsp/source/known_packages.go
@@ -28,6 +28,8 @@
for _, imp := range pgf.File.Imports {
alreadyImported[imp.Path.Value] = struct{}{}
}
+ // TODO(adonovan): this whole algorithm could be more
+ // simply expressed in terms of Metadata, not Packages.
pkgs, err := snapshot.CachedImportPaths(ctx)
if err != nil {
return nil, err
diff --git a/gopls/internal/lsp/source/options.go b/gopls/internal/lsp/source/options.go
index fe86ff2..136ba23 100644
--- a/gopls/internal/lsp/source/options.go
+++ b/gopls/internal/lsp/source/options.go
@@ -1306,8 +1306,8 @@
}
m := make(map[string]bool)
for a, enabled := range all {
- if enabled, ok := enabled.(bool); ok {
- m[a] = enabled
+ if e, ok := enabled.(bool); ok {
+ m[a] = e
} else {
r.parseErrorf("invalid type %T for map key %q", enabled, a)
return m
diff --git a/gopls/internal/lsp/source/references.go b/gopls/internal/lsp/source/references.go
index de6687a..e26091c 100644
--- a/gopls/internal/lsp/source/references.go
+++ b/gopls/internal/lsp/source/references.go
@@ -62,6 +62,8 @@
}
if inPackageName {
+ // TODO(rfindley): this is inaccurate, excluding test variants, and
+ // redundant with package renaming. Refactor to share logic.
renamingPkg, err := s.PackageForFile(ctx, f.URI(), TypecheckWorkspace, NarrowestPackage)
if err != nil {
return nil, err
diff --git a/gopls/internal/lsp/source/rename.go b/gopls/internal/lsp/source/rename.go
index 2f0ad56..2d4a188 100644
--- a/gopls/internal/lsp/source/rename.go
+++ b/gopls/internal/lsp/source/rename.go
@@ -76,26 +76,44 @@
err := errors.New("can't rename package: LSP client does not support file renaming")
return nil, err, err
}
- renamingPkg, err := snapshot.PackageForFile(ctx, f.URI(), TypecheckWorkspace, NarrowestPackage)
+ fileMeta, err := snapshot.MetadataForFile(ctx, f.URI())
if err != nil {
return nil, err, err
}
- if renamingPkg.Name() == "main" {
+ if len(fileMeta) == 0 {
+ err := fmt.Errorf("no packages found for file %q", f.URI())
+ return nil, err, err
+ }
+
+ meta := fileMeta[0]
+
+ if meta.PackageName() == "main" {
err := errors.New("can't rename package \"main\"")
return nil, err, err
}
- if renamingPkg.Version() == nil {
- err := fmt.Errorf("can't rename package: missing module information for package %q", renamingPkg.PkgPath())
+ if strings.HasSuffix(meta.PackageName(), "_test") {
+ err := errors.New("can't rename x_test packages")
return nil, err, err
}
- if renamingPkg.Version().Path == renamingPkg.PkgPath() {
- err := fmt.Errorf("can't rename package: package path %q is the same as module path %q", renamingPkg.PkgPath(), renamingPkg.Version().Path)
+ if meta.ModuleInfo() == nil {
+ err := fmt.Errorf("can't rename package: missing module information for package %q", meta.PackagePath())
return nil, err, err
}
- result, err := computePrepareRenameResp(snapshot, renamingPkg, pgf.File.Name, renamingPkg.Name())
+
+ if meta.ModuleInfo().Path == meta.PackagePath() {
+ err := fmt.Errorf("can't rename package: package path %q is the same as module path %q", meta.PackagePath(), meta.ModuleInfo().Path)
+ return nil, err, err
+ }
+ // TODO(rfindley): we should not need the package here.
+ pkg, err := snapshot.WorkspacePackageByID(ctx, meta.PackageID())
+ if err != nil {
+ err = fmt.Errorf("error building package to rename: %v", err)
+ return nil, err, err
+ }
+ result, err := computePrepareRenameResp(snapshot, pkg, pgf.File.Name, pkg.Name())
if err != nil {
return nil, nil, err
}
@@ -164,65 +182,46 @@
}
if inPackageName {
- // Since we only take one package below, no need to include test variants.
+ if !isValidIdentifier(newName) {
+ return nil, true, fmt.Errorf("%q is not a valid identifier", newName)
+ }
+
+ fileMeta, err := s.MetadataForFile(ctx, f.URI())
+ if err != nil {
+ return nil, true, err
+ }
+
+ if len(fileMeta) == 0 {
+ return nil, true, fmt.Errorf("no packages found for file %q", f.URI())
+ }
+
+ // We need metadata for the relevant package and module paths. These should
+ // be the same for all packages containing the file.
//
- // TODO(rfindley): but is this correct? What about x_test packages that
- // import the renaming package?
- const includeTestVariants = false
- pkgs, err := s.PackagesForFile(ctx, f.URI(), TypecheckWorkspace, includeTestVariants)
+ // TODO(rfindley): we mix package path and import path here haphazardly.
+ // Fix this.
+ meta := fileMeta[0]
+ oldPath := meta.PackagePath()
+ var modulePath string
+ if mi := meta.ModuleInfo(); mi == nil {
+ return nil, true, fmt.Errorf("cannot rename package: missing module information for package %q", meta.PackagePath())
+ } else {
+ modulePath = mi.Path
+ }
+
+ if strings.HasSuffix(newName, "_test") {
+ return nil, true, fmt.Errorf("cannot rename to _test package")
+ }
+
+ metadata, err := s.AllValidMetadata(ctx)
if err != nil {
return nil, true, err
}
- var pkg Package // TODO(rfindley): we should consider all packages, so that we get the full reverse transitive closure.
- for _, p := range pkgs {
- // pgf.File.Name must not be nil, else this will panic.
- if pgf.File.Name.Name == p.Name() {
- pkg = p
- break
- }
- }
- activePkgs, err := s.ActivePackages(ctx)
+
+ renamingEdits, err := renamePackage(ctx, s, modulePath, oldPath, newName, metadata)
if err != nil {
return nil, true, err
}
- renamingEdits, err := computeImportRenamingEdits(ctx, s, pkg, activePkgs, newName)
- if err != nil {
- return nil, true, err
- }
- pkgNameEdits, err := computePackageNameRenamingEdits(pkg, newName)
- if err != nil {
- return nil, true, err
- }
- for uri, edits := range pkgNameEdits {
- renamingEdits[uri] = edits
- }
- // Rename test packages
- for _, activePkg := range activePkgs {
- if activePkg.ForTest() != pkg.PkgPath() {
- continue
- }
- // Filter out intermediate test variants.
- if activePkg.PkgPath() != pkg.PkgPath() && activePkg.PkgPath() != pkg.PkgPath()+"_test" {
- continue
- }
- newTestPkgName := newName
- if strings.HasSuffix(activePkg.Name(), "_test") {
- newTestPkgName += "_test"
- }
- perPackageEdits, err := computeRenamePackageImportEditsPerPackage(ctx, s, activePkg, newTestPkgName, pkg.PkgPath())
- for uri, edits := range perPackageEdits {
- renamingEdits[uri] = append(renamingEdits[uri], edits...)
- }
- pkgNameEdits, err := computePackageNameRenamingEdits(activePkg, newTestPkgName)
- if err != nil {
- return nil, true, err
- }
- for uri, edits := range pkgNameEdits {
- if _, ok := renamingEdits[uri]; !ok {
- renamingEdits[uri] = edits
- }
- }
- }
return renamingEdits, true, nil
}
@@ -239,98 +238,187 @@
return result, false, nil
}
-// computeImportRenamingEdits computes all edits to files in other packages that import
-// the renaming package.
-func computeImportRenamingEdits(ctx context.Context, s Snapshot, renamingPkg Package, pkgs []Package, newName string) (map[span.URI][]protocol.TextEdit, error) {
- result := make(map[span.URI][]protocol.TextEdit)
+// renamePackage computes all workspace edits required to rename the package
+// described by the given metadata, to newName, by renaming its package
+// directory.
+//
+// It updates package clauses and import paths for the renamed package as well
+// as any other packages affected by the directory renaming among packages
+// described by allMetadata.
+func renamePackage(ctx context.Context, s Snapshot, modulePath, oldPath, newName string, allMetadata []Metadata) (map[span.URI][]protocol.TextEdit, error) {
+ if modulePath == oldPath {
+ return nil, fmt.Errorf("cannot rename package: module path %q is the same as the package path, so renaming the package directory would have no effect", modulePath)
+ }
+
+ newPathPrefix := path.Join(path.Dir(oldPath), newName)
+
+ edits := make(map[span.URI][]protocol.TextEdit)
+ seen := make(seenPackageRename) // track per-file import renaming we've already processed
+
// Rename imports to the renamed package from other packages.
- for _, pkg := range pkgs {
- if renamingPkg.Version() == nil {
- return nil, fmt.Errorf("cannot rename package: missing module information for package %q", renamingPkg.PkgPath())
- }
- renamingPkgModulePath := renamingPkg.Version().Path
- activePkgModulePath := pkg.Version().Path
- if !strings.HasPrefix(pkg.PkgPath()+"/", renamingPkg.PkgPath()+"/") {
- continue // not a nested package or the renaming package.
+ for _, m := range allMetadata {
+ // Special case: x_test packages for the renamed package will not have the
+ // package path as a dir prefix, but still need their package clauses
+ // renamed.
+ if m.PackagePath() == oldPath+"_test" {
+ newTestName := newName + "_test"
+
+ if err := renamePackageClause(ctx, m, s, newTestName, seen, edits); err != nil {
+ return nil, err
+ }
+ continue
}
- if activePkgModulePath == pkg.PkgPath() {
- continue // don't edit imports to nested package whose path and module path is the same.
+ // Subtle: check this condition before checking for valid module info
+ // below, because we should not fail this operation if unrelated packages
+ // lack module info.
+ if !strings.HasPrefix(m.PackagePath()+"/", oldPath+"/") {
+ continue // not affected by the package renaming
}
- if renamingPkgModulePath != "" && renamingPkgModulePath != activePkgModulePath {
- continue // don't edit imports if nested package and renaming package has different module path.
+ if m.ModuleInfo() == nil {
+ return nil, fmt.Errorf("cannot rename package: missing module information for package %q", m.PackagePath())
}
- // Compute all edits for other files that import this nested package
- // when updating the its path.
- perFileEdits, err := computeRenamePackageImportEditsPerPackage(ctx, s, pkg, newName, renamingPkg.PkgPath())
- if err != nil {
+ if modulePath != m.ModuleInfo().Path {
+ continue // don't edit imports if nested package and renaming package have different module paths
+ }
+
+ // Renaming a package consists of changing its import path and package name.
+ suffix := strings.TrimPrefix(m.PackagePath(), oldPath)
+ newPath := newPathPrefix + suffix
+
+ pkgName := m.PackageName()
+ if m.PackagePath() == oldPath {
+ pkgName = newName
+
+ if err := renamePackageClause(ctx, m, s, newName, seen, edits); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := renameImports(ctx, s, m, newPath, pkgName, seen, edits); err != nil {
return nil, err
}
- for uri, edits := range perFileEdits {
- result[uri] = append(result[uri], edits...)
- }
}
- return result, nil
+ return edits, nil
}
-// computePackageNameRenamingEdits computes all edits to files within the renming packages.
-func computePackageNameRenamingEdits(renamingPkg Package, newName string) (map[span.URI][]protocol.TextEdit, error) {
- result := make(map[span.URI][]protocol.TextEdit)
+// seenPackageRename tracks import path renamings that have already been
+// processed.
+//
+// Due to test variants, files may appear multiple times in the reverse
+// transitive closure of a renamed package, or in the reverse transitive
+// closure of different variants of a renamed package (both are possible).
+// However, in all cases the resulting edits will be the same.
+type seenPackageRename map[seenPackageKey]bool
+type seenPackageKey struct {
+ uri span.URI
+ importPath string
+}
+
+// add reports whether uri and importPath have been seen, and records them as
+// seen if not.
+func (s seenPackageRename) add(uri span.URI, importPath string) bool {
+ key := seenPackageKey{uri, importPath}
+ seen := s[key]
+ if !seen {
+ s[key] = true
+ }
+ return seen
+}
+
+// renamePackageClause computes edits renaming the package clause of files in
+// the package described by the given metadata, to newName.
+//
+// As files may belong to multiple packages, the seen map tracks files whose
+// package clause has already been updated, to prevent duplicate edits.
+//
+// Edits are written into the edits map.
+func renamePackageClause(ctx context.Context, m Metadata, s Snapshot, newName string, seen seenPackageRename, edits map[span.URI][]protocol.TextEdit) error {
+ pkg, err := s.WorkspacePackageByID(ctx, m.PackageID())
+ if err != nil {
+ return err
+ }
+
// Rename internal references to the package in the renaming package.
- for _, f := range renamingPkg.CompiledGoFiles() {
+ for _, f := range pkg.CompiledGoFiles() {
+ if seen.add(f.URI, m.PackagePath()) {
+ continue
+ }
+
if f.File.Name == nil {
continue
}
pkgNameMappedRange := NewMappedRange(f.Tok, f.Mapper, f.File.Name.Pos(), f.File.Name.End())
- // Invalid range for the package name.
rng, err := pkgNameMappedRange.Range()
if err != nil {
- return nil, err
+ return err
}
- result[f.URI] = append(result[f.URI], protocol.TextEdit{
+ edits[f.URI] = append(edits[f.URI], protocol.TextEdit{
Range: rng,
NewText: newName,
})
}
- return result, nil
+ return nil
}
-// computeRenamePackageImportEditsPerPackage computes the set of edits (to imports)
-// among the files of package nestedPkg that are necessary when package renamedPkg
-// is renamed to newName.
-func computeRenamePackageImportEditsPerPackage(ctx context.Context, s Snapshot, nestedPkg Package, newName, renamingPath string) (map[span.URI][]protocol.TextEdit, error) {
- rdeps, err := s.GetReverseDependencies(ctx, nestedPkg.ID())
+// renameImports computes the set of edits to imports resulting from renaming
+// the package described by the given metadata, to a package with import path
+// newPath and name newName.
+//
+// Edits are written into the edits map.
+func renameImports(ctx context.Context, s Snapshot, m Metadata, newPath, newName string, seen seenPackageRename, edits map[span.URI][]protocol.TextEdit) error {
+ // TODO(rfindley): we should get reverse dependencies as metadata first,
+ // rather than building the package immediately. We don't need reverse
+ // dependencies if they are intermediate test variants.
+ rdeps, err := s.GetReverseDependencies(ctx, m.PackageID())
if err != nil {
- return nil, err
+ return err
}
- result := make(map[span.URI][]protocol.TextEdit)
for _, dep := range rdeps {
+ // Subtle: don't perform renaming in this package if it is not fully
+ // parsed. This can occur inside the workspace if dep is an intermediate
+ // test variant. ITVs are only ever parsed in export mode, and no file is
+ // found only in an ITV. Therefore the renaming will eventually occur in a
+ // full package.
+ //
+ // An alternative algorithm that may be more robust would be to first
+ // collect *files* that need to have their imports updated, and then
+ // perform the rename using s.PackageForFile(..., NarrowestPackage).
+ if dep.ParseMode() != ParseFull {
+ continue
+ }
+
for _, f := range dep.CompiledGoFiles() {
+ if seen.add(f.URI, m.PackagePath()) {
+ continue
+ }
+
for _, imp := range f.File.Imports {
- if impPath, _ := strconv.Unquote(imp.Path.Value); impPath != nestedPkg.PkgPath() {
- continue // not the import we're looking for.
+ if impPath, _ := strconv.Unquote(imp.Path.Value); impPath != m.PackagePath() {
+ continue // not the import we're looking for
}
// Create text edit for the import path (string literal).
impPathMappedRange := NewMappedRange(f.Tok, f.Mapper, imp.Path.Pos(), imp.Path.End())
rng, err := impPathMappedRange.Range()
if err != nil {
- return nil, err
+ return err
}
- newText := strconv.Quote(path.Join(path.Dir(renamingPath), newName) + strings.TrimPrefix(nestedPkg.PkgPath(), renamingPath))
- result[f.URI] = append(result[f.URI], protocol.TextEdit{
+ newText := strconv.Quote(newPath)
+ edits[f.URI] = append(edits[f.URI], protocol.TextEdit{
Range: rng,
NewText: newText,
})
- // If the nested package is not the renaming package or its import path already
- // has an local package name then we don't need to update the local package name.
- if nestedPkg.PkgPath() != renamingPath || imp.Name != nil {
+ // If the package name of an import has not changed or if its import
+ // path already has a local package name, then we don't need to update
+ // the local package name.
+ if newName == m.PackageName() || imp.Name != nil {
continue
}
@@ -344,6 +432,7 @@
var changes map[span.URI][]protocol.TextEdit
localName := newName
try := 0
+
// Keep trying with fresh names until one succeeds.
for fileScope.Lookup(localName) != nil || pkgScope.Lookup(localName) != nil {
try++
@@ -351,8 +440,9 @@
}
changes, err = renameObj(ctx, s, localName, qos)
if err != nil {
- return nil, err
+ return err
}
+
// If the chosen local package name matches the package's new name, delete the
// change that would have inserted an explicit local name, which is always
// the lexically first change.
@@ -363,14 +453,14 @@
})
changes[f.URI] = v[1:]
}
- for uri, edits := range changes {
- result[uri] = append(result[uri], edits...)
+ for uri, changeEdits := range changes {
+ edits[uri] = append(edits[uri], changeEdits...)
}
}
}
}
- return result, nil
+ return nil
}
// renameObj returns a map of TextEdits for renaming an identifier within a file
diff --git a/gopls/internal/lsp/source/rename_check.go b/gopls/internal/lsp/source/rename_check.go
index 6fb7ddf..c4d5709 100644
--- a/gopls/internal/lsp/source/rename_check.go
+++ b/gopls/internal/lsp/source/rename_check.go
@@ -838,11 +838,11 @@
if err != nil {
continue
}
- importPkg, err := pkg.GetImport(importPath)
+ imported, err := pkg.ResolveImportPath(importPath)
if err != nil {
return nil, nil, nil, false
}
- pkgs = append(pkgs, importPkg)
+ pkgs = append(pkgs, imported)
}
}
for _, p := range pkgs {
diff --git a/gopls/internal/lsp/source/stub.go b/gopls/internal/lsp/source/stub.go
index 2d00ea5..28a2e2b 100644
--- a/gopls/internal/lsp/source/stub.go
+++ b/gopls/internal/lsp/source/stub.go
@@ -255,8 +255,10 @@
eiface := iface.Embedded(i).Obj()
depPkg := ifacePkg
if eiface.Pkg().Path() != ifacePkg.PkgPath() {
+ // TODO(adonovan): I'm not sure what this is trying to do, but it
+ // looks wrong in the case of type aliases.
var err error
- depPkg, err = ifacePkg.GetImport(eiface.Pkg().Path())
+ depPkg, err = ifacePkg.DirectDep(eiface.Pkg().Path())
if err != nil {
return nil, err
}
diff --git a/gopls/internal/lsp/source/view.go b/gopls/internal/lsp/source/view.go
index d242cf4..25fa7d7 100644
--- a/gopls/internal/lsp/source/view.go
+++ b/gopls/internal/lsp/source/view.go
@@ -152,8 +152,8 @@
GetReverseDependencies(ctx context.Context, id string) ([]Package, error)
// CachedImportPaths returns all the imported packages loaded in this
- // snapshot, indexed by their import path and checked in TypecheckWorkspace
- // mode.
+ // snapshot, indexed by their package path (not import path, despite the name)
+ // and checked in TypecheckWorkspace mode.
CachedImportPaths(ctx context.Context) (map[string]Package, error)
// KnownPackages returns all the packages loaded in this snapshot, checked
@@ -166,6 +166,13 @@
// mode, this is just the reverse transitive closure of open packages.
ActivePackages(ctx context.Context) ([]Package, error)
+ // AllValidMetadata returns all valid metadata loaded for the snapshot.
+ AllValidMetadata(ctx context.Context) ([]Metadata, error)
+
+ // WorkspacePackageByID returns the workspace package with id, type checked
+ // in 'workspace' mode.
+ WorkspacePackageByID(ctx context.Context, id string) (Package, error)
+
// Symbols returns all symbols in the snapshot.
Symbols(ctx context.Context) map[span.URI][]Symbol
@@ -510,6 +517,14 @@
return bytes.Compare(h[:], other[:]) < 0
}
+// XORWith updates *h to *h XOR h2.
+func (h *Hash) XORWith(h2 Hash) {
+ // Small enough that we don't need crypto/subtle.XORBytes.
+ for i := range h {
+ h[i] ^= h2[i]
+ }
+}
+
// FileIdentity uniquely identifies a file at a version from a FileSystem.
type FileIdentity struct {
URI span.URI
@@ -581,9 +596,9 @@
// Package represents a Go package that has been type-checked. It maintains
// only the relevant fields of a *go/packages.Package.
type Package interface {
- ID() string
- Name() string
- PkgPath() string
+ ID() string // logically a cache.PackageID
+ Name() string // logically a cache.PackageName
+ PkgPath() string // logically a cache.PackagePath
CompiledGoFiles() []*ParsedGoFile
File(uri span.URI) (*ParsedGoFile, error)
GetSyntax() []*ast.File
@@ -591,8 +606,9 @@
GetTypesInfo() *types.Info
GetTypesSizes() types.Sizes
ForTest() string
- GetImport(pkgPath string) (Package, error)
- MissingDependencies() []string
+ DirectDep(packagePath string) (Package, error) // logically a cache.PackagePath
+ ResolveImportPath(importPath string) (Package, error) // logically a cache.ImportPath
+ MissingDependencies() []string // unordered; logically cache.ImportPaths
Imports() []Package
Version() *module.Version
HasListOrParseErrors() bool
diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go
index e02f4c8..56e26a2 100644
--- a/gopls/internal/regtest/completion/postfix_snippet_test.go
+++ b/gopls/internal/regtest/completion/postfix_snippet_test.go
@@ -438,12 +438,14 @@
},
)
r.Run(t, mod, func(t *testing.T, env *Env) {
+ env.CreateBuffer("foo.go", "")
+
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
c.before = strings.Trim(c.before, "\n")
c.after = strings.Trim(c.after, "\n")
- env.CreateBuffer("foo.go", c.before)
+ env.SetBufferContent("foo.go", c.before)
pos := env.RegexpSearch("foo.go", "\n}")
completions := env.Completion("foo.go", pos)
diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go
index f2b7b1a..a5030d7 100644
--- a/gopls/internal/regtest/misc/definition_test.go
+++ b/gopls/internal/regtest/misc/definition_test.go
@@ -87,7 +87,6 @@
Run(t, stdlibDefinition, func(t *testing.T, env *Env) {
env.OpenFile("main.go")
name, _ := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `fmt.(Printf)`))
- env.OpenFile(name)
pos := env.RegexpSearch(name, `:=\s*(newPrinter)\(\)`)
diff --git a/gopls/internal/regtest/misc/rename_test.go b/gopls/internal/regtest/misc/rename_test.go
index 1f6ee1c..a9fd920 100644
--- a/gopls/internal/regtest/misc/rename_test.go
+++ b/gopls/internal/regtest/misc/rename_test.go
@@ -10,6 +10,7 @@
"golang.org/x/tools/gopls/internal/lsp/protocol"
. "golang.org/x/tools/gopls/internal/lsp/regtest"
+ "golang.org/x/tools/gopls/internal/lsp/tests/compare"
"golang.org/x/tools/internal/testenv"
)
@@ -52,6 +53,36 @@
})
}
+// Test case for golang/go#56227
+func TestRenameWithUnsafeSlice(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17) // unsafe.Slice was added in Go 1.17
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- p.go --
+package p
+
+import "unsafe"
+
+type T struct{}
+
+func (T) M() {}
+
+func _() {
+ x := [3]int{1, 2, 3}
+ ptr := unsafe.Pointer(&x)
+ _ = unsafe.Slice((*int)(ptr), 3)
+}
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("p.go")
+ env.Rename("p.go", env.RegexpSearch("p.go", "M"), "N") // must not panic
+ })
+}
+
func TestPrepareRenameWithNoPackageDeclaration(t *testing.T) {
testenv.NeedsGo1Point(t, 15)
const files = `
@@ -418,7 +449,7 @@
})
}
-func TestRenameWithTestPackage(t *testing.T) {
+func TestRenamePackage_Tests(t *testing.T) {
testenv.NeedsGo1Point(t, 17)
const files = `
-- go.mod --
@@ -488,7 +519,7 @@
})
}
-func TestRenameWithNestedModule(t *testing.T) {
+func TestRenamePackage_NestedModule(t *testing.T) {
testenv.NeedsGo1Point(t, 17)
const files = `
-- go.mod --
@@ -547,7 +578,7 @@
})
}
-func TestRenamePackageWithNonBlankSameImportPaths(t *testing.T) {
+func TestRenamePackage_DuplicateImport(t *testing.T) {
testenv.NeedsGo1Point(t, 17)
const files = `
-- go.mod --
@@ -590,7 +621,7 @@
})
}
-func TestRenamePackageWithBlankSameImportPaths(t *testing.T) {
+func TestRenamePackage_DuplicateBlankImport(t *testing.T) {
testenv.NeedsGo1Point(t, 17)
const files = `
-- go.mod --
@@ -632,3 +663,222 @@
env.RegexpSearch("main.go", `lib1 "mod.com/nested/nested"`)
})
}
+
+func TestRenamePackage_TestVariant(t *testing.T) {
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.12
+-- foo/foo.go --
+package foo
+
+const Foo = 42
+-- bar/bar.go --
+package bar
+
+import "mod.com/foo"
+
+const Bar = foo.Foo
+-- bar/bar_test.go --
+package bar
+
+import "mod.com/foo"
+
+const Baz = foo.Foo
+-- testdata/bar/bar.go --
+package bar
+
+import "mod.com/foox"
+
+const Bar = foox.Foo
+-- testdata/bar/bar_test.go --
+package bar
+
+import "mod.com/foox"
+
+const Baz = foox.Foo
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("foo/foo.go")
+ env.Rename("foo/foo.go", env.RegexpSearch("foo/foo.go", "package (foo)"), "foox")
+
+ checkTestdata(t, env)
+ })
+}
+
+func TestRenamePackage_IntermediateTestVariant(t *testing.T) {
+ // In this test set up, we have the following import edges:
+ // bar_test -> baz -> foo -> bar
+ // bar_test -> foo -> bar
+ // bar_test -> bar
+ //
+ // As a consequence, bar_x_test.go is in the reverse closure of both
+ // `foo [bar.test]` and `baz [bar.test]`. This test confirms that we don't
+ // produce duplicate edits in this case.
+ const files = `
+-- go.mod --
+module foo.mod
+
+go 1.12
+-- foo/foo.go --
+package foo
+
+import "foo.mod/bar"
+
+const Foo = 42
+
+const _ = bar.Bar
+-- baz/baz.go --
+package baz
+
+import "foo.mod/foo"
+
+const Baz = foo.Foo
+-- bar/bar.go --
+package bar
+
+var Bar = 123
+-- bar/bar_test.go --
+package bar
+
+const _ = Bar
+-- bar/bar_x_test.go --
+package bar_test
+
+import (
+ "foo.mod/bar"
+ "foo.mod/baz"
+ "foo.mod/foo"
+)
+
+const _ = bar.Bar + baz.Baz + foo.Foo
+-- testdata/foox/foo.go --
+package foox
+
+import "foo.mod/bar"
+
+const Foo = 42
+
+const _ = bar.Bar
+-- testdata/baz/baz.go --
+package baz
+
+import "foo.mod/foox"
+
+const Baz = foox.Foo
+-- testdata/bar/bar_x_test.go --
+package bar_test
+
+import (
+ "foo.mod/bar"
+ "foo.mod/baz"
+ "foo.mod/foox"
+)
+
+const _ = bar.Bar + baz.Baz + foox.Foo
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("foo/foo.go")
+ env.Rename("foo/foo.go", env.RegexpSearch("foo/foo.go", "package (foo)"), "foox")
+
+ checkTestdata(t, env)
+ })
+}
+
+func TestRenamePackage_Nesting(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+import "mod.com/lib/nested"
+
+const A = 1 + nested.B
+-- lib/nested/a.go --
+package nested
+
+const B = 1
+-- other/other.go --
+package other
+
+import (
+ "mod.com/lib"
+ "mod.com/lib/nested"
+)
+
+const C = lib.A + nested.B
+-- testdata/libx/a.go --
+package libx
+
+import "mod.com/libx/nested"
+
+const A = 1 + nested.B
+-- testdata/other/other.go --
+package other
+
+import (
+ "mod.com/libx"
+ "mod.com/libx/nested"
+)
+
+const C = libx.A + nested.B
+`
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ pos := env.RegexpSearch("lib/a.go", "package (lib)")
+ env.Rename("lib/a.go", pos, "libx")
+
+ checkTestdata(t, env)
+ })
+}
+
+func TestRenamePackage_InvalidName(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17)
+ const files = `
+-- go.mod --
+module mod.com
+
+go 1.18
+-- lib/a.go --
+package lib
+
+import "mod.com/lib/nested"
+
+const A = 1 + nested.B
+`
+
+ Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("lib/a.go")
+ pos := env.RegexpSearch("lib/a.go", "package (lib)")
+
+ for _, badName := range []string{"$$$", "lib_test"} {
+ if err := env.Editor.Rename(env.Ctx, "lib/a.go", pos, badName); err == nil {
+ t.Errorf("Rename(lib, libx) succeeded, want non-nil error")
+ }
+ }
+ })
+}
+
+// checkTestdata checks that current buffer contents match their corresponding
+// expected content in the testdata directory.
+func checkTestdata(t *testing.T, env *Env) {
+ t.Helper()
+ files := env.ListFiles("testdata")
+ if len(files) == 0 {
+ t.Fatal("no files in testdata directory")
+ }
+ for _, file := range files {
+ suffix := strings.TrimPrefix(file, "testdata/")
+ got := env.Editor.BufferText(suffix)
+ want := env.ReadWorkspaceFile(file)
+ if diff := compare.Text(want, got); diff != "" {
+ t.Errorf("Rename: unexpected buffer content for %s (-want +got):\n%s", suffix, diff)
+ }
+ }
+}
diff --git a/gopls/internal/regtest/misc/staticcheck_test.go b/gopls/internal/regtest/misc/staticcheck_test.go
index 8676690..f2ba3cc 100644
--- a/gopls/internal/regtest/misc/staticcheck_test.go
+++ b/gopls/internal/regtest/misc/staticcheck_test.go
@@ -74,3 +74,40 @@
)
})
}
+
+// Test for golang/go#56270: an analysis with related info should not panic if
+// analysis.RelatedInformation.End is not set.
+func TestStaticcheckRelatedInfo(t *testing.T) {
+ testenv.NeedsGo1Point(t, 17) // staticcheck is only supported at Go 1.17+
+ const files = `
+-- go.mod --
+module mod.test
+
+go 1.18
+-- p.go --
+package p
+
+import (
+ "fmt"
+)
+
+func Foo(enabled interface{}) {
+ if enabled, ok := enabled.(bool); ok {
+ } else {
+ _ = fmt.Sprintf("invalid type %T", enabled) // enabled is always bool here
+ }
+}
+`
+
+ WithOptions(
+ Settings{"staticcheck": true},
+ ).Run(t, files, func(t *testing.T, env *Env) {
+ env.OpenFile("p.go")
+ env.Await(
+ OnceMet(
+ env.DoneWithOpen(),
+ env.DiagnosticAtRegexpFromSource("p.go", ", (enabled)", "SA9008"),
+ ),
+ )
+ })
+}
diff --git a/gopls/internal/regtest/workspace/standalone_test.go b/gopls/internal/regtest/workspace/standalone_test.go
index 1e51b31..86e3441 100644
--- a/gopls/internal/regtest/workspace/standalone_test.go
+++ b/gopls/internal/regtest/workspace/standalone_test.go
@@ -214,7 +214,7 @@
WithOptions(
Settings{
- "standaloneTags": []string{"standalone"},
+ "standaloneTags": []string{"standalone", "script"},
},
).Run(t, files, func(t *testing.T, env *Env) {
env.OpenFile("ignore.go")
diff --git a/gopls/internal/span/token.go b/gopls/internal/span/token.go
index da4dc22..6bde300 100644
--- a/gopls/internal/span/token.go
+++ b/gopls/internal/span/token.go
@@ -19,7 +19,9 @@
Start, End token.Pos // both IsValid()
}
-// NewRange creates a new Range from a token.File and two valid positions within it.
+// NewRange creates a new Range from a token.File and two positions within it.
+// The given start position must be valid; if end is invalid, start is used as
+// the end position.
//
// (If you only have a token.FileSet, use file = fset.File(start). But
// most callers know exactly which token.File they're dealing with and
@@ -33,7 +35,7 @@
panic("invalid start token.Pos")
}
if !end.IsValid() {
- panic("invalid end token.Pos")
+ end = start
}
// TODO(adonovan): ideally we would make this stronger assertion:
diff --git a/internal/imports/fix.go b/internal/imports/fix.go
index 45a492e..9b7b106 100644
--- a/internal/imports/fix.go
+++ b/internal/imports/fix.go
@@ -1372,9 +1372,9 @@
return err
}
var roots []gopathwalk.Root
- roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT})
+ roots = append(roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "src"), Type: gopathwalk.RootGOROOT})
for _, p := range filepath.SplitList(goenv["GOPATH"]) {
- roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH})
+ roots = append(roots, gopathwalk.Root{Path: filepath.Join(p, "src"), Type: gopathwalk.RootGOPATH})
}
// The callback is not necessarily safe to use in the goroutine below. Process roots eagerly.
roots = filterRoots(roots, callback.rootFound)
diff --git a/internal/imports/mkstdlib.go b/internal/imports/mkstdlib.go
index 072e4e1..a73f024 100644
--- a/internal/imports/mkstdlib.go
+++ b/internal/imports/mkstdlib.go
@@ -49,6 +49,11 @@
outf := func(format string, args ...interface{}) {
fmt.Fprintf(&buf, format, args...)
}
+ outf(`// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+`)
outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
outf("package imports\n")
outf("var stdlib = map[string][]string{\n")
@@ -77,7 +82,7 @@
}
sort.Strings(paths)
for _, path := range paths {
- outf("\t%q: []string{\n", path)
+ outf("\t%q: {\n", path)
pkg := pkgs[path]
var syms []string
for sym := range pkg {
diff --git a/internal/imports/mod.go b/internal/imports/mod.go
index dec388b..7d99d04 100644
--- a/internal/imports/mod.go
+++ b/internal/imports/mod.go
@@ -129,22 +129,22 @@
})
r.roots = []gopathwalk.Root{
- {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT},
+ {Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT},
}
r.mainByDir = make(map[string]*gocommand.ModuleJSON)
for _, main := range r.mains {
- r.roots = append(r.roots, gopathwalk.Root{main.Dir, gopathwalk.RootCurrentModule})
+ r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule})
r.mainByDir[main.Dir] = main
}
if vendorEnabled {
- r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
+ r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther})
} else {
addDep := func(mod *gocommand.ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough.
- r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache})
+ r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
} else {
- r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
+ r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
}
}
// Walk dependent modules before scanning the full mod cache, direct deps first.
@@ -158,7 +158,7 @@
addDep(mod)
}
}
- r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
+ r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
}
r.scannedRoots = map[gopathwalk.Root]bool{}
diff --git a/internal/imports/zstdlib.go b/internal/imports/zstdlib.go
index 116a149..5db9b2d 100644
--- a/internal/imports/zstdlib.go
+++ b/internal/imports/zstdlib.go
@@ -1,9 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
// Code generated by mkstdlib.go. DO NOT EDIT.
package imports
var stdlib = map[string][]string{
- "archive/tar": []string{
+ "archive/tar": {
"ErrFieldTooLong",
"ErrHeader",
"ErrWriteAfterClose",
@@ -34,7 +38,7 @@
"TypeXHeader",
"Writer",
},
- "archive/zip": []string{
+ "archive/zip": {
"Compressor",
"Decompressor",
"Deflate",
@@ -54,7 +58,7 @@
"Store",
"Writer",
},
- "bufio": []string{
+ "bufio": {
"ErrAdvanceTooFar",
"ErrBadReadCount",
"ErrBufferFull",
@@ -81,7 +85,7 @@
"SplitFunc",
"Writer",
},
- "bytes": []string{
+ "bytes": {
"Buffer",
"Compare",
"Contains",
@@ -138,11 +142,11 @@
"TrimSpace",
"TrimSuffix",
},
- "compress/bzip2": []string{
+ "compress/bzip2": {
"NewReader",
"StructuralError",
},
- "compress/flate": []string{
+ "compress/flate": {
"BestCompression",
"BestSpeed",
"CorruptInputError",
@@ -160,7 +164,7 @@
"WriteError",
"Writer",
},
- "compress/gzip": []string{
+ "compress/gzip": {
"BestCompression",
"BestSpeed",
"DefaultCompression",
@@ -175,7 +179,7 @@
"Reader",
"Writer",
},
- "compress/lzw": []string{
+ "compress/lzw": {
"LSB",
"MSB",
"NewReader",
@@ -184,7 +188,7 @@
"Reader",
"Writer",
},
- "compress/zlib": []string{
+ "compress/zlib": {
"BestCompression",
"BestSpeed",
"DefaultCompression",
@@ -201,7 +205,7 @@
"Resetter",
"Writer",
},
- "container/heap": []string{
+ "container/heap": {
"Fix",
"Init",
"Interface",
@@ -209,16 +213,16 @@
"Push",
"Remove",
},
- "container/list": []string{
+ "container/list": {
"Element",
"List",
"New",
},
- "container/ring": []string{
+ "container/ring": {
"New",
"Ring",
},
- "context": []string{
+ "context": {
"Background",
"CancelFunc",
"Canceled",
@@ -230,7 +234,7 @@
"WithTimeout",
"WithValue",
},
- "crypto": []string{
+ "crypto": {
"BLAKE2b_256",
"BLAKE2b_384",
"BLAKE2b_512",
@@ -259,12 +263,12 @@
"Signer",
"SignerOpts",
},
- "crypto/aes": []string{
+ "crypto/aes": {
"BlockSize",
"KeySizeError",
"NewCipher",
},
- "crypto/cipher": []string{
+ "crypto/cipher": {
"AEAD",
"Block",
"BlockMode",
@@ -281,13 +285,13 @@
"StreamReader",
"StreamWriter",
},
- "crypto/des": []string{
+ "crypto/des": {
"BlockSize",
"KeySizeError",
"NewCipher",
"NewTripleDESCipher",
},
- "crypto/dsa": []string{
+ "crypto/dsa": {
"ErrInvalidPublicKey",
"GenerateKey",
"GenerateParameters",
@@ -302,7 +306,7 @@
"Sign",
"Verify",
},
- "crypto/ecdsa": []string{
+ "crypto/ecdsa": {
"GenerateKey",
"PrivateKey",
"PublicKey",
@@ -311,7 +315,7 @@
"Verify",
"VerifyASN1",
},
- "crypto/ed25519": []string{
+ "crypto/ed25519": {
"GenerateKey",
"NewKeyFromSeed",
"PrivateKey",
@@ -323,7 +327,7 @@
"SignatureSize",
"Verify",
},
- "crypto/elliptic": []string{
+ "crypto/elliptic": {
"Curve",
"CurveParams",
"GenerateKey",
@@ -336,28 +340,28 @@
"Unmarshal",
"UnmarshalCompressed",
},
- "crypto/hmac": []string{
+ "crypto/hmac": {
"Equal",
"New",
},
- "crypto/md5": []string{
+ "crypto/md5": {
"BlockSize",
"New",
"Size",
"Sum",
},
- "crypto/rand": []string{
+ "crypto/rand": {
"Int",
"Prime",
"Read",
"Reader",
},
- "crypto/rc4": []string{
+ "crypto/rc4": {
"Cipher",
"KeySizeError",
"NewCipher",
},
- "crypto/rsa": []string{
+ "crypto/rsa": {
"CRTValue",
"DecryptOAEP",
"DecryptPKCS1v15",
@@ -382,13 +386,13 @@
"VerifyPKCS1v15",
"VerifyPSS",
},
- "crypto/sha1": []string{
+ "crypto/sha1": {
"BlockSize",
"New",
"Size",
"Sum",
},
- "crypto/sha256": []string{
+ "crypto/sha256": {
"BlockSize",
"New",
"New224",
@@ -397,7 +401,7 @@
"Sum224",
"Sum256",
},
- "crypto/sha512": []string{
+ "crypto/sha512": {
"BlockSize",
"New",
"New384",
@@ -412,7 +416,7 @@
"Sum512_224",
"Sum512_256",
},
- "crypto/subtle": []string{
+ "crypto/subtle": {
"ConstantTimeByteEq",
"ConstantTimeCompare",
"ConstantTimeCopy",
@@ -420,7 +424,7 @@
"ConstantTimeLessOrEq",
"ConstantTimeSelect",
},
- "crypto/tls": []string{
+ "crypto/tls": {
"Certificate",
"CertificateRequestInfo",
"CipherSuite",
@@ -506,7 +510,7 @@
"X25519",
"X509KeyPair",
},
- "crypto/x509": []string{
+ "crypto/x509": {
"CANotAuthorizedForExtKeyUsage",
"CANotAuthorizedForThisName",
"CertPool",
@@ -612,7 +616,7 @@
"UnknownSignatureAlgorithm",
"VerifyOptions",
},
- "crypto/x509/pkix": []string{
+ "crypto/x509/pkix": {
"AlgorithmIdentifier",
"AttributeTypeAndValue",
"AttributeTypeAndValueSET",
@@ -624,7 +628,7 @@
"RevokedCertificate",
"TBSCertificateList",
},
- "database/sql": []string{
+ "database/sql": {
"ColumnType",
"Conn",
"DB",
@@ -665,7 +669,7 @@
"Tx",
"TxOptions",
},
- "database/sql/driver": []string{
+ "database/sql/driver": {
"Bool",
"ColumnConverter",
"Conn",
@@ -713,12 +717,12 @@
"ValueConverter",
"Valuer",
},
- "debug/buildinfo": []string{
+ "debug/buildinfo": {
"BuildInfo",
"Read",
"ReadFile",
},
- "debug/dwarf": []string{
+ "debug/dwarf": {
"AddrType",
"ArrayType",
"Attr",
@@ -969,7 +973,7 @@
"UnsupportedType",
"VoidType",
},
- "debug/elf": []string{
+ "debug/elf": {
"ARM_MAGIC_TRAMP_NUMBER",
"COMPRESS_HIOS",
"COMPRESS_HIPROC",
@@ -2368,7 +2372,7 @@
"Type",
"Version",
},
- "debug/gosym": []string{
+ "debug/gosym": {
"DecodingError",
"Func",
"LineTable",
@@ -2380,7 +2384,7 @@
"UnknownFileError",
"UnknownLineError",
},
- "debug/macho": []string{
+ "debug/macho": {
"ARM64_RELOC_ADDEND",
"ARM64_RELOC_BRANCH26",
"ARM64_RELOC_GOT_LOAD_PAGE21",
@@ -2510,7 +2514,7 @@
"X86_64_RELOC_TLV",
"X86_64_RELOC_UNSIGNED",
},
- "debug/pe": []string{
+ "debug/pe": {
"COFFSymbol",
"COFFSymbolAuxFormat5",
"COFFSymbolSize",
@@ -2623,7 +2627,7 @@
"StringTable",
"Symbol",
},
- "debug/plan9obj": []string{
+ "debug/plan9obj": {
"ErrNoSymbols",
"File",
"FileHeader",
@@ -2637,16 +2641,16 @@
"SectionHeader",
"Sym",
},
- "embed": []string{
+ "embed": {
"FS",
},
- "encoding": []string{
+ "encoding": {
"BinaryMarshaler",
"BinaryUnmarshaler",
"TextMarshaler",
"TextUnmarshaler",
},
- "encoding/ascii85": []string{
+ "encoding/ascii85": {
"CorruptInputError",
"Decode",
"Encode",
@@ -2654,7 +2658,7 @@
"NewDecoder",
"NewEncoder",
},
- "encoding/asn1": []string{
+ "encoding/asn1": {
"BitString",
"ClassApplication",
"ClassContextSpecific",
@@ -2692,7 +2696,7 @@
"Unmarshal",
"UnmarshalWithParams",
},
- "encoding/base32": []string{
+ "encoding/base32": {
"CorruptInputError",
"Encoding",
"HexEncoding",
@@ -2703,7 +2707,7 @@
"StdEncoding",
"StdPadding",
},
- "encoding/base64": []string{
+ "encoding/base64": {
"CorruptInputError",
"Encoding",
"NewDecoder",
@@ -2716,7 +2720,7 @@
"StdPadding",
"URLEncoding",
},
- "encoding/binary": []string{
+ "encoding/binary": {
"AppendByteOrder",
"AppendUvarint",
"AppendVarint",
@@ -2736,7 +2740,7 @@
"Varint",
"Write",
},
- "encoding/csv": []string{
+ "encoding/csv": {
"ErrBareQuote",
"ErrFieldCount",
"ErrQuote",
@@ -2747,7 +2751,7 @@
"Reader",
"Writer",
},
- "encoding/gob": []string{
+ "encoding/gob": {
"CommonType",
"Decoder",
"Encoder",
@@ -2758,7 +2762,7 @@
"Register",
"RegisterName",
},
- "encoding/hex": []string{
+ "encoding/hex": {
"Decode",
"DecodeString",
"DecodedLen",
@@ -2772,7 +2776,7 @@
"NewDecoder",
"NewEncoder",
},
- "encoding/json": []string{
+ "encoding/json": {
"Compact",
"Decoder",
"Delim",
@@ -2799,13 +2803,13 @@
"UnsupportedValueError",
"Valid",
},
- "encoding/pem": []string{
+ "encoding/pem": {
"Block",
"Decode",
"Encode",
"EncodeToMemory",
},
- "encoding/xml": []string{
+ "encoding/xml": {
"Attr",
"CharData",
"Comment",
@@ -2839,13 +2843,13 @@
"UnmarshalerAttr",
"UnsupportedTypeError",
},
- "errors": []string{
+ "errors": {
"As",
"Is",
"New",
"Unwrap",
},
- "expvar": []string{
+ "expvar": {
"Do",
"Float",
"Func",
@@ -2862,7 +2866,7 @@
"String",
"Var",
},
- "flag": []string{
+ "flag": {
"Arg",
"Args",
"Bool",
@@ -2907,7 +2911,7 @@
"Visit",
"VisitAll",
},
- "fmt": []string{
+ "fmt": {
"Append",
"Appendf",
"Appendln",
@@ -2937,7 +2941,7 @@
"State",
"Stringer",
},
- "go/ast": []string{
+ "go/ast": {
"ArrayType",
"AssignStmt",
"Bad",
@@ -3040,7 +3044,7 @@
"Visitor",
"Walk",
},
- "go/build": []string{
+ "go/build": {
"AllowBinary",
"ArchChar",
"Context",
@@ -3057,7 +3061,7 @@
"Package",
"ToolDir",
},
- "go/build/constraint": []string{
+ "go/build/constraint": {
"AndExpr",
"Expr",
"IsGoBuild",
@@ -3069,7 +3073,7 @@
"SyntaxError",
"TagExpr",
},
- "go/constant": []string{
+ "go/constant": {
"BinaryOp",
"BitLen",
"Bool",
@@ -3110,7 +3114,7 @@
"Val",
"Value",
},
- "go/doc": []string{
+ "go/doc": {
"AllDecls",
"AllMethods",
"Example",
@@ -3131,7 +3135,7 @@
"Type",
"Value",
},
- "go/doc/comment": []string{
+ "go/doc/comment": {
"Block",
"Code",
"DefaultLookupPackage",
@@ -3149,17 +3153,17 @@
"Printer",
"Text",
},
- "go/format": []string{
+ "go/format": {
"Node",
"Source",
},
- "go/importer": []string{
+ "go/importer": {
"Default",
"For",
"ForCompiler",
"Lookup",
},
- "go/parser": []string{
+ "go/parser": {
"AllErrors",
"DeclarationErrors",
"ImportsOnly",
@@ -3174,7 +3178,7 @@
"SpuriousErrors",
"Trace",
},
- "go/printer": []string{
+ "go/printer": {
"CommentedNode",
"Config",
"Fprint",
@@ -3184,7 +3188,7 @@
"TabIndent",
"UseSpaces",
},
- "go/scanner": []string{
+ "go/scanner": {
"Error",
"ErrorHandler",
"ErrorList",
@@ -3193,7 +3197,7 @@
"ScanComments",
"Scanner",
},
- "go/token": []string{
+ "go/token": {
"ADD",
"ADD_ASSIGN",
"AND",
@@ -3291,7 +3295,7 @@
"XOR",
"XOR_ASSIGN",
},
- "go/types": []string{
+ "go/types": {
"ArgumentError",
"Array",
"AssertableTo",
@@ -3442,17 +3446,17 @@
"WriteSignature",
"WriteType",
},
- "hash": []string{
+ "hash": {
"Hash",
"Hash32",
"Hash64",
},
- "hash/adler32": []string{
+ "hash/adler32": {
"Checksum",
"New",
"Size",
},
- "hash/crc32": []string{
+ "hash/crc32": {
"Castagnoli",
"Checksum",
"ChecksumIEEE",
@@ -3466,7 +3470,7 @@
"Table",
"Update",
},
- "hash/crc64": []string{
+ "hash/crc64": {
"Checksum",
"ECMA",
"ISO",
@@ -3476,7 +3480,7 @@
"Table",
"Update",
},
- "hash/fnv": []string{
+ "hash/fnv": {
"New128",
"New128a",
"New32",
@@ -3484,18 +3488,18 @@
"New64",
"New64a",
},
- "hash/maphash": []string{
+ "hash/maphash": {
"Bytes",
"Hash",
"MakeSeed",
"Seed",
"String",
},
- "html": []string{
+ "html": {
"EscapeString",
"UnescapeString",
},
- "html/template": []string{
+ "html/template": {
"CSS",
"ErrAmbigContext",
"ErrBadHTML",
@@ -3533,7 +3537,7 @@
"URL",
"URLQueryEscaper",
},
- "image": []string{
+ "image": {
"Alpha",
"Alpha16",
"Black",
@@ -3586,7 +3590,7 @@
"ZP",
"ZR",
},
- "image/color": []string{
+ "image/color": {
"Alpha",
"Alpha16",
"Alpha16Model",
@@ -3622,11 +3626,11 @@
"YCbCrModel",
"YCbCrToRGB",
},
- "image/color/palette": []string{
+ "image/color/palette": {
"Plan9",
"WebSafe",
},
- "image/draw": []string{
+ "image/draw": {
"Draw",
"DrawMask",
"Drawer",
@@ -3638,7 +3642,7 @@
"RGBA64Image",
"Src",
},
- "image/gif": []string{
+ "image/gif": {
"Decode",
"DecodeAll",
"DecodeConfig",
@@ -3650,7 +3654,7 @@
"GIF",
"Options",
},
- "image/jpeg": []string{
+ "image/jpeg": {
"Decode",
"DecodeConfig",
"DefaultQuality",
@@ -3660,7 +3664,7 @@
"Reader",
"UnsupportedError",
},
- "image/png": []string{
+ "image/png": {
"BestCompression",
"BestSpeed",
"CompressionLevel",
@@ -3675,11 +3679,11 @@
"NoCompression",
"UnsupportedError",
},
- "index/suffixarray": []string{
+ "index/suffixarray": {
"Index",
"New",
},
- "io": []string{
+ "io": {
"ByteReader",
"ByteScanner",
"ByteWriter",
@@ -3731,7 +3735,7 @@
"WriterAt",
"WriterTo",
},
- "io/fs": []string{
+ "io/fs": {
"DirEntry",
"ErrClosed",
"ErrExist",
@@ -3775,7 +3779,7 @@
"WalkDir",
"WalkDirFunc",
},
- "io/ioutil": []string{
+ "io/ioutil": {
"Discard",
"NopCloser",
"ReadAll",
@@ -3785,7 +3789,7 @@
"TempFile",
"WriteFile",
},
- "log": []string{
+ "log": {
"Default",
"Fatal",
"Fatalf",
@@ -3814,7 +3818,7 @@
"SetPrefix",
"Writer",
},
- "log/syslog": []string{
+ "log/syslog": {
"Dial",
"LOG_ALERT",
"LOG_AUTH",
@@ -3849,7 +3853,7 @@
"Priority",
"Writer",
},
- "math": []string{
+ "math": {
"Abs",
"Acos",
"Acosh",
@@ -3948,7 +3952,7 @@
"Y1",
"Yn",
},
- "math/big": []string{
+ "math/big": {
"Above",
"Accuracy",
"AwayFromZero",
@@ -3975,7 +3979,7 @@
"ToZero",
"Word",
},
- "math/bits": []string{
+ "math/bits": {
"Add",
"Add32",
"Add64",
@@ -4027,7 +4031,7 @@
"TrailingZeros8",
"UintSize",
},
- "math/cmplx": []string{
+ "math/cmplx": {
"Abs",
"Acos",
"Acosh",
@@ -4056,7 +4060,7 @@
"Tan",
"Tanh",
},
- "math/rand": []string{
+ "math/rand": {
"ExpFloat64",
"Float32",
"Float64",
@@ -4081,7 +4085,7 @@
"Uint64",
"Zipf",
},
- "mime": []string{
+ "mime": {
"AddExtensionType",
"BEncoding",
"ErrInvalidMediaParameter",
@@ -4093,7 +4097,7 @@
"WordDecoder",
"WordEncoder",
},
- "mime/multipart": []string{
+ "mime/multipart": {
"ErrMessageTooLarge",
"File",
"FileHeader",
@@ -4104,13 +4108,13 @@
"Reader",
"Writer",
},
- "mime/quotedprintable": []string{
+ "mime/quotedprintable": {
"NewReader",
"NewWriter",
"Reader",
"Writer",
},
- "net": []string{
+ "net": {
"Addr",
"AddrError",
"Buffers",
@@ -4212,7 +4216,7 @@
"UnixListener",
"UnknownNetworkError",
},
- "net/http": []string{
+ "net/http": {
"AllowQuerySemicolons",
"CanonicalHeaderKey",
"Client",
@@ -4388,25 +4392,25 @@
"TrailerPrefix",
"Transport",
},
- "net/http/cgi": []string{
+ "net/http/cgi": {
"Handler",
"Request",
"RequestFromMap",
"Serve",
},
- "net/http/cookiejar": []string{
+ "net/http/cookiejar": {
"Jar",
"New",
"Options",
"PublicSuffixList",
},
- "net/http/fcgi": []string{
+ "net/http/fcgi": {
"ErrConnClosed",
"ErrRequestAborted",
"ProcessEnv",
"Serve",
},
- "net/http/httptest": []string{
+ "net/http/httptest": {
"DefaultRemoteAddr",
"NewRecorder",
"NewRequest",
@@ -4416,7 +4420,7 @@
"ResponseRecorder",
"Server",
},
- "net/http/httptrace": []string{
+ "net/http/httptrace": {
"ClientTrace",
"ContextClientTrace",
"DNSDoneInfo",
@@ -4425,7 +4429,7 @@
"WithClientTrace",
"WroteRequestInfo",
},
- "net/http/httputil": []string{
+ "net/http/httputil": {
"BufferPool",
"ClientConn",
"DumpRequest",
@@ -4444,7 +4448,7 @@
"ReverseProxy",
"ServerConn",
},
- "net/http/pprof": []string{
+ "net/http/pprof": {
"Cmdline",
"Handler",
"Index",
@@ -4452,7 +4456,7 @@
"Symbol",
"Trace",
},
- "net/mail": []string{
+ "net/mail": {
"Address",
"AddressParser",
"ErrHeaderNotPresent",
@@ -4463,7 +4467,7 @@
"ParseDate",
"ReadMessage",
},
- "net/netip": []string{
+ "net/netip": {
"Addr",
"AddrFrom16",
"AddrFrom4",
@@ -4482,7 +4486,7 @@
"Prefix",
"PrefixFrom",
},
- "net/rpc": []string{
+ "net/rpc": {
"Accept",
"Call",
"Client",
@@ -4509,14 +4513,14 @@
"ServerCodec",
"ServerError",
},
- "net/rpc/jsonrpc": []string{
+ "net/rpc/jsonrpc": {
"Dial",
"NewClient",
"NewClientCodec",
"NewServerCodec",
"ServeConn",
},
- "net/smtp": []string{
+ "net/smtp": {
"Auth",
"CRAMMD5Auth",
"Client",
@@ -4526,7 +4530,7 @@
"SendMail",
"ServerInfo",
},
- "net/textproto": []string{
+ "net/textproto": {
"CanonicalMIMEHeaderKey",
"Conn",
"Dial",
@@ -4542,7 +4546,7 @@
"TrimString",
"Writer",
},
- "net/url": []string{
+ "net/url": {
"Error",
"EscapeError",
"InvalidHostError",
@@ -4560,7 +4564,7 @@
"Userinfo",
"Values",
},
- "os": []string{
+ "os": {
"Args",
"Chdir",
"Chmod",
@@ -4676,7 +4680,7 @@
"UserHomeDir",
"WriteFile",
},
- "os/exec": []string{
+ "os/exec": {
"Cmd",
"Command",
"CommandContext",
@@ -4686,7 +4690,7 @@
"ExitError",
"LookPath",
},
- "os/signal": []string{
+ "os/signal": {
"Ignore",
"Ignored",
"Notify",
@@ -4694,7 +4698,7 @@
"Reset",
"Stop",
},
- "os/user": []string{
+ "os/user": {
"Current",
"Group",
"Lookup",
@@ -4707,7 +4711,7 @@
"UnknownUserIdError",
"User",
},
- "path": []string{
+ "path": {
"Base",
"Clean",
"Dir",
@@ -4718,7 +4722,7 @@
"Match",
"Split",
},
- "path/filepath": []string{
+ "path/filepath": {
"Abs",
"Base",
"Clean",
@@ -4744,12 +4748,12 @@
"WalkDir",
"WalkFunc",
},
- "plugin": []string{
+ "plugin": {
"Open",
"Plugin",
"Symbol",
},
- "reflect": []string{
+ "reflect": {
"Append",
"AppendSlice",
"Array",
@@ -4824,7 +4828,7 @@
"VisibleFields",
"Zero",
},
- "regexp": []string{
+ "regexp": {
"Compile",
"CompilePOSIX",
"Match",
@@ -4835,7 +4839,7 @@
"QuoteMeta",
"Regexp",
},
- "regexp/syntax": []string{
+ "regexp/syntax": {
"ClassNL",
"Compile",
"DotNL",
@@ -4914,7 +4918,7 @@
"UnicodeGroups",
"WasDollar",
},
- "runtime": []string{
+ "runtime": {
"BlockProfile",
"BlockProfileRecord",
"Breakpoint",
@@ -4962,11 +4966,11 @@
"UnlockOSThread",
"Version",
},
- "runtime/cgo": []string{
+ "runtime/cgo": {
"Handle",
"NewHandle",
},
- "runtime/debug": []string{
+ "runtime/debug": {
"BuildInfo",
"BuildSetting",
"FreeOSMemory",
@@ -4985,7 +4989,7 @@
"Stack",
"WriteHeapDump",
},
- "runtime/metrics": []string{
+ "runtime/metrics": {
"All",
"Description",
"Float64Histogram",
@@ -4998,7 +5002,7 @@
"Value",
"ValueKind",
},
- "runtime/pprof": []string{
+ "runtime/pprof": {
"Do",
"ForLabels",
"Label",
@@ -5014,7 +5018,7 @@
"WithLabels",
"WriteHeapProfile",
},
- "runtime/trace": []string{
+ "runtime/trace": {
"IsEnabled",
"Log",
"Logf",
@@ -5026,7 +5030,7 @@
"Task",
"WithRegion",
},
- "sort": []string{
+ "sort": {
"Find",
"Float64Slice",
"Float64s",
@@ -5050,7 +5054,7 @@
"Strings",
"StringsAreSorted",
},
- "strconv": []string{
+ "strconv": {
"AppendBool",
"AppendFloat",
"AppendInt",
@@ -5090,7 +5094,7 @@
"Unquote",
"UnquoteChar",
},
- "strings": []string{
+ "strings": {
"Builder",
"Clone",
"Compare",
@@ -5144,7 +5148,7 @@
"TrimSpace",
"TrimSuffix",
},
- "sync": []string{
+ "sync": {
"Cond",
"Locker",
"Map",
@@ -5155,7 +5159,7 @@
"RWMutex",
"WaitGroup",
},
- "sync/atomic": []string{
+ "sync/atomic": {
"AddInt32",
"AddInt64",
"AddUint32",
@@ -5194,7 +5198,7 @@
"Uintptr",
"Value",
},
- "syscall": []string{
+ "syscall": {
"AF_ALG",
"AF_APPLETALK",
"AF_ARP",
@@ -10344,7 +10348,7 @@
"XP1_UNI_RECV",
"XP1_UNI_SEND",
},
- "syscall/js": []string{
+ "syscall/js": {
"CopyBytesToGo",
"CopyBytesToJS",
"Error",
@@ -10366,7 +10370,7 @@
"ValueError",
"ValueOf",
},
- "testing": []string{
+ "testing": {
"AllocsPerRun",
"B",
"Benchmark",
@@ -10394,12 +10398,12 @@
"TB",
"Verbose",
},
- "testing/fstest": []string{
+ "testing/fstest": {
"MapFS",
"MapFile",
"TestFS",
},
- "testing/iotest": []string{
+ "testing/iotest": {
"DataErrReader",
"ErrReader",
"ErrTimeout",
@@ -10411,7 +10415,7 @@
"TimeoutReader",
"TruncateWriter",
},
- "testing/quick": []string{
+ "testing/quick": {
"Check",
"CheckEqual",
"CheckEqualError",
@@ -10421,7 +10425,7 @@
"SetupError",
"Value",
},
- "text/scanner": []string{
+ "text/scanner": {
"Char",
"Comment",
"EOF",
@@ -10444,7 +10448,7 @@
"String",
"TokenString",
},
- "text/tabwriter": []string{
+ "text/tabwriter": {
"AlignRight",
"Debug",
"DiscardEmptyColumns",
@@ -10455,7 +10459,7 @@
"TabIndent",
"Writer",
},
- "text/template": []string{
+ "text/template": {
"ExecError",
"FuncMap",
"HTMLEscape",
@@ -10473,7 +10477,7 @@
"Template",
"URLQueryEscaper",
},
- "text/template/parse": []string{
+ "text/template/parse": {
"ActionNode",
"BoolNode",
"BranchNode",
@@ -10529,7 +10533,7 @@
"VariableNode",
"WithNode",
},
- "time": []string{
+ "time": {
"ANSIC",
"After",
"AfterFunc",
@@ -10601,7 +10605,7 @@
"Wednesday",
"Weekday",
},
- "unicode": []string{
+ "unicode": {
"ASCII_Hex_Digit",
"Adlam",
"Ahom",
@@ -10887,14 +10891,14 @@
"Zp",
"Zs",
},
- "unicode/utf16": []string{
+ "unicode/utf16": {
"Decode",
"DecodeRune",
"Encode",
"EncodeRune",
"IsSurrogate",
},
- "unicode/utf8": []string{
+ "unicode/utf8": {
"AppendRune",
"DecodeLastRune",
"DecodeLastRuneInString",
@@ -10915,7 +10919,7 @@
"ValidRune",
"ValidString",
},
- "unsafe": []string{
+ "unsafe": {
"Alignof",
"ArbitraryType",
"Offsetof",
diff --git a/internal/jsonrpc2_v2/conn.go b/internal/jsonrpc2_v2/conn.go
index 47668e2..7995f40 100644
--- a/internal/jsonrpc2_v2/conn.go
+++ b/internal/jsonrpc2_v2/conn.go
@@ -9,6 +9,7 @@
"encoding/json"
"fmt"
"io"
+ "sync"
"sync/atomic"
"golang.org/x/tools/internal/event"
@@ -27,6 +28,15 @@
Bind(context.Context, *Connection) (ConnectionOptions, error)
}
+// A BinderFunc implements the Binder interface for a standalone Bind function.
+type BinderFunc func(context.Context, *Connection) (ConnectionOptions, error)
+
+func (f BinderFunc) Bind(ctx context.Context, c *Connection) (ConnectionOptions, error) {
+ return f(ctx, c)
+}
+
+var _ Binder = BinderFunc(nil)
+
// ConnectionOptions holds the options for new connections.
type ConnectionOptions struct {
// Framer allows control over the message framing and encoding.
@@ -45,19 +55,22 @@
// Connection is bidirectional; it does not have a designated server or client
// end.
type Connection struct {
- seq int64 // must only be accessed using atomic operations
- closer io.Closer
- writerBox chan Writer
- outgoingBox chan map[ID]chan<- *Response
- incomingBox chan map[ID]*incoming
- async *async
+ seq int64 // must only be accessed using atomic operations
+
+ closeOnce sync.Once
+ closer io.Closer
+
+ writer chan Writer
+ outgoing chan map[ID]chan<- *Response
+ incoming chan map[ID]*incoming
+ async *async
}
type AsyncCall struct {
- id ID
- response chan *Response // the channel a response will be delivered on
- resultBox chan asyncResult
- endSpan func() // close the tracing span when all processing for the message is complete
+ id ID
+ response chan *Response // the channel a response will be delivered on
+ result chan asyncResult
+ endSpan func() // close the tracing span when all processing for the message is complete
}
type asyncResult struct {
@@ -83,11 +96,11 @@
// This is used by the Dial and Serve functions to build the actual connection.
func newConnection(ctx context.Context, rwc io.ReadWriteCloser, binder Binder) (*Connection, error) {
c := &Connection{
- closer: rwc,
- writerBox: make(chan Writer, 1),
- outgoingBox: make(chan map[ID]chan<- *Response, 1),
- incomingBox: make(chan map[ID]*incoming, 1),
- async: newAsync(),
+ closer: rwc,
+ writer: make(chan Writer, 1),
+ outgoing: make(chan map[ID]chan<- *Response, 1),
+ incoming: make(chan map[ID]*incoming, 1),
+ async: newAsync(),
}
options, err := binder.Bind(ctx, c)
@@ -103,8 +116,8 @@
if options.Handler == nil {
options.Handler = defaultHandler{}
}
- c.outgoingBox <- make(map[ID]chan<- *Response)
- c.incomingBox <- make(map[ID]*incoming)
+ c.outgoing <- make(map[ID]chan<- *Response)
+ c.incoming <- make(map[ID]*incoming)
// the goroutines started here will continue until the underlying stream is closed
reader := options.Framer.Reader(rwc)
readToQueue := make(chan *incoming)
@@ -115,7 +128,7 @@
// releaseing the writer must be the last thing we do in case any requests
// are blocked waiting for the connection to be ready
- c.writerBox <- options.Framer.Writer(rwc)
+ c.writer <- options.Framer.Writer(rwc)
return c, nil
}
@@ -150,14 +163,14 @@
// If sending the call failed, the response will be ready and have the error in it.
func (c *Connection) Call(ctx context.Context, method string, params interface{}) *AsyncCall {
result := &AsyncCall{
- id: Int64ID(atomic.AddInt64(&c.seq, 1)),
- resultBox: make(chan asyncResult, 1),
+ id: Int64ID(atomic.AddInt64(&c.seq, 1)),
+ result: make(chan asyncResult, 1),
}
// generate a new request identifier
call, err := NewCall(result.id, method, params)
if err != nil {
//set the result to failed
- result.resultBox <- asyncResult{err: fmt.Errorf("marshaling call parameters: %w", err)}
+ result.result <- asyncResult{err: fmt.Errorf("marshaling call parameters: %w", err)}
return result
}
ctx, endSpan := event.Start(ctx, method,
@@ -171,9 +184,25 @@
// are racing the response.
// rchan is buffered in case the response arrives without a listener.
result.response = make(chan *Response, 1)
- pending := <-c.outgoingBox
- pending[result.id] = result.response
- c.outgoingBox <- pending
+ outgoing, ok := <-c.outgoing
+ if !ok {
+ // If the call failed due to (say) an I/O error or broken pipe, attribute it
+ // as such. (If the error is nil, then the connection must have been shut
+ // down cleanly.)
+ err := c.async.wait()
+ if err == nil {
+ err = ErrClientClosing
+ }
+
+ resp, respErr := NewResponse(result.id, nil, err)
+ if respErr != nil {
+ panic(fmt.Errorf("unexpected error from NewResponse: %w", respErr))
+ }
+ result.response <- resp
+ return result
+ }
+ outgoing[result.id] = result.response
+ c.outgoing <- outgoing
// now we are ready to send
if err := c.write(ctx, call); err != nil {
// sending failed, we will never get a response, so deliver a fake one
@@ -192,8 +221,8 @@
// returned, or a call that failed to send in the first place.
func (a *AsyncCall) IsReady() bool {
select {
- case r := <-a.resultBox:
- a.resultBox <- r
+ case r := <-a.result:
+ a.result <- r
return true
default:
return false
@@ -216,14 +245,14 @@
r.result = response.Result
event.Label(ctx, tag.StatusCode.Of("OK"))
}
- case r = <-a.resultBox:
+ case r = <-a.result:
// result already available
case <-ctx.Done():
event.Label(ctx, tag.StatusCode.Of("CANCELLED"))
return ctx.Err()
}
// refill the box for the next caller
- a.resultBox <- r
+ a.result <- r
// and unpack the result
if r.err != nil {
return r.err
@@ -239,8 +268,8 @@
// Respond must be called exactly once for any message for which a handler
// returns ErrAsyncResponse. It must not be called for any other message.
func (c *Connection) Respond(id ID, result interface{}, rerr error) error {
- pending := <-c.incomingBox
- defer func() { c.incomingBox <- pending }()
+ pending := <-c.incoming
+ defer func() { c.incoming <- pending }()
entry, found := pending[id]
if !found {
return nil
@@ -257,8 +286,8 @@
// not cause any messages that have not arrived yet with that ID to be
// cancelled.
func (c *Connection) Cancel(id ID) {
- pending := <-c.incomingBox
- defer func() { c.incomingBox <- pending }()
+ pending := <-c.incoming
+ defer func() { c.incoming <- pending }()
if entry, found := pending[id]; found && entry.cancel != nil {
entry.cancel()
entry.cancel = nil
@@ -275,28 +304,40 @@
// This does not cancel in flight requests, but waits for them to gracefully complete.
func (c *Connection) Close() error {
// close the underlying stream
- if err := c.closer.Close(); err != nil && !isClosingError(err) {
- return err
- }
+ c.closeOnce.Do(func() {
+ if err := c.closer.Close(); err != nil {
+ c.async.setError(err)
+ }
+ })
// and then wait for it to cause the connection to close
- if err := c.Wait(); err != nil && !isClosingError(err) {
- return err
- }
- return nil
+ return c.Wait()
}
// readIncoming collects inbound messages from the reader and delivers them, either responding
// to outgoing calls or feeding requests to the queue.
-func (c *Connection) readIncoming(ctx context.Context, reader Reader, toQueue chan<- *incoming) {
- defer close(toQueue)
+func (c *Connection) readIncoming(ctx context.Context, reader Reader, toQueue chan<- *incoming) (err error) {
+ defer func() {
+ // Retire any outgoing requests that were still in flight.
+ // With the Reader no longer being processed, they necessarily cannot receive a response.
+ outgoing := <-c.outgoing
+ close(c.outgoing) // Prevent new outgoing requests, which would deadlock.
+ for id, response := range outgoing {
+ response <- &Response{ID: id, Error: err}
+ }
+
+ close(toQueue)
+ }()
+
for {
// get the next message
// no lock is needed, this is the only reader
msg, n, err := reader.Read(ctx)
if err != nil {
// The stream failed, we cannot continue
- c.async.setError(err)
- return
+ if !isClosingError(err) {
+ c.async.setError(err)
+ }
+ return err
}
switch msg := msg.(type) {
case *Request:
@@ -320,9 +361,9 @@
// if the request is a call, add it to the incoming map so it can be
// cancelled by id
if msg.IsCall() {
- pending := <-c.incomingBox
+ pending := <-c.incoming
pending[msg.ID] = entry
- c.incomingBox <- pending
+ c.incoming <- pending
}
// send the message to the incoming queue
toQueue <- entry
@@ -335,12 +376,12 @@
}
func (c *Connection) incomingResponse(msg *Response) {
- pending := <-c.outgoingBox
- response, ok := pending[msg.ID]
- if ok {
- delete(pending, msg.ID)
+ var response chan<- *Response
+ if outgoing, ok := <-c.outgoing; ok {
+ response = outgoing[msg.ID]
+ delete(outgoing, msg.ID)
+ c.outgoing <- outgoing
}
- c.outgoingBox <- pending
if response != nil {
response <- msg
}
@@ -395,7 +436,23 @@
}
func (c *Connection) deliverMessages(ctx context.Context, handler Handler, fromQueue <-chan *incoming) {
- defer c.async.done()
+ defer func() {
+ // Close the underlying ReadWriteCloser if not already closed. We're about
+ // to mark the Connection as done, so we'd better actually be done! 😅
+ //
+ // TODO(bcmills): This is actually a bit premature, since we may have
+ // asynchronous handlers still in flight at this point, but it's at least no
+ // more premature than calling c.async.done at this point (which we were
+ // already doing). This will get a proper fix in https://go.dev/cl/388134.
+ c.closeOnce.Do(func() {
+ if err := c.closer.Close(); err != nil {
+ c.async.setError(err)
+ }
+ })
+
+ c.async.done()
+ }()
+
for entry := range fromQueue {
// cancel any messages in the queue that we have a pending cancel for
var result interface{}
@@ -420,8 +477,8 @@
func (c *Connection) reply(entry *incoming, result interface{}, rerr error) {
if entry.request.IsCall() {
// we have a call finishing, remove it from the incoming map
- pending := <-c.incomingBox
- defer func() { c.incomingBox <- pending }()
+ pending := <-c.incoming
+ defer func() { c.incoming <- pending }()
delete(pending, entry.request.ID)
}
if err := c.respond(entry, result, rerr); err != nil {
@@ -479,8 +536,8 @@
// write is used by all things that write outgoing messages, including replies.
// it makes sure that writes are atomic
func (c *Connection) write(ctx context.Context, msg Message) error {
- writer := <-c.writerBox
- defer func() { c.writerBox <- writer }()
+ writer := <-c.writer
+ defer func() { c.writer <- writer }()
n, err := writer.Write(ctx, msg)
event.Metric(ctx, tag.SentBytes.Of(n))
return err
diff --git a/internal/jsonrpc2_v2/jsonrpc2.go b/internal/jsonrpc2_v2/jsonrpc2.go
index e685584..e9164b0 100644
--- a/internal/jsonrpc2_v2/jsonrpc2.go
+++ b/internal/jsonrpc2_v2/jsonrpc2.go
@@ -47,6 +47,15 @@
Preempt(ctx context.Context, req *Request) (result interface{}, err error)
}
+// A PreempterFunc implements the Preempter interface for a standalone Preempt function.
+type PreempterFunc func(ctx context.Context, req *Request) (interface{}, error)
+
+func (f PreempterFunc) Preempt(ctx context.Context, req *Request) (interface{}, error) {
+ return f(ctx, req)
+}
+
+var _ Preempter = PreempterFunc(nil)
+
// Handler handles messages on a connection.
type Handler interface {
// Handle is invoked sequentially for each incoming request that has not
@@ -75,12 +84,15 @@
return nil, ErrNotHandled
}
+// A HandlerFunc implements the Handler interface for a standalone Handle function.
type HandlerFunc func(ctx context.Context, req *Request) (interface{}, error)
func (f HandlerFunc) Handle(ctx context.Context, req *Request) (interface{}, error) {
return f(ctx, req)
}
+var _ Handler = HandlerFunc(nil)
+
// async is a small helper for operations with an asynchronous result that you
// can wait for.
type async struct {
diff --git a/internal/jsonrpc2_v2/jsonrpc2_test.go b/internal/jsonrpc2_v2/jsonrpc2_test.go
index 8e90c23..b2fa496 100644
--- a/internal/jsonrpc2_v2/jsonrpc2_test.go
+++ b/internal/jsonrpc2_v2/jsonrpc2_test.go
@@ -11,7 +11,6 @@
"path"
"reflect"
"testing"
- "time"
"golang.org/x/tools/internal/event/export/eventtest"
jsonrpc2 "golang.org/x/tools/internal/jsonrpc2_v2"
@@ -77,7 +76,7 @@
type handler struct {
conn *jsonrpc2.Connection
accumulator int
- waitersBox chan map[string]chan struct{}
+ waiters chan map[string]chan struct{}
calls map[string]*jsonrpc2.AsyncCall
}
@@ -256,11 +255,11 @@
func (b binder) Bind(ctx context.Context, conn *jsonrpc2.Connection) (jsonrpc2.ConnectionOptions, error) {
h := &handler{
- conn: conn,
- waitersBox: make(chan map[string]chan struct{}, 1),
- calls: make(map[string]*jsonrpc2.AsyncCall),
+ conn: conn,
+ waiters: make(chan map[string]chan struct{}, 1),
+ calls: make(map[string]*jsonrpc2.AsyncCall),
}
- h.waitersBox <- make(map[string]chan struct{})
+ h.waiters <- make(map[string]chan struct{})
if b.runTest != nil {
go b.runTest(h)
}
@@ -272,8 +271,8 @@
}
func (h *handler) waiter(name string) chan struct{} {
- waiters := <-h.waitersBox
- defer func() { h.waitersBox <- waiters }()
+ waiters := <-h.waiters
+ defer func() { h.waiters <- waiters }()
waiter, found := waiters[name]
if !found {
waiter = make(chan struct{})
@@ -370,8 +369,6 @@
return true, nil
case <-ctx.Done():
return nil, ctx.Err()
- case <-time.After(time.Second):
- return nil, fmt.Errorf("wait for %q timed out", name)
}
case "fork":
var name string
@@ -385,8 +382,6 @@
h.conn.Respond(req.ID, true, nil)
case <-ctx.Done():
h.conn.Respond(req.ID, nil, ctx.Err())
- case <-time.After(time.Second):
- h.conn.Respond(req.ID, nil, fmt.Errorf("wait for %q timed out", name))
}
}()
return nil, jsonrpc2.ErrAsyncResponse
diff --git a/internal/jsonrpc2_v2/serve.go b/internal/jsonrpc2_v2/serve.go
index 646267b..5ffce68 100644
--- a/internal/jsonrpc2_v2/serve.go
+++ b/internal/jsonrpc2_v2/serve.go
@@ -7,6 +7,7 @@
import (
"context"
"errors"
+ "fmt"
"io"
"runtime"
"strings"
@@ -171,80 +172,121 @@
}
// NewIdleListener wraps a listener with an idle timeout.
-// When there are no active connections for at least the timeout duration a
-// call to accept will fail with ErrIdleTimeout.
+//
+// When there are no active connections for at least the timeout duration,
+// calls to Accept will fail with ErrIdleTimeout.
+//
+// A connection is considered inactive as soon as its Close method is called.
func NewIdleListener(timeout time.Duration, wrap Listener) Listener {
l := &idleListener{
- timeout: timeout,
- wrapped: wrap,
- newConns: make(chan *idleCloser),
- closed: make(chan struct{}),
- wasTimeout: make(chan struct{}),
+ wrapped: wrap,
+ timeout: timeout,
+ active: make(chan int, 1),
+ timedOut: make(chan struct{}),
+ idleTimer: make(chan *time.Timer, 1),
}
- go l.run()
+ l.idleTimer <- time.AfterFunc(l.timeout, l.timerExpired)
return l
}
type idleListener struct {
- wrapped Listener
- timeout time.Duration
- newConns chan *idleCloser
- closed chan struct{}
- wasTimeout chan struct{}
- closeOnce sync.Once
+ wrapped Listener
+ timeout time.Duration
+
+ // Only one of these channels is receivable at any given time.
+ active chan int // count of active connections; closed when Close is called if not timed out
+ timedOut chan struct{} // closed when the idle timer expires
+ idleTimer chan *time.Timer // holds the timer only when idle
}
-type idleCloser struct {
- wrapped io.ReadWriteCloser
- closed chan struct{}
- closeOnce sync.Once
-}
-
-func (c *idleCloser) Read(p []byte) (int, error) {
- n, err := c.wrapped.Read(p)
- if err != nil && isClosingError(err) {
- c.closeOnce.Do(func() { close(c.closed) })
- }
- return n, err
-}
-
-func (c *idleCloser) Write(p []byte) (int, error) {
- // we do not close on write failure, we rely on the wrapped writer to do that
- // if it is appropriate, which we will detect in the next read.
- return c.wrapped.Write(p)
-}
-
-func (c *idleCloser) Close() error {
- // we rely on closing the wrapped stream to signal to the next read that we
- // are closed, rather than triggering the closed signal directly
- return c.wrapped.Close()
-}
-
+// Accept accepts an incoming connection.
+//
+// If an incoming connection is accepted concurrent to the listener being closed
+// due to idleness, the new connection is immediately closed.
func (l *idleListener) Accept(ctx context.Context) (io.ReadWriteCloser, error) {
rwc, err := l.wrapped.Accept(ctx)
- if err != nil {
- if isClosingError(err) {
- // underlying listener was closed
- l.closeOnce.Do(func() { close(l.closed) })
- // was it closed because of the idle timeout?
- select {
- case <-l.wasTimeout:
- err = ErrIdleTimeout
- default:
- }
- }
+
+ if err != nil && !isClosingError(err) {
return nil, err
}
- conn := &idleCloser{
- wrapped: rwc,
- closed: make(chan struct{}),
+
+ select {
+ case n, ok := <-l.active:
+ if err != nil {
+ if ok {
+ l.active <- n
+ }
+ return nil, err
+ }
+ if ok {
+ l.active <- n + 1
+ } else {
+ // l.wrapped.Close has been called, but Accept returned a
+ // connection. This race can occur with concurrent Accept and Close calls
+ // with any net.Listener, and it is benign: since the listener was closed
+ // explicitly, it can't have also timed out.
+ }
+ return l.newConn(rwc), nil
+
+ case <-l.timedOut:
+ if err == nil {
+ // Keeping the connection open would leave the listener simultaneously
+ // active and closed due to idleness, which would be contradictory and
+ // confusing. Close the connection and pretend that it never happened.
+ rwc.Close()
+ }
+ return nil, ErrIdleTimeout
+
+ case timer := <-l.idleTimer:
+ if err != nil {
+ // The idle timer hasn't run yet, so err can't be ErrIdleTimeout.
+ // Leave the idle timer as it was and return whatever error we got.
+ l.idleTimer <- timer
+ return nil, err
+ }
+
+ if !timer.Stop() {
+ // Failed to stop the timer — the timer goroutine is in the process of
+ // firing. Send the timer back to the timer goroutine so that it can
+ // safely close the timedOut channel, and then wait for the listener to
+ // actually be closed before we return ErrIdleTimeout.
+ l.idleTimer <- timer
+ rwc.Close()
+ <-l.timedOut
+ return nil, ErrIdleTimeout
+ }
+
+ l.active <- 1
+ return l.newConn(rwc), nil
}
- l.newConns <- conn
- return conn, err
}
func (l *idleListener) Close() error {
- defer l.closeOnce.Do(func() { close(l.closed) })
+ select {
+ case _, ok := <-l.active:
+ if ok {
+ close(l.active)
+ }
+
+ case <-l.timedOut:
+ // Already closed by the timer; take care not to double-close if the caller
+ // only explicitly invokes this Close method once, since the io.Closer
+ // interface explicitly leaves doubled Close calls undefined.
+ return ErrIdleTimeout
+
+ case timer := <-l.idleTimer:
+ if !timer.Stop() {
+ // Couldn't stop the timer. It shouldn't take long to run, so just wait
+ // (so that the Listener is guaranteed to be closed before we return)
+ // and pretend that this call happened afterward.
+ // That way we won't leak any timers or goroutines when Close returns.
+ l.idleTimer <- timer
+ <-l.timedOut
+ return ErrIdleTimeout
+ }
+ close(l.active)
+ }
+
return l.wrapped.Close()
}
@@ -252,31 +294,83 @@
return l.wrapped.Dialer()
}
-func (l *idleListener) run() {
- var conns []*idleCloser
- for {
- var firstClosed chan struct{} // left at nil if there are no active conns
- var timeout <-chan time.Time // left at nil if there are active conns
- if len(conns) > 0 {
- firstClosed = conns[0].closed
+func (l *idleListener) timerExpired() {
+ select {
+ case n, ok := <-l.active:
+ if ok {
+ panic(fmt.Sprintf("jsonrpc2: idleListener idle timer fired with %d connections still active", n))
} else {
- timeout = time.After(l.timeout)
+ panic("jsonrpc2: Close finished with idle timer still running")
}
- select {
- case <-l.closed:
- // the main listener closed, no need to keep going
- return
- case conn := <-l.newConns:
- // a new conn arrived, add it to the list
- conns = append(conns, conn)
- case <-timeout:
- // we timed out, only happens when there are no active conns
- // close the underlying listener, and allow the normal closing process to happen
- close(l.wasTimeout)
- l.wrapped.Close()
- case <-firstClosed:
- // a conn closed, remove it from the active list
- conns = conns[:copy(conns, conns[1:])]
- }
+
+ case <-l.timedOut:
+ panic("jsonrpc2: idleListener idle timer fired more than once")
+
+ case <-l.idleTimer:
+ // The timer for this very call!
}
+
+ // Close the Listener with all channels still blocked to ensure that this call
+ // to l.wrapped.Close doesn't race with the one in l.Close.
+ defer close(l.timedOut)
+ l.wrapped.Close()
+}
+
+func (l *idleListener) connClosed() {
+ select {
+ case n, ok := <-l.active:
+ if !ok {
+ // l is already closed, so it can't close due to idleness,
+ // and we don't need to track the number of active connections any more.
+ return
+ }
+ n--
+ if n == 0 {
+ l.idleTimer <- time.AfterFunc(l.timeout, l.timerExpired)
+ } else {
+ l.active <- n
+ }
+
+ case <-l.timedOut:
+ panic("jsonrpc2: idleListener idle timer fired before last active connection was closed")
+
+ case <-l.idleTimer:
+ panic("jsonrpc2: idleListener idle timer active before last active connection was closed")
+ }
+}
+
+type idleListenerConn struct {
+ wrapped io.ReadWriteCloser
+ l *idleListener
+ closeOnce sync.Once
+}
+
+func (l *idleListener) newConn(rwc io.ReadWriteCloser) *idleListenerConn {
+ c := &idleListenerConn{
+ wrapped: rwc,
+ l: l,
+ }
+
+ // A caller that forgets to call Close may disrupt the idleListener's
+ // accounting, even though the file descriptor for the underlying connection
+ // may eventually be garbage-collected anyway.
+ //
+ // Set a (best-effort) finalizer to verify that a Close call always occurs.
+ // (We will clear the finalizer explicitly in Close.)
+ runtime.SetFinalizer(c, func(c *idleListenerConn) {
+ panic("jsonrpc2: IdleListener connection became unreachable without a call to Close")
+ })
+
+ return c
+}
+
+func (c *idleListenerConn) Read(p []byte) (int, error) { return c.wrapped.Read(p) }
+func (c *idleListenerConn) Write(p []byte) (int, error) { return c.wrapped.Write(p) }
+
+func (c *idleListenerConn) Close() error {
+ defer c.closeOnce.Do(func() {
+ c.l.connClosed()
+ runtime.SetFinalizer(c, nil)
+ })
+ return c.wrapped.Close()
}
diff --git a/internal/jsonrpc2_v2/serve_test.go b/internal/jsonrpc2_v2/serve_test.go
index 26cf6a5..f0c27a8 100644
--- a/internal/jsonrpc2_v2/serve_test.go
+++ b/internal/jsonrpc2_v2/serve_test.go
@@ -7,6 +7,8 @@
import (
"context"
"errors"
+ "fmt"
+ "runtime/debug"
"testing"
"time"
@@ -16,48 +18,118 @@
func TestIdleTimeout(t *testing.T) {
stacktest.NoLeak(t)
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- listener, err := jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{})
- if err != nil {
- t.Fatal(err)
- }
- listener = jsonrpc2.NewIdleListener(100*time.Millisecond, listener)
- defer listener.Close()
- server, err := jsonrpc2.Serve(ctx, listener, jsonrpc2.ConnectionOptions{})
- if err != nil {
- t.Fatal(err)
- }
+ // Use a panicking time.AfterFunc instead of context.WithTimeout so that we
+ // get a goroutine dump on failure. We expect the test to take on the order of
+ // a few tens of milliseconds at most, so 10s should be several orders of
+ // magnitude of headroom.
+ timer := time.AfterFunc(10*time.Second, func() {
+ debug.SetTraceback("all")
+ panic("TestIdleTimeout deadlocked")
+ })
+ defer timer.Stop()
- connect := func() *jsonrpc2.Connection {
- client, err := jsonrpc2.Dial(ctx,
- listener.Dialer(),
- jsonrpc2.ConnectionOptions{})
+ ctx := context.Background()
+
+ try := func(d time.Duration) (longEnough bool) {
+ listener, err := jsonrpc2.NetListener(ctx, "tcp", "localhost:0", jsonrpc2.NetListenOptions{})
if err != nil {
t.Fatal(err)
}
- return client
- }
- // Exercise some connection/disconnection patterns, and then assert that when
- // our timer fires, the server exits.
- conn1 := connect()
- conn2 := connect()
- if err := conn1.Close(); err != nil {
- t.Fatalf("conn1.Close failed with error: %v", err)
- }
- if err := conn2.Close(); err != nil {
- t.Fatalf("conn2.Close failed with error: %v", err)
- }
- conn3 := connect()
- if err := conn3.Close(); err != nil {
- t.Fatalf("conn3.Close failed with error: %v", err)
+
+ idleStart := time.Now()
+ listener = jsonrpc2.NewIdleListener(d, listener)
+ defer listener.Close()
+
+ server, err := jsonrpc2.Serve(ctx, listener, jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Exercise some connection/disconnection patterns, and then assert that when
+ // our timer fires, the server exits.
+ conn1, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn1 failed to connect after %v: %v", since, err)
+ }
+ t.Log("jsonrpc2.Dial:", err)
+ return false // Took too long to dial, so the failure could have been due to the idle timeout.
+ }
+ // On the server side, Accept can race with the connection timing out.
+ // Send a call and wait for the response to ensure that the connection was
+ // actually fully accepted.
+ ac := conn1.Call(ctx, "ping", nil)
+ if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn1 broken after %v: %v", since, err)
+ }
+ t.Log(`conn1.Call(ctx, "ping", nil):`, err)
+ conn1.Close()
+ return false
+ }
+
+ conn2, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ conn1.Close()
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn2 failed to connect while non-idle: %v", err)
+ }
+ t.Log("jsonrpc2.Dial:", err)
+ return false
+ }
+
+ if err := conn1.Close(); err != nil {
+ t.Fatalf("conn1.Close failed with error: %v", err)
+ }
+ idleStart = time.Now()
+ if err := conn2.Close(); err != nil {
+ t.Fatalf("conn2.Close failed with error: %v", err)
+ }
+
+ conn3, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ if err != nil {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn3 failed to connect after %v: %v", since, err)
+ }
+ t.Log("jsonrpc2.Dial:", err)
+ return false // Took too long to dial, so the failure could have been due to the idle timeout.
+ }
+
+ ac = conn3.Call(ctx, "ping", nil)
+ if err := ac.Await(ctx, nil); !errors.Is(err, jsonrpc2.ErrMethodNotFound) {
+ if since := time.Since(idleStart); since < d {
+ t.Fatalf("conn3 broken after %v: %v", since, err)
+ }
+ t.Log(`conn3.Call(ctx, "ping", nil):`, err)
+ conn3.Close()
+ return false
+ }
+
+ idleStart = time.Now()
+ if err := conn3.Close(); err != nil {
+ t.Fatalf("conn3.Close failed with error: %v", err)
+ }
+
+ serverError := server.Wait()
+
+ if !errors.Is(serverError, jsonrpc2.ErrIdleTimeout) {
+ t.Errorf("run() returned error %v, want %v", serverError, jsonrpc2.ErrIdleTimeout)
+ }
+ if since := time.Since(idleStart); since < d {
+ t.Errorf("server shut down after %v idle; want at least %v", since, d)
+ }
+ return true
}
- serverError := server.Wait()
-
- if !errors.Is(serverError, jsonrpc2.ErrIdleTimeout) {
- t.Errorf("run() returned error %v, want %v", serverError, jsonrpc2.ErrIdleTimeout)
+ d := 1 * time.Millisecond
+ for {
+ t.Logf("testing with idle timeout %v", d)
+ if !try(d) {
+ d *= 2
+ continue
+ }
+ break
}
}
@@ -78,8 +150,7 @@
func TestServe(t *testing.T) {
stacktest.NoLeak(t)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
+ ctx := context.Background()
tests := []struct {
name string
@@ -116,7 +187,6 @@
}
func newFake(t *testing.T, ctx context.Context, l jsonrpc2.Listener) (*jsonrpc2.Connection, func(), error) {
- l = jsonrpc2.NewIdleListener(100*time.Millisecond, l)
server, err := jsonrpc2.Serve(ctx, l, jsonrpc2.ConnectionOptions{
Handler: fakeHandler{},
})
@@ -142,3 +212,52 @@
server.Wait()
}, nil
}
+
+// TestIdleListenerAcceptCloseRace checks for the Accept/Close race fixed in CL 388597.
+//
+// (A bug in the idleListener implementation caused a successful Accept to block
+// on sending to a background goroutine that could have already exited.)
+func TestIdleListenerAcceptCloseRace(t *testing.T) {
+ ctx := context.Background()
+
+ n := 10
+
+ // Each iteration of the loop appears to take around a millisecond, so to
+ // avoid spurious failures we'll set the watchdog for three orders of
+ // magnitude longer. When the bug was present, this reproduced the deadlock
+ // reliably on a Linux workstation when run with -count=100, which should be
+ // frequent enough to show up on the Go build dashboard if it regresses.
+ watchdog := time.Duration(n) * 1000 * time.Millisecond
+ timer := time.AfterFunc(watchdog, func() {
+ debug.SetTraceback("all")
+ panic(fmt.Sprintf("TestIdleListenerAcceptCloseRace deadlocked after %v", watchdog))
+ })
+ defer timer.Stop()
+
+ for ; n > 0; n-- {
+ listener, err := jsonrpc2.NetPipeListener(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ listener = jsonrpc2.NewIdleListener(24*time.Hour, listener)
+
+ done := make(chan struct{})
+ go func() {
+ conn, err := jsonrpc2.Dial(ctx, listener.Dialer(), jsonrpc2.ConnectionOptions{})
+ listener.Close()
+ if err == nil {
+ conn.Close()
+ }
+ close(done)
+ }()
+
+ // Accept may return a non-nil error if Close closes the underlying network
+ // connection before the wrapped Accept call unblocks. However, it must not
+ // deadlock!
+ c, err := listener.Accept(ctx)
+ if err == nil {
+ c.Close()
+ }
+ <-done
+ }
+}
diff --git a/internal/jsonrpc2_v2/wire.go b/internal/jsonrpc2_v2/wire.go
index 4da129a..c8dc9eb 100644
--- a/internal/jsonrpc2_v2/wire.go
+++ b/internal/jsonrpc2_v2/wire.go
@@ -33,6 +33,10 @@
ErrServerOverloaded = NewError(-32000, "JSON RPC overloaded")
// ErrUnknown should be used for all non coded errors.
ErrUnknown = NewError(-32001, "JSON RPC unknown error")
+ // ErrServerClosing is returned for calls that arrive while the server is closing.
+ ErrServerClosing = NewError(-32002, "JSON RPC server is closing")
+ // ErrClientClosing is a dummy error returned for calls initiated while the client is closing.
+ ErrClientClosing = NewError(-32003, "JSON RPC client is closing")
)
const wireVersion = "2.0"
@@ -72,3 +76,11 @@
func (err *wireError) Error() string {
return err.Message
}
+
+func (err *wireError) Is(other error) bool {
+ w, ok := other.(*wireError)
+ if !ok {
+ return false
+ }
+ return err.Code == w.Code
+}
diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go
index aacb56b..6b4d528 100644
--- a/refactor/satisfy/find.go
+++ b/refactor/satisfy/find.go
@@ -198,7 +198,8 @@
}
}
-func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr, T types.Type) types.Type {
+// builtin visits the arguments of a builtin type with signature sig.
+func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr) {
switch obj.Name() {
case "make", "new":
// skip the type operand
@@ -228,8 +229,6 @@
// ordinary call
f.call(sig, args)
}
-
- return T
}
func (f *Finder) extract(tuple types.Type, i int) types.Type {
@@ -439,12 +438,27 @@
f.assign(tvFun.Type, arg0)
} else {
// function call
+
+ // unsafe call. Treat calls to functions in unsafe like ordinary calls,
+ // except that their signature cannot be determined by their func obj.
+ // Without this special handling, f.expr(e.Fun) would fail below.
+ if s, ok := unparen(e.Fun).(*ast.SelectorExpr); ok {
+ if obj, ok := f.info.Uses[s.Sel].(*types.Builtin); ok && obj.Pkg().Path() == "unsafe" {
+ sig := f.info.Types[e.Fun].Type.(*types.Signature)
+ f.call(sig, e.Args)
+ return tv.Type
+ }
+ }
+
+ // builtin call
if id, ok := unparen(e.Fun).(*ast.Ident); ok {
if obj, ok := f.info.Uses[id].(*types.Builtin); ok {
sig := f.info.Types[id].Type.(*types.Signature)
- return f.builtin(obj, sig, e.Args, tv.Type)
+ f.builtin(obj, sig, e.Args)
+ return tv.Type
}
}
+
// ordinary call
f.call(coreType(f.expr(e.Fun)).(*types.Signature), e.Args)
}
diff --git a/refactor/satisfy/find_test.go b/refactor/satisfy/find_test.go
index 234bce9..35a1e87 100644
--- a/refactor/satisfy/find_test.go
+++ b/refactor/satisfy/find_test.go
@@ -7,6 +7,7 @@
import (
"fmt"
"go/ast"
+ "go/importer"
"go/parser"
"go/token"
"go/types"
@@ -27,6 +28,8 @@
const src = `package foo
+import "unsafe"
+
type I interface { f() }
type impl struct{}
@@ -53,6 +56,7 @@
type S struct{impl}
type T struct{impl}
type U struct{impl}
+type V struct{impl}
type Generic[T any] struct{impl}
func (Generic[T]) g(T) {}
@@ -153,8 +157,13 @@
type Gen2[T any] struct{}
func (f Gen2[T]) g(string) { global = f } // GI[string] <- Gen2[T]
-var global GI[string]
+var global GI[string]
+func _() {
+ var x [3]V
+ // golang/go#56227: the finder should visit calls in the unsafe package.
+ _ = unsafe.Slice(&x[0], func() int { var _ I = x[0]; return 3 }()) // I <- V
+}
`
got := constraints(t, src)
want := []string{
@@ -184,6 +193,7 @@
"p.I <- p.S",
"p.I <- p.T",
"p.I <- p.U",
+ "p.I <- p.V",
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("found unexpected constraints: got %s, want %s", got, want)
@@ -209,7 +219,9 @@
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
typeparams.InitInstanceInfo(info)
- conf := types.Config{}
+ conf := types.Config{
+ Importer: importer.Default(),
+ }
if _, err := conf.Check("p", fset, files, info); err != nil {
t.Fatal(err) // type error
}