[release-branch.go1.19] all: merge master (b2b8872) into release-branch.go1.19

Merge List:

+ 2022-07-12 b2b8872c87 compress/gzip: fix stack exhaustion bug in Reader.Read
+ 2022-07-12 ac68c6c683 path/filepath: fix stack exhaustion in Glob
+ 2022-07-12 fa2d41d0ca io/fs: fix stack exhaustion in Glob
+ 2022-07-12 6fa37e98ea encoding/gob: add a depth limit for ignored fields
+ 2022-07-12 695be961d5 go/parser: limit recursion depth
+ 2022-07-12 08c46ed43d encoding/xml: use iterative Skip, rather than recursive
+ 2022-07-12 c4c1993fd2 encoding/xml: limit depth of nesting in unmarshal
+ 2022-07-12 913d05133c cmd/go: avoid spurious readdir during fsys.Walk
+ 2022-07-12 d3d7998756 net/http: clarify that MaxBytesReader returns *MaxBytesError
+ 2022-07-11 126c22a098 syscall: gofmt after CL 412114
+ 2022-07-11 123a6328b7 internal/trace: don't report regions on system goroutines
+ 2022-07-11 846490110a runtime/race: update amd64 syso images to avoid sse4
+ 2022-07-11 b75ad09cae cmd/trace: fix typo in web documentation
+ 2022-07-11 7510e597de cmd/go: make module index loading O(1)
+ 2022-07-11 b8bf820d5d cmd/nm: don't rely on an erroneous install target in tests
+ 2022-07-11 ad641e8521 misc/cgo/testcarchive: don't rely on an erroneous install target in tests
+ 2022-07-11 bf5898ef53 net/url: use EscapedPath for url.JoinPath
+ 2022-07-11 398dcd1cf0 database/sql: make TestTxContextWaitNoDiscard test more robust
+ 2022-07-11 f956941b0f cmd/go: use package index for std in load.loadPackageData
+ 2022-07-11 59ab6f351a net/http: remove Content-Encoding in writeNotModified
+ 2022-07-08 c1a4e0fe01 cmd/compile: fix libfuzzer instrumentation line number
+ 2022-07-08 5c1a13e7a4 cmd/go: avoid setting variables for '/' and ':' in TestScript subprocess environments
+ 2022-07-08 180bcad33d net/http: wait for listeners to exit in Server.Close and Shutdown
+ 2022-07-08 14abe8aa73 cmd/compile: don't convert to interface{} for un-comparable types in generic switch
+ 2022-07-07 1ebc983000 runtime: overestimate the amount of allocated memory in heapLive
+ 2022-07-07 c177d9d98a crypto/x509: restrict CRL number to <=20 octets
+ 2022-07-07 486fc01770 crypto/x509: correctly parse CRL entry extensions
+ 2022-07-07 8ac58de185 crypto/x509: populate Number and AKI of parsed CRLs
+ 2022-07-07 0c7fcf6bd1 cmd/link: explicitly disable PIE for windows/amd64 -race mode
+ 2022-07-07 eaf2125654 cmd/go: default to "exe" build mode for windows -race
+ 2022-07-06 1243ec9c17 cmd/compile: only check implicit dots for method call enabled by a type bound
+ 2022-07-06 c391156f96 cmd/go: set up git identity for build_buildvcs_auto.txt

Change-Id: Ib2b544e080fc7fce20614d4ed310767c2591931f
diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go
index d36b97b..c409c31 100644
--- a/misc/cgo/testcarchive/carchive_test.go
+++ b/misc/cgo/testcarchive/carchive_test.go
@@ -205,6 +205,7 @@
 func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
 	t.Helper()
 	cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
+	cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
 	t.Log(buildcmd)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Logf("%s", out)
@@ -238,7 +239,7 @@
 	binArgs := append(cmdToRun(exe), "arg1", "arg2")
 	cmd = exec.Command(binArgs[0], binArgs[1:]...)
 	if runtime.Compiler == "gccgo" {
-		cmd.Env = append(os.Environ(), "GCCGO=1")
+		cmd.Env = append(cmd.Environ(), "GCCGO=1")
 	}
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Logf("%s", out)
@@ -822,9 +823,15 @@
 		t.Skipf("skipping PIE test on %s", GOOS)
 	}
 
+	libgoa := "libgo.a"
+	if runtime.Compiler == "gccgo" {
+		libgoa = "liblibgo.a"
+	}
+
 	if !testWork {
 		defer func() {
 			os.Remove("testp" + exeSuffix)
+			os.Remove(libgoa)
 			os.RemoveAll(filepath.Join(GOPATH, "pkg"))
 		}()
 	}
@@ -837,18 +844,13 @@
 	// be running this test in a GOROOT owned by root.)
 	genHeader(t, "p.h", "./p")
 
-	cmd := exec.Command("go", "install", "-buildmode=c-archive", "./libgo")
+	cmd := exec.Command("go", "build", "-buildmode=c-archive", "./libgo")
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Logf("%s", out)
 		t.Fatal(err)
 	}
 
-	libgoa := "libgo.a"
-	if runtime.Compiler == "gccgo" {
-		libgoa = "liblibgo.a"
-	}
-
-	ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join(libgodir, libgoa))
+	ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", libgoa)
 	if runtime.Compiler == "gccgo" {
 		ccArgs = append(ccArgs, "-lgo")
 	}
@@ -1035,6 +1037,7 @@
 	buildcmd := []string{"go", "install", "-buildmode=c-archive", "./libgo"}
 
 	cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
+	cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
 	t.Log(buildcmd)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Logf("%s", out)
@@ -1050,6 +1053,7 @@
 	}
 
 	cmd = exec.Command(buildcmd[0], buildcmd[1:]...)
+	cmd.Env = append(cmd.Environ(), "GO111MODULE=off")
 	t.Log(buildcmd)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		t.Logf("%s", out)
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index cf2f0b3..1534a1f 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -1214,6 +1214,9 @@
 			if m.Tag != nil && m.Tag.Op() == ir.OTYPESW {
 				break // Nothing to do here for type switches.
 			}
+			if m.Tag != nil && !types.IsComparable(m.Tag.Type()) {
+				break // Nothing to do here for un-comparable types.
+			}
 			if m.Tag != nil && !m.Tag.Type().IsEmptyInterface() && m.Tag.Type().HasShape() {
 				// To implement a switch on a value that is or has a type parameter, we first convert
 				// that thing we're switching on to an interface{}.
@@ -1654,12 +1657,14 @@
 				se := call.X.(*ir.SelectorExpr)
 				if se.X.Type().IsShape() {
 					// This is a method call enabled by a type bound.
-
-					// We need this extra check for method expressions,
-					// which don't add in the implicit XDOTs.
-					tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
-					tmpse = typecheck.AddImplicitDots(tmpse)
-					tparam := tmpse.X.Type()
+					tparam := se.X.Type()
+					if call.X.Op() == ir.ODOTMETH {
+						// We need this extra check for method expressions,
+						// which don't add in the implicit XDOTs.
+						tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
+						tmpse = typecheck.AddImplicitDots(tmpse)
+						tparam = tmpse.X.Type()
+					}
 					if !tparam.IsShape() {
 						// The method expression is not
 						// really on a typeparam.
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 8d1089d..2d1e882 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -63,7 +63,7 @@
 		s := fmt.Sprintf("\nbefore order %v", fn.Sym())
 		ir.DumpList(s, fn.Body)
 	}
-
+	ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
 	orderBlock(&fn.Body, map[string][]*ir.Name{})
 }
 
@@ -477,6 +477,12 @@
 // and then replaces the old slice in n with the new slice.
 // free is a map that can be used to obtain temporary variables by type.
 func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+	if len(*n) != 0 {
+		// Set reasonable position for instrumenting code. See issue 53688.
+		// It would be nice if ir.Nodes had a position (the opening {, probably),
+		// but it doesn't. So we use the first statement's position instead.
+		ir.SetPos((*n)[0])
+	}
 	var order orderState
 	order.free = free
 	mark := order.markTemp()
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index b39a62f..c100316 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -1363,6 +1363,15 @@
 	}
 }
 
+func pathEnvName() string {
+	switch runtime.GOOS {
+	case "plan9":
+		return "path"
+	default:
+		return "PATH"
+	}
+}
+
 func TestDefaultGOPATH(t *testing.T) {
 	tg := testgo(t)
 	defer tg.cleanup()
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
index 41d0bbf..0d7bef9 100644
--- a/src/cmd/go/internal/fsys/fsys.go
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -6,16 +6,65 @@
 	"encoding/json"
 	"errors"
 	"fmt"
+	"internal/godebug"
 	"io/fs"
 	"io/ioutil"
+	"log"
 	"os"
+	pathpkg "path"
 	"path/filepath"
 	"runtime"
+	"runtime/debug"
 	"sort"
 	"strings"
+	"sync"
 	"time"
 )
 
+// Trace emits a trace event for the operation and file path to the trace log,
+// but only when $GODEBUG contains gofsystrace=1.
+// The traces are appended to the file named by the $GODEBUG setting gofsystracelog, or else standard error.
+// For debugging, if the $GODEBUG setting gofsystracestack is non-empty, then trace events for paths
+// matching that glob pattern (using path.Match) will be followed by a full stack trace.
+func Trace(op, path string) {
+	if !doTrace {
+		return
+	}
+	traceMu.Lock()
+	defer traceMu.Unlock()
+	fmt.Fprintf(traceFile, "%d gofsystrace %s %s\n", os.Getpid(), op, path)
+	if traceStack != "" {
+		if match, _ := pathpkg.Match(traceStack, path); match {
+			traceFile.Write(debug.Stack())
+		}
+	}
+}
+
+var (
+	doTrace    bool
+	traceStack string
+	traceFile  *os.File
+	traceMu    sync.Mutex
+)
+
+func init() {
+	if godebug.Get("gofsystrace") != "1" {
+		return
+	}
+	doTrace = true
+	traceStack = godebug.Get("gofsystracestack")
+	if f := godebug.Get("gofsystracelog"); f != "" {
+		// Note: No buffering on writes to this file, so no need to worry about closing it at exit.
+		var err error
+		traceFile, err = os.OpenFile(f, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+		if err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		traceFile = os.Stderr
+	}
+}
+
 // OverlayFile is the path to a text file in the OverlayJSON format.
 // It is the value of the -overlay flag.
 var OverlayFile string
@@ -86,6 +135,7 @@
 		return nil
 	}
 
+	Trace("ReadFile", OverlayFile)
 	b, err := os.ReadFile(OverlayFile)
 	if err != nil {
 		return fmt.Errorf("reading overlay file: %v", err)
@@ -191,6 +241,7 @@
 // IsDir returns true if path is a directory on disk or in the
 // overlay.
 func IsDir(path string) (bool, error) {
+	Trace("IsDir", path)
 	path = canonicalize(path)
 
 	if _, ok := parentIsOverlayFile(path); ok {
@@ -260,6 +311,7 @@
 // ReadDir provides a slice of fs.FileInfo entries corresponding
 // to the overlaid files in the directory.
 func ReadDir(dir string) ([]fs.FileInfo, error) {
+	Trace("ReadDir", dir)
 	dir = canonicalize(dir)
 	if _, ok := parentIsOverlayFile(dir); ok {
 		return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
@@ -327,11 +379,17 @@
 
 // Open opens the file at or overlaid on the given path.
 func Open(path string) (*os.File, error) {
-	return OpenFile(path, os.O_RDONLY, 0)
+	Trace("Open", path)
+	return openFile(path, os.O_RDONLY, 0)
 }
 
 // OpenFile opens the file at or overlaid on the given path with the flag and perm.
 func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
+	Trace("OpenFile", path)
+	return openFile(path, flag, perm)
+}
+
+func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
 	cpath := canonicalize(path)
 	if node, ok := overlay[cpath]; ok {
 		// Opening a file in the overlay.
@@ -360,6 +418,7 @@
 // IsDirWithGoFiles reports whether dir is a directory containing Go files
 // either on disk or in the overlay.
 func IsDirWithGoFiles(dir string) (bool, error) {
+	Trace("IsDirWithGoFiles", dir)
 	fis, err := ReadDir(dir)
 	if os.IsNotExist(err) || errors.Is(err, errNotDir) {
 		return false, nil
@@ -405,28 +464,20 @@
 // walk recursively descends path, calling walkFn. Copied, with some
 // modifications from path/filepath.walk.
 func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
-	if !info.IsDir() {
-		return walkFn(path, info, nil)
+	if err := walkFn(path, info, nil); err != nil || !info.IsDir() {
+		return err
 	}
 
-	fis, readErr := ReadDir(path)
-	walkErr := walkFn(path, info, readErr)
-	// If readErr != nil, walk can't walk into this directory.
-	// walkErr != nil means walkFn want walk to skip this directory or stop walking.
-	// Therefore, if one of readErr and walkErr isn't nil, walk will return.
-	if readErr != nil || walkErr != nil {
-		// The caller's behavior is controlled by the return value, which is decided
-		// by walkFn. walkFn may ignore readErr and return nil.
-		// If walkFn returns SkipDir, it will be handled by the caller.
-		// So walk should return whatever walkFn returns.
-		return walkErr
+	fis, err := ReadDir(path)
+	if err != nil {
+		return walkFn(path, info, err)
 	}
 
 	for _, fi := range fis {
 		filename := filepath.Join(path, fi.Name())
-		if walkErr = walk(filename, fi, walkFn); walkErr != nil {
-			if !fi.IsDir() || walkErr != filepath.SkipDir {
-				return walkErr
+		if err := walk(filename, fi, walkFn); err != nil {
+			if !fi.IsDir() || err != filepath.SkipDir {
+				return err
 			}
 		}
 	}
@@ -436,6 +487,7 @@
 // Walk walks the file tree rooted at root, calling walkFn for each file or
 // directory in the tree, including root.
 func Walk(root string, walkFn filepath.WalkFunc) error {
+	Trace("Walk", root)
 	info, err := Lstat(root)
 	if err != nil {
 		err = walkFn(root, nil, err)
@@ -450,11 +502,13 @@
 
 // lstat implements a version of os.Lstat that operates on the overlay filesystem.
 func Lstat(path string) (fs.FileInfo, error) {
+	Trace("Lstat", path)
 	return overlayStat(path, os.Lstat, "lstat")
 }
 
 // Stat implements a version of os.Stat that operates on the overlay filesystem.
 func Stat(path string) (fs.FileInfo, error) {
+	Trace("Stat", path)
 	return overlayStat(path, os.Stat, "stat")
 }
 
@@ -528,6 +582,7 @@
 
 // Glob is like filepath.Glob but uses the overlay file system.
 func Glob(pattern string) (matches []string, err error) {
+	Trace("Glob", pattern)
 	// Check pattern is well-formed.
 	if _, err := filepath.Match(pattern, ""); err != nil {
 		return nil, err
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index fcb72b0..046f508 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -877,7 +877,14 @@
 			if !cfg.ModulesEnabled {
 				buildMode = build.ImportComment
 			}
-			if modroot := modload.PackageModRoot(ctx, r.path); modroot != "" {
+			modroot := modload.PackageModRoot(ctx, r.path)
+			if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) {
+				modroot = cfg.GOROOTsrc
+				if str.HasPathPrefix(r.dir, cfg.GOROOTsrc+string(filepath.Separator)+"cmd") {
+					modroot += string(filepath.Separator) + "cmd"
+				}
+			}
+			if modroot != "" {
 				if rp, err := modindex.GetPackage(modroot, r.dir); err == nil {
 					data.p, data.err = rp.Import(cfg.BuildContext, buildMode)
 					goto Happy
diff --git a/src/cmd/go/internal/modindex/index_test.go b/src/cmd/go/internal/modindex/index_test.go
new file mode 100644
index 0000000..2c072f9
--- /dev/null
+++ b/src/cmd/go/internal/modindex/index_test.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"go/build"
+	"internal/diff"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+)
+
+func init() {
+	isTest = true
+	enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken
+}
+
+func TestIndex(t *testing.T) {
+	src := filepath.Join(runtime.GOROOT(), "src")
+	checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) {
+		p := m.Package(pkg)
+		bp, err := p.Import(build.Default, build.ImportComment)
+		if err != nil {
+			t.Fatal(err)
+		}
+		bp1, err := build.Default.Import(pkg, filepath.Join(src, pkg), build.ImportComment)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !reflect.DeepEqual(bp, bp1) {
+			t.Errorf("mismatch")
+			t.Logf("index:\n%s", hex.Dump(data))
+
+			js, err := json.MarshalIndent(bp, "", "\t")
+			if err != nil {
+				t.Fatal(err)
+			}
+			js1, err := json.MarshalIndent(bp1, "", "\t")
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1))
+			t.FailNow()
+		}
+	}
+
+	// Check packages in increasing complexity, one at a time.
+	pkgs := []string{
+		"crypto",
+		"encoding",
+		"unsafe",
+		"encoding/json",
+		"runtime",
+		"net",
+	}
+	var raws []*rawPackage
+	for _, pkg := range pkgs {
+		raw := importRaw(src, pkg)
+		raws = append(raws, raw)
+		t.Run(pkg, func(t *testing.T) {
+			data := encodeModuleBytes([]*rawPackage{raw})
+			m, err := fromBytes(src, data)
+			if err != nil {
+				t.Fatal(err)
+			}
+			checkPkg(t, m, pkg, data)
+		})
+	}
+
+	// Check that a multi-package index works too.
+	t.Run("all", func(t *testing.T) {
+		data := encodeModuleBytes(raws)
+		m, err := fromBytes(src, data)
+		if err != nil {
+			t.Fatal(err)
+		}
+		for _, pkg := range pkgs {
+			checkPkg(t, m, pkg, data)
+		}
+	})
+}
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
index 7ee4669..38ddfec 100644
--- a/src/cmd/go/internal/modindex/read.go
+++ b/src/cmd/go/internal/modindex/read.go
@@ -15,7 +15,6 @@
 	"internal/godebug"
 	"internal/goroot"
 	"internal/unsafeheader"
-	"math"
 	"path"
 	"path/filepath"
 	"runtime"
@@ -45,10 +44,9 @@
 // do the equivalent of build.Import of packages in the module and answer other
 // questions based on the index file's data.
 type Module struct {
-	modroot      string
-	od           offsetDecoder
-	packages     map[string]int // offsets of each package
-	packagePaths []string       // paths to package directories relative to modroot; these are the keys of packages
+	modroot string
+	d       *decoder
+	n       int // number of packages
 }
 
 // moduleHash returns an ActionID corresponding to the state of the module
@@ -179,6 +177,7 @@
 		err error
 	}
 	r := mcache.Do(modroot, func() any {
+		fsys.Trace("openIndexModule", modroot)
 		id, err := moduleHash(modroot, ismodcache)
 		if err != nil {
 			return result{nil, err}
@@ -212,6 +211,7 @@
 		err error
 	}
 	r := pcache.Do([2]string{modroot, pkgdir}, func() any {
+		fsys.Trace("openIndexPackage", pkgdir)
 		id, err := dirHash(modroot, pkgdir)
 		if err != nil {
 			return result{nil, err}
@@ -234,110 +234,131 @@
 	return r.pkg, r.err
 }
 
+var errCorrupt = errors.New("corrupt index")
+
+// protect marks the start of a large section of code that accesses the index.
+// It should be used as:
+//
+//	defer unprotect(protect(), &err)
+//
+// It should not be used for trivial accesses which would be
+// dwarfed by the overhead of the defer.
+func protect() bool {
+	return debug.SetPanicOnFault(true)
+}
+
+var isTest = false
+
+// unprotect marks the end of a large section of code that accesses the index.
+// It should be used as:
+//
+//	defer unprotect(protect(), &err)
+//
+// unprotect looks for panics due to errCorrupt or bad mmap accesses.
+// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
+// If errp is nil, unprotect adds the explanatory text but then calls base.Fatalf.
+func unprotect(old bool, errp *error) {
+	// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
+	// that all its errors satisfy this interface, we'll only check for these errors so that
+	// we don't suppress panics that could have been produced from other sources.
+	type addrer interface {
+		Addr() uintptr
+	}
+
+	debug.SetPanicOnFault(old)
+
+	if e := recover(); e != nil {
+		if _, ok := e.(addrer); ok || e == errCorrupt {
+			// This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
+			err := fmt.Errorf("error reading module index: %v", e)
+			if errp != nil {
+				*errp = err
+				return
+			}
+			if isTest {
+				panic(err)
+			}
+			base.Fatalf("%v", err)
+		}
+		// The panic was likely not caused by SetPanicOnFault.
+		panic(e)
+	}
+}
+
 // fromBytes returns a *Module given the encoded representation.
-func fromBytes(moddir string, data []byte) (mi *Module, err error) {
+func fromBytes(moddir string, data []byte) (m *Module, err error) {
 	if !enabled {
 		panic("use of index")
 	}
 
-	// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
-	// that all its errors satisfy this interface, we'll only check for these errors so that
-	// we don't suppress panics that could have been produced from other sources.
-	type addrer interface {
-		Addr() uintptr
+	defer unprotect(protect(), &err)
+
+	if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
+		return nil, errCorrupt
 	}
 
-	// set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
-	// in case it's mmapped (the common case).
-	old := debug.SetPanicOnFault(true)
-	defer func() {
-		debug.SetPanicOnFault(old)
-		if e := recover(); e != nil {
-			if _, ok := e.(addrer); ok {
-				// This panic was almost certainly caused by SetPanicOnFault.
-				err = fmt.Errorf("error reading module index: %v", e)
-				return
-			}
-			// The panic was likely not caused by SetPanicOnFault.
-			panic(e)
-		}
-	}()
-
-	gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
-	if string(gotVersion) != indexVersion {
-		return nil, fmt.Errorf("bad index version string: %q", gotVersion)
+	const hdr = len(indexVersion + "\n")
+	d := &decoder{data: data}
+	str := d.intAt(hdr)
+	if str < hdr+8 || len(d.data) < str {
+		return nil, errCorrupt
 	}
-	stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
-	st := newStringTable(data[stringTableOffset:])
-	d := decoder{unread, st}
-	numPackages := d.int()
-
-	packagePaths := make([]string, numPackages)
-	for i := range packagePaths {
-		packagePaths[i] = d.string()
-	}
-	packageOffsets := make([]int, numPackages)
-	for i := range packageOffsets {
-		packageOffsets[i] = d.int()
-	}
-	packages := make(map[string]int, numPackages)
-	for i := range packagePaths {
-		packages[packagePaths[i]] = packageOffsets[i]
+	d.data, d.str = data[:str], d.data[str:]
+	// Check that string table looks valid.
+	// First string is empty string (length 0),
+	// and we leave a marker byte 0xFF at the end
+	// just to make sure that the file is not truncated.
+	if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
+		return nil, errCorrupt
 	}
 
-	return &Module{
+	n := d.intAt(hdr + 4)
+	if n < 0 || n > (len(d.data)-8)/8 {
+		return nil, errCorrupt
+	}
+
+	m = &Module{
 		moddir,
-		offsetDecoder{data, st},
-		packages,
-		packagePaths,
-	}, nil
+		d,
+		n,
+	}
+	return m, nil
 }
 
 // packageFromBytes returns a *IndexPackage given the encoded representation.
 func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
-	if !enabled {
-		panic("use of package index when not enabled")
+	m, err := fromBytes(modroot, data)
+	if err != nil {
+		return nil, err
 	}
-
-	// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
-	// that all its errors satisfy this interface, we'll only check for these errors so that
-	// we don't suppress panics that could have been produced from other sources.
-	type addrer interface {
-		Addr() uintptr
+	if m.n != 1 {
+		return nil, fmt.Errorf("corrupt single-package index")
 	}
-
-	// set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
-	// in case it's mmapped (the common case).
-	old := debug.SetPanicOnFault(true)
-	defer func() {
-		debug.SetPanicOnFault(old)
-		if e := recover(); e != nil {
-			if _, ok := e.(addrer); ok {
-				// This panic was almost certainly caused by SetPanicOnFault.
-				err = fmt.Errorf("error reading module index: %v", e)
-				return
-			}
-			// The panic was likely not caused by SetPanicOnFault.
-			panic(e)
-		}
-	}()
-
-	gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
-	if string(gotVersion) != indexVersion {
-		return nil, fmt.Errorf("bad index version string: %q", gotVersion)
-	}
-	stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
-	st := newStringTable(data[stringTableOffset:])
-	d := &decoder{unread, st}
-	p = decodePackage(d, offsetDecoder{data, st})
-	p.modroot = modroot
-	return p, nil
+	return m.pkg(0), nil
 }
 
-// Returns a list of directory paths, relative to the modroot, for
-// packages contained in the module index.
-func (mi *Module) Packages() []string {
-	return mi.packagePaths
+// pkgDir returns the dir string of the i'th package in the index.
+func (m *Module) pkgDir(i int) string {
+	if i < 0 || i >= m.n {
+		panic(errCorrupt)
+	}
+	return m.d.stringAt(12 + 8 + 8*i)
+}
+
+// pkgOff returns the offset of the data for the i'th package in the index.
+func (m *Module) pkgOff(i int) int {
+	if i < 0 || i >= m.n {
+		panic(errCorrupt)
+	}
+	return m.d.intAt(12 + 8 + 8*i + 4)
+}
+
+// Walk calls f for each package in the index, passing the path to that package relative to the module root.
+func (m *Module) Walk(f func(path string)) {
+	defer unprotect(protect(), nil)
+	for i := 0; i < m.n; i++ {
+		f(m.pkgDir(i))
+	}
 }
 
 // relPath returns the path relative to the module's root.
@@ -347,11 +368,7 @@
 
 // Import is the equivalent of build.Import given the information in Module.
 func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
-	defer func() {
-		if e := recover(); e != nil {
-			err = fmt.Errorf("error reading module index: %v", e)
-		}
-	}()
+	defer unprotect(protect(), &err)
 
 	ctxt := (*Context)(&bctxt)
 
@@ -792,46 +809,44 @@
 
 var errCannotFindPackage = errors.New("cannot find package")
 
-// Package returns an IndexPackage constructed using the information in the Module.
-func (mi *Module) Package(path string) *IndexPackage {
-	defer func() {
-		if e := recover(); e != nil {
-			base.Fatalf("error reading module index: %v", e)
-		}
-	}()
-	offset, ok := mi.packages[path]
-	if !ok {
-		return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
-	}
+// Package finds and returns the package with the given path (relative to the module root).
+// If the package does not exist, Package returns an IndexPackage that will return an
+// appropriate error from its methods.
+func (m *Module) Package(path string) *IndexPackage {
+	defer unprotect(protect(), nil)
 
-	// TODO(matloob): do we want to lock on the module index?
-	d := mi.od.decoderAt(offset)
-	p := decodePackage(d, mi.od)
-	p.modroot = mi.modroot
-	return p
+	i, ok := sort.Find(m.n, func(i int) int {
+		return strings.Compare(path, m.pkgDir(i))
+	})
+	if !ok {
+		return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
+	}
+	return m.pkg(i)
 }
 
-func decodePackage(d *decoder, od offsetDecoder) *IndexPackage {
-	rp := new(IndexPackage)
-	if errstr := d.string(); errstr != "" {
-		rp.error = errors.New(errstr)
+// pkg returns the i'th IndexPackage in m.
+func (m *Module) pkg(i int) *IndexPackage {
+	r := m.d.readAt(m.pkgOff(i))
+	p := new(IndexPackage)
+	if errstr := r.string(); errstr != "" {
+		p.error = errors.New(errstr)
 	}
-	rp.dir = d.string()
-	numSourceFiles := d.uint32()
-	rp.sourceFiles = make([]*sourceFile, numSourceFiles)
-	for i := uint32(0); i < numSourceFiles; i++ {
-		offset := d.uint32()
-		rp.sourceFiles[i] = &sourceFile{
-			od: od.offsetDecoderAt(offset),
+	p.dir = r.string()
+	p.sourceFiles = make([]*sourceFile, r.int())
+	for i := range p.sourceFiles {
+		p.sourceFiles[i] = &sourceFile{
+			d:   m.d,
+			pos: r.int(),
 		}
 	}
-	return rp
+	p.modroot = m.modroot
+	return p
 }
 
 // sourceFile represents the information of a given source file in the module index.
 type sourceFile struct {
-	od offsetDecoder // od interprets all offsets relative to the start of the source file's data
-
+	d               *decoder // encoding of this source file
+	pos             int      // start of sourceFile encoding in d
 	onceReadImports sync.Once
 	savedImports    []rawImport // saved imports so that they're only read once
 }
@@ -851,73 +866,67 @@
 )
 
 func (sf *sourceFile) error() string {
-	return sf.od.stringAt(sourceFileError)
+	return sf.d.stringAt(sf.pos + sourceFileError)
 }
 func (sf *sourceFile) parseError() string {
-	return sf.od.stringAt(sourceFileParseError)
+	return sf.d.stringAt(sf.pos + sourceFileParseError)
 }
 func (sf *sourceFile) synopsis() string {
-	return sf.od.stringAt(sourceFileSynopsis)
+	return sf.d.stringAt(sf.pos + sourceFileSynopsis)
 }
 func (sf *sourceFile) name() string {
-	return sf.od.stringAt(sourceFileName)
+	return sf.d.stringAt(sf.pos + sourceFileName)
 }
 func (sf *sourceFile) pkgName() string {
-	return sf.od.stringAt(sourceFilePkgName)
+	return sf.d.stringAt(sf.pos + sourceFilePkgName)
 }
 func (sf *sourceFile) ignoreFile() bool {
-	return sf.od.boolAt(sourceFileIgnoreFile)
+	return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
 }
 func (sf *sourceFile) binaryOnly() bool {
-	return sf.od.boolAt(sourceFileBinaryOnly)
+	return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
 }
 func (sf *sourceFile) cgoDirectives() string {
-	return sf.od.stringAt(sourceFileCgoDirectives)
+	return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
 }
 func (sf *sourceFile) goBuildConstraint() string {
-	return sf.od.stringAt(sourceFileGoBuildConstraint)
+	return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
 }
 
 func (sf *sourceFile) plusBuildConstraints() []string {
-	d := sf.od.decoderAt(sourceFileNumPlusBuildConstraints)
-	n := d.int()
+	pos := sf.pos + sourceFileNumPlusBuildConstraints
+	n := sf.d.intAt(pos)
+	pos += 4
 	ret := make([]string, n)
 	for i := 0; i < n; i++ {
-		ret[i] = d.string()
+		ret[i] = sf.d.stringAt(pos)
+		pos += 4
 	}
 	return ret
 }
 
-func importsOffset(numPlusBuildConstraints int) int {
-	// 4 bytes per uin32, add one to advance past numPlusBuildConstraints itself
-	return sourceFileNumPlusBuildConstraints + 4*(numPlusBuildConstraints+1)
-}
-
 func (sf *sourceFile) importsOffset() int {
-	numPlusBuildConstraints := sf.od.intAt(sourceFileNumPlusBuildConstraints)
-	return importsOffset(numPlusBuildConstraints)
-}
-
-func embedsOffset(importsOffset, numImports int) int {
-	// 4 bytes per uint32; 1 to advance past numImports itself, and 5 uint32s per import
-	return importsOffset + 4*(1+(5*numImports))
+	pos := sf.pos + sourceFileNumPlusBuildConstraints
+	n := sf.d.intAt(pos)
+	// each build constraint is 1 uint32
+	return pos + 4 + n*4
 }
 
 func (sf *sourceFile) embedsOffset() int {
-	importsOffset := sf.importsOffset()
-	numImports := sf.od.intAt(importsOffset)
-	return embedsOffset(importsOffset, numImports)
+	pos := sf.importsOffset()
+	n := sf.d.intAt(pos)
+	// each import is 5 uint32s (string + tokpos)
+	return pos + 4 + n*(4*5)
 }
 
 func (sf *sourceFile) imports() []rawImport {
 	sf.onceReadImports.Do(func() {
 		importsOffset := sf.importsOffset()
-		d := sf.od.decoderAt(importsOffset)
-		numImports := d.int()
+		r := sf.d.readAt(importsOffset)
+		numImports := r.int()
 		ret := make([]rawImport, numImports)
 		for i := 0; i < numImports; i++ {
-			ret[i].path = d.string()
-			ret[i].position = d.tokpos()
+			ret[i] = rawImport{r.string(), r.tokpos()}
 		}
 		sf.savedImports = ret
 	})
@@ -926,125 +935,15 @@
 
 func (sf *sourceFile) embeds() []embed {
 	embedsOffset := sf.embedsOffset()
-	d := sf.od.decoderAt(embedsOffset)
-	numEmbeds := d.int()
+	r := sf.d.readAt(embedsOffset)
+	numEmbeds := r.int()
 	ret := make([]embed, numEmbeds)
 	for i := range ret {
-		pattern := d.string()
-		pos := d.tokpos()
-		ret[i] = embed{pattern, pos}
+		ret[i] = embed{r.string(), r.tokpos()}
 	}
 	return ret
 }
 
-// A decoder reads from the current position of the file and advances its position as it
-// reads.
-type decoder struct {
-	b  []byte
-	st *stringTable
-}
-
-func (d *decoder) uint32() uint32 {
-	n := binary.LittleEndian.Uint32(d.b[:4])
-	d.b = d.b[4:]
-	return n
-}
-
-func (d *decoder) int() int {
-	n := d.uint32()
-	if int64(n) > math.MaxInt {
-		base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
-	}
-	return int(n)
-}
-
-func (d *decoder) tokpos() token.Position {
-	file := d.string()
-	offset := d.int()
-	line := d.int()
-	column := d.int()
-	return token.Position{
-		Filename: file,
-		Offset:   offset,
-		Line:     line,
-		Column:   column,
-	}
-}
-
-func (d *decoder) string() string {
-	return d.st.string(d.int())
-}
-
-// And offset decoder reads information offset from its position in the file.
-// It's either offset from the beginning of the index, or the beginning of a sourceFile's data.
-type offsetDecoder struct {
-	b  []byte
-	st *stringTable
-}
-
-func (od *offsetDecoder) uint32At(offset int) uint32 {
-	if offset > len(od.b) {
-		base.Fatalf("go: trying to read from index file at offset higher than file length. This indicates a corrupt offset file in the cache.")
-	}
-	return binary.LittleEndian.Uint32(od.b[offset:])
-}
-
-func (od *offsetDecoder) intAt(offset int) int {
-	n := od.uint32At(offset)
-	if int64(n) > math.MaxInt {
-		base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
-	}
-	return int(n)
-}
-
-func (od *offsetDecoder) boolAt(offset int) bool {
-	switch v := od.uint32At(offset); v {
-	case 0:
-		return false
-	case 1:
-		return true
-	default:
-		base.Fatalf("go: invalid bool value in index file encoding: %v", v)
-	}
-	panic("unreachable")
-}
-
-func (od *offsetDecoder) stringAt(offset int) string {
-	return od.st.string(od.intAt(offset))
-}
-
-func (od *offsetDecoder) decoderAt(offset int) *decoder {
-	return &decoder{od.b[offset:], od.st}
-}
-
-func (od *offsetDecoder) offsetDecoderAt(offset uint32) offsetDecoder {
-	return offsetDecoder{od.b[offset:], od.st}
-}
-
-type stringTable struct {
-	b []byte
-}
-
-func newStringTable(b []byte) *stringTable {
-	return &stringTable{b: b}
-}
-
-func (st *stringTable) string(pos int) string {
-	if pos == 0 {
-		return ""
-	}
-
-	bb := st.b[pos:]
-	i := bytes.IndexByte(bb, 0)
-
-	if i == -1 {
-		panic("reached end of string table trying to read string")
-	}
-	s := asString(bb[:i])
-
-	return s
-}
-
 func asString(b []byte) string {
 	p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
 
@@ -1055,3 +954,82 @@
 
 	return s
 }
+
+// A decoder helps decode the index format.
+type decoder struct {
+	data []byte // data after header
+	str  []byte // string table
+}
+
+// intAt returns the int at the given offset in d.data.
+func (d *decoder) intAt(off int) int {
+	if off < 0 || len(d.data)-off < 4 {
+		panic(errCorrupt)
+	}
+	i := binary.LittleEndian.Uint32(d.data[off : off+4])
+	if int32(i)>>31 != 0 {
+		panic(errCorrupt)
+	}
+	return int(i)
+}
+
+// boolAt returns the bool at the given offset in d.data.
+func (d *decoder) boolAt(off int) bool {
+	return d.intAt(off) != 0
+}
+
+// stringTableAt returns the string pointed at by the int at the given offset in d.data.
+func (d *decoder) stringAt(off int) string {
+	return d.stringTableAt(d.intAt(off))
+}
+
+// stringTableAt returns the string at the given offset in the string table d.str.
+func (d *decoder) stringTableAt(off int) string {
+	if off < 0 || off >= len(d.str) {
+		panic(errCorrupt)
+	}
+	s := d.str[off:]
+	v, n := binary.Uvarint(s)
+	if n <= 0 || v > uint64(len(s[n:])) {
+		panic(errCorrupt)
+	}
+	return asString(s[n : n+int(v)])
+}
+
+// A reader reads sequential fields from a section of the index format.
+type reader struct {
+	d   *decoder
+	pos int
+}
+
+// readAt returns a reader starting at the given position in d.
+func (d *decoder) readAt(pos int) *reader {
+	return &reader{d, pos}
+}
+
+// int reads the next int.
+func (r *reader) int() int {
+	i := r.d.intAt(r.pos)
+	r.pos += 4
+	return i
+}
+
+// string reads the next string.
+func (r *reader) string() string {
+	return r.d.stringTableAt(r.int())
+}
+
+// bool reads the next bool.
+func (r *reader) bool() bool {
+	return r.int() != 0
+}
+
+// tokpos reads the next token.Position.
+func (r *reader) tokpos() token.Position {
+	return token.Position{
+		Filename: r.string(),
+		Offset:   r.int(),
+		Line:     r.int(),
+		Column:   r.int(),
+	}
+}
diff --git a/src/cmd/go/internal/modindex/scan.go b/src/cmd/go/internal/modindex/scan.go
index 1ba7c0c..d3f059b 100644
--- a/src/cmd/go/internal/modindex/scan.go
+++ b/src/cmd/go/internal/modindex/scan.go
@@ -46,6 +46,7 @@
 // encoded representation. It returns ErrNotIndexed if the module can't
 // be indexed because it contains symlinks.
 func indexModule(modroot string) ([]byte, error) {
+	fsys.Trace("indexModule", modroot)
 	var packages []*rawPackage
 	err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
 		if err := moduleWalkErr(modroot, path, info, err); err != nil {
@@ -72,6 +73,7 @@
 // encoded representation. It returns ErrNotIndexed if the package can't
 // be indexed.
 func indexPackage(modroot, pkgdir string) []byte {
+	fsys.Trace("indexPackage", pkgdir)
 	p := importRaw(modroot, relPath(pkgdir, modroot))
 	return encodePackageBytes(p)
 }
diff --git a/src/cmd/go/internal/modindex/write.go b/src/cmd/go/internal/modindex/write.go
index 3408248..7db1fb0 100644
--- a/src/cmd/go/internal/modindex/write.go
+++ b/src/cmd/go/internal/modindex/write.go
@@ -1,54 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package modindex
 
 import (
 	"cmd/go/internal/base"
 	"encoding/binary"
 	"go/token"
-	"math"
 	"sort"
-	"strings"
 )
 
-const indexVersion = "go index v0"
+const indexVersion = "go index v1" // 11 bytes (plus \n), to align uint32s in index
 
 // encodeModuleBytes produces the encoded representation of the module index.
 // encodeModuleBytes may modify the packages slice.
 func encodeModuleBytes(packages []*rawPackage) []byte {
 	e := newEncoder()
-	e.Bytes([]byte(indexVersion))
-	e.Bytes([]byte{'\n'})
+	e.Bytes([]byte(indexVersion + "\n"))
 	stringTableOffsetPos := e.Pos() // fill this at the end
 	e.Uint32(0)                     // string table offset
-	e.Int(len(packages))
 	sort.Slice(packages, func(i, j int) bool {
 		return packages[i].dir < packages[j].dir
 	})
+	e.Int(len(packages))
+	packagesPos := e.Pos()
 	for _, p := range packages {
 		e.String(p.dir)
-	}
-	packagesOffsetPos := e.Pos()
-	for range packages {
 		e.Int(0)
 	}
 	for i, p := range packages {
-		e.IntAt(e.Pos(), packagesOffsetPos+4*i)
+		e.IntAt(e.Pos(), packagesPos+8*i+4)
 		encodePackage(e, p)
 	}
 	e.IntAt(e.Pos(), stringTableOffsetPos)
 	e.Bytes(e.stringTable)
+	e.Bytes([]byte{0xFF}) // end of string table marker
 	return e.b
 }
 
 func encodePackageBytes(p *rawPackage) []byte {
-	e := newEncoder()
-	e.Bytes([]byte(indexVersion))
-	e.Bytes([]byte{'\n'})
-	stringTableOffsetPos := e.Pos() // fill this at the end
-	e.Uint32(0)                     // string table offset
-	encodePackage(e, p)
-	e.IntAt(e.Pos(), stringTableOffsetPos)
-	e.Bytes(e.stringTable)
-	return e.b
+	return encodeModuleBytes([]*rawPackage{p})
 }
 
 func encodePackage(e *encoder, p *rawPackage) {
@@ -126,9 +118,6 @@
 }
 
 func (e *encoder) String(s string) {
-	if strings.IndexByte(s, 0) >= 0 {
-		base.Fatalf("go: attempting to encode a string containing a null byte")
-	}
 	if n, ok := e.strings[s]; ok {
 		e.Int(n)
 		return
@@ -136,8 +125,8 @@
 	pos := len(e.stringTable)
 	e.strings[s] = pos
 	e.Int(pos)
+	e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s)))
 	e.stringTable = append(e.stringTable, []byte(s)...)
-	e.stringTable = append(e.stringTable, 0)
 }
 
 func (e *encoder) Bool(b bool) {
@@ -152,17 +141,18 @@
 	e.b = binary.LittleEndian.AppendUint32(e.b, n)
 }
 
-// Int encodes n. Note that all ints are written to the index as uint32s.
+// Int encodes n. Note that all ints are written to the index as uint32s,
+// and to avoid problems on 32-bit systems we require fitting into a 32-bit int.
 func (e *encoder) Int(n int) {
-	if n < 0 || int64(n) > math.MaxUint32 {
-		base.Fatalf("go: attempting to write an int to the index that overflows uint32")
+	if n < 0 || int(int32(n)) != n {
+		base.Fatalf("go: attempting to write an int to the index that overflows int32")
 	}
 	e.Uint32(uint32(n))
 }
 
 func (e *encoder) IntAt(n int, at int) {
-	if n < 0 || int64(n) > math.MaxUint32 {
-		base.Fatalf("go: attempting to write an int to the index that overflows uint32")
+	if n < 0 || int(int32(n)) != n {
+		base.Fatalf("go: attempting to write an int to the index that overflows int32")
 	}
 	binary.LittleEndian.PutUint32(e.b[at:], uint32(n))
 }
diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go
index 856390a..b2ac7f2 100644
--- a/src/cmd/go/internal/modload/search.go
+++ b/src/cmd/go/internal/modload/search.go
@@ -216,21 +216,20 @@
 // is the module's root directory on disk, index is the modindex.Module for the
 // module, and importPathRoot is the module's path prefix.
 func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
-loopPackages:
-	for _, reldir := range index.Packages() {
+	index.Walk(func(reldir string) {
 		// Avoid .foo, _foo, and testdata subdirectory trees.
 		p := reldir
 		for {
 			elem, rest, found := strings.Cut(p, string(filepath.Separator))
 			if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
-				continue loopPackages
+				return
 			}
 			if found && elem == "vendor" {
 				// Ignore this path if it contains the element "vendor" anywhere
 				// except for the last element (packages named vendor are allowed
 				// for historical reasons). Note that found is true when this
 				// isn't the last path element.
-				continue loopPackages
+				return
 			}
 			if !found {
 				// Didn't find the separator, so we're considering the last element.
@@ -241,12 +240,12 @@
 
 		// Don't use GOROOT/src.
 		if reldir == "" && importPathRoot == "" {
-			continue
+			return
 		}
 
 		name := path.Join(importPathRoot, filepath.ToSlash(reldir))
 		if !treeCanMatch(name) {
-			continue
+			return
 		}
 
 		if !have[name] {
@@ -257,7 +256,7 @@
 				}
 			}
 		}
-	}
+	})
 }
 
 // MatchInModule identifies the packages matching the given pattern within the
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index 5bf548d..255ff3a 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -211,7 +211,11 @@
 			codegenArg = "-shared"
 			ldBuildmode = "pie"
 		case "windows":
-			ldBuildmode = "pie"
+			if cfg.BuildRace {
+				ldBuildmode = "exe"
+			} else {
+				ldBuildmode = "pie"
+			}
 		case "ios":
 			codegenArg = "-shared"
 			ldBuildmode = "pie"
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 5e82929..809dfb4 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -163,7 +163,7 @@
 	ts.cd = filepath.Join(ts.workdir, "gopath/src")
 	ts.env = []string{
 		"WORK=" + ts.workdir, // must be first for ts.abbrev
-		"PATH=" + testBin + string(filepath.ListSeparator) + os.Getenv("PATH"),
+		pathEnvName() + "=" + testBin + string(filepath.ListSeparator) + os.Getenv(pathEnvName()),
 		homeEnvName() + "=/no-home",
 		"CCACHE_DISABLE=1", // ccache breaks with non-existent HOME
 		"GOARCH=" + runtime.GOARCH,
@@ -187,8 +187,6 @@
 		tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"),
 		"devnull=" + os.DevNull,
 		"goversion=" + goVersion(ts),
-		":=" + string(os.PathListSeparator),
-		"/=" + string(os.PathSeparator),
 		"CMDGO_TEST_RUN_MAIN=true",
 	}
 	if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" {
@@ -203,10 +201,6 @@
 		ts.env = append(ts.env, "TESTGONETWORK=panic", "TESTGOVCS=panic")
 	}
 
-	if runtime.GOOS == "plan9" {
-		ts.env = append(ts.env, "path="+testBin+string(filepath.ListSeparator)+os.Getenv("path"))
-	}
-
 	for _, key := range extraEnvKeys {
 		if val := os.Getenv(key); val != "" {
 			ts.env = append(ts.env, key+"="+val)
@@ -219,6 +213,10 @@
 			ts.envMap[kv[:i]] = kv[i+1:]
 		}
 	}
+	// Add entries for ${:} and ${/} to make it easier to write platform-independent
+	// environment variables.
+	ts.envMap["/"] = string(os.PathSeparator)
+	ts.envMap[":"] = string(os.PathListSeparator)
 
 	fmt.Fprintf(&ts.log, "# (%s)\n", time.Now().UTC().Format(time.RFC3339))
 	ts.mark = ts.log.Len()
@@ -1264,12 +1262,7 @@
 		}
 	}
 
-	pathName := "PATH"
-	if runtime.GOOS == "plan9" {
-		pathName = "path"
-	}
-
-	for _, dir := range strings.Split(ts.envMap[pathName], string(filepath.ListSeparator)) {
+	for _, dir := range strings.Split(ts.envMap[pathEnvName()], string(filepath.ListSeparator)) {
 		if searchExt {
 			ents, err := os.ReadDir(dir)
 			if err != nil {
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index c575bff..e529176 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -41,12 +41,19 @@
 	GODEBUG=<actual GODEBUG>
 	devnull=<value of os.DevNull>
 	goversion=<current Go version; for example, 1.12>
-	:=<OS-specific path list separator>
 
-The scripts' supporting files are unpacked relative to $GOPATH/src (aka $WORK/gopath/src)
-and then the script begins execution in that directory as well. Thus the example above runs
-in $WORK/gopath/src with GOPATH=$WORK/gopath and $WORK/gopath/src/hello.go
-containing the listed contents.
+On Plan 9, the variables $path and $home are set instead of $PATH and $HOME.
+On Windows, the variables $USERPROFILE and $TMP are set instead of
+$HOME and $TMPDIR.
+
+In addition, variables named ':' and '/' are expanded within script arguments
+(expanding to the value of os.PathListSeparator and os.PathSeparator
+respectively) but are not inherited in subprocess environments.
+
+The scripts' supporting files are unpacked relative to $GOPATH/src
+(aka $WORK/gopath/src) and then the script begins execution in that directory as
+well. Thus the example above runs in $WORK/gopath/src with GOPATH=$WORK/gopath
+and $WORK/gopath/src/hello.go containing the listed contents.
 
 The lines at the top of the script are a sequence of commands to be executed
 by a tiny script engine in ../../script_test.go (not the system shell).
diff --git a/src/cmd/go/testdata/script/build_buildvcs_auto.txt b/src/cmd/go/testdata/script/build_buildvcs_auto.txt
index 9eac568..dd9eef5 100644
--- a/src/cmd/go/testdata/script/build_buildvcs_auto.txt
+++ b/src/cmd/go/testdata/script/build_buildvcs_auto.txt
@@ -6,11 +6,15 @@
 
 cd sub
 exec git init .
+exec git config user.name 'Nameless Gopher'
+exec git config user.email 'nobody@golang.org'
 exec git add sub.go
 exec git commit -m 'initial state'
 cd ..
 
 exec git init
+exec git config user.name 'Nameless Gopher'
+exec git config user.email 'nobody@golang.org'
 exec git submodule add ./sub
 exec git add go.mod example.go
 exec git commit -m 'initial state'
diff --git a/src/cmd/go/testdata/script/fsys_walk.txt b/src/cmd/go/testdata/script/fsys_walk.txt
new file mode 100644
index 0000000..9d1a945
--- /dev/null
+++ b/src/cmd/go/testdata/script/fsys_walk.txt
@@ -0,0 +1,6 @@
+# Test that go list prefix... does not read directories not beginning with prefix.
+env GODEBUG=gofsystrace=1
+go list m...
+stderr mime
+stderr mime[\\/]multipart
+! stderr archive
diff --git a/src/cmd/go/testdata/script/index.txt b/src/cmd/go/testdata/script/index.txt
new file mode 100644
index 0000000..6a2d13c
--- /dev/null
+++ b/src/cmd/go/testdata/script/index.txt
@@ -0,0 +1,6 @@
+# Check that standard library packages are cached.
+go list -json math # refresh cache
+env GODEBUG=gofsystrace=1,gofsystracelog=fsys.log
+go list -json math
+! grep math/abs.go fsys.log
+grep 'openIndexPackage .*[\\/]math$' fsys.log
diff --git a/src/cmd/go/testdata/script/list_permissions.txt b/src/cmd/go/testdata/script/list_perm.txt
similarity index 99%
rename from src/cmd/go/testdata/script/list_permissions.txt
rename to src/cmd/go/testdata/script/list_perm.txt
index f65896c..3b850ef3 100644
--- a/src/cmd/go/testdata/script/list_permissions.txt
+++ b/src/cmd/go/testdata/script/list_perm.txt
@@ -11,12 +11,11 @@
 go list ./empty/...
 stderr 'matched no packages'
 
-[root] stop # Root typically ignores file permissions.
-
 # Make the directory ./noread unreadable, and verify that 'go list' reports an
 # explicit error for a pattern that should match it (rather than treating it as
 # equivalent to an empty directory).
 
+[root] stop # Root typically ignores file permissions.
 [windows] skip # Does not have Unix-style directory permissions.
 [plan9] skip   # Might not have Unix-style directory permissions.
 
diff --git a/src/cmd/go/testdata/script/mod_perm.txt b/src/cmd/go/testdata/script/mod_perm.txt
new file mode 100644
index 0000000..f5382ec
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_perm.txt
@@ -0,0 +1,23 @@
+# go list should work in ordinary conditions.
+go list ./...
+! stdout _data
+
+# skip in conditions where chmod 0 may not work.
+# plan9 should be fine, but copied from list_perm.txt unchanged.
+[root] skip
+[windows] skip
+[plan9] skip
+
+# go list should work with unreadable _data directory.
+chmod 0 _data
+go list ./...
+! stdout _data
+
+-- go.mod --
+module m
+
+-- x.go --
+package m
+
+-- _data/x.go --
+package p
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 565ff9d..18910dd 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -1426,10 +1426,23 @@
 				argv = append(argv, "-Wl,-pagezero_size,4000000")
 			}
 		}
+		if *flagRace && ctxt.HeadType == objabi.Hwindows {
+			// Current windows/amd64 race detector tsan support
+			// library can't handle PIE mode (see #53539 for more details).
+			// For now, explicitly disable PIE (since some compilers
+			// default to it) if -race is in effect.
+			argv = addASLRargs(argv, false)
+		}
 	case BuildModePIE:
 		switch ctxt.HeadType {
 		case objabi.Hdarwin, objabi.Haix:
 		case objabi.Hwindows:
+			if *flagAslr && *flagRace {
+				// Current windows/amd64 race detector tsan support
+				// library can't handle PIE mode (see #53539 for more details).
+				// Disable alsr if -race in effect.
+				*flagAslr = false
+			}
 			argv = addASLRargs(argv, *flagAslr)
 		default:
 			// ELF.
diff --git a/src/cmd/nm/nm_test.go b/src/cmd/nm/nm_test.go
index 226c2c3..4bc9bf9 100644
--- a/src/cmd/nm/nm_test.go
+++ b/src/cmd/nm/nm_test.go
@@ -250,23 +250,14 @@
 		t.Fatal(err)
 	}
 
-	args := []string{"install", "mylib"}
-	cmd := exec.Command(testenv.GoToolPath(t), args...)
+	cmd := exec.Command(testenv.GoToolPath(t), "build", "-buildmode=archive", "-o", "mylib.a", ".")
 	cmd.Dir = libpath
 	cmd.Env = append(os.Environ(), "GOPATH="+gopath)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		t.Fatalf("building test lib failed: %s %s", err, out)
 	}
-	pat := filepath.Join(gopath, "pkg", "*", "mylib.a")
-	ms, err := filepath.Glob(pat)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(ms) == 0 {
-		t.Fatalf("cannot found paths for pattern %s", pat)
-	}
-	mylib := ms[0]
+	mylib := filepath.Join(libpath, "mylib.a")
 
 	out, err = exec.Command(testnmpath, mylib).CombinedOutput()
 	if err != nil {
diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go
index 11804d0..0e4d882 100644
--- a/src/cmd/trace/main.go
+++ b/src/cmd/trace/main.go
@@ -247,7 +247,7 @@
   because it made a system call or tried to acquire a mutex.
 
   Directly underneath each bar, a smaller bar or more commonly a fine
-  vertical line indicates an event occuring during its execution.
+  vertical line indicates an event occurring during its execution.
   Some of these are related to garbage collection; most indicate that
   a goroutine yielded its logical processor but then immediately resumed execution
   on the same logical processor. Clicking on the event displays the stack trace
@@ -274,7 +274,7 @@
   function written in C.
 </p>
 <p>
-  Above the event trace for the first logical processor are 
+  Above the event trace for the first logical processor are
   traces for various runtime-internal events.
 
   The "GC" bar shows when the garbage collector is running, and in which stage.
diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go
index 1cabc25..e6c4cca 100644
--- a/src/cmd/trace/trace.go
+++ b/src/cmd/trace/trace.go
@@ -571,7 +571,7 @@
 
 			fname := stk[0].Fn
 			info.name = fmt.Sprintf("G%v %s", newG, fname)
-			info.isSystemG = isSystemGoroutine(fname)
+			info.isSystemG = trace.IsSystemGoroutine(fname)
 
 			ctx.gcount++
 			setGState(ev, newG, gDead, gRunnable)
@@ -1129,12 +1129,6 @@
 	return ctx.buildBranch(node, stk)
 }
 
-func isSystemGoroutine(entryFn string) bool {
-	// This mimics runtime.isSystemGoroutine as closely as
-	// possible.
-	return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
-}
-
 // firstTimestamp returns the timestamp of the first event record.
 func firstTimestamp() int64 {
 	res, _ := parseTrace()
diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
index aa6780f..ba8de97 100644
--- a/src/compress/gzip/gunzip.go
+++ b/src/compress/gzip/gunzip.go
@@ -248,42 +248,40 @@
 		return 0, z.err
 	}
 
-	n, z.err = z.decompressor.Read(p)
-	z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
-	z.size += uint32(n)
-	if z.err != io.EOF {
-		// In the normal case we return here.
-		return n, z.err
+	for n == 0 {
+		n, z.err = z.decompressor.Read(p)
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+		z.size += uint32(n)
+		if z.err != io.EOF {
+			// In the normal case we return here.
+			return n, z.err
+		}
+
+		// Finished file; check checksum and size.
+		if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+			z.err = noEOF(err)
+			return n, z.err
+		}
+		digest := le.Uint32(z.buf[:4])
+		size := le.Uint32(z.buf[4:8])
+		if digest != z.digest || size != z.size {
+			z.err = ErrChecksum
+			return n, z.err
+		}
+		z.digest, z.size = 0, 0
+
+		// File is ok; check if there is another.
+		if !z.multistream {
+			return n, io.EOF
+		}
+		z.err = nil // Remove io.EOF
+
+		if _, z.err = z.readHeader(); z.err != nil {
+			return n, z.err
+		}
 	}
 
-	// Finished file; check checksum and size.
-	if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
-		z.err = noEOF(err)
-		return n, z.err
-	}
-	digest := le.Uint32(z.buf[:4])
-	size := le.Uint32(z.buf[4:8])
-	if digest != z.digest || size != z.size {
-		z.err = ErrChecksum
-		return n, z.err
-	}
-	z.digest, z.size = 0, 0
-
-	// File is ok; check if there is another.
-	if !z.multistream {
-		return n, io.EOF
-	}
-	z.err = nil // Remove io.EOF
-
-	if _, z.err = z.readHeader(); z.err != nil {
-		return n, z.err
-	}
-
-	// Read from next file, if necessary.
-	if n > 0 {
-		return n, nil
-	}
-	return z.Read(p)
+	return n, nil
 }
 
 // Close closes the Reader. It does not close the underlying io.Reader.
diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
index be69185..3309ff6 100644
--- a/src/compress/gzip/gunzip_test.go
+++ b/src/compress/gzip/gunzip_test.go
@@ -569,3 +569,19 @@
 		}
 	}
 }
+
+func TestCVE202230631(t *testing.T) {
+	var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
+		0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	r := bytes.NewReader(bytes.Repeat(empty, 4e6))
+	z, err := NewReader(r)
+	if err != nil {
+		t.Fatalf("NewReader: got %v, want nil", err)
+	}
+	// Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
+	// to stack exhaustion.
+	_, err = z.Read(make([]byte, 10))
+	if err != io.EOF {
+		t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
+	}
+}
diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go
index e0e8f61..a2d3d80 100644
--- a/src/crypto/x509/parser.go
+++ b/src/crypto/x509/parser.go
@@ -1008,22 +1008,22 @@
 	// we can populate RevocationList.Raw, before unwrapping the
 	// SEQUENCE so it can be operated on
 	if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) {
-		return nil, errors.New("x509: malformed certificate")
+		return nil, errors.New("x509: malformed crl")
 	}
 	rl.Raw = input
 	if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
-		return nil, errors.New("x509: malformed certificate")
+		return nil, errors.New("x509: malformed crl")
 	}
 
 	var tbs cryptobyte.String
 	// do the same trick again as above to extract the raw
 	// bytes for Certificate.RawTBSCertificate
 	if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) {
-		return nil, errors.New("x509: malformed tbs certificate")
+		return nil, errors.New("x509: malformed tbs crl")
 	}
 	rl.RawTBSRevocationList = tbs
 	if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
-		return nil, errors.New("x509: malformed tbs certificate")
+		return nil, errors.New("x509: malformed tbs crl")
 	}
 
 	var version int
@@ -1106,13 +1106,10 @@
 			}
 			var extensions cryptobyte.String
 			var present bool
-			if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) {
+			if !certSeq.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) {
 				return nil, errors.New("x509: malformed extensions")
 			}
 			if present {
-				if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) {
-					return nil, errors.New("x509: malformed extensions")
-				}
 				for !extensions.Empty() {
 					var extension cryptobyte.String
 					if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
@@ -1148,6 +1145,15 @@
 			if err != nil {
 				return nil, err
 			}
+			if ext.Id.Equal(oidExtensionAuthorityKeyId) {
+				rl.AuthorityKeyId = ext.Value
+			} else if ext.Id.Equal(oidExtensionCRLNumber) {
+				value := cryptobyte.String(ext.Value)
+				rl.Number = new(big.Int)
+				if !value.ReadASN1Integer(rl.Number) {
+					return nil, errors.New("x509: malformed crl number")
+				}
+			}
 			rl.Extensions = append(rl.Extensions, ext)
 		}
 	}
diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go
index 87eb1f7..950f6d0 100644
--- a/src/crypto/x509/x509.go
+++ b/src/crypto/x509/x509.go
@@ -2109,7 +2109,9 @@
 	// Issuer contains the DN of the issuing certificate.
 	Issuer pkix.Name
 	// AuthorityKeyId is used to identify the public key associated with the
-	// issuing certificate.
+	// issuing certificate. It is populated from the authorityKeyIdentifier
+	// extension when parsing a CRL. It is ignored when creating a CRL; the
+	// extension is populated from the issuing certificate itself.
 	AuthorityKeyId []byte
 
 	Signature []byte
@@ -2125,7 +2127,8 @@
 
 	// Number is used to populate the X.509 v2 cRLNumber extension in the CRL,
 	// which should be a monotonically increasing sequence number for a given
-	// CRL scope and CRL issuer.
+	// CRL scope and CRL issuer. It is also populated from the cRLNumber
+	// extension when parsing a CRL.
 	Number *big.Int
 
 	// ThisUpdate is used to populate the thisUpdate field in the CRL, which
@@ -2193,6 +2196,10 @@
 	if err != nil {
 		return nil, err
 	}
+
+	if numBytes := template.Number.Bytes(); len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) {
+		return nil, errors.New("x509: CRL number exceeds 20 octets")
+	}
 	crlNum, err := asn1.Marshal(template.Number)
 	if err != nil {
 		return nil, err
diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go
index 8ef6115..cba44f6 100644
--- a/src/crypto/x509/x509_test.go
+++ b/src/crypto/x509/x509_test.go
@@ -2479,6 +2479,40 @@
 			expectedError: "x509: template contains nil Number field",
 		},
 		{
+			name: "long Number",
+			key:  ec256Priv,
+			issuer: &Certificate{
+				KeyUsage: KeyUsageCRLSign,
+				Subject: pkix.Name{
+					CommonName: "testing",
+				},
+				SubjectKeyId: []byte{1, 2, 3},
+			},
+			template: &RevocationList{
+				ThisUpdate: time.Time{}.Add(time.Hour * 24),
+				NextUpdate: time.Time{}.Add(time.Hour * 48),
+				Number:     big.NewInt(0).SetBytes(append([]byte{1}, make([]byte, 20)...)),
+			},
+			expectedError: "x509: CRL number exceeds 20 octets",
+		},
+		{
+			name: "long Number (20 bytes, MSB set)",
+			key:  ec256Priv,
+			issuer: &Certificate{
+				KeyUsage: KeyUsageCRLSign,
+				Subject: pkix.Name{
+					CommonName: "testing",
+				},
+				SubjectKeyId: []byte{1, 2, 3},
+			},
+			template: &RevocationList{
+				ThisUpdate: time.Time{}.Add(time.Hour * 24),
+				NextUpdate: time.Time{}.Add(time.Hour * 48),
+				Number:     big.NewInt(0).SetBytes(append([]byte{255}, make([]byte, 19)...)),
+			},
+			expectedError: "x509: CRL number exceeds 20 octets",
+		},
+		{
 			name: "invalid signature algorithm",
 			key:  ec256Priv,
 			issuer: &Certificate{
@@ -2525,6 +2559,34 @@
 			},
 		},
 		{
+			name: "valid, extra entry extension",
+			key:  ec256Priv,
+			issuer: &Certificate{
+				KeyUsage: KeyUsageCRLSign,
+				Subject: pkix.Name{
+					CommonName: "testing",
+				},
+				SubjectKeyId: []byte{1, 2, 3},
+			},
+			template: &RevocationList{
+				RevokedCertificates: []pkix.RevokedCertificate{
+					{
+						SerialNumber:   big.NewInt(2),
+						RevocationTime: time.Time{}.Add(time.Hour),
+						Extensions: []pkix.Extension{
+							{
+								Id:    []int{2, 5, 29, 99},
+								Value: []byte{5, 0},
+							},
+						},
+					},
+				},
+				Number:     big.NewInt(5),
+				ThisUpdate: time.Time{}.Add(time.Hour * 24),
+				NextUpdate: time.Time{}.Add(time.Hour * 48),
+			},
+		},
+		{
 			name: "valid, Ed25519 key",
 			key:  ed25519Priv,
 			issuer: &Certificate{
@@ -2681,6 +2743,19 @@
 				t.Fatalf("Extensions mismatch: got %v; want %v.",
 					parsedCRL.Extensions[2:], tc.template.ExtraExtensions)
 			}
+
+			if tc.template.Number != nil && parsedCRL.Number == nil {
+				t.Fatalf("Generated CRL missing Number: got nil, want %s",
+					tc.template.Number.String())
+			}
+			if tc.template.Number != nil && tc.template.Number.Cmp(parsedCRL.Number) != 0 {
+				t.Fatalf("Generated CRL has wrong Number: got %s, want %s",
+					parsedCRL.Number.String(), tc.template.Number.String())
+			}
+			if !bytes.Equal(parsedCRL.AuthorityKeyId, expectedAKI) {
+				t.Fatalf("Generated CRL has wrong Number: got %x, want %x",
+					parsedCRL.AuthorityKeyId, expectedAKI)
+			}
 		})
 	}
 }
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 6bc869f..8c58723 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -449,6 +449,16 @@
 // TestTxContextWait tests the transaction behavior when the tx context is canceled
 // during execution of the query.
 func TestTxContextWait(t *testing.T) {
+	testContextWait(t, false)
+}
+
+// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
+// the final connection.
+func TestTxContextWaitNoDiscard(t *testing.T) {
+	testContextWait(t, true)
+}
+
+func testContextWait(t *testing.T, keepConnOnRollback bool) {
 	db := newTestDB(t, "people")
 	defer closeDB(t, db)
 
@@ -458,7 +468,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	tx.keepConnOnRollback = false
+	tx.keepConnOnRollback = keepConnOnRollback
 
 	tx.dc.ci.(*fakeConn).waiter = func(c context.Context) {
 		cancel()
@@ -472,36 +482,11 @@
 		t.Fatalf("expected QueryContext to error with context canceled but returned %v", err)
 	}
 
-	waitForFree(t, db, 0)
-}
-
-// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
-// the final connection.
-func TestTxContextWaitNoDiscard(t *testing.T) {
-	db := newTestDB(t, "people")
-	defer closeDB(t, db)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
-	defer cancel()
-
-	tx, err := db.BeginTx(ctx, nil)
-	if err != nil {
-		// Guard against the context being canceled before BeginTx completes.
-		if err == context.DeadlineExceeded {
-			t.Skip("tx context canceled prior to first use")
-		}
-		t.Fatal(err)
+	if keepConnOnRollback {
+		waitForFree(t, db, 1)
+	} else {
+		waitForFree(t, db, 0)
 	}
-
-	// This will trigger the *fakeConn.Prepare method which will take time
-	// performing the query. The ctxDriverPrepare func will check the context
-	// after this and close the rows and return an error.
-	_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
-	if err != context.DeadlineExceeded {
-		t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
-	}
-
-	waitForFree(t, db, 1)
 }
 
 // TestUnsupportedOptions checks that the database fails when a driver that
diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index 34f302a..eea2924 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -871,8 +871,13 @@
 	return &op
 }
 
+var maxIgnoreNestingDepth = 10000
+
 // decIgnoreOpFor returns the decoding op for a field that has no destination.
-func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
+func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
+	if depth > maxIgnoreNestingDepth {
+		error_(errors.New("invalid nesting depth"))
+	}
 	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
 	// Return the pointer to the op we're already building.
 	if opPtr := inProgress[wireId]; opPtr != nil {
@@ -896,7 +901,7 @@
 			errorf("bad data: undefined type %s", wireId.string())
 		case wire.ArrayT != nil:
 			elemId := wire.ArrayT.Elem
-			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+			elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 			op = func(i *decInstr, state *decoderState, value reflect.Value) {
 				state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
 			}
@@ -904,15 +909,15 @@
 		case wire.MapT != nil:
 			keyId := dec.wireType[wireId].MapT.Key
 			elemId := dec.wireType[wireId].MapT.Elem
-			keyOp := dec.decIgnoreOpFor(keyId, inProgress)
-			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+			keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
+			elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 			op = func(i *decInstr, state *decoderState, value reflect.Value) {
 				state.dec.ignoreMap(state, *keyOp, *elemOp)
 			}
 
 		case wire.SliceT != nil:
 			elemId := wire.SliceT.Elem
-			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+			elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 			op = func(i *decInstr, state *decoderState, value reflect.Value) {
 				state.dec.ignoreSlice(state, *elemOp)
 			}
@@ -1073,7 +1078,7 @@
 func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
 	engine := new(decEngine)
 	engine.instr = make([]decInstr, 1) // one item
-	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
+	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
 	ovfl := overflow(dec.typeString(remoteId))
 	engine.instr[0] = decInstr{*op, 0, nil, ovfl}
 	engine.numInstr = 1
@@ -1118,7 +1123,7 @@
 		localField, present := srt.FieldByName(wireField.Name)
 		// TODO(r): anonymous names
 		if !present || !isExported(wireField.Name) {
-			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
+			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
 			engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
 			continue
 		}
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
index 1d5dde2..3d49887 100644
--- a/src/encoding/gob/gobencdec_test.go
+++ b/src/encoding/gob/gobencdec_test.go
@@ -12,6 +12,7 @@
 	"fmt"
 	"io"
 	"net"
+	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -796,3 +797,26 @@
 		t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
 	}
 }
+
+func TestIgnoreDepthLimit(t *testing.T) {
+	// We don't test the actual depth limit because it requires building an
+	// extremely large message, which takes quite a while.
+	oldNestingDepth := maxIgnoreNestingDepth
+	maxIgnoreNestingDepth = 100
+	defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
+	b := new(bytes.Buffer)
+	enc := NewEncoder(b)
+	typ := reflect.TypeOf(int(0))
+	nested := reflect.ArrayOf(1, typ)
+	for i := 0; i < 100; i++ {
+		nested = reflect.ArrayOf(1, nested)
+	}
+	badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
+	enc.Encode(badStruct.Interface())
+	dec := NewDecoder(b)
+	var output struct{ Hello int }
+	expectedErr := "invalid nesting depth"
+	if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
+		t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
+	}
+}
diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
index 2575912..a6fb665 100644
--- a/src/encoding/xml/read.go
+++ b/src/encoding/xml/read.go
@@ -152,7 +152,7 @@
 	if val.IsNil() {
 		return errors.New("nil pointer passed to Unmarshal")
 	}
-	return d.unmarshal(val.Elem(), start)
+	return d.unmarshal(val.Elem(), start, 0)
 }
 
 // An UnmarshalError represents an error in the unmarshaling process.
@@ -308,8 +308,15 @@
 	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
 )
 
+const maxUnmarshalDepth = 10000
+
+var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth")
+
 // Unmarshal a single XML element into val.
-func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
+	if depth >= maxUnmarshalDepth {
+		return errExeceededMaxUnmarshalDepth
+	}
 	// Find start element if we need it.
 	if start == nil {
 		for {
@@ -402,7 +409,7 @@
 		v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem())))
 
 		// Recur to read element into slice.
-		if err := d.unmarshal(v.Index(n), start); err != nil {
+		if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
 			v.SetLen(n)
 			return err
 		}
@@ -525,13 +532,15 @@
 		case StartElement:
 			consumed := false
 			if sv.IsValid() {
-				consumed, err = d.unmarshalPath(tinfo, sv, nil, &t)
+				// unmarshalPath can call unmarshal, so we need to pass the depth through so that
+				// we can continue to enforce the maximum recursion limit.
+				consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
 				if err != nil {
 					return err
 				}
 				if !consumed && saveAny.IsValid() {
 					consumed = true
-					if err := d.unmarshal(saveAny, &t); err != nil {
+					if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
 						return err
 					}
 				}
@@ -676,7 +685,7 @@
 // The consumed result tells whether XML elements have been consumed
 // from the Decoder until start's matching end element, or if it's
 // still untouched because start is uninteresting for sv's fields.
-func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
+func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
 	recurse := false
 Loop:
 	for i := range tinfo.fields {
@@ -691,7 +700,7 @@
 		}
 		if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
 			// It's a perfect match, unmarshal the field.
-			return true, d.unmarshal(finfo.value(sv, initNilPointers), start)
+			return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1)
 		}
 		if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
 			// It's a prefix for the field. Break and recurse
@@ -720,7 +729,9 @@
 		}
 		switch t := tok.(type) {
 		case StartElement:
-			consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t)
+			// the recursion depth of unmarshalPath is limited to the path length specified
+			// by the struct field tag, so we don't increment the depth here.
+			consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
 			if err != nil {
 				return true, err
 			}
@@ -736,12 +747,12 @@
 }
 
 // Skip reads tokens until it has consumed the end element
-// matching the most recent start element already consumed.
-// It recurs if it encounters a start element, so it can be used to
-// skip nested structures.
+// matching the most recent start element already consumed,
+// skipping nested structures.
 // It returns nil if it finds an end element matching the start
 // element; otherwise it returns an error describing the problem.
 func (d *Decoder) Skip() error {
+	var depth int64
 	for {
 		tok, err := d.Token()
 		if err != nil {
@@ -749,11 +760,12 @@
 		}
 		switch tok.(type) {
 		case StartElement:
-			if err := d.Skip(); err != nil {
-				return err
-			}
+			depth++
 		case EndElement:
-			return nil
+			if depth == 0 {
+				return nil
+			}
+			depth--
 		}
 	}
 }
diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
index 6ef55de..58d1edd 100644
--- a/src/encoding/xml/read_test.go
+++ b/src/encoding/xml/read_test.go
@@ -5,8 +5,11 @@
 package xml
 
 import (
+	"bytes"
+	"errors"
 	"io"
 	"reflect"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
@@ -1094,3 +1097,32 @@
 	}
 
 }
+
+func TestCVE202228131(t *testing.T) {
+	type nested struct {
+		Parent *nested `xml:",any"`
+	}
+	var n nested
+	err := Unmarshal(bytes.Repeat([]byte("<a>"), maxUnmarshalDepth+1), &n)
+	if err == nil {
+		t.Fatal("Unmarshal did not fail")
+	} else if !errors.Is(err, errExeceededMaxUnmarshalDepth) {
+		t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
+	}
+}
+
+func TestCVE202230633(t *testing.T) {
+	if runtime.GOARCH == "wasm" {
+		t.Skip("causes memory exhaustion on js/wasm")
+	}
+	defer func() {
+		p := recover()
+		if p != nil {
+			t.Fatal("Unmarshal panicked")
+		}
+	}()
+	var example struct {
+		Things []string
+	}
+	Unmarshal(bytes.Repeat([]byte("<a>"), 17_000_000), &example)
+}
diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
index e3468f4..d911c8e 100644
--- a/src/go/parser/interface.go
+++ b/src/go/parser/interface.go
@@ -94,8 +94,11 @@
 	defer func() {
 		if e := recover(); e != nil {
 			// resume same panic if it's not a bailout
-			if _, ok := e.(bailout); !ok {
+			bail, ok := e.(bailout)
+			if !ok {
 				panic(e)
+			} else if bail.msg != "" {
+				p.errors.Add(p.file.Position(bail.pos), bail.msg)
 			}
 		}
 
@@ -198,8 +201,11 @@
 	defer func() {
 		if e := recover(); e != nil {
 			// resume same panic if it's not a bailout
-			if _, ok := e.(bailout); !ok {
+			bail, ok := e.(bailout)
+			if !ok {
 				panic(e)
+			} else if bail.msg != "" {
+				p.errors.Add(p.file.Position(bail.pos), bail.msg)
 			}
 		}
 		p.errors.Sort()
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index ca2f24c..d4ad36d 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -59,6 +59,10 @@
 	inRhs   bool // if set, the parser is parsing a rhs expression
 
 	imports []*ast.ImportSpec // list of imports
+
+	// nestLev is used to track and limit the recursion depth
+	// during parsing.
+	nestLev int
 }
 
 func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
@@ -108,6 +112,24 @@
 	p.printTrace(")")
 }
 
+// maxNestLev is the deepest we're willing to recurse during parsing
+const maxNestLev int = 1e5
+
+func incNestLev(p *parser) *parser {
+	p.nestLev++
+	if p.nestLev > maxNestLev {
+		p.error(p.pos, "exceeded max nesting depth")
+		panic(bailout{})
+	}
+	return p
+}
+
+// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
+// It is used along with incNestLev in a similar fashion to how un and trace are used.
+func decNestLev(p *parser) {
+	p.nestLev--
+}
+
 // Advance to the next token.
 func (p *parser) next0() {
 	// Because of one-token look-ahead, print the previous token
@@ -218,8 +240,12 @@
 	}
 }
 
-// A bailout panic is raised to indicate early termination.
-type bailout struct{}
+// A bailout panic is raised to indicate early termination. pos and msg are
+// only populated when bailing out of object resolution.
+type bailout struct {
+	pos token.Pos
+	msg string
+}
 
 func (p *parser) error(pos token.Pos, msg string) {
 	if p.trace {
@@ -1247,6 +1273,8 @@
 }
 
 func (p *parser) tryIdentOrType() ast.Expr {
+	defer decNestLev(incNestLev(p))
+
 	switch p.tok {
 	case token.IDENT:
 		typ := p.parseTypeName(nil)
@@ -1657,7 +1685,13 @@
 	if x == nil {
 		x = p.parseOperand()
 	}
-	for {
+	// We track the nesting here rather than at the entry for the function,
+	// since it can iteratively produce a nested output, and we want to
+	// limit how deep a structure we generate.
+	var n int
+	defer func() { p.nestLev -= n }()
+	for n = 1; ; n++ {
+		incNestLev(p)
 		switch p.tok {
 		case token.PERIOD:
 			p.next()
@@ -1717,6 +1751,8 @@
 }
 
 func (p *parser) parseUnaryExpr() ast.Expr {
+	defer decNestLev(incNestLev(p))
+
 	if p.trace {
 		defer un(trace(p, "UnaryExpr"))
 	}
@@ -1806,7 +1842,13 @@
 	if x == nil {
 		x = p.parseUnaryExpr()
 	}
-	for {
+	// We track the nesting here rather than at the entry for the function,
+	// since it can iteratively produce a nested output, and we want to
+	// limit how deep a structure we generate.
+	var n int
+	defer func() { p.nestLev -= n }()
+	for n = 1; ; n++ {
+		incNestLev(p)
 		op, oprec := p.tokPrec()
 		if oprec < prec1 {
 			return x
@@ -2099,6 +2141,8 @@
 }
 
 func (p *parser) parseIfStmt() *ast.IfStmt {
+	defer decNestLev(incNestLev(p))
+
 	if p.trace {
 		defer un(trace(p, "IfStmt"))
 	}
@@ -2402,6 +2446,8 @@
 }
 
 func (p *parser) parseStmt() (s ast.Stmt) {
+	defer decNestLev(incNestLev(p))
+
 	if p.trace {
 		defer un(trace(p, "Statement"))
 	}
diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
index a4f882d..1a46c87 100644
--- a/src/go/parser/parser_test.go
+++ b/src/go/parser/parser_test.go
@@ -10,6 +10,7 @@
 	"go/ast"
 	"go/token"
 	"io/fs"
+	"runtime"
 	"strings"
 	"testing"
 )
@@ -577,3 +578,171 @@
 		t.Errorf("got %q, want %q", comment, "// comment")
 	}
 }
+
+var parseDepthTests = []struct {
+	name   string
+	format string
+	// multiplier is used when a single statement may result in more than one
+	// change in the depth level, for instance "1+(..." produces a BinaryExpr
+	// followed by a UnaryExpr, which increments the depth twice. The test
+	// case comment explains which nodes are triggering the multiple depth
+	// changes.
+	parseMultiplier int
+	// scope is true if we should also test the statement for the resolver scope
+	// depth limit.
+	scope bool
+	// scopeMultiplier does the same as parseMultiplier, but for the scope
+	// depths.
+	scopeMultiplier int
+}{
+	// The format expands the part inside « » many times.
+	// A second set of brackets nested inside the first stops the repetition,
+	// so that for example «(«1»)» expands to (((...((((1))))...))).
+	{name: "array", format: "package main; var x «[1]»int"},
+	{name: "slice", format: "package main; var x «[]»int"},
+	{name: "struct", format: "package main; var x «struct { X «int» }»", scope: true},
+	{name: "pointer", format: "package main; var x «*»int"},
+	{name: "func", format: "package main; var x «func()»int", scope: true},
+	{name: "chan", format: "package main; var x «chan »int"},
+	{name: "chan2", format: "package main; var x «<-chan »int"},
+	{name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType
+	{name: "map", format: "package main; var x «map[int]»int"},
+	{name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2},             // Parser nodes: UnaryExpr, CompositeLit
+	{name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2},         // Parser nodes: UnaryExpr, CompositeLit
+	{name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
+	{name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2},    // Parser nodes: CompositeLit, KeyValueExpr
+	{name: "dot", format: "package main; var x = «x.»x"},
+	{name: "index", format: "package main; var x = x«[1]»"},
+	{name: "slice", format: "package main; var x = x«[1:2]»"},
+	{name: "slice3", format: "package main; var x = x«[1:2:3]»"},
+	{name: "dottype", format: "package main; var x = x«.(any)»"},
+	{name: "callseq", format: "package main; var x = x«()»"},
+	{name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr
+	{name: "binary", format: "package main; var x = «1+»1"},
+	{name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr
+	{name: "unary", format: "package main; var x = «^»1"},
+	{name: "addr", format: "package main; var x = «& »x"},
+	{name: "star", format: "package main; var x = «*»x"},
+	{name: "recv", format: "package main; var x = «<-»x"},
+	{name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2},    // Parser nodes: Ident, CallExpr
+	{name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr
+	{name: "label", format: "package main; func main() { «Label:» }"},
+	{name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. Scopes: IfStmt, BlockStmt
+	{name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true},
+	{name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2},               // Scopes: TypeSwitchStmt, CaseClause
+	{name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
+	{name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2},                            // Scopes: ForStmt, BlockStmt
+	{name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2},                          // Scopes: ForStmt, BlockStmt
+	{name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2},              // Scopes: ForStmt, BlockStmt
+	{name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2},               // Scopes: RangeStmt, BlockStmt
+	{name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2},           // Scopes: RangeStmt, BlockStmt
+	{name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2},        // Scopes: RangeStmt, BlockStmt
+	{name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 2, scope: true},                      // Parser nodes: GoStmt, FuncLit
+	{name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true},                // Parser nodes: DeferStmt, FuncLit
+	{name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true},
+}
+
+// split splits pre«mid»post into pre, mid, post.
+// If the string does not have that form, split returns x, "", "".
+func split(x string) (pre, mid, post string) {
+	start, end := strings.Index(x, "«"), strings.LastIndex(x, "»")
+	if start < 0 || end < 0 {
+		return x, "", ""
+	}
+	return x[:start], x[start+len("«") : end], x[end+len("»"):]
+}
+
+func TestParseDepthLimit(t *testing.T) {
+	if runtime.GOARCH == "wasm" {
+		t.Skip("causes call stack exhaustion on js/wasm")
+	}
+	for _, tt := range parseDepthTests {
+		for _, size := range []string{"small", "big"} {
+			t.Run(tt.name+"/"+size, func(t *testing.T) {
+				n := maxNestLev + 1
+				if tt.parseMultiplier > 0 {
+					n /= tt.parseMultiplier
+				}
+				if size == "small" {
+					// Decrease the number of statements by 10, in order to check
+					// that we do not fail when under the limit. 10 is used to
+					// provide some wiggle room for cases where the surrounding
+					// scaffolding syntax adds some noise to the depth that changes
+					// on a per testcase basis.
+					n -= 10
+				}
+
+				pre, mid, post := split(tt.format)
+				if strings.Contains(mid, "«") {
+					left, base, right := split(mid)
+					mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
+				} else {
+					mid = strings.Repeat(mid, n)
+				}
+				input := pre + mid + post
+
+				fset := token.NewFileSet()
+				_, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution)
+				if size == "small" {
+					if err != nil {
+						t.Errorf("ParseFile(...): %v (want success)", err)
+					}
+				} else {
+					expected := "exceeded max nesting depth"
+					if err == nil || !strings.HasSuffix(err.Error(), expected) {
+						t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
+					}
+				}
+			})
+		}
+	}
+}
+
+func TestScopeDepthLimit(t *testing.T) {
+	if runtime.GOARCH == "wasm" {
+		t.Skip("causes call stack exhaustion on js/wasm")
+	}
+	for _, tt := range parseDepthTests {
+		if !tt.scope {
+			continue
+		}
+		for _, size := range []string{"small", "big"} {
+			t.Run(tt.name+"/"+size, func(t *testing.T) {
+				n := maxScopeDepth + 1
+				if tt.scopeMultiplier > 0 {
+					n /= tt.scopeMultiplier
+				}
+				if size == "small" {
+					// Decrease the number of statements by 10, in order to check
+					// that we do not fail when under the limit. 10 is used to
+					// provide some wiggle room for cases where the surrounding
+					// scaffolding syntax adds some noise to the depth that changes
+					// on a per testcase basis.
+					n -= 10
+				}
+
+				pre, mid, post := split(tt.format)
+				if strings.Contains(mid, "«") {
+					left, base, right := split(mid)
+					mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
+				} else {
+					mid = strings.Repeat(mid, n)
+				}
+				input := pre + mid + post
+
+				fset := token.NewFileSet()
+				_, err := ParseFile(fset, "", input, DeclarationErrors)
+				if size == "small" {
+					if err != nil {
+						t.Errorf("ParseFile(...): %v (want success)", err)
+					}
+				} else {
+					expected := "exceeded max scope depth during object resolution"
+					if err == nil || !strings.HasSuffix(err.Error(), expected) {
+						t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
+					}
+				}
+			})
+		}
+	}
+}
diff --git a/src/go/parser/resolver.go b/src/go/parser/resolver.go
index 767a5e2..f8ff618 100644
--- a/src/go/parser/resolver.go
+++ b/src/go/parser/resolver.go
@@ -54,6 +54,8 @@
 	file.Unresolved = r.unresolved[0:i]
 }
 
+const maxScopeDepth int = 1e3
+
 type resolver struct {
 	handle  *token.File
 	declErr func(token.Pos, string)
@@ -85,16 +87,19 @@
 }
 
 func (r *resolver) openScope(pos token.Pos) {
+	r.depth++
+	if r.depth > maxScopeDepth {
+		panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"})
+	}
 	if debugResolve {
 		r.trace("opening scope @%v", pos)
-		r.depth++
 	}
 	r.topScope = ast.NewScope(r.topScope)
 }
 
 func (r *resolver) closeScope() {
+	r.depth--
 	if debugResolve {
-		r.depth--
 		r.trace("closing scope")
 	}
 	r.topScope = r.topScope.Outer
diff --git a/src/internal/trace/goroutines.go b/src/internal/trace/goroutines.go
index a5fda48..5da90e0 100644
--- a/src/internal/trace/goroutines.go
+++ b/src/internal/trace/goroutines.go
@@ -4,7 +4,10 @@
 
 package trace
 
-import "sort"
+import (
+	"sort"
+	"strings"
+)
 
 // GDesc contains statistics and execution details of a single goroutine.
 type GDesc struct {
@@ -126,10 +129,17 @@
 	finalStat := g.snapshotStat(lastTs, activeGCStartTime)
 
 	g.GExecutionStat = finalStat
-	for _, s := range g.activeRegions {
-		s.End = trigger
-		s.GExecutionStat = finalStat.sub(s.GExecutionStat)
-		g.Regions = append(g.Regions, s)
+
+	// System goroutines are never part of regions, even though they
+	// "inherit" a task due to creation (EvGoCreate) from within a region.
+	// This may happen e.g. if the first GC is triggered within a region,
+	// starting the GC worker goroutines.
+	if !IsSystemGoroutine(g.Name) {
+		for _, s := range g.activeRegions {
+			s.End = trigger
+			s.GExecutionStat = finalStat.sub(s.GExecutionStat)
+			g.Regions = append(g.Regions, s)
+		}
 	}
 	*(g.gdesc) = gdesc{}
 }
@@ -158,10 +168,13 @@
 		case EvGoCreate:
 			g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gdesc: new(gdesc)}
 			g.blockSchedTime = ev.Ts
-			// When a goroutine is newly created, inherit the
-			// task of the active region. For ease handling of
-			// this case, we create a fake region description with
-			// the task id.
+			// When a goroutine is newly created, inherit the task
+			// of the active region. For ease handling of this
+			// case, we create a fake region description with the
+			// task id. This isn't strictly necessary as this
+			// goroutine may not be associated with the task, but
+			// it can be convenient to see all children created
+			// during a region.
 			if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gdesc.activeRegions) > 0 {
 				regions := creatorG.gdesc.activeRegions
 				s := regions[len(regions)-1]
@@ -336,3 +349,9 @@
 	gmap[0] = true // for GC events
 	return gmap
 }
+
+func IsSystemGoroutine(entryFn string) bool {
+	// This mimics runtime.isSystemGoroutine as closely as
+	// possible.
+	return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
+}
diff --git a/src/io/fs/glob.go b/src/io/fs/glob.go
index 45d9cb6..0e529cd 100644
--- a/src/io/fs/glob.go
+++ b/src/io/fs/glob.go
@@ -31,6 +31,16 @@
 // Otherwise, Glob uses ReadDir to traverse the directory tree
 // and look for matches for the pattern.
 func Glob(fsys FS, pattern string) (matches []string, err error) {
+	return globWithLimit(fsys, pattern, 0)
+}
+
+func globWithLimit(fsys FS, pattern string, depth int) (matches []string, err error) {
+	// This limit is added to prevent stack exhaustion issues. See
+	// CVE-2022-30630.
+	const pathSeparatorsLimit = 10000
+	if depth > pathSeparatorsLimit {
+		return nil, path.ErrBadPattern
+	}
 	if fsys, ok := fsys.(GlobFS); ok {
 		return fsys.Glob(pattern)
 	}
@@ -59,9 +69,9 @@
 	}
 
 	var m []string
-	m, err = Glob(fsys, dir)
+	m, err = globWithLimit(fsys, dir, depth+1)
 	if err != nil {
-		return
+		return nil, err
 	}
 	for _, d := range m {
 		matches, err = glob(fsys, d, file, matches)
diff --git a/src/io/fs/glob_test.go b/src/io/fs/glob_test.go
index f19bebe..d052eab 100644
--- a/src/io/fs/glob_test.go
+++ b/src/io/fs/glob_test.go
@@ -8,6 +8,7 @@
 	. "io/fs"
 	"os"
 	"path"
+	"strings"
 	"testing"
 )
 
@@ -55,6 +56,15 @@
 	}
 }
 
+func TestCVE202230630(t *testing.T) {
+	// Prior to CVE-2022-30630, a stack exhaustion would occur given a large
+	// number of separators. There is now a limit of 10,000.
+	_, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001))
+	if err != path.ErrBadPattern {
+		t.Fatalf("Glob returned err=%v, want %v", err, path.ErrBadPattern)
+	}
+}
+
 // contains reports whether vector contains the string s.
 func contains(vector []string, s string) bool {
 	for _, elem := range vector {
diff --git a/src/net/http/fs.go b/src/net/http/fs.go
index 7a1d5f4..4f144eb 100644
--- a/src/net/http/fs.go
+++ b/src/net/http/fs.go
@@ -541,6 +541,7 @@
 	h := w.Header()
 	delete(h, "Content-Type")
 	delete(h, "Content-Length")
+	delete(h, "Content-Encoding")
 	if h.Get("Etag") != "" {
 		delete(h, "Last-Modified")
 	}
diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go
index d627dfd..4be561c 100644
--- a/src/net/http/fs_test.go
+++ b/src/net/http/fs_test.go
@@ -564,6 +564,60 @@
 	}
 }
 
+// Tests that ServeFile does not generate representation metadata when
+// file has not been modified, as per RFC 7232 section 4.1.
+func TestServeFileNotModified_h1(t *testing.T) { testServeFileNotModified(t, h1Mode) }
+func TestServeFileNotModified_h2(t *testing.T) { testServeFileNotModified(t, h2Mode) }
+func testServeFileNotModified(t *testing.T, h2 bool) {
+	defer afterTest(t)
+	cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Header().Set("Content-Encoding", "foo")
+		w.Header().Set("Etag", `"123"`)
+		ServeFile(w, r, "testdata/file")
+
+		// Because the testdata is so small, it would fit in
+		// both the h1 and h2 Server's write buffers. For h1,
+		// sendfile is used, though, forcing a header flush at
+		// the io.Copy. http2 doesn't do a header flush so
+		// buffers all 11 bytes and then adds its own
+		// Content-Length. To prevent the Server's
+		// Content-Length and test ServeFile only, flush here.
+		w.(Flusher).Flush()
+	}))
+	defer cst.close()
+	req, err := NewRequest("GET", cst.ts.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	req.Header.Set("If-None-Match", `"123"`)
+	resp, err := cst.c.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, err := io.ReadAll(resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		t.Fatal("reading Body:", err)
+	}
+	if len(b) != 0 {
+		t.Errorf("non-empty body")
+	}
+	if g, e := resp.StatusCode, StatusNotModified; g != e {
+		t.Errorf("status mismatch: got %d, want %d", g, e)
+	}
+	// HTTP1 transport sets ContentLength to 0.
+	if g, e1, e2 := resp.ContentLength, int64(-1), int64(0); g != e1 && g != e2 {
+		t.Errorf("Content-Length mismatch: got %d, want %d or %d", g, e1, e2)
+	}
+	if resp.Header.Get("Content-Type") != "" {
+		t.Errorf("Content-Type present, but it should not be")
+	}
+	if resp.Header.Get("Content-Encoding") != "" {
+		t.Errorf("Content-Encoding present, but it should not be")
+	}
+}
+
 func TestServeIndexHtml(t *testing.T) {
 	defer afterTest(t)
 
diff --git a/src/net/http/request.go b/src/net/http/request.go
index d091f3c..cead91d 100644
--- a/src/net/http/request.go
+++ b/src/net/http/request.go
@@ -1126,8 +1126,8 @@
 // MaxBytesReader is similar to io.LimitReader but is intended for
 // limiting the size of incoming request bodies. In contrast to
 // io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// MaxBytesError for a Read beyond the limit, and closes the
-// underlying reader when its Close method is called.
+// non-nil error of type *MaxBytesError for a Read beyond the limit,
+// and closes the underlying reader when its Close method is called.
 //
 // MaxBytesReader prevents clients from accidentally or maliciously
 // sending a large request and wasting server resources. If possible,
diff --git a/src/net/http/server.go b/src/net/http/server.go
index bc3a463..87dd412 100644
--- a/src/net/http/server.go
+++ b/src/net/http/server.go
@@ -2690,6 +2690,8 @@
 	activeConn map[*conn]struct{}
 	doneChan   chan struct{}
 	onShutdown []func()
+
+	listenerGroup sync.WaitGroup
 }
 
 func (s *Server) getDoneChan() <-chan struct{} {
@@ -2732,6 +2734,15 @@
 	defer srv.mu.Unlock()
 	srv.closeDoneChanLocked()
 	err := srv.closeListenersLocked()
+
+	// Unlock srv.mu while waiting for listenerGroup.
+	// The group Add and Done calls are made with srv.mu held,
+	// to avoid adding a new listener in the window between
+	// us setting inShutdown above and waiting here.
+	srv.mu.Unlock()
+	srv.listenerGroup.Wait()
+	srv.mu.Lock()
+
 	for c := range srv.activeConn {
 		c.rwc.Close()
 		delete(srv.activeConn, c)
@@ -2778,6 +2789,7 @@
 		go f()
 	}
 	srv.mu.Unlock()
+	srv.listenerGroup.Wait()
 
 	pollIntervalBase := time.Millisecond
 	nextPollInterval := func() time.Duration {
@@ -2794,7 +2806,7 @@
 	timer := time.NewTimer(nextPollInterval())
 	defer timer.Stop()
 	for {
-		if srv.closeIdleConns() && srv.numListeners() == 0 {
+		if srv.closeIdleConns() {
 			return lnerr
 		}
 		select {
@@ -2817,12 +2829,6 @@
 	srv.mu.Unlock()
 }
 
-func (s *Server) numListeners() int {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return len(s.listeners)
-}
-
 // closeIdleConns closes all idle connections and reports whether the
 // server is quiescent.
 func (s *Server) closeIdleConns() bool {
@@ -3157,8 +3163,10 @@
 			return false
 		}
 		s.listeners[ln] = struct{}{}
+		s.listenerGroup.Add(1)
 	} else {
 		delete(s.listeners, ln)
+		s.listenerGroup.Done()
 	}
 	return true
 }
diff --git a/src/net/url/url.go b/src/net/url/url.go
index db4d638..e82ae6a 100644
--- a/src/net/url/url.go
+++ b/src/net/url/url.go
@@ -1193,7 +1193,7 @@
 func (u *URL) JoinPath(elem ...string) *URL {
 	url := *u
 	if len(elem) > 0 {
-		elem = append([]string{u.Path}, elem...)
+		elem = append([]string{u.EscapedPath()}, elem...)
 		p := path.Join(elem...)
 		// path.Join will remove any trailing slashes.
 		// Preserve at least one.
diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go
index 478cc34..263eddf 100644
--- a/src/net/url/url_test.go
+++ b/src/net/url/url_test.go
@@ -2120,6 +2120,16 @@
 			out:  "https://go.googlesource.com/",
 		},
 		{
+			base: "https://go.googlesource.com/a%2fb",
+			elem: []string{"c"},
+			out:  "https://go.googlesource.com/a%2fb/c",
+		},
+		{
+			base: "https://go.googlesource.com/a%2fb",
+			elem: []string{"c%2fd"},
+			out:  "https://go.googlesource.com/a%2fb/c%2fd",
+		},
+		{
 			base: "/",
 			elem: nil,
 			out:  "/",
diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
index 847a7813..b5cc4b8 100644
--- a/src/path/filepath/match.go
+++ b/src/path/filepath/match.go
@@ -240,6 +240,16 @@
 // The only possible returned error is ErrBadPattern, when pattern
 // is malformed.
 func Glob(pattern string) (matches []string, err error) {
+	return globWithLimit(pattern, 0)
+}
+
+func globWithLimit(pattern string, depth int) (matches []string, err error) {
+	// This limit is used to prevent stack exhaustion issues. See CVE-2022-30632.
+	const pathSeparatorsLimit = 10000
+	if depth == pathSeparatorsLimit {
+		return nil, ErrBadPattern
+	}
+
 	// Check pattern is well-formed.
 	if _, err := Match(pattern, ""); err != nil {
 		return nil, err
@@ -269,7 +279,7 @@
 	}
 
 	var m []string
-	m, err = Glob(dir)
+	m, err = globWithLimit(dir, depth+1)
 	if err != nil {
 		return
 	}
diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
index 375c41a..d628259 100644
--- a/src/path/filepath/match_test.go
+++ b/src/path/filepath/match_test.go
@@ -155,6 +155,16 @@
 	}
 }
 
+func TestCVE202230632(t *testing.T) {
+	// Prior to CVE-2022-30632, this would cause a stack exhaustion given a
+	// large number of separators (more than 4,000,000). There is now a limit
+	// of 10,000.
+	_, err := Glob("/*" + strings.Repeat("/", 10001))
+	if err != ErrBadPattern {
+		t.Fatalf("Glob returned err=%v, want ErrBadPattern", err)
+	}
+}
+
 func TestGlobError(t *testing.T) {
 	bad := []string{`[]`, `nonexist/[]`}
 	for _, pattern := range bad {
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 7c78590..1f484fb 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -173,10 +173,6 @@
 		bytesAllocated := slotsUsed * int64(s.elemsize)
 		gcController.totalAlloc.Add(bytesAllocated)
 
-		// Update heapLive and flush scanAlloc.
-		gcController.update(bytesAllocated, int64(c.scanAlloc))
-		c.scanAlloc = 0
-
 		// Clear the second allocCount just to be safe.
 		s.allocCountBeforeCache = 0
 	}
@@ -198,6 +194,23 @@
 	// Store the current alloc count for accounting later.
 	s.allocCountBeforeCache = s.allocCount
 
+	// Update heapLive and flush scanAlloc.
+	//
+	// We have not yet allocated anything new into the span, but we
+	// assume that all of its slots will get used, so this makes
+	// heapLive an overestimate.
+	//
+	// When the span gets uncached, we'll fix up this overestimate
+	// if necessary (see releaseAll).
+	//
+	// We pick an overestimate here because an underestimate leads
+	// the pacer to believe that it's in better shape than it is,
+	// which appears to lead to more memory used. See #53738 for
+	// more details.
+	usedBytes := uintptr(s.allocCount) * s.elemsize
+	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
+	c.scanAlloc = 0
+
 	c.alloc[spc] = s
 }
 
@@ -247,6 +260,8 @@
 	scanAlloc := int64(c.scanAlloc)
 	c.scanAlloc = 0
 
+	sg := mheap_.sweepgen
+	dHeapLive := int64(0)
 	for i := range c.alloc {
 		s := c.alloc[i]
 		if s != &emptymspan {
@@ -262,6 +277,15 @@
 			// We assumed earlier that the full span gets allocated.
 			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))
 
+			if s.sweepgen != sg+1 {
+				// refill conservatively counted unallocated slots in gcController.heapLive.
+				// Undo this.
+				//
+				// If this span was cached before sweep, then gcController.heapLive was totally
+				// recomputed since caching this span, so we don't do this for stale spans.
+				dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+			}
+
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
 			c.alloc[i] = &emptymspan
@@ -277,8 +301,8 @@
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
 
-	// Updated heapScan.
-	gcController.update(0, scanAlloc)
+	// Update heapLive and heapScan.
+	gcController.update(dHeapLive, scanAlloc)
 }
 
 // prepareForSweep flushes c if the system has entered a new sweep phase
diff --git a/src/runtime/race/README b/src/runtime/race/README
index eb18ad6..ad8f55f 100644
--- a/src/runtime/race/README
+++ b/src/runtime/race/README
@@ -4,9 +4,9 @@
 
 To update the .syso files use golang.org/x/build/cmd/racebuild.
 
-race_darwin_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_freebsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
-race_linux_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
+race_darwin_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
+race_freebsd_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
+race_linux_amd64.syso built with LLVM 127e59048cd3d8dbb80c14b3036918c114089529 and Go 59ab6f351a370a27458755dc69f4a837e55a05a6.
 race_linux_ppc64le.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
 race_netbsd_amd64.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8.
 race_windows_amd64.syso built with LLVM 89f7ccea6f6488c443655880229c54db1f180153 and Go f62d3202bf9dbb3a00ad2a2c63ff4fa4188c5d3b.
diff --git a/src/runtime/race/race_darwin_amd64.syso b/src/runtime/race/race_darwin_amd64.syso
index dde17ad..e5d848c 100644
--- a/src/runtime/race/race_darwin_amd64.syso
+++ b/src/runtime/race/race_darwin_amd64.syso
Binary files differ
diff --git a/src/runtime/race/race_freebsd_amd64.syso b/src/runtime/race/race_freebsd_amd64.syso
index 8be9ff7..b3a4383 100644
--- a/src/runtime/race/race_freebsd_amd64.syso
+++ b/src/runtime/race/race_freebsd_amd64.syso
Binary files differ
diff --git a/src/runtime/race/race_linux_amd64.syso b/src/runtime/race/race_linux_amd64.syso
index a23064e..6885610 100644
--- a/src/runtime/race/race_linux_amd64.syso
+++ b/src/runtime/race/race_linux_amd64.syso
Binary files differ
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 197683b..49147ff 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -1120,7 +1120,7 @@
 // system (that is, the finalizer goroutine) is considered a user
 // goroutine.
 func isSystemGoroutine(gp *g, fixed bool) bool {
-	// Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
+	// Keep this in sync with internal/trace.IsSystemGoroutine.
 	f := findfunc(gp.startpc)
 	if !f.valid() {
 		return false
diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go
index ede8247..554aad4 100644
--- a/src/syscall/exec_linux.go
+++ b/src/syscall/exec_linux.go
@@ -43,8 +43,8 @@
 	// the descriptor of the controlling TTY.
 	// Unlike Setctty, in this case Ctty must be a descriptor
 	// number in the parent process.
-	Foreground   bool
-	Pgid         int            // Child's process group ID if Setpgid.
+	Foreground bool
+	Pgid       int // Child's process group ID if Setpgid.
 	// Pdeathsig, if non-zero, is a signal that the kernel will send to
 	// the child process when the creating thread dies. Note that the signal
 	// is sent on thread termination, which may happen before process termination.
diff --git a/test/fixedbugs/issue53635.go b/test/fixedbugs/issue53635.go
new file mode 100644
index 0000000..bea5493
--- /dev/null
+++ b/test/fixedbugs/issue53635.go
@@ -0,0 +1,31 @@
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	f[int]()
+}
+
+func f[T any]() {
+	switch []T(nil) {
+	case nil:
+	default:
+		panic("FAIL")
+	}
+
+	switch (func() T)(nil) {
+	case nil:
+	default:
+		panic("FAIL")
+	}
+
+	switch (map[int]T)(nil) {
+	case nil:
+	default:
+		panic("FAIL")
+	}
+}
diff --git a/test/run.go b/test/run.go
index 8934e23..cb1622c 100644
--- a/test/run.go
+++ b/test/run.go
@@ -1966,7 +1966,6 @@
 var go118Failures = setOf(
 	"typeparam/nested.go",     // 1.18 compiler doesn't support function-local types with generics
 	"typeparam/issue51521.go", // 1.18 compiler produces bad panic message and link error
-	"typeparam/issue53419.go", // 1.18 compiler mishandles generic selector resolution
 )
 
 // In all of these cases, the 1.17 compiler reports reasonable errors, but either the