[dev.boringcrypto.go1.17] all: merge go1.17.12 into dev.boringcrypto.go1.17
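
This merge brings in the go1.17.12 release. It includes the stack
exhaustion limits in compress/gzip, encoding/gob, encoding/xml,
go/parser, io/fs, and path/filepath, stricter Transfer-Encoding handling
and nil-preserving Header.Clone in net/http, compiler and DWARF support
for 16-byte (TypeInt128) SSA values, runtime heap-statistics and
metrics-locking fixes, race instrumentation for Windows callback
registration, and a cmd/go fix for git configs that set log.decorate.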

Change-Id: I652dbbb5721fa0c7be09be34b9ac1a987c331f6d
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index ca5f36e..52f4937 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -78,6 +78,8 @@
 			return x86.AMOVL
 		case 8:
 			return x86.AMOVQ
+		case 16:
+			return x86.AMOVUPS
 		}
 	}
 	panic(fmt.Sprintf("bad store type %v", t))
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index 0e22b61..3ecadb9 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -91,6 +91,11 @@
 				continue
 			}
 			apdecls = append(apdecls, n)
+			if n.Type().Kind() == types.TSSA {
+				// Can happen for TypeInt128 types. This only happens for
+				// spill locations, so not a huge deal.
+				continue
+			}
 			fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
 		}
 	}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index f0e695a..f2c2388 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -625,6 +625,12 @@
 		}
 		return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
 
+	case TSSA:
+		if t != TypeInt128 {
+			base.Fatalf("PtrDataSize: unexpected ssa type %v", t)
+		}
+		return 0
+
 	default:
 		base.Fatalf("PtrDataSize: unexpected type, %v", t)
 		return 0
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 1a9aa69..15d40e7 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1661,6 +1661,11 @@
 	TypeResultMem = newResults([]*Type{TypeMem})
 )
 
+func init() {
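+	// TypeInt128 is a compiler-internal SSA type (kind TSSA) that can show
+	// up in 16-byte spill slots; give it a concrete size and alignment so
+	// that layout and DWARF generation can handle those slots.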
+	TypeInt128.Width = 16
+	TypeInt128.Align = 8
+}
+
 // NewNamed returns a new named type for the given type name. obj should be an
 // ir.Name. The new type is incomplete (marked as TFORW kind), and the underlying
 // type should be set later via SetUnderlying(). References to the type are
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index beb7c46..4f1d556 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -1140,9 +1140,9 @@
 	cmd := t.addCmd(dt, "misc/cgo/test", t.goTest())
 	setEnv(cmd, "GOFLAGS", "-ldflags=-linkmode=auto")
 
-	// Skip internal linking cases on linux/arm64 to support GCC-9.4 and above.
+	// Skip internal linking cases on arm64 to support GCC-9.4 and above.
 	// See issue #39466.
-	skipInternalLink := goarch == "arm64" && goos == "linux"
+	skipInternalLink := goarch == "arm64" && goos != "darwin"
 
 	if t.internalLink() && !skipInternalLink {
 		cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal")
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index 4d4964e..55aecd0 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -440,7 +440,7 @@
 // statLocal returns a RevInfo describing rev in the local git repository.
 // It uses version as info.Version.
 func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) {
-	out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "-n1", "--format=format:%H %ct %D", rev, "--")
+	out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--")
 	if err != nil {
 		return nil, &UnknownRevisionError{Rev: rev}
 	}
diff --git a/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt
new file mode 100644
index 0000000..3b19acc
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt
@@ -0,0 +1,28 @@
+env GO111MODULE=on
+
+[!net] skip
+[!exec:git] skip
+
+env GOPROXY=direct
+env HOME=$WORK/home/gopher
+
+
+go env GOPROXY
+stdout 'direct'
+
+exec git config --get log.decorate
+stdout 'full'
+
+# Test that the go command works even when the user's global git config
+# ('~/.gitconfig') sets log.decorate=full.
+# With go1.16.14, `go mod download` failed with 'v1.x.y is not a tag', and
+# `go list -m vcs-test.golang.org/git/gitrepo1.git@v1.2.3`
+# failed with:
+# go list -m: vcs-test.golang.org/git/gitrepo1.git@v1.2.3: invalid version: unknown revision v1.2.3
+# See golang/go#51312.
+go list -m vcs-test.golang.org/git/gitrepo1.git@v1.2.3
+stdout 'vcs-test.golang.org/git/gitrepo1.git v1.2.3'
+
+-- $WORK/home/gopher/.gitconfig --
+[log]
+	decorate = full
\ No newline at end of file
diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
index 924bce1..237b2b9 100644
--- a/src/compress/gzip/gunzip.go
+++ b/src/compress/gzip/gunzip.go
@@ -248,42 +248,40 @@
 		return 0, z.err
 	}
 
-	n, z.err = z.decompressor.Read(p)
-	z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
-	z.size += uint32(n)
-	if z.err != io.EOF {
-		// In the normal case we return here.
-		return n, z.err
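+	// Loop, rather than recurse as the previous implementation did, so
+	// that a long run of empty gzip streams cannot exhaust the stack
+	// (CVE-2022-30631).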
+	for n == 0 {
+		n, z.err = z.decompressor.Read(p)
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+		z.size += uint32(n)
+		if z.err != io.EOF {
+			// In the normal case we return here.
+			return n, z.err
+		}
+
+		// Finished file; check checksum and size.
+		if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+			z.err = noEOF(err)
+			return n, z.err
+		}
+		digest := le.Uint32(z.buf[:4])
+		size := le.Uint32(z.buf[4:8])
+		if digest != z.digest || size != z.size {
+			z.err = ErrChecksum
+			return n, z.err
+		}
+		z.digest, z.size = 0, 0
+
+		// File is ok; check if there is another.
+		if !z.multistream {
+			return n, io.EOF
+		}
+		z.err = nil // Remove io.EOF
+
+		if _, z.err = z.readHeader(); z.err != nil {
+			return n, z.err
+		}
 	}
 
-	// Finished file; check checksum and size.
-	if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
-		z.err = noEOF(err)
-		return n, z.err
-	}
-	digest := le.Uint32(z.buf[:4])
-	size := le.Uint32(z.buf[4:8])
-	if digest != z.digest || size != z.size {
-		z.err = ErrChecksum
-		return n, z.err
-	}
-	z.digest, z.size = 0, 0
-
-	// File is ok; check if there is another.
-	if !z.multistream {
-		return n, io.EOF
-	}
-	z.err = nil // Remove io.EOF
-
-	if _, z.err = z.readHeader(); z.err != nil {
-		return n, z.err
-	}
-
-	// Read from next file, if necessary.
-	if n > 0 {
-		return n, nil
-	}
-	return z.Read(p)
+	return n, nil
 }
 
 // Close closes the Reader. It does not close the underlying io.Reader.
diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
index 17c23e8..6fe8ddc 100644
--- a/src/compress/gzip/gunzip_test.go
+++ b/src/compress/gzip/gunzip_test.go
@@ -515,3 +515,19 @@
 		}
 	}
 }
+
+func TestCVE202230631(t *testing.T) {
+	var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
+		0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	r := bytes.NewReader(bytes.Repeat(empty, 4e6))
+	z, err := NewReader(r)
+	if err != nil {
+		t.Fatalf("NewReader: got %v, want nil", err)
+	}
+	// Prior to the CVE-2022-30631 fix, this would cause an unrecoverable
+	// panic due to stack exhaustion.
+	_, err = z.Read(make([]byte, 10))
+	if err != io.EOF {
+		t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
+	}
+}
diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index d2f6c74..0e0ec75 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -871,8 +871,13 @@
 	return &op
 }
 
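+// maxIgnoreNestingDepth bounds recursion while building the "ignore" ops
+// for deeply nested wire types. It is a variable rather than a constant
+// so that tests can lower it (see TestIgnoreDepthLimit).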
+var maxIgnoreNestingDepth = 10000
+
 // decIgnoreOpFor returns the decoding op for a field that has no destination.
-func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
+func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
+	if depth > maxIgnoreNestingDepth {
+		error_(errors.New("invalid nesting depth"))
+	}
 	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
 	// Return the pointer to the op we're already building.
 	if opPtr := inProgress[wireId]; opPtr != nil {
@@ -896,7 +901,7 @@
 			errorf("bad data: undefined type %s", wireId.string())
 		case wire.ArrayT != nil:
 			elemId := wire.ArrayT.Elem
-			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+			elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 			op = func(i *decInstr, state *decoderState, value reflect.Value) {
 				state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
 			}
@@ -904,15 +909,15 @@
 		case wire.MapT != nil:
 			keyId := dec.wireType[wireId].MapT.Key
 			elemId := dec.wireType[wireId].MapT.Elem
-			keyOp := dec.decIgnoreOpFor(keyId, inProgress)
-			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+			keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
+			elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 			op = func(i *decInstr, state *decoderState, value reflect.Value) {
 				state.dec.ignoreMap(state, *keyOp, *elemOp)
 			}
 
 		case wire.SliceT != nil:
 			elemId := wire.SliceT.Elem
-			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
+			elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
 			op = func(i *decInstr, state *decoderState, value reflect.Value) {
 				state.dec.ignoreSlice(state, *elemOp)
 			}
@@ -1073,7 +1078,7 @@
 func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
 	engine := new(decEngine)
 	engine.instr = make([]decInstr, 1) // one item
-	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
+	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
 	ovfl := overflow(dec.typeString(remoteId))
 	engine.instr[0] = decInstr{*op, 0, nil, ovfl}
 	engine.numInstr = 1
@@ -1118,7 +1123,7 @@
 		localField, present := srt.FieldByName(wireField.Name)
 		// TODO(r): anonymous names
 		if !present || !isExported(wireField.Name) {
-			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
+			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
 			engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
 			continue
 		}
diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
index 6d2c8db..1b52ecc 100644
--- a/src/encoding/gob/gobencdec_test.go
+++ b/src/encoding/gob/gobencdec_test.go
@@ -12,6 +12,7 @@
 	"fmt"
 	"io"
 	"net"
+	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -796,3 +797,26 @@
 		t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
 	}
 }
+
+func TestIgnoreDepthLimit(t *testing.T) {
+	// We don't test the actual depth limit because it requires building an
+	// extremely large message, which takes quite a while.
+	oldNestingDepth := maxIgnoreNestingDepth
+	maxIgnoreNestingDepth = 100
+	defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
+	b := new(bytes.Buffer)
+	enc := NewEncoder(b)
+	typ := reflect.TypeOf(int(0))
+	nested := reflect.ArrayOf(1, typ)
+	for i := 0; i < 100; i++ {
+		nested = reflect.ArrayOf(1, nested)
+	}
+	badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
+	enc.Encode(badStruct.Interface())
+	dec := NewDecoder(b)
+	var output struct{ Hello int }
+	expectedErr := "invalid nesting depth"
+	if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
+		t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
+	}
+}
diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
index ef5df3f..c775798 100644
--- a/src/encoding/xml/read.go
+++ b/src/encoding/xml/read.go
@@ -148,7 +148,7 @@
 	if val.Kind() != reflect.Ptr {
 		return errors.New("non-pointer passed to Unmarshal")
 	}
-	return d.unmarshal(val.Elem(), start)
+	return d.unmarshal(val.Elem(), start, 0)
 }
 
 // An UnmarshalError represents an error in the unmarshaling process.
@@ -304,8 +304,15 @@
 	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
 )
 
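+// maxUnmarshalDepth bounds recursion in unmarshal so that a deeply nested
+// document produces an error instead of exhausting the stack.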
+const maxUnmarshalDepth = 10000
+
+var errExceededMaxUnmarshalDepth = errors.New("exceeded max depth")
+
 // Unmarshal a single XML element into val.
-func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
+	if depth >= maxUnmarshalDepth {
+		return errExceededMaxUnmarshalDepth
+	}
 	// Find start element if we need it.
 	if start == nil {
 		for {
@@ -398,7 +405,7 @@
 		v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem())))
 
 		// Recur to read element into slice.
-		if err := d.unmarshal(v.Index(n), start); err != nil {
+		if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
 			v.SetLen(n)
 			return err
 		}
@@ -521,13 +528,15 @@
 		case StartElement:
 			consumed := false
 			if sv.IsValid() {
-				consumed, err = d.unmarshalPath(tinfo, sv, nil, &t)
+				// unmarshalPath can call unmarshal, so we need to pass the depth
+				// through to keep enforcing the maximum recursion limit.
+				consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
 				if err != nil {
 					return err
 				}
 				if !consumed && saveAny.IsValid() {
 					consumed = true
-					if err := d.unmarshal(saveAny, &t); err != nil {
+					if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
 						return err
 					}
 				}
@@ -672,7 +681,7 @@
 // The consumed result tells whether XML elements have been consumed
 // from the Decoder until start's matching end element, or if it's
 // still untouched because start is uninteresting for sv's fields.
-func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
+func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
 	recurse := false
 Loop:
 	for i := range tinfo.fields {
@@ -687,7 +696,7 @@
 		}
 		if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
 			// It's a perfect match, unmarshal the field.
-			return true, d.unmarshal(finfo.value(sv, initNilPointers), start)
+			return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1)
 		}
 		if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
 			// It's a prefix for the field. Break and recurse
@@ -716,7 +725,9 @@
 		}
 		switch t := tok.(type) {
 		case StartElement:
-			consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t)
+			// The recursion depth of unmarshalPath is limited to the path length
+			// specified by the struct field tag, so we don't increment the depth here.
+			consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
 			if err != nil {
 				return true, err
 			}
@@ -732,12 +743,12 @@
 }
 
 // Skip reads tokens until it has consumed the end element
-// matching the most recent start element already consumed.
-// It recurs if it encounters a start element, so it can be used to
-// skip nested structures.
+// matching the most recent start element already consumed,
+// skipping nested structures.
 // It returns nil if it finds an end element matching the start
 // element; otherwise it returns an error describing the problem.
 func (d *Decoder) Skip() error {
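+	// Count nesting with an explicit depth counter rather than recursing,
+	// so that deeply nested documents cannot exhaust the stack.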
+	var depth int64
 	for {
 		tok, err := d.Token()
 		if err != nil {
@@ -745,11 +756,12 @@
 		}
 		switch tok.(type) {
 		case StartElement:
-			if err := d.Skip(); err != nil {
-				return err
-			}
+			depth++
 		case EndElement:
-			return nil
+			if depth == 0 {
+				return nil
+			}
+			depth--
 		}
 	}
 }
diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
index 8c2e70f..8c940ae 100644
--- a/src/encoding/xml/read_test.go
+++ b/src/encoding/xml/read_test.go
@@ -5,8 +5,11 @@
 package xml
 
 import (
+	"bytes"
+	"errors"
 	"io"
 	"reflect"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
@@ -1079,3 +1082,32 @@
 		t.Fatalf("whitespace attrs: Unmarshal:\nhave: %#+v\nwant: %#+v", v, want)
 	}
 }
+
+func TestCVE202230633(t *testing.T) {
+	if runtime.GOARCH == "wasm" {
+		t.Skip("causes memory exhaustion on js/wasm")
+	}
+	defer func() {
+		p := recover()
+		if p != nil {
+			t.Fatal("Unmarshal panicked")
+		}
+	}()
+	var example struct {
+		Things []string
+	}
+	Unmarshal(bytes.Repeat([]byte("<a>"), 17_000_000), &example)
+}
+
+func TestCVE202228131(t *testing.T) {
+	type nested struct {
+		Parent *nested `xml:",any"`
+	}
+	var n nested
+	err := Unmarshal(bytes.Repeat([]byte("<a>"), maxUnmarshalDepth+1), &n)
+	if err == nil {
+		t.Fatal("Unmarshal did not fail")
+	} else if !errors.Is(err, errExceededMaxUnmarshalDepth) {
+		t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExceededMaxUnmarshalDepth)
+	}
+}
diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
index 85486d2..eae429e 100644
--- a/src/go/parser/interface.go
+++ b/src/go/parser/interface.go
@@ -97,8 +97,11 @@
 	defer func() {
 		if e := recover(); e != nil {
 			// resume same panic if it's not a bailout
-			if _, ok := e.(bailout); !ok {
+			bail, ok := e.(bailout)
+			if !ok {
 				panic(e)
+			} else if bail.msg != "" {
+				p.errors.Add(p.file.Position(bail.pos), bail.msg)
 			}
 		}
 
@@ -203,8 +206,11 @@
 	defer func() {
 		if e := recover(); e != nil {
 			// resume same panic if it's not a bailout
-			if _, ok := e.(bailout); !ok {
+			bail, ok := e.(bailout)
+			if !ok {
 				panic(e)
+			} else if bail.msg != "" {
+				p.errors.Add(p.file.Position(bail.pos), bail.msg)
 			}
 		}
 		p.errors.Sort()
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index f10c865..2c42b9f 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -60,6 +60,10 @@
 	inRhs   bool // if set, the parser is parsing a rhs expression
 
 	imports []*ast.ImportSpec // list of imports
+
+	// nestLev is used to track and limit the recursion depth
+	// during parsing.
+	nestLev int
 }
 
 func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
@@ -110,6 +114,24 @@
 	p.printTrace(")")
 }
 
+// maxNestLev is the deepest we're willing to recurse during parsing.
+const maxNestLev int = 1e5
+
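+// incNestLev increments the parser's nesting level and bails out with a
+// syntax error once maxNestLev is exceeded. It returns p so that callers
+// can write: defer decNestLev(incNestLev(p)).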
+func incNestLev(p *parser) *parser {
+	p.nestLev++
+	if p.nestLev > maxNestLev {
+		p.error(p.pos, "exceeded max nesting depth")
+		panic(bailout{})
+	}
+	return p
+}
+
+// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
+// It is used along with incNestLev in a similar fashion to how un and trace are used.
+func decNestLev(p *parser) {
+	p.nestLev--
+}
+
 // Advance to the next token.
 func (p *parser) next0() {
 	// Because of one-token look-ahead, print the previous token
@@ -222,8 +244,12 @@
 	}
 }
 
-// A bailout panic is raised to indicate early termination.
-type bailout struct{}
+// A bailout panic is raised to indicate early termination. pos and msg are
+// only populated when bailing out of object resolution.
+type bailout struct {
+	pos token.Pos
+	msg string
+}
 
 func (p *parser) error(pos token.Pos, msg string) {
 	if p.trace {
@@ -1119,6 +1145,8 @@
 }
 
 func (p *parser) tryIdentOrType() ast.Expr {
+	defer decNestLev(incNestLev(p))
+
 	switch p.tok {
 	case token.IDENT:
 		typ := p.parseTypeName(nil)
@@ -1531,7 +1559,13 @@
 	}
 
 	x = p.parseOperand()
-	for {
+	// We track the nesting here rather than at the entry for the function,
+	// since it can iteratively produce a nested output, and we want to
+	// limit how deep a structure we generate.
+	var n int
+	defer func() { p.nestLev -= n }()
+	for n = 1; ; n++ {
+		incNestLev(p)
 		switch p.tok {
 		case token.PERIOD:
 			p.next()
@@ -1591,6 +1625,8 @@
 }
 
 func (p *parser) parseUnaryExpr() ast.Expr {
+	defer decNestLev(incNestLev(p))
+
 	if p.trace {
 		defer un(trace(p, "UnaryExpr"))
 	}
@@ -1673,7 +1709,13 @@
 	}
 
 	x := p.parseUnaryExpr()
-	for {
+	// We track the nesting here rather than at the entry for the function,
+	// since it can iteratively produce a nested output, and we want to
+	// limit how deep a structure we generate.
+	var n int
+	defer func() { p.nestLev -= n }()
+	for n = 1; ; n++ {
+		incNestLev(p)
 		op, oprec := p.tokPrec()
 		if oprec < prec1 {
 			return x
@@ -1962,6 +2004,8 @@
 }
 
 func (p *parser) parseIfStmt() *ast.IfStmt {
+	defer decNestLev(incNestLev(p))
+
 	if p.trace {
 		defer un(trace(p, "IfStmt"))
 	}
@@ -2265,6 +2309,8 @@
 }
 
 func (p *parser) parseStmt() (s ast.Stmt) {
+	defer decNestLev(incNestLev(p))
+
 	if p.trace {
 		defer un(trace(p, "Statement"))
 	}
diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
index a4f882d..1a46c87 100644
--- a/src/go/parser/parser_test.go
+++ b/src/go/parser/parser_test.go
@@ -10,6 +10,7 @@
 	"go/ast"
 	"go/token"
 	"io/fs"
+	"runtime"
 	"strings"
 	"testing"
 )
@@ -577,3 +578,171 @@
 		t.Errorf("got %q, want %q", comment, "// comment")
 	}
 }
+
+var parseDepthTests = []struct {
+	name   string
+	format string
+	// parseMultiplier is used when a single statement may result in more than
+	// one change in the depth level; for instance "1+(..." produces a
+	// BinaryExpr followed by a UnaryExpr, which increments the depth twice.
+	// The test case comment explains which nodes trigger the multiple depth
+	// changes.
+	parseMultiplier int
+	// scope is true if we should also test the statement for the resolver scope
+	// depth limit.
+	scope bool
+	// scopeMultiplier does the same as parseMultiplier, but for the scope
+	// depths.
+	scopeMultiplier int
+}{
+	// The format expands the part inside « » many times.
+	// A second set of brackets nested inside the first stops the repetition,
+	// so that for example «(«1»)» expands to (((...((((1))))...))).
+	{name: "array", format: "package main; var x «[1]»int"},
+	{name: "slice", format: "package main; var x «[]»int"},
+	{name: "struct", format: "package main; var x «struct { X «int» }»", scope: true},
+	{name: "pointer", format: "package main; var x «*»int"},
+	{name: "func", format: "package main; var x «func()»int", scope: true},
+	{name: "chan", format: "package main; var x «chan »int"},
+	{name: "chan2", format: "package main; var x «<-chan »int"},
+	{name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType
+	{name: "map", format: "package main; var x «map[int]»int"},
+	{name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2},             // Parser nodes: UnaryExpr, CompositeLit
+	{name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2},         // Parser nodes: UnaryExpr, CompositeLit
+	{name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
+	{name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2},    // Parser nodes: CompositeLit, KeyValueExpr
+	{name: "dot", format: "package main; var x = «x.»x"},
+	{name: "index", format: "package main; var x = x«[1]»"},
+	{name: "slice", format: "package main; var x = x«[1:2]»"},
+	{name: "slice3", format: "package main; var x = x«[1:2:3]»"},
+	{name: "dottype", format: "package main; var x = x«.(any)»"},
+	{name: "callseq", format: "package main; var x = x«()»"},
+	{name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr
+	{name: "binary", format: "package main; var x = «1+»1"},
+	{name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr
+	{name: "unary", format: "package main; var x = «^»1"},
+	{name: "addr", format: "package main; var x = «& »x"},
+	{name: "star", format: "package main; var x = «*»x"},
+	{name: "recv", format: "package main; var x = «<-»x"},
+	{name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2},    // Parser nodes: Ident, CallExpr
+	{name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr
+	{name: "label", format: "package main; func main() { «Label:» }"},
+	{name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. Scopes: IfStmt, BlockStmt
+	{name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true},
+	{name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2},               // Scopes: TypeSwitchStmt, CaseClause
+	{name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
+	{name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2},                            // Scopes: ForStmt, BlockStmt
+	{name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2},                          // Scopes: ForStmt, BlockStmt
+	{name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2},              // Scopes: ForStmt, BlockStmt
+	{name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2},               // Scopes: RangeStmt, BlockStmt
+	{name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2},           // Scopes: RangeStmt, BlockStmt
+	{name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2},        // Scopes: RangeStmt, BlockStmt
+	{name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 2, scope: true},                      // Parser nodes: GoStmt, FuncLit
+	{name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true},                // Parser nodes: DeferStmt, FuncLit
+	{name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true},
+}
+
+// split splits pre«mid»post into pre, mid, post.
+// If the string does not have that form, split returns x, "", "".
+func split(x string) (pre, mid, post string) {
+	start, end := strings.Index(x, "«"), strings.LastIndex(x, "»")
+	if start < 0 || end < 0 {
+		return x, "", ""
+	}
+	return x[:start], x[start+len("«") : end], x[end+len("»"):]
+}
+
+func TestParseDepthLimit(t *testing.T) {
+	if runtime.GOARCH == "wasm" {
+		t.Skip("causes call stack exhaustion on js/wasm")
+	}
+	for _, tt := range parseDepthTests {
+		for _, size := range []string{"small", "big"} {
+			t.Run(tt.name+"/"+size, func(t *testing.T) {
+				n := maxNestLev + 1
+				if tt.parseMultiplier > 0 {
+					n /= tt.parseMultiplier
+				}
+				if size == "small" {
+					// Decrease the number of statements by 10, in order to check
+					// that we do not fail when under the limit. 10 is used to
+					// provide some wiggle room for cases where the surrounding
+					// scaffolding syntax adds some noise to the depth that changes
+					// on a per testcase basis.
+					n -= 10
+				}
+
+				pre, mid, post := split(tt.format)
+				if strings.Contains(mid, "«") {
+					left, base, right := split(mid)
+					mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
+				} else {
+					mid = strings.Repeat(mid, n)
+				}
+				input := pre + mid + post
+
+				fset := token.NewFileSet()
+				_, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution)
+				if size == "small" {
+					if err != nil {
+						t.Errorf("ParseFile(...): %v (want success)", err)
+					}
+				} else {
+					expected := "exceeded max nesting depth"
+					if err == nil || !strings.HasSuffix(err.Error(), expected) {
+						t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
+					}
+				}
+			})
+		}
+	}
+}
+
+func TestScopeDepthLimit(t *testing.T) {
+	if runtime.GOARCH == "wasm" {
+		t.Skip("causes call stack exhaustion on js/wasm")
+	}
+	for _, tt := range parseDepthTests {
+		if !tt.scope {
+			continue
+		}
+		for _, size := range []string{"small", "big"} {
+			t.Run(tt.name+"/"+size, func(t *testing.T) {
+				n := maxScopeDepth + 1
+				if tt.scopeMultiplier > 0 {
+					n /= tt.scopeMultiplier
+				}
+				if size == "small" {
+					// Decrease the number of statements by 10, in order to check
+					// that we do not fail when under the limit. 10 is used to
+					// provide some wiggle room for cases where the surrounding
+					// scaffolding syntax adds some noise to the depth that changes
+					// on a per testcase basis.
+					n -= 10
+				}
+
+				pre, mid, post := split(tt.format)
+				if strings.Contains(mid, "«") {
+					left, base, right := split(mid)
+					mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
+				} else {
+					mid = strings.Repeat(mid, n)
+				}
+				input := pre + mid + post
+
+				fset := token.NewFileSet()
+				_, err := ParseFile(fset, "", input, DeclarationErrors)
+				if size == "small" {
+					if err != nil {
+						t.Errorf("ParseFile(...): %v (want success)", err)
+					}
+				} else {
+					expected := "exceeded max scope depth during object resolution"
+					if err == nil || !strings.HasSuffix(err.Error(), expected) {
+						t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
+					}
+				}
+			})
+		}
+	}
+}
diff --git a/src/go/parser/resolver.go b/src/go/parser/resolver.go
index cf92c7e..f55bdb7 100644
--- a/src/go/parser/resolver.go
+++ b/src/go/parser/resolver.go
@@ -25,6 +25,7 @@
 		declErr:  declErr,
 		topScope: pkgScope,
 		pkgScope: pkgScope,
+		depth:    1,
 	}
 
 	for _, decl := range file.Decls {
@@ -53,6 +54,8 @@
 	file.Unresolved = r.unresolved[0:i]
 }
 
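+// maxScopeDepth limits how deeply scopes may nest during object
+// resolution before the resolver bails out.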
+const maxScopeDepth int = 1e3
+
 type resolver struct {
 	handle  *token.File
 	declErr func(token.Pos, string)
@@ -61,6 +64,7 @@
 	pkgScope   *ast.Scope   // pkgScope.Outer == nil
 	topScope   *ast.Scope   // top-most scope; may be pkgScope
 	unresolved []*ast.Ident // unresolved identifiers
+	depth      int          // scope depth
 
 	// Label scopes
 	// (maintained by open/close LabelScope)
@@ -83,6 +87,10 @@
 }
 
 func (r *resolver) openScope(pos token.Pos) {
+	r.depth++
+	if r.depth > maxScopeDepth {
+		panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"})
+	}
 	if debugResolve {
 		r.dump("opening scope @%v", pos)
 	}
@@ -90,6 +98,7 @@
 }
 
 func (r *resolver) closeScope() {
+	r.depth--
 	if debugResolve {
 		r.dump("closing scope")
 	}
diff --git a/src/io/fs/glob.go b/src/io/fs/glob.go
index 45d9cb6..0e529cd 100644
--- a/src/io/fs/glob.go
+++ b/src/io/fs/glob.go
@@ -31,6 +31,16 @@
 // Otherwise, Glob uses ReadDir to traverse the directory tree
 // and look for matches for the pattern.
 func Glob(fsys FS, pattern string) (matches []string, err error) {
+	return globWithLimit(fsys, pattern, 0)
+}
+
+func globWithLimit(fsys FS, pattern string, depth int) (matches []string, err error) {
+	// This limit is added to prevent stack exhaustion issues. See
+	// CVE-2022-30630.
+	const pathSeparatorsLimit = 10000
+	if depth > pathSeparatorsLimit {
+		return nil, path.ErrBadPattern
+	}
 	if fsys, ok := fsys.(GlobFS); ok {
 		return fsys.Glob(pattern)
 	}
@@ -59,9 +69,9 @@
 	}
 
 	var m []string
-	m, err = Glob(fsys, dir)
+	m, err = globWithLimit(fsys, dir, depth+1)
 	if err != nil {
-		return
+		return nil, err
 	}
 	for _, d := range m {
 		matches, err = glob(fsys, d, file, matches)
diff --git a/src/io/fs/glob_test.go b/src/io/fs/glob_test.go
index f19bebe..d052eab 100644
--- a/src/io/fs/glob_test.go
+++ b/src/io/fs/glob_test.go
@@ -8,6 +8,7 @@
 	. "io/fs"
 	"os"
 	"path"
+	"strings"
 	"testing"
 )
 
@@ -55,6 +56,15 @@
 	}
 }
 
+func TestCVE202230630(t *testing.T) {
+	// Prior to CVE-2022-30630, a stack exhaustion would occur given a large
+	// number of separators. There is now a limit of 10,000.
+	_, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001))
+	if err != path.ErrBadPattern {
+		t.Fatalf("Glob returned err=%v, want %v", err, path.ErrBadPattern)
+	}
+}
+
 // contains reports whether vector contains the string s.
 func contains(vector []string, s string) bool {
 	for _, elem := range vector {
diff --git a/src/net/http/header.go b/src/net/http/header.go
index 4c72dcb..ef4ee7f 100644
--- a/src/net/http/header.go
+++ b/src/net/http/header.go
@@ -101,6 +101,12 @@
 	sv := make([]string, nv) // shared backing array for headers' values
 	h2 := make(Header, len(h))
 	for k, vv := range h {
+		if vv == nil {
+			// Preserve nil values. ReverseProxy distinguishes
+			// between nil and zero-length header values.
+			h2[k] = nil
+			continue
+		}
 		n := copy(sv, vv)
 		h2[k] = sv[:n:n]
 		sv = sv[n:]
diff --git a/src/net/http/header_test.go b/src/net/http/header_test.go
index 4789362..80c0035 100644
--- a/src/net/http/header_test.go
+++ b/src/net/http/header_test.go
@@ -235,6 +235,11 @@
 			in:   Header{"foo": {"bar"}},
 			want: Header{"foo": {"bar"}},
 		},
+		{
+			name: "nil value",
+			in:   Header{"foo": nil},
+			want: Header{"foo": nil},
+		},
 	}
 
 	for _, tt := range tests {
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
index 6394da3..bfac783 100644
--- a/src/net/http/serve_test.go
+++ b/src/net/http/serve_test.go
@@ -6189,6 +6189,7 @@
 		"fugazi",
 		"foo-bar",
 		"unknown",
+		"\rchunked",
 	}
 
 	for _, badTE := range unsupportedTEs {
diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go
index 85c2e5a..3894007 100644
--- a/src/net/http/transfer.go
+++ b/src/net/http/transfer.go
@@ -639,7 +639,7 @@
 	if len(raw) != 1 {
 		return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
 	}
-	if !ascii.EqualFold(textproto.TrimString(raw[0]), "chunked") {
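+	// Do not trim the value before comparing: a header such as
+	// "Transfer-Encoding: \rchunked" must be rejected as unsupported
+	// rather than be normalized to "chunked".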
+	if !ascii.EqualFold(raw[0], "chunked") {
 		return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
 	}
 
diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
index c77a269..55ed1d7 100644
--- a/src/path/filepath/match.go
+++ b/src/path/filepath/match.go
@@ -241,6 +241,16 @@
 // The only possible returned error is ErrBadPattern, when pattern
 // is malformed.
 func Glob(pattern string) (matches []string, err error) {
+	return globWithLimit(pattern, 0)
+}
+
+func globWithLimit(pattern string, depth int) (matches []string, err error) {
+	// This limit is used to prevent stack exhaustion issues. See CVE-2022-30632.
+	const pathSeparatorsLimit = 10000
+	if depth == pathSeparatorsLimit {
+		return nil, ErrBadPattern
+	}
+
 	// Check pattern is well-formed.
 	if _, err := Match(pattern, ""); err != nil {
 		return nil, err
@@ -270,7 +280,7 @@
 	}
 
 	var m []string
-	m, err = Glob(dir)
+	m, err = globWithLimit(dir, depth+1)
 	if err != nil {
 		return
 	}
diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
index 375c41a..d628259 100644
--- a/src/path/filepath/match_test.go
+++ b/src/path/filepath/match_test.go
@@ -155,6 +155,16 @@
 	}
 }
 
+func TestCVE202230632(t *testing.T) {
+	// Prior to CVE-2022-30632, this would cause a stack exhaustion given a
+	// large number of separators (more than 4,000,000). There is now a limit
+	// of 10,000.
+	_, err := Glob("/*" + strings.Repeat("/", 10001))
+	if err != ErrBadPattern {
+		t.Fatalf("Glob returned err=%v, want ErrBadPattern", err)
+	}
+}
+
 func TestGlobError(t *testing.T) {
 	bad := []string{`[]`, `nonexist/[]`}
 	for _, pattern := range bad {
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 03ccfe1..3cee4de 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -321,9 +321,9 @@
 
 	// Initialize the metrics beforehand because this could
 	// allocate and skew the stats.
-	semacquire(&metricsSema)
+	metricsLock()
 	initMetrics()
-	semrelease(&metricsSema)
+	metricsUnlock()
 
 	systemstack(func() {
 		// Read memstats first. It's going to flush
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index a9e9591..9930335 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -175,11 +175,11 @@
 	// Assume all objects from this span will be allocated in the
 	// mcache. If it gets uncached, we'll adjust this.
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
+	atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], int64(s.nelems)-int64(s.allocCount))
 
 	// Flush tinyAllocs.
 	if spc == tinySpanClass {
-		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+		atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
 		c.tinyAllocs = 0
 	}
 	memstats.heapStats.release()
@@ -229,8 +229,8 @@
 		throw("out of memory")
 	}
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
-	atomic.Xadduintptr(&stats.largeAllocCount, 1)
+	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
+	atomic.Xadd64(&stats.largeAllocCount, 1)
 	memstats.heapStats.release()
 
 	// Update gcController.heapLive and revise pacing if needed.
@@ -261,9 +261,9 @@
 		s := c.alloc[i]
 		if s != &emptymspan {
 			// Adjust nsmallalloc in case the span wasn't fully allocated.
-			n := uintptr(s.nelems) - uintptr(s.allocCount)
+			n := int64(s.nelems) - int64(s.allocCount)
 			stats := memstats.heapStats.acquire()
-			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
+			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
 			memstats.heapStats.release()
 			if s.sweepgen != sg+1 {
 				// refill conservatively counted unallocated slots in gcController.heapLive.
@@ -273,7 +273,7 @@
 				// gcController.heapLive was totally recomputed since
 				// caching this span, so we don't do this for
 				// stale spans.
-				atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize))
+				atomic.Xadd64(&gcController.heapLive, -n*int64(s.elemsize))
 			}
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
@@ -286,7 +286,7 @@
 
 	// Flush tinyAllocs.
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
 
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index ba0a920..922dd2f 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -12,9 +12,12 @@
 )
 
 var (
-	// metrics is a map of runtime/metrics keys to
-	// data used by the runtime to sample each metric's
-	// value.
+	// metrics is a map of runtime/metrics keys to data used by the runtime
+	// to sample each metric's value. metricsInit indicates it has been
+	// initialized.
+	//
+	// These fields are protected by metricsSema which should be
+	// locked/unlocked with metricsLock() / metricsUnlock().
 	metricsSema uint32 = 1
 	metricsInit bool
 	metrics     map[string]metricData
@@ -34,6 +37,23 @@
 	compute func(in *statAggregate, out *metricValue)
 }
 
+func metricsLock() {
+	// Acquire the metricsSema but with handoff. Operations are typically
+	// expensive enough that queueing up goroutines and handing off between
+	// them will be noticeably better-behaved.
+	semacquire1(&metricsSema, true, 0, 0)
+	if raceenabled {
+		raceacquire(unsafe.Pointer(&metricsSema))
+	}
+}
+
+func metricsUnlock() {
+	if raceenabled {
+		racerelease(unsafe.Pointer(&metricsSema))
+	}
+	semrelease(&metricsSema)
+}
+
 // initMetrics initializes the metrics map if it hasn't been yet.
 //
 // metricsSema must be held.
@@ -388,13 +408,13 @@
 	memstats.heapStats.read(&a.heapStatsDelta)
 
 	// Calculate derived stats.
-	a.totalAllocs = uint64(a.largeAllocCount)
-	a.totalFrees = uint64(a.largeFreeCount)
-	a.totalAllocated = uint64(a.largeAlloc)
-	a.totalFreed = uint64(a.largeFree)
+	a.totalAllocs = a.largeAllocCount
+	a.totalFrees = a.largeFreeCount
+	a.totalAllocated = a.largeAlloc
+	a.totalFreed = a.largeFree
 	for i := range a.smallAllocCount {
-		na := uint64(a.smallAllocCount[i])
-		nf := uint64(a.smallFreeCount[i])
+		na := a.smallAllocCount[i]
+		nf := a.smallFreeCount[i]
 		a.totalAllocs += na
 		a.totalFrees += nf
 		a.totalAllocated += na * uint64(class_to_size[i])
@@ -546,10 +566,7 @@
 	sl := slice{samplesp, len, cap}
 	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
 
-	// Acquire the metricsSema but with handoff. This operation
-	// is expensive enough that queueing up goroutines and handing
-	// off between them will be noticeably better-behaved.
-	semacquire1(&metricsSema, true, 0, 0)
+	metricsLock()
 
 	// Ensure the map is initialized.
 	initMetrics()
@@ -573,5 +590,5 @@
 		data.compute(&agg, &sample.value)
 	}
 
-	semrelease(&metricsSema)
+	metricsUnlock()
 }
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 8fe3a65..468df4d 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -581,7 +581,7 @@
 			// free slots zeroed.
 			s.needzero = 1
 			stats := memstats.heapStats.acquire()
-			atomic.Xadduintptr(&stats.smallFreeCount[spc.sizeclass()], uintptr(nfreed))
+			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
 			memstats.heapStats.release()
 		}
 		if !preserve {
@@ -628,8 +628,8 @@
 				mheap_.freeSpan(s)
 			}
 			stats := memstats.heapStats.acquire()
-			atomic.Xadduintptr(&stats.largeFreeCount, 1)
-			atomic.Xadduintptr(&stats.largeFree, size)
+			atomic.Xadd64(&stats.largeFreeCount, 1)
+			atomic.Xadd64(&stats.largeFree, int64(size))
 			memstats.heapStats.release()
 			return true
 		}
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index eeb2a7b..94c507b 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -8,7 +8,6 @@
 
 import (
 	"runtime/internal/atomic"
-	"runtime/internal/sys"
 	"unsafe"
 )
 
@@ -565,29 +564,29 @@
 	memstats.heapStats.unsafeRead(&consStats)
 
 	// Collect large allocation stats.
-	totalAlloc := uint64(consStats.largeAlloc)
-	memstats.nmalloc += uint64(consStats.largeAllocCount)
-	totalFree := uint64(consStats.largeFree)
-	memstats.nfree += uint64(consStats.largeFreeCount)
+	totalAlloc := consStats.largeAlloc
+	memstats.nmalloc += consStats.largeAllocCount
+	totalFree := consStats.largeFree
+	memstats.nfree += consStats.largeFreeCount
 
 	// Collect per-sizeclass stats.
 	for i := 0; i < _NumSizeClasses; i++ {
 		// Malloc stats.
-		a := uint64(consStats.smallAllocCount[i])
+		a := consStats.smallAllocCount[i]
 		totalAlloc += a * uint64(class_to_size[i])
 		memstats.nmalloc += a
 		memstats.by_size[i].nmalloc = a
 
 		// Free stats.
-		f := uint64(consStats.smallFreeCount[i])
+		f := consStats.smallFreeCount[i]
 		totalFree += f * uint64(class_to_size[i])
 		memstats.nfree += f
 		memstats.by_size[i].nfree = f
 	}
 
 	// Account for tiny allocations.
-	memstats.nfree += uint64(consStats.tinyAllocCount)
-	memstats.nmalloc += uint64(consStats.tinyAllocCount)
+	memstats.nfree += consStats.tinyAllocCount
+	memstats.nmalloc += consStats.tinyAllocCount
 
 	// Calculate derived stats.
 	memstats.total_alloc = totalAlloc
@@ -703,17 +702,20 @@
 	inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits
 
 	// Allocator stats.
-	tinyAllocCount  uintptr                  // number of tiny allocations
-	largeAlloc      uintptr                  // bytes allocated for large objects
-	largeAllocCount uintptr                  // number of large object allocations
-	smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects
-	largeFree       uintptr                  // bytes freed for large objects (>maxSmallSize)
-	largeFreeCount  uintptr                  // number of frees for large objects (>maxSmallSize)
-	smallFreeCount  [_NumSizeClasses]uintptr // number of frees for small objects (<=maxSmallSize)
+	//
+	// These are all uint64 because they're cumulative, and could quickly wrap
+	// around otherwise.
+	tinyAllocCount  uint64                  // number of tiny allocations
+	largeAlloc      uint64                  // bytes allocated for large objects
+	largeAllocCount uint64                  // number of large object allocations
+	smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
+	largeFree       uint64                  // bytes freed for large objects (>maxSmallSize)
+	largeFreeCount  uint64                  // number of frees for large objects (>maxSmallSize)
+	smallFreeCount  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
 
-	// Add a uint32 to ensure this struct is a multiple of 8 bytes in size.
-	// Only necessary on 32-bit platforms.
-	_ [(sys.PtrSize / 4) % 2]uint32
+	// NOTE: This struct must be a multiple of 8 bytes in size because it
+	// is stored in an array. If it's not, atomic accesses to the above
+	// fields may be unaligned and fail on 32-bit platforms.
 }
 
 // merge adds in the deltas from b into a.
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 94a61c8..30ef72b 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -12,12 +12,30 @@
 
 // cbs stores all registered Go callbacks.
 var cbs struct {
-	lock  mutex
+	lock  mutex // use cbsLock / cbsUnlock for race instrumentation.
 	ctxt  [cb_max]winCallback
 	index map[winCallbackKey]int
 	n     int
 }
 
+func cbsLock() {
+	lock(&cbs.lock)
+	// compileCallback is used by goenvs prior to completion of schedinit.
+	// raceacquire involves a racecallback to get the proc, which is not
+	// safe prior to scheduler initialization. Thus avoid instrumentation
+	// until then.
+	if raceenabled && mainStarted {
+		raceacquire(unsafe.Pointer(&cbs.lock))
+	}
+}
+
+func cbsUnlock() {
+	if raceenabled && mainStarted {
+		racerelease(unsafe.Pointer(&cbs.lock))
+	}
+	unlock(&cbs.lock)
+}
+
 // winCallback records information about a registered Go callback.
 type winCallback struct {
 	fn     *funcval // Go function
@@ -302,11 +320,11 @@
 
 	key := winCallbackKey{(*funcval)(fn.data), cdecl}
 
-	lock(&cbs.lock) // We don't unlock this in a defer because this is used from the system stack.
+	cbsLock()
 
 	// Check if this callback is already registered.
 	if n, ok := cbs.index[key]; ok {
-		unlock(&cbs.lock)
+		cbsUnlock()
 		return callbackasmAddr(n)
 	}
 
@@ -316,7 +334,7 @@
 	}
 	n := cbs.n
 	if n >= len(cbs.ctxt) {
-		unlock(&cbs.lock)
+		cbsUnlock()
 		throw("too many callback functions")
 	}
 	c := winCallback{key.fn, retPop, abiMap}
@@ -324,7 +342,7 @@
 	cbs.index[key] = n
 	cbs.n++
 
-	unlock(&cbs.lock)
+	cbsUnlock()
 	return callbackasmAddr(n)
 }
 
diff --git a/test/fixedbugs/issue53454.go b/test/fixedbugs/issue53454.go
new file mode 100644
index 0000000..8b16d81
--- /dev/null
+++ b/test/fixedbugs/issue53454.go
@@ -0,0 +1,89 @@
+// compile
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type T1 struct {
+	A T5
+	B T2
+	C T7
+	D T4
+}
+
+type T2 struct {
+	T3
+	A float64
+	E float64
+	C float64
+}
+
+type T3 struct {
+	F float64
+	G float64
+	H float64
+	I float64
+	J float64
+	K float64
+	L float64
+}
+
+type T4 struct {
+	M float64
+	N float64
+	O float64
+	P float64
+}
+
+type T5 struct {
+	Q float64
+	R float64
+	S float64
+	T float64
+	U float64
+	V float64
+}
+
+type T6 struct {
+	T9
+	C T10
+}
+
+type T7 struct {
+	T10
+	T11
+}
+
+type T8 struct {
+	T9
+	C T7
+}
+
+type T9 struct {
+	A T5
+	B T3
+	D T4
+}
+
+type T10 struct {
+	W float64
+}
+
+type T11 struct {
+	X float64
+	Y float64
+}
+
+func MainTest(x T1, y T8, z T6) float64 {
+	return Test(x.B, x.A, x.D, x.C, y.B, y.A, y.D, y.C, z.B, z.A, z.D,
+		T7{
+			T10: T10{
+				W: z.C.W,
+			},
+			T11: T11{},
+		},
+	)
+}
+func Test(a T2, b T5, c T4, d T7, e T3, f T5, g T4, h T7, i T3, j T5, k T4, l T7) float64