all: merge go1.9.3 into dev.boringcrypto.go1.9

Change-Id: I184dd79b7398a8989e0b8284c7c81bb59343c1cb
diff --git a/doc/contrib.html b/doc/contrib.html
index 0290923..c165f39 100644
--- a/doc/contrib.html
+++ b/doc/contrib.html
@@ -117,6 +117,6 @@
 <p>
 Check <a href="//golang.org/issue">the tracker</a> for 
 open issues that interest you. Those labeled
-<a href="https://github.com/golang/go/issues?q=is%3Aopen+is%3Aissue+label%3Ahelpwanted">helpwanted</a>
+<a href="https://github.com/golang/go/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22">help wanted</a>
 are particularly in need of outside help.
 </p>
diff --git a/doc/devel/release.html b/doc/devel/release.html
index eac2ddd..c9c2050 100644
--- a/doc/devel/release.html
+++ b/doc/devel/release.html
@@ -49,6 +49,14 @@
 1.9.2 milestone</a> on our issue tracker for details.
 </p>
 
+<p>
+go1.9.3 (released 2018/01/22) includes fixes to the compiler, runtime,
+and the <code>database/sql</code>, <code>math/big</code>, <code>net/http</code>,
+and <code>net/url</code> packages.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.9.3">Go
+1.9.3 milestone</a> on our issue tracker for details.
+</p>
+
 <h2 id="go1.8">go1.8 (released 2017/02/16)</h2>
 
 <p>
@@ -89,6 +97,16 @@
 1.8.4 milestone</a> on our issue tracker for details.
 </p>
 
+<p>
+go1.8.5 (released 2017/10/25) includes fixes to the compiler, linker, runtime,
+documentation, <code>go</code> command,
+and the <code>crypto/x509</code> and <code>net/smtp</code> packages.
+It includes a fix to a bug introduced in Go 1.8.4 that broke <code>go</code> <code>get</code>
+of non-Git repositories under certain conditions.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.8.5">Go
+1.8.5 milestone</a> on our issue tracker for details.
+</p>
+
 <h2 id="go1.7">go1.7 (released 2016/08/15)</h2>
 
 <p>
@@ -262,7 +280,7 @@
 </p>
 
 <p>
-go1.3.3 (released 2014/09/30) includes further bug fixes to cgo, the runtime package, and the nacl port. 
+go1.3.3 (released 2014/09/30) includes further bug fixes to cgo, the runtime package, and the nacl port.
 See the <a href="https://github.com/golang/go/commits/go1.3.3">change history</a> for details.
 </p>
 
@@ -326,7 +344,7 @@
 </p>
 
 <p>
-The go1 release corresponds to 
+The go1 release corresponds to
 <code><a href="weekly.html#2012-03-27">weekly.2012-03-27</a></code>.
 </p>
 
@@ -342,7 +360,7 @@
 
 <p>
 go1.0.2 (released 2012/06/13) was issued to fix two bugs in the implementation
-of maps using struct or array keys: 
+of maps using struct or array keys:
 <a href="//golang.org/issue/3695">issue 3695</a> and
 <a href="//golang.org/issue/3573">issue 3573</a>.
 It also includes many minor code and documentation fixes.
diff --git a/doc/docs.html b/doc/docs.html
index 92392e2..d75c500 100644
--- a/doc/docs.html
+++ b/doc/docs.html
@@ -1,6 +1,7 @@
 <!--{
 	"Title": "Documentation",
-	"Path": "/doc/"
+	"Path": "/doc/",
+	"Template": true
 }-->
 
 <p>
@@ -33,14 +34,20 @@
 
 <img class="gopher" src="/doc/gopher/doc.png"/>
 
-<h3 id="go_tour"><a href="//tour.golang.org/">A Tour of Go</a></h3>
+<h3 id="go_tour">
+	{{if $.GoogleCN}}
+	  A Tour of Go
+	{{else}}
+	  <a href="//tour.golang.org/">A Tour of Go</a>
+	{{end}}
+</h3>
 <p>
 An interactive introduction to Go in three sections.
 The first section covers basic syntax and data structures; the second discusses
 methods and interfaces; and the third introduces Go's concurrency primitives.
 Each section concludes with a few exercises so you can practice what you've
-learned. You can <a href="//tour.golang.org/">take the tour online</a> or
-install it locally with:
+learned. You can {{if not $.GoogleCN}}<a href="//tour.golang.org/">take the tour
+online</a> or{{end}} install it locally with:
 </p>
 <p>
 <pre>
@@ -51,10 +58,13 @@
 
 <h3 id="code"><a href="code.html">How to write Go code</a></h3>
 <p>
-Also available as a
-<a href="//www.youtube.com/watch?v=XCsL89YtqCs">screencast</a>, this doc
-explains how to use the <a href="/cmd/go/">go command</a> to fetch, build, and
-install packages, commands, and run tests.
+{{if not $.GoogleCN}}
+Also available as a <a href="//www.youtube.com/watch?v=XCsL89YtqCs">screencast</a>, this
+{{else}}
+This
+{{end}}
+doc explains how to use the <a href="/cmd/go/">go command</a>
+to fetch, build, and install packages, commands, and run tests.
 </p>
 
 <h3 id="editors"><a href="editors.html">Editor plugins and IDEs</a></h3>
@@ -115,9 +125,11 @@
 
 <h2 id="articles">Articles</h2>
 
+{{if not $.GoogleCN}}
 <h3 id="blog"><a href="//blog.golang.org/">The Go Blog</a></h3>
 <p>The official blog of the Go project, featuring news and in-depth articles by
 the Go team and guests.</p>
+{{end}}
 
 <h4>Codewalks</h4>
 <p>
@@ -130,6 +142,7 @@
 <li><a href="/doc/articles/wiki/">Writing Web Applications</a> - building a simple web application.</li>
 </ul>
 
+{{if not $.GoogleCN}}
 <h4>Language</h4>
 <ul>
 <li><a href="/blog/json-rpc-tale-of-interfaces">JSON-RPC: a tale of interfaces</a></li>
@@ -150,17 +163,20 @@
 <li><a href="/blog/go-image-package">The Go image package</a> - the fundamentals of the <a href="/pkg/image/">image</a> package.</li>
 <li><a href="/blog/go-imagedraw-package">The Go image/draw package</a> - the fundamentals of the <a href="/pkg/image/draw/">image/draw</a> package.</li>
 </ul>
+{{end}}
 
 <h4>Tools</h4>
 <ul>
 <li><a href="/doc/articles/go_command.html">About the Go command</a> - why we wrote it, what it is, what it's not, and how to use it.</li>
-<li><a href="/blog/c-go-cgo">C? Go? Cgo!</a> - linking against C code with <a href="/cmd/cgo/">cgo</a>.</li>
 <li><a href="/doc/gdb">Debugging Go Code with GDB</a></li>
+<li><a href="/doc/articles/race_detector.html">Data Race Detector</a> - a manual for the data race detector.</li>
+<li><a href="/doc/asm">A Quick Guide to Go's Assembler</a> - an introduction to the assembler used by Go.</li>
+{{if not $.GoogleCN}}
+<li><a href="/blog/c-go-cgo">C? Go? Cgo!</a> - linking against C code with <a href="/cmd/cgo/">cgo</a>.</li>
 <li><a href="/blog/godoc-documenting-go-code">Godoc: documenting Go code</a> - writing good documentation for <a href="/cmd/godoc/">godoc</a>.</li>
 <li><a href="/blog/profiling-go-programs">Profiling Go Programs</a></li>
-<li><a href="/doc/articles/race_detector.html">Data Race Detector</a> - a manual for the data race detector.</li>
 <li><a href="/blog/race-detector">Introducing the Go Race Detector</a> - an introduction to the race detector.</li>
-<li><a href="/doc/asm">A Quick Guide to Go's Assembler</a> - an introduction to the assembler used by Go.</li>
+{{end}}
 </ul>
 
 <h4 id="articles_more">More</h4>
@@ -169,7 +185,7 @@
 <a href="/wiki">Wiki</a> for more Go articles.
 </p>
 
-
+{{if not $.GoogleCN}}
 <h2 id="talks">Talks</h2>
 
 <img class="gopher" src="/doc/gopher/talks.png"/>
@@ -200,7 +216,7 @@
 <p>
 See the <a href="/talks">Go Talks site</a> and <a href="/wiki/GoTalks">wiki page</a> for more Go talks.
 </p>
-
+{{end}}
 
 <h2 id="nonenglish">Non-English Documentation</h2>
 
diff --git a/doc/go1.html b/doc/go1.html
index 1665d74..34e305b 100644
--- a/doc/go1.html
+++ b/doc/go1.html
@@ -775,7 +775,7 @@
 </p>
 
 {{code "/doc/progs/go1.go" `/ErrSyntax/`}}
-		
+
 <p>
 <em>Updating</em>:
 Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
@@ -1827,7 +1827,7 @@
 <tr><td>Uitob(x, b)</td> <td>FormatUint(uint64(x), b)</td></tr>
 <tr><td>Uitob64(x, b)</td> <td>FormatUint(x, b)</td></tr>
 </table>
-		
+
 <p>
 <em>Updating</em>:
 Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
@@ -1841,7 +1841,7 @@
 <h3 id="templates">The template packages</h3>
 
 <p>
-The <code>template</code> and <code>exp/template/html</code> packages have moved to 
+The <code>template</code> and <code>exp/template/html</code> packages have moved to
 <a href="/pkg/text/template/"><code>text/template</code></a> and
 <a href="/pkg/html/template/"><code>html/template</code></a>.
 More significant, the interface to these packages has been simplified.
@@ -2035,4 +2035,4 @@
 Installation details are described on the
 <a href="/doc/install">Getting Started</a> page, while
 the distributions themselves are listed on the
-<a href="https://golang.org/dl/">downloads page</a>.
+<a href="/dl/">downloads page</a>.
diff --git a/doc/help.html b/doc/help.html
index 057d752..f668196 100644
--- a/doc/help.html
+++ b/doc/help.html
@@ -1,6 +1,7 @@
 <!--{
 	"Title": "Help",
-	"Path": "/help/"
+	"Path": "/help/",
+	"Template": true
 }-->
 
 <div id="manual-nav"></div>
@@ -9,6 +10,7 @@
 
 <img class="gopher" src="/doc/gopher/help.png"/>
 
+{{if not $.GoogleCN}}
 <h3 id="mailinglist"><a href="https://groups.google.com/group/golang-nuts">Go Nuts Mailing List</a></h3>
 <p>
 Get help from Go users, and share your work on the official mailing list.
@@ -31,10 +33,12 @@
 <h3 id="irc"><a href="irc:irc.freenode.net/go-nuts">Go IRC Channel</a></h3>
 <p>Get live support at <b>#go-nuts</b> on <b>irc.freenode.net</b>, the official
 Go IRC channel.</p>
+{{end}}
 
 <h3 id="faq"><a href="/doc/faq">Frequently Asked Questions (FAQ)</a></h3>
 <p>Answers to common questions about Go.</p>
 
+{{if not $.GoogleCN}}
 <h2 id="inform">Stay informed</h2>
 
 <h3 id="announce"><a href="https://groups.google.com/group/golang-announce">Go Announcements Mailing List</a></h3>
@@ -64,6 +68,7 @@
 The <a href="https://changelog.com/gotime">Go Time podcast</a> is a panel of Go experts and special guests
 discussing the Go programming language, the community, and everything in between.
 </p>
+{{end}}
 
 <h2 id="community">Community resources</h2>
 
@@ -73,11 +78,13 @@
 meet to talk about Go. Find a chapter near you.
 </p>
 
+{{if not $.GoogleCN}}
 <h3 id="playground"><a href="/play">Go Playground</a></h3>
 <p>A place to write, run, and share Go code.</p>
 
 <h3 id="wiki"><a href="/wiki">Go Wiki</a></h3>
 <p>A wiki maintained by the Go community.</p>
+{{end}}
 
 <h3 id="conduct"><a href="/conduct">Code of Conduct</a></h3>
 <p>
diff --git a/doc/install.html b/doc/install.html
index 7f32f68..98d215a 100644
--- a/doc/install.html
+++ b/doc/install.html
@@ -8,14 +8,14 @@
 <h2 id="download">Download the Go distribution</h2>
 
 <p>
-<a href="https://golang.org/dl/" id="start" class="download">
+<a href="/dl/" id="start" class="download">
 <span class="big">Download Go</span>
 <span class="desc">Click here to visit the downloads page</span>
 </a>
 </p>
 
 <p>
-<a href="https://golang.org/dl/" target="_blank">Official binary
+<a href="/dl/" target="_blank">Official binary
 distributions</a> are available for the FreeBSD (release 8-STABLE and above),
 Linux, Mac OS X (10.8 and above), and Windows operating systems and
 the 32-bit (<code>386</code>) and 64-bit (<code>amd64</code>) x86 processor
@@ -33,7 +33,7 @@
 <h2 id="requirements">System requirements</h2>
 
 <p>
-Go <a href="https://golang.org/dl/">binary distributions</a> are available for these supported operating systems and architectures.
+Go <a href="/dl/">binary distributions</a> are available for these supported operating systems and architectures.
 Please ensure your system meets these requirements before proceeding.
 If your OS or architecture is not on the list, you may be able to
 <a href="/doc/install/source">install from source</a> or
@@ -77,7 +77,7 @@
 <h3 id="tarball">Linux, Mac OS X, and FreeBSD tarballs</h3>
 
 <p>
-<a href="https://golang.org/dl/">Download the archive</a>
+<a href="/dl/">Download the archive</a>
 and extract it into <code>/usr/local</code>, creating a Go tree in
 <code>/usr/local/go</code>. For example:
 </p>
@@ -138,7 +138,7 @@
 <h3 id="osx">Mac OS X package installer</h3>
 
 <p>
-<a href="https://golang.org/dl/">Download the package file</a>,
+<a href="/dl/">Download the package file</a>,
 open it, and follow the prompts to install the Go tools.
 The package installs the Go distribution to <code>/usr/local/go</code>.
 </p>
@@ -167,7 +167,7 @@
 <h4 id="windows_msi">MSI installer</h4>
 
 <p>
-Open the <a href="https://golang.org/dl/">MSI file</a>
+Open the <a href="/dl/">MSI file</a>
 and follow the prompts to install the Go tools.
 By default, the installer puts the Go distribution in <code>c:\Go</code>.
 </p>
@@ -185,7 +185,7 @@
 <h4 id="windows_zip">Zip archive</h4>
 
 <p>
-<a href="https://golang.org/dl/">Download the zip file</a> and extract it into the directory of your choice (we suggest <code>c:\Go</code>).
+<a href="/dl/">Download the zip file</a> and extract it into the directory of your choice (we suggest <code>c:\Go</code>).
 </p>
 
 <p>
diff --git a/doc/root.html b/doc/root.html
index 9bdf927..a5119a9 100644
--- a/doc/root.html
+++ b/doc/root.html
@@ -58,7 +58,7 @@
 
 <div id="gopher"></div>
 
-<a href="https://golang.org/dl/" id="start">
+<a href="/dl/" id="start">
 <span class="big">Download Go</span>
 <span class="desc">
 Binary distributions available for<br>
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index f21a4da..fafed7c 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -86,7 +86,9 @@
 	{"mapaccess2_fat", funcTag, 67},
 	{"mapassign", funcTag, 62},
 	{"mapassign_fast32", funcTag, 63},
+	{"mapassign_fast32ptr", funcTag, 63},
 	{"mapassign_fast64", funcTag, 63},
+	{"mapassign_fast64ptr", funcTag, 63},
 	{"mapassign_faststr", funcTag, 63},
 	{"mapiterinit", funcTag, 68},
 	{"mapdelete", funcTag, 68},
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 7f4846d..c3994c5 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -106,7 +106,9 @@
 func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
 func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
 func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
 func mapdelete(mapType *byte, hmap map[any]any, key *any)
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 76031d1..ab2e382 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -2785,21 +2785,23 @@
 const (
 	mapslow = iota
 	mapfast32
+	mapfast32ptr
 	mapfast64
+	mapfast64ptr
 	mapfaststr
 	nmapfast
 )
 
 type mapnames [nmapfast]string
 
-func mkmapnames(base string) mapnames {
-	return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
+func mkmapnames(base string, ptr string) mapnames {
+	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
 }
 
-var mapaccess1 mapnames = mkmapnames("mapaccess1")
-var mapaccess2 mapnames = mkmapnames("mapaccess2")
-var mapassign mapnames = mkmapnames("mapassign")
-var mapdelete mapnames = mkmapnames("mapdelete")
+var mapaccess1 mapnames = mkmapnames("mapaccess1", "")
+var mapaccess2 mapnames = mkmapnames("mapaccess2", "")
+var mapassign mapnames = mkmapnames("mapassign", "ptr")
+var mapdelete mapnames = mkmapnames("mapdelete", "")
 
 func mapfast(t *types.Type) int {
 	// Check ../../runtime/hashmap.go:maxValueSize before changing.
@@ -2808,9 +2810,22 @@
 	}
 	switch algtype(t.Key()) {
 	case AMEM32:
-		return mapfast32
+		if !t.Key().HasPointer() {
+			return mapfast32
+		}
+		if Widthptr == 4 {
+			return mapfast32ptr
+		}
+		Fatalf("small pointer %v", t.Key())
 	case AMEM64:
-		return mapfast64
+		if !t.Key().HasPointer() {
+			return mapfast64
+		}
+		if Widthptr == 8 {
+			return mapfast64ptr
+		}
+		// Two-word object, at least one of which is a pointer.
+		// Use the slow path.
 	case ASTRING:
 		return mapfaststr
 	}
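
For orientation, a minimal sketch of the key types each assignment fast path above is meant to cover, assuming a 64-bit target (the routing is an internal compiler decision made by mapfast; on 32-bit targets a pointer key takes the fast32ptr path instead):

	package main

	func main() {
		// Illustrative only; comments name the runtime helper the compiler
		// is expected to pick for each assignment after this change.
		m32 := map[uint32]int{} // AMEM32, no pointers -> mapassign_fast32
		m64 := map[uint64]int{} // AMEM64, no pointers -> mapassign_fast64
		mp := map[*int]int{}    // AMEM64, pointer key -> mapassign_fast64ptr
		ms := map[string]int{}  // ASTRING             -> mapassign_faststr
		m32[1] = 1
		m64[1] = 1
		mp[new(int)] = 1
		ms["k"] = 1
	}
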
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 49fcd36..047052d 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -798,45 +798,45 @@
 (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
 
 // combine ADDL into indexed loads and stores
-(MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+(MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
 
-(MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+(MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)  -> (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
 
-(MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
-(MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-(MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
-(MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-(MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-(MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+(MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2  [int64(int32(c+2*d))] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4  [int64(int32(c+4*d))] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [int64(int32(c+d))]   {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [int64(int32(c+d))]   {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
 
-(MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
-(MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
-(MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-(MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-(MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+(MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2  [int64(int32(c+2*d))] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)  -> (MOVLstoreidx4  [int64(int32(c+4*d))] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [int64(int32(c+d))]   {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [int64(int32(c+d))]   {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
 
 (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
 	(MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 944a84d..10e49fc 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -819,7 +819,7 @@
 (Store _ (ArrayMake0) mem) -> mem
 (Store dst (ArrayMake1 e) mem) -> (Store {e.Type} dst e mem)
 
-(ArraySelect [0] (Load ptr mem)) -> (Load ptr mem)
+(ArraySelect [0] x:(Load ptr mem)) -> @x.Block (Load <v.Type> ptr mem)
 
 // Putting [1]{*byte} and similar into direct interfaces.
 (IMake typ (ArrayMake1 val)) -> (IMake typ val)
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 137e5fc..a0d69bc 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -1103,12 +1103,15 @@
 			if v.Op == OpKeepAlive {
 				// Make sure the argument to v is still live here.
 				s.advanceUses(v)
-				vi := &s.values[v.Args[0].ID]
-				if vi.spill != nil {
+				a := v.Args[0]
+				vi := &s.values[a.ID]
+				if vi.regs == 0 && !vi.rematerializeable {
 					// Use the spill location.
-					v.SetArg(0, vi.spill)
+					// This forces later liveness analysis to make the
+					// value live at this point.
+					v.SetArg(0, s.makeSpill(a, b))
 				} else {
-					// No need to keep unspilled values live.
+					// In-register and rematerializeable values are already live.
 					// These are typically rematerializeable constants like nil,
 					// or values of a variable that were modified since the last call.
 					v.Op = OpCopy
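
The OpKeepAlive handling above exists to support the usual runtime.KeepAlive pattern; a Unix-flavored sketch of that pattern (not part of this CL) showing why the argument's spill slot must be kept live across the call:

	package main

	import (
		"runtime"
		"syscall"
	)

	// file wraps a raw descriptor whose finalizer would close it.
	type file struct{ fd int }

	// read must keep p reachable until syscall.Read returns, even if the
	// compiler spilled p across the call; the regalloc change above forces
	// that spill slot to be treated as live at the KeepAlive point.
	func read(p *file, buf []byte) (int, error) {
		n, err := syscall.Read(p.fd, buf)
		runtime.KeepAlive(p) // don't finalize (and close) p.fd before Read returns
		return n, err
	}

	func main() {}
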
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index f9cddd0..468be07 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -3148,7 +3148,7 @@
 func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool {
 	// match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3162,7 +3162,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3171,7 +3171,7 @@
 	}
 	// match: (MOVBloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem)
 	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3185,7 +3185,7 @@
 		ptr := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3194,7 +3194,7 @@
 	}
 	// match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVBloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3208,7 +3208,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3217,7 +3217,7 @@
 	}
 	// match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem)
 	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVBloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3231,7 +3231,7 @@
 		ptr := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3747,7 +3747,7 @@
 func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool {
 	// match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3762,7 +3762,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3772,7 +3772,7 @@
 	}
 	// match: (MOVBstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem)
 	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3787,7 +3787,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3797,7 +3797,7 @@
 	}
 	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVBstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3812,7 +3812,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -3822,7 +3822,7 @@
 	}
 	// match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem)
 	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVBstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -3837,7 +3837,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -4486,7 +4486,7 @@
 	}
 	// match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -4500,7 +4500,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -4509,7 +4509,7 @@
 	}
 	// match: (MOVLloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem)
 	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -4523,7 +4523,7 @@
 		ptr := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -4532,7 +4532,7 @@
 	}
 	// match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVLloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -4546,7 +4546,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -4555,7 +4555,7 @@
 	}
 	// match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem)
 	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVLloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -4569,7 +4569,7 @@
 		ptr := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -4581,7 +4581,7 @@
 func rewriteValue386_Op386MOVLloadidx4_0(v *Value) bool {
 	// match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+	// result: (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -4595,7 +4595,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVLloadidx4)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -4604,7 +4604,7 @@
 	}
 	// match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+	// result: (MOVLloadidx4  [int64(int32(c+4*d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -4618,7 +4618,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVLloadidx4)
-		v.AuxInt = c + 4*d
+		v.AuxInt = int64(int32(c + 4*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5114,7 +5114,7 @@
 	}
 	// match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5129,7 +5129,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5139,7 +5139,7 @@
 	}
 	// match: (MOVLstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem)
 	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5154,7 +5154,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5164,7 +5164,7 @@
 	}
 	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVLstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5179,7 +5179,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5189,7 +5189,7 @@
 	}
 	// match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem)
 	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVLstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5204,7 +5204,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5217,7 +5217,7 @@
 func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool {
 	// match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
+	// result: (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5232,7 +5232,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVLstoreidx4)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5242,7 +5242,7 @@
 	}
 	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
+	// result: (MOVLstoreidx4  [int64(int32(c+4*d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5257,7 +5257,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVLstoreidx4)
-		v.AuxInt = c + 4*d
+		v.AuxInt = int64(int32(c + 4*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5431,7 +5431,7 @@
 func rewriteValue386_Op386MOVSDloadidx1_0(v *Value) bool {
 	// match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVSDloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5445,7 +5445,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVSDloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5454,7 +5454,7 @@
 	}
 	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVSDloadidx1 [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5468,7 +5468,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVSDloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5480,7 +5480,7 @@
 func rewriteValue386_Op386MOVSDloadidx8_0(v *Value) bool {
 	// match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+	// result: (MOVSDloadidx8 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5494,7 +5494,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVSDloadidx8)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5503,7 +5503,7 @@
 	}
 	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+	// result: (MOVSDloadidx8 [int64(int32(c+8*d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5517,7 +5517,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVSDloadidx8)
-		v.AuxInt = c + 8*d
+		v.AuxInt = int64(int32(c + 8*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5677,7 +5677,7 @@
 func rewriteValue386_Op386MOVSDstoreidx1_0(v *Value) bool {
 	// match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVSDstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5692,7 +5692,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSDstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5702,7 +5702,7 @@
 	}
 	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVSDstoreidx1 [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5717,7 +5717,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSDstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5730,7 +5730,7 @@
 func rewriteValue386_Op386MOVSDstoreidx8_0(v *Value) bool {
 	// match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+	// result: (MOVSDstoreidx8 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5745,7 +5745,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSDstoreidx8)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5755,7 +5755,7 @@
 	}
 	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+	// result: (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5770,7 +5770,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSDstoreidx8)
-		v.AuxInt = c + 8*d
+		v.AuxInt = int64(int32(c + 8*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5944,7 +5944,7 @@
 func rewriteValue386_Op386MOVSSloadidx1_0(v *Value) bool {
 	// match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVSSloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5958,7 +5958,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVSSloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5967,7 +5967,7 @@
 	}
 	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVSSloadidx1 [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -5981,7 +5981,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVSSloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -5993,7 +5993,7 @@
 func rewriteValue386_Op386MOVSSloadidx4_0(v *Value) bool {
 	// match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
+	// result: (MOVSSloadidx4 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6007,7 +6007,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVSSloadidx4)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6016,7 +6016,7 @@
 	}
 	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
+	// result: (MOVSSloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6030,7 +6030,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVSSloadidx4)
-		v.AuxInt = c + 4*d
+		v.AuxInt = int64(int32(c + 4*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6190,7 +6190,7 @@
 func rewriteValue386_Op386MOVSSstoreidx1_0(v *Value) bool {
 	// match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVSSstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6205,7 +6205,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSSstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6215,7 +6215,7 @@
 	}
 	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVSSstoreidx1 [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6230,7 +6230,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSSstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6243,7 +6243,7 @@
 func rewriteValue386_Op386MOVSSstoreidx4_0(v *Value) bool {
 	// match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
+	// result: (MOVSSstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6258,7 +6258,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSSstoreidx4)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6268,7 +6268,7 @@
 	}
 	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
+	// result: (MOVSSstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6283,7 +6283,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVSSstoreidx4)
-		v.AuxInt = c + 4*d
+		v.AuxInt = int64(int32(c + 4*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6715,7 +6715,7 @@
 	}
 	// match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6729,7 +6729,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6738,7 +6738,7 @@
 	}
 	// match: (MOVWloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem)
 	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVWloadidx1 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6752,7 +6752,7 @@
 		ptr := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6761,7 +6761,7 @@
 	}
 	// match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVWloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6775,7 +6775,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6784,7 +6784,7 @@
 	}
 	// match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem)
 	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+	// result: (MOVWloadidx1  [int64(int32(c+d))]   {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6798,7 +6798,7 @@
 		ptr := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6810,7 +6810,7 @@
 func rewriteValue386_Op386MOVWloadidx2_0(v *Value) bool {
 	// match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
 	// cond:
-	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+	// result: (MOVWloadidx2 [int64(int32(c+d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6824,7 +6824,7 @@
 		idx := v.Args[1]
 		mem := v.Args[2]
 		v.reset(Op386MOVWloadidx2)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -6833,7 +6833,7 @@
 	}
 	// match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
 	// cond:
-	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
+	// result: (MOVWloadidx2  [int64(int32(c+2*d))] {sym} ptr idx mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -6847,7 +6847,7 @@
 		idx := v_1.Args[0]
 		mem := v.Args[2]
 		v.reset(Op386MOVWloadidx2)
-		v.AuxInt = c + 2*d
+		v.AuxInt = int64(int32(c + 2*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -7590,7 +7590,7 @@
 	}
 	// match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -7605,7 +7605,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -7615,7 +7615,7 @@
 	}
 	// match: (MOVWstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem)
 	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVWstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -7630,7 +7630,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -7640,7 +7640,7 @@
 	}
 	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVWstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -7655,7 +7655,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -7665,7 +7665,7 @@
 	}
 	// match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem)
 	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+	// result: (MOVWstoreidx1  [int64(int32(c+d))]   {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -7680,7 +7680,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -8118,7 +8118,7 @@
 	_ = b
 	// match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
 	// cond:
-	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+	// result: (MOVWstoreidx2 [int64(int32(c+d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -8133,7 +8133,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVWstoreidx2)
-		v.AuxInt = c + d
+		v.AuxInt = int64(int32(c + d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
@@ -8143,7 +8143,7 @@
 	}
 	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
 	// cond:
-	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+	// result: (MOVWstoreidx2  [int64(int32(c+2*d))] {sym} ptr idx val mem)
 	for {
 		c := v.AuxInt
 		sym := v.Aux
@@ -8158,7 +8158,7 @@
 		val := v.Args[2]
 		mem := v.Args[3]
 		v.reset(Op386MOVWstoreidx2)
-		v.AuxInt = c + 2*d
+		v.AuxInt = int64(int32(c + 2*d))
 		v.Aux = sym
 		v.AddArg(ptr)
 		v.AddArg(idx)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index c67e4f9..fddfebd 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -5670,6 +5670,8 @@
 	return false
 }
 func rewriteValuegeneric_OpArraySelect_0(v *Value) bool {
+	b := v.Block
+	_ = b
 	// match: (ArraySelect (ArrayMake1 x))
 	// cond:
 	// result: x
@@ -5684,23 +5686,26 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ArraySelect [0] (Load ptr mem))
+	// match: (ArraySelect [0] x:(Load ptr mem))
 	// cond:
-	// result: (Load ptr mem)
+	// result: @x.Block (Load <v.Type> ptr mem)
 	for {
 		if v.AuxInt != 0 {
 			break
 		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpLoad {
+		x := v.Args[0]
+		if x.Op != OpLoad {
 			break
 		}
-		_ = v_0.Args[1]
-		ptr := v_0.Args[0]
-		mem := v_0.Args[1]
-		v.reset(OpLoad)
-		v.AddArg(ptr)
-		v.AddArg(mem)
+		_ = x.Args[1]
+		ptr := x.Args[0]
+		mem := x.Args[1]
+		b = x.Block
+		v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+		v.reset(OpCopy)
+		v.AddArg(v0)
+		v0.AddArg(ptr)
+		v0.AddArg(mem)
 		return true
 	}
 	// match: (ArraySelect [0] x:(IData _))
diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
index cdac000..7a36224 100644
--- a/src/cmd/compile/internal/x86/387.go
+++ b/src/cmd/compile/internal/x86/387.go
@@ -46,6 +46,9 @@
 		case ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
 			p.From.Scale = 1
 			p.From.Index = v.Args[1].Reg()
+			if p.From.Index == x86.REG_SP {
+				p.From.Reg, p.From.Index = p.From.Index, p.From.Reg
+			}
 		case ssa.Op386MOVSSloadidx4:
 			p.From.Scale = 4
 			p.From.Index = v.Args[1].Reg()
@@ -95,6 +98,9 @@
 		case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
 			p.To.Scale = 1
 			p.To.Index = v.Args[1].Reg()
+			if p.To.Index == x86.REG_SP {
+				p.To.Reg, p.To.Index = p.To.Index, p.To.Reg
+			}
 		case ssa.Op386MOVSSstoreidx4:
 			p.To.Scale = 4
 			p.To.Index = v.Args[1].Reg()
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index 89976c7..97f4016 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -1769,14 +1769,20 @@
 
 // Commit commits the transaction.
 func (tx *Tx) Commit() error {
-	if !atomic.CompareAndSwapInt32(&tx.done, 0, 1) {
-		return ErrTxDone
-	}
+	// Check context first to avoid transaction leak.
+	// If we put it behind the tx.done CompareAndSwap statement, we can't ensure
+	// the consistency between tx.done and the real COMMIT operation.
 	select {
 	default:
 	case <-tx.ctx.Done():
+		if atomic.LoadInt32(&tx.done) == 1 {
+			return ErrTxDone
+		}
 		return tx.ctx.Err()
 	}
+	if !atomic.CompareAndSwapInt32(&tx.done, 0, 1) {
+		return ErrTxDone
+	}
 	var err error
 	withLock(tx.dc, func() {
 		err = tx.txi.Commit()
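
A sketch of the caller-visible effect of reordering the context check, assuming an open *sql.DB from any driver:

	package main

	import (
		"context"
		"database/sql"
	)

	// commitAfterCancel cancels the transaction's context before Commit.
	// With the change above, Commit reports the context error (or ErrTxDone
	// if the rollback already finished) rather than racing past the check.
	func commitAfterCancel(db *sql.DB) error {
		ctx, cancel := context.WithCancel(context.Background())
		tx, err := db.BeginTx(ctx, nil)
		if err != nil {
			return err
		}
		cancel()           // context canceled before Commit
		return tx.Commit() // typically context.Canceled or sql.ErrTxDone
	}

	func main() {}
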
diff --git a/src/math/big/int_test.go b/src/math/big/int_test.go
index 42e810b..46e2ff1 100644
--- a/src/math/big/int_test.go
+++ b/src/math/big/int_test.go
@@ -1536,6 +1536,26 @@
 	}
 }
 
+// We can't test this together with the other Exp tests above because
+// it requires a different receiver setup.
+func TestIssue22830(t *testing.T) {
+	one := new(Int).SetInt64(1)
+	base, _ := new(Int).SetString("84555555300000000000", 10)
+	mod, _ := new(Int).SetString("66666670001111111111", 10)
+	want, _ := new(Int).SetString("17888885298888888889", 10)
+
+	var tests = []int64{
+		0, 1, -1,
+	}
+
+	for _, n := range tests {
+		m := NewInt(n)
+		if got := m.Exp(base, one, mod); got.Cmp(want) != 0 {
+			t.Errorf("(%v).Exp(%s, 1, %s) = %s, want %s", n, base, mod, got, want)
+		}
+	}
+}
+
 func BenchmarkSqrt(b *testing.B) {
 	n, _ := new(Int).SetString("1"+strings.Repeat("0", 1001), 10)
 	b.ResetTimer()
diff --git a/src/math/big/nat.go b/src/math/big/nat.go
index 889eacb..f2cdbdb 100644
--- a/src/math/big/nat.go
+++ b/src/math/big/nat.go
@@ -566,8 +566,8 @@
 	// determine if z can be reused
 	// TODO(gri) should find a better solution - this if statement
 	//           is very costly (see e.g. time pidigits -s -n 10000)
-	if alias(z, uIn) || alias(z, v) {
-		z = nil // z is an alias for uIn or v - cannot reuse
+	if alias(z, u) || alias(z, uIn) || alias(z, v) {
+		z = nil // z is an alias for u or uIn or v - cannot reuse
 	}
 	q = z.make(m + 1)
 
diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
index 83f1671..2c78b22 100644
--- a/src/net/http/h2_bundle.go
+++ b/src/net/http/h2_bundle.go
@@ -8222,6 +8222,14 @@
 		return nil
 	}
 	if f.Length > 0 {
+		if cs.req.Method == "HEAD" && len(data) > 0 {
+			cc.logf("protocol error: received DATA on a HEAD request")
+			rl.endStreamError(cs, http2StreamError{
+				StreamID: f.StreamID,
+				Code:     http2ErrCodeProtocol,
+			})
+			return nil
+		}
 		// Check connection-level flow control.
 		cc.mu.Lock()
 		if cs.inflow.available() >= int32(f.Length) {
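
A client-side sketch of the case the new check guards against, using a placeholder URL: a response to a HEAD request carries no body, so a server that wrongly attaches DATA frames now causes the stream to be reset rather than having the stray bytes charged against connection flow control.

	package main

	import (
		"fmt"
		"net/http"
	)

	// headSize issues a HEAD request and reports the advertised length.
	func headSize(url string) (int64, error) {
		resp, err := http.Head(url)
		if err != nil {
			return 0, err
		}
		resp.Body.Close()
		fmt.Println(resp.Status)
		return resp.ContentLength, nil
	}

	func main() {}
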
diff --git a/src/net/url/url.go b/src/net/url/url.go
index 2ac2472..70556a4 100644
--- a/src/net/url/url.go
+++ b/src/net/url/url.go
@@ -545,6 +545,9 @@
 		return nil, host, nil
 	}
 	userinfo := authority[:i]
+	if !validUserinfo(userinfo) {
+		return nil, "", errors.New("net/url: invalid userinfo")
+	}
 	if !strings.Contains(userinfo, ":") {
 		if userinfo, err = unescape(userinfo, encodeUserPassword); err != nil {
 			return nil, "", err
@@ -1051,3 +1054,33 @@
 	*u = *u1
 	return nil
 }
+
+// validUserinfo reports whether s is a valid userinfo string per RFC 3986
+// Section 3.2.1:
+//     userinfo    = *( unreserved / pct-encoded / sub-delims / ":" )
+//     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
+//     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
+//                   / "*" / "+" / "," / ";" / "="
+//
+// It doesn't validate pct-encoded. The caller does that via func unescape.
+func validUserinfo(s string) bool {
+	for _, r := range s {
+		if 'A' <= r && r <= 'Z' {
+			continue
+		}
+		if 'a' <= r && r <= 'z' {
+			continue
+		}
+		if '0' <= r && r <= '9' {
+			continue
+		}
+		switch r {
+		case '-', '.', '_', ':', '~', '!', '$', '&', '\'',
+			'(', ')', '*', '+', ',', ';', '=', '%', '@':
+			continue
+		default:
+			return false
+		}
+	}
+	return true
+}
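
A small sketch of the new behavior and of the supported way to carry such credentials, using a placeholder host:

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		// Raw control characters in the userinfo no longer parse.
		if _, err := url.Parse("http://us\ner:pass\nword@example.com/"); err != nil {
			fmt.Println("rejected:", err)
		}
		// Building the URL with url.UserPassword percent-encodes the
		// userinfo, so the same credentials round-trip safely.
		u := &url.URL{
			Scheme: "http",
			User:   url.UserPassword("us\ner", "pass\nword"),
			Host:   "example.com",
		}
		fmt.Println(u.String())
	}
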
diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go
index 6c3bb21..5d97412 100644
--- a/src/net/url/url_test.go
+++ b/src/net/url/url_test.go
@@ -1683,3 +1683,10 @@
 		t.Errorf("json decoded to: %s\nwant: %s\n", u1, u)
 	}
 }
+
+func TestInvalidUserPassword(t *testing.T) {
+	_, err := Parse("http://us\ner:pass\nword@foo.com/")
+	if got, wantsub := fmt.Sprint(err), "net/url: invalid userinfo"; !strings.Contains(got, wantsub) {
+		t.Errorf("error = %q; want substring %q", got, wantsub)
+	}
+}
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go
index a5cbbad..57fe796 100644
--- a/src/runtime/crash_cgo_test.go
+++ b/src/runtime/crash_cgo_test.go
@@ -411,3 +411,16 @@
 		t.Errorf("expected %q got %v", want, got)
 	}
 }
+
+func TestSigStackSwapping(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("no sigaltstack on %s", runtime.GOOS)
+	}
+	t.Parallel()
+	got := runTestProg(t, "testprogcgo", "SigStack")
+	want := "OK\n"
+	if got != want {
+		t.Errorf("expected %q got %v", want, got)
+	}
+}
diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go
index 67b9787..c07992b 100644
--- a/src/runtime/hashmap_fast.go
+++ b/src/runtime/hashmap_fast.go
@@ -507,6 +507,94 @@
 	return val
 }
 
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := h.newoverflow(t, b)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*4)
+	}
+
+	// store new key/value at insert position
+	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
 func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 	if h == nil {
 		panic(plainError("assignment to entry in nil map"))
@@ -595,6 +683,94 @@
 	return val
 }
 
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+			if k != key {
+				continue
+			}
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := h.newoverflow(t, b)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*8)
+	}
+
+	// store new key/value at insert position
+	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
 func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 	if h == nil {
 		panic(plainError("assignment to entry in nil map"))
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index b708720..e690991 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1251,7 +1251,12 @@
 
 	gcResetMarkState()
 
-	work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
+	work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
+	if work.stwprocs > ncpu {
+		// This is used to compute CPU time of the STW phases,
+		// so it can't be more than ncpu, even if GOMAXPROCS is.
+		work.stwprocs = ncpu
+	}
 	work.heap0 = atomic.Load64(&memstats.heap_live)
 	work.pauseNS = 0
 	work.mode = mode
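A worked example of the effect of the cap, using illustrative numbers for GOMAXPROCS, the CPU count, and the pause length:

package main

import "fmt"

func main() {
	const (
		gomaxprocs = 16      // GOMAXPROCS setting (illustrative)
		ncpu       = 8       // CPUs actually available (illustrative)
		pauseNS    = 2000000 // a 2ms stop-the-world pause
	)
	stwprocs := gomaxprocs
	if stwprocs > ncpu {
		stwprocs = ncpu // STW phases cannot consume more CPU than exists
	}
	// CPU time charged to the STW phase: pause duration times stwprocs.
	fmt.Println(int64(stwprocs) * pauseNS) // 16000000, not 32000000
}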
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index 5d4b5a6..3b65a2c 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -153,3 +153,6 @@
 	// TODO: Enable profiling interrupts.
 	getg().m.profilehz = hz
 }
+
+// gsignalStack is unused on Plan 9.
+type gsignalStack struct{}
diff --git a/src/runtime/os_nacl.go b/src/runtime/os_nacl.go
index 18e6ce6..1284832 100644
--- a/src/runtime/os_nacl.go
+++ b/src/runtime/os_nacl.go
@@ -285,6 +285,9 @@
 func sigignore(uint32)                                    {}
 func closeonexec(int32)                                   {}
 
+// gsignalStack is unused on nacl.
+type gsignalStack struct{}
+
 var writelock uint32 // test-and-set spin lock for write
 
 /*
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 6871d9c..90364ed 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -386,10 +386,11 @@
 	divmod  uint32 // div/mod denominator for arm - known to liblink
 
 	// Fields not known to debuggers.
-	procid        uint64     // for debuggers, but offset not hard-coded
-	gsignal       *g         // signal-handling g
-	sigmask       sigset     // storage for saved signal mask
-	tls           [6]uintptr // thread-local storage (for x86 extern register)
+	procid        uint64       // for debuggers, but offset not hard-coded
+	gsignal       *g           // signal-handling g
+	goSigStack    gsignalStack // Go-allocated signal handling stack
+	sigmask       sigset       // storage for saved signal mask
+	tls           [6]uintptr   // thread-local storage (for x86 extern register)
 	mstartfn      func()
 	curg          *g       // current running goroutine
 	caughtsig     guintptr // goroutine running during fatal signal
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 539b165..a495634 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -702,7 +702,7 @@
 		signalstack(&_g_.m.gsignal.stack)
 		_g_.m.newSigstack = true
 	} else {
-		setGsignalStack(&st, nil)
+		setGsignalStack(&st, &_g_.m.goSigStack)
 		_g_.m.newSigstack = false
 	}
 }
@@ -732,6 +732,14 @@
 	if getg().m.newSigstack {
 		st := stackt{ss_flags: _SS_DISABLE}
 		sigaltstack(&st, nil)
+	} else {
+		// We got the signal stack from someone else. Restore
+		// the Go-allocated stack in case this M gets reused
+		// for another thread (e.g., it's an extram). Also, on
+		// Android, libc allocates a signal stack for all
+		// threads, so it's important to restore the Go stack
+		// even on Go-created threads so we can free it.
+		restoreGsignalStack(&getg().m.goSigStack)
 	}
 }
 
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 73bd5b5..8a63e58 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -223,3 +223,6 @@
 	// It's okay to leave this empty for now: if crash returns
 	// the ordinary exit-after-panic happens.
 }
+
+// gsignalStack is unused on Windows.
+type gsignalStack struct{}
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index e0dc3e1..79e110f 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -139,11 +139,31 @@
 	RET
 
 // func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$16
-	// Be careful. We're calling a function with gcc calling convention here.
-	// We're guaranteed 128 bytes on entry, and we've taken 16, and the
-	// call uses another 8.
-	// That leaves 104 for the gettime code to use. Hope that's enough!
+TEXT runtime·walltime(SB),NOSPLIT,$0-12
+	// We don't know how much stack space the VDSO code will need,
+	// so switch to g0.
+	// In particular, a kernel configured with CONFIG_OPTIMIZE_INLINING=n
+	// and hardening can use a full page of stack space in gettime_sym
+	// due to stack probes inserted to avoid stack/heap collisions.
+	// See issue #20427.
+
+	MOVQ	SP, BP	// Save old SP; BP unchanged by C code.
+
+	get_tls(CX)
+	MOVQ	g(CX), AX
+	MOVQ	g_m(AX), CX
+	MOVQ	m_curg(CX), DX
+
+	CMPQ	AX, DX		// Only switch if on curg.
+	JNE	noswitch
+
+	MOVQ	m_g0(CX), DX
+	MOVQ	(g_sched+gobuf_sp)(DX), SP	// Set SP to g0 stack
+
+noswitch:
+	SUBQ	$16, SP		// Space for results
+	ANDQ	$~15, SP	// Align for C code
+
 	MOVQ	runtime·__vdso_clock_gettime_sym(SB), AX
 	CMPQ	AX, $0
 	JEQ	fallback
@@ -152,6 +172,7 @@
 	CALL	AX
 	MOVQ	0(SP), AX	// sec
 	MOVQ	8(SP), DX	// nsec
+	MOVQ	BP, SP		// Restore real SP
 	MOVQ	AX, sec+0(FP)
 	MOVL	DX, nsec+8(FP)
 	RET
@@ -163,13 +184,31 @@
 	MOVQ	0(SP), AX	// sec
 	MOVL	8(SP), DX	// usec
 	IMULQ	$1000, DX
+	MOVQ	BP, SP		// Restore real SP
 	MOVQ	AX, sec+0(FP)
 	MOVL	DX, nsec+8(FP)
 	RET
 
-TEXT runtime·nanotime(SB),NOSPLIT,$16
-	// Duplicate time.now here to avoid using up precious stack space.
-	// See comment above in time.now.
+TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+	// Switch to g0 stack. See comment above in runtime·walltime.
+
+	MOVQ	SP, BP	// Save old SP; BP unchanged by C code.
+
+	get_tls(CX)
+	MOVQ	g(CX), AX
+	MOVQ	g_m(AX), CX
+	MOVQ	m_curg(CX), DX
+
+	CMPQ	AX, DX		// Only switch if on curg.
+	JNE	noswitch
+
+	MOVQ	m_g0(CX), DX
+	MOVQ	(g_sched+gobuf_sp)(DX), SP	// Set SP to g0 stack
+
+noswitch:
+	SUBQ	$16, SP		// Space for results
+	ANDQ	$~15, SP	// Align for C code
+
 	MOVQ	runtime·__vdso_clock_gettime_sym(SB), AX
 	CMPQ	AX, $0
 	JEQ	fallback
@@ -178,6 +217,7 @@
 	CALL	AX
 	MOVQ	0(SP), AX	// sec
 	MOVQ	8(SP), DX	// nsec
+	MOVQ	BP, SP		// Restore real SP
 	// sec is in AX, nsec in DX
 	// return nsec in AX
 	IMULQ	$1000000000, AX
@@ -191,6 +231,7 @@
 	CALL	AX
 	MOVQ	0(SP), AX	// sec
 	MOVL	8(SP), DX	// usec
+	MOVQ	BP, SP		// Restore real SP
 	IMULQ	$1000, DX
 	// sec is in AX, nsec in DX
 	// return nsec in AX
diff --git a/src/runtime/testdata/testprogcgo/sigstack.go b/src/runtime/testdata/testprogcgo/sigstack.go
new file mode 100644
index 0000000..e30a559
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/sigstack.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+// Test handling of Go-allocated signal stacks when calling from
+// C-created threads with and without signal stacks. (See issue
+// #22930.)
+
+package main
+
+/*
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#ifndef MAP_STACK
+#define MAP_STACK 0
+#endif
+
+extern void SigStackCallback();
+
+static void* WithSigStack(void* arg __attribute__((unused))) {
+	// Set up an alternate system stack.
+	void* base = mmap(0, SIGSTKSZ, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON|MAP_STACK, -1, 0);
+	if (base == MAP_FAILED) {
+		perror("mmap failed");
+		abort();
+	}
+	stack_t st = {}, ost = {};
+	st.ss_sp = (char*)base;
+	st.ss_flags = 0;
+	st.ss_size = SIGSTKSZ;
+	if (sigaltstack(&st, &ost) < 0) {
+		perror("sigaltstack failed");
+		abort();
+	}
+
+	// Call Go.
+	SigStackCallback();
+
+	// Disable signal stack and protect it so we can detect reuse.
+	if (ost.ss_flags & SS_DISABLE) {
+		// Darwin libsystem has a bug where it checks ss_size
+		// even if SS_DISABLE is set. (The kernel gets it right.)
+		ost.ss_size = SIGSTKSZ;
+	}
+	if (sigaltstack(&ost, NULL) < 0) {
+		perror("sigaltstack restore failed");
+		abort();
+	}
+	mprotect(base, SIGSTKSZ, PROT_NONE);
+	return NULL;
+}
+
+static void* WithoutSigStack(void* arg __attribute__((unused))) {
+	SigStackCallback();
+	return NULL;
+}
+
+static void DoThread(int sigstack) {
+	pthread_t tid;
+	if (sigstack) {
+		pthread_create(&tid, NULL, WithSigStack, NULL);
+	} else {
+		pthread_create(&tid, NULL, WithoutSigStack, NULL);
+	}
+	pthread_join(tid, NULL);
+}
+*/
+import "C"
+
+func init() {
+	register("SigStack", SigStack)
+}
+
+func SigStack() {
+	C.DoThread(0)
+	C.DoThread(1)
+	C.DoThread(0)
+	C.DoThread(1)
+	println("OK")
+}
+
+var BadPtr *int
+
+//export SigStackCallback
+func SigStackCallback() {
+	// Cause the Go signal handler to run.
+	defer func() { recover() }()
+	*BadPtr = 42
+}
diff --git a/test/fixedbugs/issue21655.go b/test/fixedbugs/issue21655.go
index 4060c8d..66d4e3a 100644
--- a/test/fixedbugs/issue21655.go
+++ b/test/fixedbugs/issue21655.go
@@ -38,3 +38,25 @@
 // like 0x80000000 and silently using them as
 // signed 32 bit offsets.)
 // f4 was ok, but testing it can't hurt.
+
+func f7(ss []*string, i int) string {
+	const offset = 3 << 29 // 3<<29 * 4 = 3<<31 = 1<<31 mod 1<<32.
+	if i > offset {
+		return *ss[i-offset]
+	}
+	return ""
+}
+func f8(ss []*string, i int) string {
+	const offset = 3<<29 + 10
+	if i > offset {
+		return *ss[i-offset]
+	}
+	return ""
+}
+func f9(ss []*string, i int) string {
+	const offset = 3<<29 - 10
+	if i > offset {
+		return *ss[i-offset]
+	}
+	return ""
+}
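A quick arithmetic check of the offset comment above (the program is illustrative, not part of the test): 3<<29 scaled by a 4-byte element size wraps to 1<<31 in 32 bits.

package main

import "fmt"

func main() {
	x := uint64(3<<29) * 4          // 3<<31 == 6442450944
	fmt.Println(x % (1 << 32))      // 2147483648
	fmt.Println(uint32(x) == 1<<31) // true, as the comment states
}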
diff --git a/test/fixedbugs/issue22429.go b/test/fixedbugs/issue22429.go
new file mode 100644
index 0000000..289b434
--- /dev/null
+++ b/test/fixedbugs/issue22429.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure SSA->assembly pass can handle SP as an index register.
+
+package p
+
+type T struct {
+	a, b, c, d float32
+}
+
+func f(a *[8]T, i, j, k int) float32 {
+	b := *a
+	return b[i].a + b[j].b + b[k].c
+}
diff --git a/test/fixedbugs/issue22458.go b/test/fixedbugs/issue22458.go
new file mode 100644
index 0000000..5c89929
--- /dev/null
+++ b/test/fixedbugs/issue22458.go
@@ -0,0 +1,26 @@
+// compile
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure KeepAlive introduces a use of the spilled variable.
+
+package main
+
+import "runtime"
+
+type node struct {
+	next *node
+}
+
+var x bool
+
+func main() {
+	var head *node
+	for x {
+		head = &node{head}
+	}
+
+	runtime.KeepAlive(head)
+}
diff --git a/test/fixedbugs/issue22683.go b/test/fixedbugs/issue22683.go
new file mode 100644
index 0000000..a59a0ed
--- /dev/null
+++ b/test/fixedbugs/issue22683.go
@@ -0,0 +1,30 @@
+// cmpout
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+)
+
+type foo struct {
+	bar [1]*int
+}
+
+func main() {
+	ch := make(chan foo, 2)
+	var a int
+	var b [1]*int
+	b[0] = &a
+	ch <- foo{bar: b}
+	close(ch)
+
+	for v := range ch {
+		for i := 0; i < 1; i++ {
+			fmt.Println(v.bar[0] != nil)
+		}
+	}
+}
diff --git a/test/fixedbugs/issue22683.out b/test/fixedbugs/issue22683.out
new file mode 100644
index 0000000..27ba77d
--- /dev/null
+++ b/test/fixedbugs/issue22683.out
@@ -0,0 +1 @@
+true
diff --git a/test/fixedbugs/issue22781.go b/test/fixedbugs/issue22781.go
new file mode 100644
index 0000000..5ad82398
--- /dev/null
+++ b/test/fixedbugs/issue22781.go
@@ -0,0 +1,29 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime/debug"
+
+type T struct {
+	// >= 16 bytes to avoid tiny alloc.
+	a, b int
+}
+
+func main() {
+	debug.SetGCPercent(1)
+	for i := 0; i < 100000; i++ {
+		m := make(map[*T]struct{}, 0)
+		for j := 0; j < 20; j++ {
+			// During the call to mapassign_fast64, the key argument
+			// was incorrectly treated as a uint64. If the stack was
+			// scanned during that call, the only pointer to k was
+			// missed, leading to *k being collected prematurely.
+			k := new(T)
+			m[k] = struct{}{}
+		}
+	}
+}