[dev.regabi] all: merge master (5faf941) into dev.regabi

Merge List:

+ 2021-02-16 5faf941df0 internal/goversion: update Version to 1.17
+ 2021-02-16 6f3da9d2f6 README: pull gopher image from website
+ 2021-02-16 098504c73f cmd/link: generate trampoline for inter-dependent packages
+ 2021-02-16 1004a7cb31 runtime/metrics: update documentation to current interface
+ 2021-02-16 6530f2617f doc/go1.16: remove draft notice
+ 2021-02-16 353e111455 doc/go1.16: fix mismatched id attribute
+ 2021-02-16 f0d23c9dbb internal/poll: netpollcheckerr before sendfile
+ 2021-02-16 0cb3415154 doc: remove all docs not tied to distribution
+ 2021-02-16 626ef08127 doc: remove install.html and install-source.html
+ 2021-02-16 30641e36aa internal/poll: if copy_file_range returns 0, assume it failed
+ 2021-02-15 33d72fd412 doc/faq: update generics entry to reflect accepted proposal
+ 2021-02-15 852ce7c212 cmd/go: provide a more helpful suggestion for "go vet -?"
+ 2021-02-13 66c27093d0 cmd/link: fix typo in link_test.go

Change-Id: I98f047b79b93c5ceb344dd43408bcb919b23aeb3
diff --git a/codereview.cfg b/codereview.cfg
new file mode 100644
index 0000000..a23b0a0
--- /dev/null
+++ b/codereview.cfg
@@ -0,0 +1,2 @@
+branch: dev.regabi
+parent-branch: master
diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go
index a62e551..026d8ab 100644
--- a/src/cmd/asm/internal/arch/arch.go
+++ b/src/cmd/asm/internal/arch/arch.go
@@ -109,6 +109,10 @@
 	register["SB"] = RSB
 	register["FP"] = RFP
 	register["PC"] = RPC
+	if linkArch == &x86.Linkamd64 {
+		// Alias g to R14
+		register["g"] = x86.REGG
+	}
 	// Register prefix not used on this architecture.
 
 	instructions := make(map[string]obj.As)
diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go
index 2e83e17..c6def15 100644
--- a/src/cmd/asm/internal/asm/operand_test.go
+++ b/src/cmd/asm/internal/asm/operand_test.go
@@ -259,6 +259,7 @@
 	{"R15", "R15"},
 	{"R8", "R8"},
 	{"R9", "R9"},
+	{"g", "R14"},
 	{"SI", "SI"},
 	{"SP", "SP"},
 	{"X0", "X0"},
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index 154cf9c..f1d37bc 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -305,7 +305,7 @@
 // references and writes symabis information to w.
 //
 // The symabis format is documented at
-// cmd/compile/internal/gc.readSymABIs.
+// cmd/compile/internal/ssagen.ReadSymABIs.
 func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) {
 	switch word {
 	case "TEXT":
diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go
deleted file mode 100644
index 6625ccf..0000000
--- a/src/cmd/compile/fmt_test.go
+++ /dev/null
@@ -1,599 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements TestFormats; a test that verifies
-// format strings in the compiler (this directory and all
-// subdirectories, recursively).
-//
-// TestFormats finds potential (Printf, etc.) format strings.
-// If they are used in a call, the format verbs are verified
-// based on the matching argument type against a precomputed
-// map of valid formats (knownFormats). This map can be used to
-// automatically rewrite format strings across all compiler
-// files with the -r flag.
-//
-// The format map needs to be updated whenever a new (type,
-// format) combination is found and the format verb is not
-// 'v' or 'T' (as in "%v" or "%T"). To update the map auto-
-// matically from the compiler source's use of format strings,
-// use the -u flag. (Whether formats are valid for the values
-// to be formatted must be verified manually, of course.)
-//
-// The -v flag prints out the names of all functions called
-// with a format string, the names of files that were not
-// processed, and any format rewrites made (with -r).
-//
-// Run as: go test -run Formats [-r][-u][-v]
-//
-// Known shortcomings:
-// - indexed format strings ("%[2]s", etc.) are not supported
-//   (the test will fail)
-// - format strings that are not simple string literals cannot
-//   be updated automatically
-//   (the test will fail with respective warnings)
-// - format strings in _test packages outside the current
-//   package are not processed
-//   (the test will report those files)
-//
-package main_test
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/ast"
-	"go/build"
-	"go/constant"
-	"go/format"
-	"go/importer"
-	"go/parser"
-	"go/token"
-	"go/types"
-	"internal/testenv"
-	"io"
-	"io/fs"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"testing"
-	"unicode/utf8"
-)
-
-var (
-	rewrite = flag.Bool("r", false, "rewrite format strings")
-	update  = flag.Bool("u", false, "update known formats")
-)
-
-// The following variables collect information across all processed files.
-var (
-	fset          = token.NewFileSet()
-	formatStrings = make(map[*ast.BasicLit]bool)      // set of all potential format strings found
-	foundFormats  = make(map[string]bool)             // set of all formats found
-	callSites     = make(map[*ast.CallExpr]*callSite) // map of all calls
-)
-
-// A File is a corresponding (filename, ast) pair.
-type File struct {
-	name string
-	ast  *ast.File
-}
-
-func TestFormats(t *testing.T) {
-	if testing.Short() && testenv.Builder() == "" {
-		t.Skip("Skipping in short mode")
-	}
-	testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok
-
-	// process all directories
-	filepath.WalkDir(".", func(path string, info fs.DirEntry, err error) error {
-		if info.IsDir() {
-			if info.Name() == "testdata" {
-				return filepath.SkipDir
-			}
-
-			importPath := filepath.Join("cmd/compile", path)
-			if ignoredPackages[filepath.ToSlash(importPath)] {
-				return filepath.SkipDir
-			}
-
-			pkg, err := build.Import(importPath, path, 0)
-			if err != nil {
-				if _, ok := err.(*build.NoGoError); ok {
-					return nil // nothing to do here
-				}
-				t.Fatal(err)
-			}
-			collectPkgFormats(t, pkg)
-		}
-		return nil
-	})
-
-	// test and rewrite formats
-	updatedFiles := make(map[string]File) // files that were rewritten
-	for _, p := range callSites {
-		// test current format literal and determine updated one
-		out := formatReplace(p.str, func(index int, in string) string {
-			if in == "*" {
-				return in // cannot rewrite '*' (as in "%*d")
-			}
-			// in != '*'
-			typ := p.types[index]
-			format := typ + " " + in // e.g., "*Node %n"
-
-			// check if format is known
-			out, known := knownFormats[format]
-
-			// record format if not yet found
-			_, found := foundFormats[format]
-			if !found {
-				foundFormats[format] = true
-			}
-
-			// report an error if the format is unknown and this is the first
-			// time we see it; ignore "%v" and "%T" which are always valid
-			if !known && !found && in != "%v" && in != "%T" {
-				t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ)
-			}
-
-			if out == "" {
-				out = in
-			}
-			return out
-		})
-
-		// replace existing format literal if it changed
-		if out != p.str {
-			// we cannot replace the argument if it's not a string literal for now
-			// (e.g., it may be "foo" + "bar")
-			lit, ok := p.arg.(*ast.BasicLit)
-			if !ok {
-				delete(callSites, p.call) // treat as if we hadn't found this site
-				continue
-			}
-
-			if testing.Verbose() {
-				fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out)
-			}
-
-			// find argument index of format argument
-			index := -1
-			for i, arg := range p.call.Args {
-				if p.arg == arg {
-					index = i
-					break
-				}
-			}
-			if index < 0 {
-				// we may have processed the same call site twice,
-				// but that shouldn't happen
-				panic("internal error: matching argument not found")
-			}
-
-			// replace literal
-			new := *lit                    // make a copy
-			new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes
-			p.call.Args[index] = &new
-			updatedFiles[p.file.name] = p.file
-		}
-	}
-
-	// write dirty files back
-	var filesUpdated bool
-	if len(updatedFiles) > 0 && *rewrite {
-		for _, file := range updatedFiles {
-			var buf bytes.Buffer
-			if err := format.Node(&buf, fset, file.ast); err != nil {
-				t.Errorf("WARNING: gofmt %s failed: %v", file.name, err)
-				continue
-			}
-			if err := ioutil.WriteFile(file.name, buf.Bytes(), 0x666); err != nil {
-				t.Errorf("WARNING: writing %s failed: %v", file.name, err)
-				continue
-			}
-			fmt.Printf("updated %s\n", file.name)
-			filesUpdated = true
-		}
-	}
-
-	// report the names of all functions called with a format string
-	if len(callSites) > 0 && testing.Verbose() {
-		set := make(map[string]bool)
-		for _, p := range callSites {
-			set[nodeString(p.call.Fun)] = true
-		}
-		var list []string
-		for s := range set {
-			list = append(list, s)
-		}
-		fmt.Println("\nFunctions called with a format string")
-		writeList(os.Stdout, list)
-	}
-
-	// update formats
-	if len(foundFormats) > 0 && *update {
-		var list []string
-		for s := range foundFormats {
-			list = append(list, fmt.Sprintf("%q: \"\",", s))
-		}
-		var buf bytes.Buffer
-		buf.WriteString(knownFormatsHeader)
-		writeList(&buf, list)
-		buf.WriteString("}\n")
-		out, err := format.Source(buf.Bytes())
-		const outfile = "fmtmap_test.go"
-		if err != nil {
-			t.Errorf("WARNING: gofmt %s failed: %v", outfile, err)
-			out = buf.Bytes() // continue with unformatted source
-		}
-		if err = ioutil.WriteFile(outfile, out, 0644); err != nil {
-			t.Errorf("WARNING: updating format map failed: %v", err)
-		}
-	}
-
-	// check that knownFormats is up to date
-	if !*rewrite && !*update {
-		var mismatch bool
-		for s := range foundFormats {
-			if _, ok := knownFormats[s]; !ok {
-				mismatch = true
-				break
-			}
-		}
-		if !mismatch {
-			for s := range knownFormats {
-				if _, ok := foundFormats[s]; !ok {
-					mismatch = true
-					break
-				}
-			}
-		}
-		if mismatch {
-			t.Errorf("format map is out of date; run 'go test -u' to update and manually verify correctness of change'")
-		}
-	}
-
-	// all format strings of calls must be in the formatStrings set (self-verification)
-	for _, p := range callSites {
-		if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
-			if formatStrings[lit] {
-				// ok
-				delete(formatStrings, lit)
-			} else {
-				// this should never happen
-				panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit)))
-			}
-		}
-	}
-
-	// if we have any strings left, we may need to update them manually
-	if len(formatStrings) > 0 && filesUpdated {
-		var list []string
-		for lit := range formatStrings {
-			list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit)))
-		}
-		fmt.Println("\nWARNING: Potentially missed format strings")
-		writeList(os.Stdout, list)
-		t.Fail()
-	}
-
-	fmt.Println()
-}
-
-// A callSite describes a function call that appears to contain
-// a format string.
-type callSite struct {
-	file  File
-	call  *ast.CallExpr // call containing the format string
-	arg   ast.Expr      // format argument (string literal or constant)
-	str   string        // unquoted format string
-	types []string      // argument types
-}
-
-func collectPkgFormats(t *testing.T, pkg *build.Package) {
-	// collect all files
-	var filenames []string
-	filenames = append(filenames, pkg.GoFiles...)
-	filenames = append(filenames, pkg.CgoFiles...)
-	filenames = append(filenames, pkg.TestGoFiles...)
-
-	// TODO(gri) verify _test files outside package
-	for _, name := range pkg.XTestGoFiles {
-		// don't process this test itself
-		if name != "fmt_test.go" && testing.Verbose() {
-			fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name))
-		}
-	}
-
-	// make filenames relative to .
-	for i, name := range filenames {
-		filenames[i] = filepath.Join(pkg.Dir, name)
-	}
-
-	// parse all files
-	files := make([]*ast.File, len(filenames))
-	for i, filename := range filenames {
-		f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
-		if err != nil {
-			t.Fatal(err)
-		}
-		files[i] = f
-	}
-
-	// typecheck package
-	conf := types.Config{Importer: importer.Default()}
-	etypes := make(map[ast.Expr]types.TypeAndValue)
-	if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil {
-		t.Fatal(err)
-	}
-
-	// collect all potential format strings (for extra verification later)
-	for _, file := range files {
-		ast.Inspect(file, func(n ast.Node) bool {
-			if s, ok := stringLit(n); ok && isFormat(s) {
-				formatStrings[n.(*ast.BasicLit)] = true
-			}
-			return true
-		})
-	}
-
-	// collect all formats/arguments of calls with format strings
-	for index, file := range files {
-		ast.Inspect(file, func(n ast.Node) bool {
-			if call, ok := n.(*ast.CallExpr); ok {
-				if ignoredFunctions[nodeString(call.Fun)] {
-					return true
-				}
-				// look for an arguments that might be a format string
-				for i, arg := range call.Args {
-					if s, ok := stringVal(etypes[arg]); ok && isFormat(s) {
-						// make sure we have enough arguments
-						n := numFormatArgs(s)
-						if i+1+n > len(call.Args) {
-							t.Errorf("%s: not enough format args (ignore %s?)", posString(call), nodeString(call.Fun))
-							break // ignore this call
-						}
-						// assume last n arguments are to be formatted;
-						// determine their types
-						argTypes := make([]string, n)
-						for i, arg := range call.Args[len(call.Args)-n:] {
-							if tv, ok := etypes[arg]; ok {
-								argTypes[i] = typeString(tv.Type)
-							}
-						}
-						// collect call site
-						if callSites[call] != nil {
-							panic("internal error: file processed twice?")
-						}
-						callSites[call] = &callSite{
-							file:  File{filenames[index], file},
-							call:  call,
-							arg:   arg,
-							str:   s,
-							types: argTypes,
-						}
-						break // at most one format per argument list
-					}
-				}
-			}
-			return true
-		})
-	}
-}
-
-// writeList writes list in sorted order to w.
-func writeList(w io.Writer, list []string) {
-	sort.Strings(list)
-	for _, s := range list {
-		fmt.Fprintln(w, "\t", s)
-	}
-}
-
-// posString returns a string representation of n's position
-// in the form filename:line:col: .
-func posString(n ast.Node) string {
-	if n == nil {
-		return ""
-	}
-	return fset.Position(n.Pos()).String()
-}
-
-// nodeString returns a string representation of n.
-func nodeString(n ast.Node) string {
-	var buf bytes.Buffer
-	if err := format.Node(&buf, fset, n); err != nil {
-		log.Fatal(err) // should always succeed
-	}
-	return buf.String()
-}
-
-// typeString returns a string representation of n.
-func typeString(typ types.Type) string {
-	return filepath.ToSlash(typ.String())
-}
-
-// stringLit returns the unquoted string value and true if
-// n represents a string literal; otherwise it returns ""
-// and false.
-func stringLit(n ast.Node) (string, bool) {
-	if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
-		s, err := strconv.Unquote(lit.Value)
-		if err != nil {
-			log.Fatal(err) // should not happen with correct ASTs
-		}
-		return s, true
-	}
-	return "", false
-}
-
-// stringVal returns the (unquoted) string value and true if
-// tv is a string constant; otherwise it returns "" and false.
-func stringVal(tv types.TypeAndValue) (string, bool) {
-	if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String {
-		return constant.StringVal(tv.Value), true
-	}
-	return "", false
-}
-
-// formatIter iterates through the string s in increasing
-// index order and calls f for each format specifier '%..v'.
-// The arguments for f describe the specifier's index range.
-// If a format specifier contains a "*", f is called with
-// the index range for "*" alone, before being called for
-// the entire specifier. The result of f is the index of
-// the rune at which iteration continues.
-func formatIter(s string, f func(i, j int) int) {
-	i := 0     // index after current rune
-	var r rune // current rune
-
-	next := func() {
-		r1, w := utf8.DecodeRuneInString(s[i:])
-		if w == 0 {
-			r1 = -1 // signal end-of-string
-		}
-		r = r1
-		i += w
-	}
-
-	flags := func() {
-		for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' {
-			next()
-		}
-	}
-
-	index := func() {
-		if r == '[' {
-			log.Fatalf("cannot handle indexed arguments: %s", s)
-		}
-	}
-
-	digits := func() {
-		index()
-		if r == '*' {
-			i = f(i-1, i)
-			next()
-			return
-		}
-		for '0' <= r && r <= '9' {
-			next()
-		}
-	}
-
-	for next(); r >= 0; next() {
-		if r == '%' {
-			i0 := i
-			next()
-			flags()
-			digits()
-			if r == '.' {
-				next()
-				digits()
-			}
-			index()
-			// accept any letter (a-z, A-Z) as format verb;
-			// ignore anything else
-			if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' {
-				i = f(i0-1, i)
-			}
-		}
-	}
-}
-
-// isFormat reports whether s contains format specifiers.
-func isFormat(s string) (yes bool) {
-	formatIter(s, func(i, j int) int {
-		yes = true
-		return len(s) // stop iteration
-	})
-	return
-}
-
-// oneFormat reports whether s is exactly one format specifier.
-func oneFormat(s string) (yes bool) {
-	formatIter(s, func(i, j int) int {
-		yes = i == 0 && j == len(s)
-		return j
-	})
-	return
-}
-
-// numFormatArgs returns the number of format specifiers in s.
-func numFormatArgs(s string) int {
-	count := 0
-	formatIter(s, func(i, j int) int {
-		count++
-		return j
-	})
-	return count
-}
-
-// formatReplace replaces the i'th format specifier s in the incoming
-// string in with the result of f(i, s) and returns the new string.
-func formatReplace(in string, f func(i int, s string) string) string {
-	var buf []byte
-	i0 := 0
-	index := 0
-	formatIter(in, func(i, j int) int {
-		if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers
-			buf = append(buf, in[i0:i]...)
-			buf = append(buf, f(index, sub)...)
-			i0 = j
-		}
-		index++
-		return j
-	})
-	return string(append(buf, in[i0:]...))
-}
-
-// ignoredPackages is the set of packages which can
-// be ignored.
-var ignoredPackages = map[string]bool{}
-
-// ignoredFunctions is the set of functions which may have
-// format-like arguments but which don't do any formatting and
-// thus may be ignored.
-var ignoredFunctions = map[string]bool{}
-
-func init() {
-	// verify that knownFormats entries are correctly formatted
-	for key, val := range knownFormats {
-		// key must be "typename format", and format starts with a '%'
-		// (formats containing '*' alone are not collected in this map)
-		i := strings.Index(key, "%")
-		if i < 0 || !oneFormat(key[i:]) {
-			log.Fatalf("incorrect knownFormats key: %q", key)
-		}
-		// val must be "format" or ""
-		if val != "" && !oneFormat(val) {
-			log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key)
-		}
-	}
-}
-
-const knownFormatsHeader = `// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the knownFormats map which records the valid
-// formats for a given type. The valid formats must correspond to
-// supported compiler formats implemented in fmt.go, or whatever
-// other format verbs are implemented for the given type. The map may
-// also be used to change the use of a format verb across all compiler
-// sources automatically (for instance, if the implementation of fmt.go
-// changes), by using the -r option together with the new formats in the
-// map. To generate this file automatically from the existing source,
-// run: go test -run Formats -u.
-//
-// See the package comment in fmt_test.go for additional information.
-
-package main_test
-
-// knownFormats entries are of the form "typename format" -> "newformat".
-// An absent entry means that the format is not recognized as valid.
-// An empty new format means that the format should remain unchanged.
-var knownFormats = map[string]string{
-`
diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go
deleted file mode 100644
index 0811df7..0000000
--- a/src/cmd/compile/fmtmap_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the knownFormats map which records the valid
-// formats for a given type. The valid formats must correspond to
-// supported compiler formats implemented in fmt.go, or whatever
-// other format verbs are implemented for the given type. The map may
-// also be used to change the use of a format verb across all compiler
-// sources automatically (for instance, if the implementation of fmt.go
-// changes), by using the -r option together with the new formats in the
-// map. To generate this file automatically from the existing source,
-// run: go test -run Formats -u.
-//
-// See the package comment in fmt_test.go for additional information.
-
-package main_test
-
-// knownFormats entries are of the form "typename format" -> "newformat".
-// An absent entry means that the format is not recognized as valid.
-// An empty new format means that the format should remain unchanged.
-var knownFormats = map[string]string{
-	"*bytes.Buffer %s":                                "",
-	"*cmd/compile/internal/gc.EscLocation %v":         "",
-	"*cmd/compile/internal/gc.Mpflt %v":               "",
-	"*cmd/compile/internal/gc.Mpint %v":               "",
-	"*cmd/compile/internal/gc.Node %#v":               "",
-	"*cmd/compile/internal/gc.Node %+S":               "",
-	"*cmd/compile/internal/gc.Node %+v":               "",
-	"*cmd/compile/internal/gc.Node %L":                "",
-	"*cmd/compile/internal/gc.Node %S":                "",
-	"*cmd/compile/internal/gc.Node %j":                "",
-	"*cmd/compile/internal/gc.Node %p":                "",
-	"*cmd/compile/internal/gc.Node %v":                "",
-	"*cmd/compile/internal/ssa.Block %s":              "",
-	"*cmd/compile/internal/ssa.Block %v":              "",
-	"*cmd/compile/internal/ssa.Func %s":               "",
-	"*cmd/compile/internal/ssa.Func %v":               "",
-	"*cmd/compile/internal/ssa.Register %s":           "",
-	"*cmd/compile/internal/ssa.Register %v":           "",
-	"*cmd/compile/internal/ssa.SparseTreeNode %v":     "",
-	"*cmd/compile/internal/ssa.Value %s":              "",
-	"*cmd/compile/internal/ssa.Value %v":              "",
-	"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
-	"*cmd/compile/internal/types.Field %p":            "",
-	"*cmd/compile/internal/types.Field %v":            "",
-	"*cmd/compile/internal/types.Sym %0S":             "",
-	"*cmd/compile/internal/types.Sym %S":              "",
-	"*cmd/compile/internal/types.Sym %p":              "",
-	"*cmd/compile/internal/types.Sym %v":              "",
-	"*cmd/compile/internal/types.Type %#L":            "",
-	"*cmd/compile/internal/types.Type %#v":            "",
-	"*cmd/compile/internal/types.Type %+v":            "",
-	"*cmd/compile/internal/types.Type %-S":            "",
-	"*cmd/compile/internal/types.Type %0S":            "",
-	"*cmd/compile/internal/types.Type %L":             "",
-	"*cmd/compile/internal/types.Type %S":             "",
-	"*cmd/compile/internal/types.Type %p":             "",
-	"*cmd/compile/internal/types.Type %s":             "",
-	"*cmd/compile/internal/types.Type %v":             "",
-	"*cmd/internal/obj.Addr %v":                       "",
-	"*cmd/internal/obj.LSym %v":                       "",
-	"*math/big.Float %f":                              "",
-	"*math/big.Int %#x":                               "",
-	"*math/big.Int %s":                                "",
-	"*math/big.Int %v":                                "",
-	"[16]byte %x":                                     "",
-	"[]*cmd/compile/internal/ssa.Block %v":            "",
-	"[]*cmd/compile/internal/ssa.Value %v":            "",
-	"[][]string %q":                                   "",
-	"[]byte %s":                                       "",
-	"[]byte %x":                                       "",
-	"[]cmd/compile/internal/ssa.Edge %v":              "",
-	"[]cmd/compile/internal/ssa.ID %v":                "",
-	"[]cmd/compile/internal/ssa.posetNode %v":         "",
-	"[]cmd/compile/internal/ssa.posetUndo %v":         "",
-	"[]cmd/compile/internal/syntax.token %s":          "",
-	"[]string %v":                                     "",
-	"[]uint32 %v":                                     "",
-	"bool %v":                                         "",
-	"byte %08b":                                       "",
-	"byte %c":                                         "",
-	"byte %q":                                         "",
-	"byte %v":                                         "",
-	"cmd/compile/internal/arm.shift %d":               "",
-	"cmd/compile/internal/gc.Class %d":                "",
-	"cmd/compile/internal/gc.Class %s":                "",
-	"cmd/compile/internal/gc.Class %v":                "",
-	"cmd/compile/internal/gc.Ctype %d":                "",
-	"cmd/compile/internal/gc.Ctype %v":                "",
-	"cmd/compile/internal/gc.Nodes %#v":               "",
-	"cmd/compile/internal/gc.Nodes %+v":               "",
-	"cmd/compile/internal/gc.Nodes %.v":               "",
-	"cmd/compile/internal/gc.Nodes %v":                "",
-	"cmd/compile/internal/gc.Op %#v":                  "",
-	"cmd/compile/internal/gc.Op %v":                   "",
-	"cmd/compile/internal/gc.Val %#v":                 "",
-	"cmd/compile/internal/gc.Val %T":                  "",
-	"cmd/compile/internal/gc.Val %v":                  "",
-	"cmd/compile/internal/gc.fmtMode %d":              "",
-	"cmd/compile/internal/gc.initKind %d":             "",
-	"cmd/compile/internal/gc.itag %v":                 "",
-	"cmd/compile/internal/ssa.BranchPrediction %d":    "",
-	"cmd/compile/internal/ssa.Edge %v":                "",
-	"cmd/compile/internal/ssa.GCNode %v":              "",
-	"cmd/compile/internal/ssa.ID %d":                  "",
-	"cmd/compile/internal/ssa.ID %v":                  "",
-	"cmd/compile/internal/ssa.LocalSlot %s":           "",
-	"cmd/compile/internal/ssa.LocalSlot %v":           "",
-	"cmd/compile/internal/ssa.Location %s":            "",
-	"cmd/compile/internal/ssa.Op %s":                  "",
-	"cmd/compile/internal/ssa.Op %v":                  "",
-	"cmd/compile/internal/ssa.Sym %v":                 "",
-	"cmd/compile/internal/ssa.ValAndOff %s":           "",
-	"cmd/compile/internal/ssa.domain %v":              "",
-	"cmd/compile/internal/ssa.flagConstant %s":        "",
-	"cmd/compile/internal/ssa.posetNode %v":           "",
-	"cmd/compile/internal/ssa.posetTestOp %v":         "",
-	"cmd/compile/internal/ssa.rbrank %d":              "",
-	"cmd/compile/internal/ssa.regMask %d":             "",
-	"cmd/compile/internal/ssa.register %d":            "",
-	"cmd/compile/internal/ssa.relation %s":            "",
-	"cmd/compile/internal/syntax.Error %q":            "",
-	"cmd/compile/internal/syntax.Expr %#v":            "",
-	"cmd/compile/internal/syntax.LitKind %d":          "",
-	"cmd/compile/internal/syntax.Node %T":             "",
-	"cmd/compile/internal/syntax.Operator %s":         "",
-	"cmd/compile/internal/syntax.Pos %s":              "",
-	"cmd/compile/internal/syntax.Pos %v":              "",
-	"cmd/compile/internal/syntax.position %s":         "",
-	"cmd/compile/internal/syntax.token %q":            "",
-	"cmd/compile/internal/syntax.token %s":            "",
-	"cmd/compile/internal/types.EType %d":             "",
-	"cmd/compile/internal/types.EType %s":             "",
-	"cmd/compile/internal/types.EType %v":             "",
-	"cmd/internal/obj.ABI %v":                         "",
-	"error %v":                                        "",
-	"float64 %.2f":                                    "",
-	"float64 %.3f":                                    "",
-	"float64 %.6g":                                    "",
-	"float64 %g":                                      "",
-	"int %#x":                                         "",
-	"int %-12d":                                       "",
-	"int %-6d":                                        "",
-	"int %-8o":                                        "",
-	"int %02d":                                        "",
-	"int %6d":                                         "",
-	"int %c":                                          "",
-	"int %d":                                          "",
-	"int %v":                                          "",
-	"int %x":                                          "",
-	"int16 %d":                                        "",
-	"int16 %x":                                        "",
-	"int32 %#x":                                       "",
-	"int32 %d":                                        "",
-	"int32 %v":                                        "",
-	"int32 %x":                                        "",
-	"int64 %#x":                                       "",
-	"int64 %+d":                                       "",
-	"int64 %-10d":                                     "",
-	"int64 %.5d":                                      "",
-	"int64 %d":                                        "",
-	"int64 %v":                                        "",
-	"int64 %x":                                        "",
-	"int8 %d":                                         "",
-	"int8 %v":                                         "",
-	"int8 %x":                                         "",
-	"interface{} %#v":                                 "",
-	"interface{} %T":                                  "",
-	"interface{} %p":                                  "",
-	"interface{} %q":                                  "",
-	"interface{} %s":                                  "",
-	"interface{} %v":                                  "",
-	"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
-	"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
-	"map[cmd/compile/internal/ssa.ID]uint32 %v":                            "",
-	"map[int64]uint32 %v":  "",
-	"math/big.Accuracy %s": "",
-	"reflect.Type %s":      "",
-	"rune %#U":             "",
-	"rune %c":              "",
-	"rune %q":              "",
-	"string %-*s":          "",
-	"string %-16s":         "",
-	"string %-6s":          "",
-	"string %q":            "",
-	"string %s":            "",
-	"string %v":            "",
-	"time.Duration %d":     "",
-	"time.Duration %v":     "",
-	"uint %04x":            "",
-	"uint %5d":             "",
-	"uint %d":              "",
-	"uint %x":              "",
-	"uint16 %d":            "",
-	"uint16 %x":            "",
-	"uint32 %#U":           "",
-	"uint32 %#x":           "",
-	"uint32 %d":            "",
-	"uint32 %v":            "",
-	"uint32 %x":            "",
-	"uint64 %08x":          "",
-	"uint64 %b":            "",
-	"uint64 %d":            "",
-	"uint64 %x":            "",
-	"uint8 %#x":            "",
-	"uint8 %d":             "",
-	"uint8 %v":             "",
-	"uint8 %x":             "",
-	"uintptr %d":           "",
-}
diff --git a/src/cmd/compile/internal-abi.md b/src/cmd/compile/internal-abi.md
new file mode 100644
index 0000000..f4ef2cc
--- /dev/null
+++ b/src/cmd/compile/internal-abi.md
@@ -0,0 +1,628 @@
+# Go internal ABI specification
+
+This document describes Go’s internal application binary interface
+(ABI), known as ABIInternal.
+Go's ABI defines the layout of data in memory and the conventions for
+calling between Go functions.
+This ABI is *unstable* and will change between Go versions.
+If you’re writing assembly code, please instead refer to Go’s
+[assembly documentation](/doc/asm.html), which describes Go’s stable
+ABI, known as ABI0.
+
+All functions defined in Go source follow ABIInternal.
+However, ABIInternal and ABI0 functions are able to call each other
+through transparent *ABI wrappers*, described in the [internal calling
+convention proposal](https://golang.org/design/27539-internal-abi).
+
+Go uses a common ABI design across all architectures.
+We first describe the common ABI, and then cover per-architecture
+specifics.
+
+*Rationale*: For the reasoning behind using a common ABI across
+architectures instead of the platform ABI, see the [register-based Go
+calling convention proposal](https://golang.org/design/40724-register-calling).
+
+## Memory layout
+
+Go's built-in types have the following sizes and alignments.
+Many, though not all, of these sizes are guaranteed by the [language
+specification](/doc/go_spec.html#Size_and_alignment_guarantees).
+Those that aren't guaranteed may change in future versions of Go (for
+example, we've considered changing the alignment of int64 on 32-bit).
+
+| Type | 64-bit |       | 32-bit |       |
+| ---  | ---    | ---   | ---    | ---   |
+|      | Size   | Align | Size   | Align |
+| bool, uint8, int8  | 1  | 1 | 1  | 1 |
+| uint16, int16      | 2  | 2 | 2  | 2 |
+| uint32, int32      | 4  | 4 | 4  | 4 |
+| uint64, int64      | 8  | 8 | 8  | 4 |
+| int, uint          | 8  | 8 | 4  | 4 |
+| float32            | 4  | 4 | 4  | 4 |
+| float64            | 8  | 8 | 8  | 4 |
+| complex64          | 8  | 4 | 8  | 4 |
+| complex128         | 16 | 8 | 16 | 4 |
+| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |
+
+The types `byte` and `rune` are aliases for `uint8` and `int32`,
+respectively, and hence have the same size and alignment as these
+types.
+
+The layout of `map`, `chan`, and `func` types is equivalent to `*T`.
+
+To describe the layout of the remaining composite types, we first
+define the layout of a *sequence* S of N fields with types
+t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
+We define the byte offset at which each field begins relative to a
+base address of 0, as well as the size and alignment of the sequence
+as follows:
+
+```
+offset(S, i) = 0  if i = 1
+             = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
+alignof(S)   = 1  if N = 0
+             = max(alignof(t_i) | 1 <= i <= N)
+sizeof(S)    = 0  if N = 0
+             = align(offset(S, N) + sizeof(t_N), alignof(S))
+```
+
+Where sizeof(T) and alignof(T) are the size and alignment of type T,
+respectively, and align(x, y) rounds x up to a multiple of y.
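+
+As an illustration, here is a small Go sketch of these layout
+equations (not the compiler's implementation; the `field` and
+`layout` names are hypothetical):
+
+```go
+package main
+
+import "fmt"
+
+// align rounds x up to a multiple of a.
+func align(x, a int64) int64 {
+	return (x + a - 1) / a * a
+}
+
+// field models a type in a sequence by its size and alignment.
+type field struct{ size, align int64 }
+
+// layout computes the field offsets and the overall size and
+// alignment of a sequence, following offset/alignof/sizeof above.
+func layout(seq []field) (offsets []int64, size, alignment int64) {
+	alignment = 1
+	var offset int64
+	for _, f := range seq {
+		offset = align(offset, f.align)
+		offsets = append(offsets, offset)
+		offset += f.size
+		if f.align > alignment {
+			alignment = f.align
+		}
+	}
+	return offsets, align(offset, alignment), alignment
+}
+
+func main() {
+	// struct { a uint8; b uint64; c uint16 } on a 64-bit architecture:
+	offs, size, al := layout([]field{{1, 1}, {8, 8}, {2, 2}})
+	fmt.Println(offs, size, al) // [0 8 16] 24 8
+}
+```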
+
+The `interface{}` type is a sequence of 1. a pointer to the runtime type
+description for the interface's dynamic type and 2. an `unsafe.Pointer`
+data field.
+Any other interface type (besides the empty interface) is a sequence
+of 1. a pointer to the runtime "itab" that gives the method pointers and
+the type of the data field and 2. an `unsafe.Pointer` data field.
+An interface can be "direct" or "indirect" depending on the dynamic
+type: a direct interface stores the value directly in the data field,
+and an indirect interface stores a pointer to the value in the data
+field.
+An interface can only be direct if the value consists of a single
+pointer word.
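+
+In runtime terms, these two layouts correspond to the following pair
+of structures (a descriptive sketch mirroring the runtime's internal
+`eface` and `iface` types; not an API to rely on):
+
+```go
+// eface is the layout of an interface{} value.
+type eface struct {
+	_type unsafe.Pointer // runtime type descriptor of the dynamic type
+	data  unsafe.Pointer // the value (direct) or a pointer to it (indirect)
+}
+
+// iface is the layout of a non-empty interface value.
+type iface struct {
+	tab  unsafe.Pointer // itab: method pointers and dynamic type
+	data unsafe.Pointer
+}
+```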
+
+An array type `[N]T` is a sequence of N fields of type T.
+
+The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
+backing store, an `int` giving the `len` of the slice, and an `int`
+giving the `cap` of the slice.
+
+The `string` type is a sequence of a `*[len]byte` pointer to the
+string backing store, and an `int` giving the `len` of the string.
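+
+These are the same layouts described by `reflect.SliceHeader` and
+`reflect.StringHeader` (shown here for illustration; constructing
+them by hand is unsafe):
+
+```go
+type SliceHeader struct {
+	Data uintptr
+	Len  int
+	Cap  int
+}
+
+type StringHeader struct {
+	Data uintptr
+	Len  int
+}
+```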
+
+A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
+sequence t<sub>1</sub>, ..., t<sub>M</sub>, t<sub>P</sub>, where
+t<sub>P</sub> is either:
+
+- Type `byte` if sizeof(t<sub>M</sub>) = 0 and sizeof(t<sub>i</sub>) ≠ 0
+  for any i.
+- Empty (size 0 and align 1) otherwise.
+
+The padding byte prevents creating a past-the-end pointer by taking
+the address of the final, empty fM field.
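+
+For example, the effect of the padding byte can be observed on a
+64-bit architecture (a quick check, not normative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+type S struct {
+	f1 int64
+	f2 struct{} // zero-size final field
+}
+
+func main() {
+	var s S
+	// The padding byte (rounded up to S's alignment of 8) makes
+	// sizeof(S) 16 rather than 8, so &s.f2 never points past the
+	// end of the object.
+	fmt.Println(unsafe.Sizeof(s), unsafe.Offsetof(s.f2)) // 16 8
+}
+```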
+
+Note that user-written assembly code should generally not depend on Go
+type layout and should instead use the constants defined in
+[`go_asm.h`](/doc/asm.html#data-offsets).
+
+## Function call argument and result passing
+
+Function calls pass arguments and results using a combination of the
+stack and machine registers.
+Each argument or result is passed either entirely in registers or
+entirely on the stack.
+Because access to registers is generally faster than access to the
+stack, arguments and results are preferentially passed in registers.
+However, any argument or result that contains a non-trivial array or
+does not fit entirely in the remaining available registers is passed
+on the stack.
+
+Each architecture defines a sequence of integer registers and a
+sequence of floating-point registers.
+At a high level, arguments and results are recursively broken down
+into values of base types and these base values are assigned to
+registers from these sequences.
+
+Arguments and results can share the same registers, but do not share
+the same stack space.
+Beyond the arguments and results passed on the stack, the caller also
+reserves spill space on the stack for all register-based arguments
+(but does not populate this space).
+
+The receiver, arguments, and results of function or method F are
+assigned to registers or the stack using the following algorithm:
+
+1. Let NI and NFP be the lengths of the integer and floating-point
+   register sequences defined by the architecture.
+   Let I and FP be 0; these are the indexes of the next integer and
+   floating-point register.
+   Let S, the type sequence defining the stack frame, be empty.
+1. If F is a method, assign F’s receiver.
+1. For each argument A of F, assign A.
+1. Add a pointer-alignment field to S. This has size 0 and the same
+   alignment as `uintptr`.
+1. Reset I and FP to 0.
+1. For each result R of F, assign R.
+1. Add a pointer-alignment field to S.
+1. For each register-assigned receiver and argument of F, let T be its
+   type and add T to the stack sequence S.
+   This is the argument's (or receiver's) spill space and will be
+   uninitialized at the call.
+1. Add a pointer-alignment field to S.
+
+Assigning a receiver, argument, or result V of underlying type T works
+as follows:
+
+1. Remember I and FP.
+1. Try to register-assign V.
+1. If step 2 failed, reset I and FP to the values from step 1, add T
+   to the stack sequence S, and assign V to this field in S.
+
+Register-assignment of a value V of underlying type T works as follows:
+
+1. If T is a boolean or integral type that fits in an integer
+   register, assign V to register I and increment I.
+1. If T is an integral type that fits in two integer registers, assign
+   the least significant and most significant halves of V to registers
+   I and I+1, respectively, and increment I by 2.
+1. If T is a floating-point type and can be represented without loss
+   of precision in a floating-point register, assign V to register FP
+   and increment FP.
+1. If T is a complex type, recursively register-assign its real and
+   imaginary parts.
+1. If T is a pointer type, map type, channel type, or function type,
+   assign V to register I and increment I.
+1. If T is a string type, interface type, or slice type, recursively
+   register-assign V’s components (2 for strings and interfaces, 3 for
+   slices).
+1. If T is a struct type, recursively register-assign each field of V.
+1. If T is an array type of length 0, do nothing.
+1. If T is an array type of length 1, recursively register-assign its
+   one element.
+1. If T is an array type of length > 1, fail.
+1. If I > NI or FP > NFP, fail.
+1. If any recursive assignment above fails, fail.
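+
+The following Go sketch condenses these rules (purely illustrative:
+the `Kind`/`Type` model and all names are hypothetical, and the real
+implementation lives in `cmd/compile/internal/abi`):
+
+```go
+package main
+
+import "fmt"
+
+// Kind is a simplified, hypothetical model of Go types, just enough
+// to illustrate the assignment rules above.
+type Kind int
+
+const (
+	KInt    Kind = iota // bool, integer, pointer, map, chan, func
+	KFloat              // float32 or float64
+	KString             // pointer + len
+	KSlice              // pointer + len + cap
+	KStruct
+	KArray
+)
+
+type Type struct {
+	Kind  Kind
+	Elem  *Type  // element type, for KArray
+	Len   int    // length, for KArray
+	Field []Type // field types, for KStruct
+}
+
+// state tracks the next integer (i) and floating-point (fp)
+// register indexes and the number available (ni, nfp).
+type state struct {
+	i, fp, ni, nfp int
+}
+
+var intReg = Type{Kind: KInt}
+
+// regAssign reports whether a value of type t fits entirely in the
+// remaining registers, consuming them if so.
+func (s *state) regAssign(t Type) bool {
+	switch t.Kind {
+	case KInt:
+		s.i++
+		return s.i <= s.ni
+	case KFloat:
+		s.fp++
+		return s.fp <= s.nfp
+	case KString: // 2 pointer/integer components
+		return s.regAssign(intReg) && s.regAssign(intReg)
+	case KSlice: // 3 pointer/integer components
+		return s.regAssign(intReg) && s.regAssign(intReg) &&
+			s.regAssign(intReg)
+	case KStruct: // each field in turn
+		for _, f := range t.Field {
+			if !s.regAssign(f) {
+				return false
+			}
+		}
+		return true
+	case KArray:
+		switch t.Len {
+		case 0:
+			return true // nothing to assign
+		case 1:
+			return s.regAssign(*t.Elem)
+		default:
+			return false // non-trivial arrays always fail
+		}
+	}
+	return false
+}
+
+// assign register-assigns t if possible; otherwise it resets the
+// register counters and adds t to the stack sequence S.
+func (s *state) assign(t Type, stack *[]Type) {
+	i, fp := s.i, s.fp // remember I and FP
+	if !s.regAssign(t) {
+		s.i, s.fp = i, fp // reset; fall back to the stack
+		*stack = append(*stack, t)
+	}
+}
+
+func main() {
+	s := &state{ni: 9, nfp: 15} // amd64-like register counts
+	var stack []Type
+	s.assign(Type{Kind: KString}, &stack)                       // registers
+	s.assign(Type{Kind: KArray, Len: 2, Elem: &intReg}, &stack) // stack
+	fmt.Println(s.i, s.fp, len(stack))                          // 2 0 1
+}
+```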
+
+The above algorithm produces an assignment of each receiver, argument,
+and result to registers or to a field in the stack sequence.
+The final stack sequence looks like: stack-assigned receiver,
+stack-assigned arguments, pointer-alignment, stack-assigned results,
+pointer-alignment, spill space for each register-assigned argument,
+pointer-alignment.
+The following diagram shows what this stack frame looks like on the
+stack, using the typical convention where address 0 is at the bottom:
+
+    +------------------------------+
+    |             . . .            |
+    | 2nd reg argument spill space |
+    | 1st reg argument spill space |
+    | <pointer-sized alignment>    |
+    |             . . .            |
+    | 2nd stack-assigned result    |
+    | 1st stack-assigned result    |
+    | <pointer-sized alignment>    |
+    |             . . .            |
+    | 2nd stack-assigned argument  |
+    | 1st stack-assigned argument  |
+    | stack-assigned receiver      |
+    +------------------------------+ ↓ lower addresses
+
+To perform a call, the caller reserves space starting at the lowest
+address in its stack frame for the call stack frame, stores arguments
+in the registers and argument stack fields determined by the above
+algorithm, and performs the call.
+At the time of a call, spill space, result stack fields, and result
+registers are left uninitialized.
+Upon return, the callee must have stored results to all result
+registers and result stack fields determined by the above algorithm.
+
+There are no callee-save registers, so a call may overwrite any
+register that doesn’t have a fixed meaning, including argument
+registers.
+
+### Example
+
+Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
+struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
+architecture with hypothetical integer registers R0–R9.
+
+On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1`, and the
+stack frame is laid out in the following sequence:
+
+    a2      [2]uintptr
+    r1.x    uintptr
+    r1.y    [2]uintptr
+    a1Spill uint8
+    a3Spill uint8
+    _       [6]uint8  // alignment padding
+
+In the stack frame, only the `a2` field is initialized on entry; the
+rest of the frame is left uninitialized.
+
+On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
+and `r1.x` and `r1.y` are initialized in the stack frame.
+
+There are several things to note in this example.
+First, `a2` and `r1` are stack-assigned because they contain arrays.
+The other arguments and results are register-assigned.
+Result `r2` is decomposed into its components, which are individually
+register-assigned.
+On the stack, the stack-assigned arguments appear at lower addresses
+than the stack-assigned results, which appear at lower addresses than
+the argument spill area.
+Only arguments, not results, are assigned a spill area on the stack.
+
+### Rationale
+
+Each base value is assigned to its own register to optimize
+construction and access.
+An alternative would be to pack multiple sub-word values into
+registers, or to simply map an argument's in-memory layout to
+registers (this is common in C ABIs), but this typically adds cost to
+pack and unpack these values.
+Modern architectures have more than enough registers to pass all
+arguments and results this way for nearly all functions (see the
+appendix), so there’s little downside to spreading base values across
+registers.
+
+Arguments that can’t be fully assigned to registers are passed
+entirely on the stack in case the callee takes the address of that
+argument.
+If an argument could be split across the stack and registers and the
+callee took its address, it would need to be reconstructed in memory,
+a process that would be proportional to the size of the argument.
+
+Non-trivial arrays are always passed on the stack because indexing
+into an array typically requires a computed offset, which generally
+isn’t possible with registers.
+Arrays in general are rare in function signatures (only 0.7% of
+functions in the Go 1.15 standard library and 0.2% in kubelet).
+We considered allowing array fields to be passed on the stack while
+the rest of an argument’s fields are passed in registers, but this
+creates the same problems as other large structs if the callee takes
+the address of an argument, and would benefit <0.1% of functions in
+kubelet (and even these very little).
+
+We make exceptions for 0 and 1-element arrays because these don’t
+require computed offsets, and 1-element arrays are already decomposed
+in the compiler’s SSA representation.
+
+The ABI assignment algorithm above is equivalent to Go’s stack-based
+ABI0 calling convention if there are zero architecture registers.
+This is intended to ease the transition to the register-based internal
+ABI and make it easy for the compiler to generate either calling
+convention.
+An architecture may still define register meanings that aren’t
+compatible with ABI0, but these differences should be easy to account
+for in the compiler.
+
+The algorithm reserves spill space for arguments in the caller’s frame
+so that the compiler can generate a stack growth path that spills into
+this reserved space.
+If the callee has to grow the stack, it may not be able to reserve
+enough additional stack space in its own frame to spill these, which
+is why it’s important that the caller do so.
+These slots also act as the home location if these arguments need to
+be spilled for any other reason, which simplifies traceback printing.
+
+There are several options for how to lay out the argument spill space.
+We chose to lay out each argument according to its type's usual memory
+layout but to separate the spill space from the regular argument
+space.
+Using the usual memory layout simplifies the compiler because it
+already understands this layout.
+Also, if a function takes the address of a register-assigned argument,
+the compiler must spill that argument to memory in its usual memory
+layout and it's more convenient to use the argument spill space for
+this purpose.
+
+Alternatively, the spill space could be structured around argument
+registers.
+In this approach, the stack growth spill path would spill each
+argument register to a register-sized stack word.
+However, if the function takes the address of a register-assigned
+argument, the compiler would have to reconstruct it in memory layout
+elsewhere on the stack.
+
+The spill space could also be interleaved with the stack-assigned
+arguments so the arguments appear in order whether they are register-
+or stack-assigned.
+This would be close to ABI0, except that register-assigned arguments
+would be uninitialized on the stack and there's no need to reserve
+stack space for register-assigned results.
+We expect separating the spill space to perform better because of
+memory locality.
+Separating the space is also potentially simpler for `reflect` calls
+because this allows `reflect` to summarize the spill space as a single
+number.
+Finally, the long-term intent is to remove reserved spill slots
+entirely – allowing most functions to be called without any stack
+setup and easing the introduction of callee-save registers – and
+separating the spill space makes that transition easier.
+
+## Closures
+
+A func value (e.g., `var x func()`) is a pointer to a closure object.
+A closure object begins with a pointer-sized program counter
+representing the entry point of the function, followed by zero or more
+bytes containing the closed-over environment.
+
+Closure calls follow the same conventions as static function and
+method calls, with one addition. Each architecture specifies a
+*closure context pointer* register and calls to closures store the
+address of the closure object in the closure context pointer register
+prior to the call.
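+
+This layout can be observed (unsafely) from Go; the following sketch
+depends on exactly the unstable representation described here and is
+for illustration only:
+
+```go
+package main
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+func main() {
+	x := 42
+	f := func() int { return x } // f points to a closure object
+
+	// The func value is one pointer word; it points at the closure
+	// object, whose first word is the function's entry PC.
+	obj := *(*unsafe.Pointer)(unsafe.Pointer(&f))
+	pc := *(*uintptr)(obj)
+	fmt.Printf("closure object %p, entry PC %#x\n", obj, pc)
+	fmt.Println(f()) // 42
+}
+```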
+
+## Software floating-point mode
+
+In "softfloat" mode, the ABI simply treats the hardware as having zero
+floating-point registers.
+As a result, any arguments containing floating-point values will be
+passed on the stack.
+
+*Rationale*: Softfloat mode is about compatibility over performance
+and is not commonly used.
+Hence, we keep the ABI as simple as possible in this case, rather than
+adding additional rules for passing floating-point values in integer
+registers.
+
+## Architecture specifics
+
+This section describes per-architecture register mappings, as well as
+other per-architecture special cases.
+
+### amd64 architecture
+
+The amd64 architecture uses the following sequence of 9 registers for
+integer arguments and results:
+
+    RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
+
+It uses X0 – X14 for floating-point arguments and results.
+
+*Rationale*: These sequences are chosen from the available registers
+to be relatively easy to remember.
+
+Registers R12 and R13 are permanent scratch registers.
+R15 is a scratch register except in dynamically linked binaries.
+
+*Rationale*: Some operations such as stack growth and reflection calls
+need dedicated scratch registers in order to manipulate call frames
+without corrupting arguments or results.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Body meaning |
+| --- | --- | --- |
+| RSP | Stack pointer | Fixed |
+| RBP | Frame pointer | Fixed |
+| RDX | Closure context pointer | Scratch |
+| R12 | None | Scratch |
+| R13 | None | Scratch |
+| R14 | Current goroutine | Scratch |
+| R15 | GOT reference temporary | Fixed if dynlink |
+| X15 | Zero value | Fixed |
+
+TODO: We may start with the existing TLS-based g and move to R14
+later.
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention except for R14 and X15, which will have
+to be restored on transitions from ABI0 code to ABIInternal code.
+In ABI0, these are undefined, so transitions from ABIInternal to ABI0
+can ignore these registers.
+
+*Rationale*: For the current goroutine pointer, we chose a register
+that requires an additional REX byte.
+While this adds one byte to every function prologue, it is hardly ever
+accessed outside the function prologue and we expect making more
+single-byte registers available to be a net win.
+
+*Rationale*: We designate X15 as a fixed zero register because
+functions often have to bulk zero their stack frames, and this is more
+efficient with a designated zero register.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 8 bytes.
+
+The amd64 architecture does not use a link register.
+
+A function's stack frame is laid out as follows:
+
+    +------------------------------+
+    | return PC                    |
+    | RBP on entry                 |
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    +------------------------------+ ↓ lower addresses
+
+The "return PC" is pushed as part of the standard amd64 `CALL`
+operation.
+On entry, a function subtracts from RSP to open its stack frame and
+saves the value of RBP directly below the return PC.
+A leaf function that does not require any stack space may omit the
+saved RBP.
+
+The Go ABI's use of RBP as a frame pointer register is compatible with
+amd64 platform conventions so that Go can inter-operate with platform
+debuggers and profilers.
+
+#### Flags
+
+The direction flag (D) is always cleared (set to the “forward”
+direction) at a call.
+The arithmetic status flags are treated like scratch registers and not
+preserved across calls.
+All other bits in RFLAGS are system flags.
+
+The CPU is always in MMX technology state (not x87 mode).
+
+*Rationale*: Go on amd64 uses the XMM registers and never uses the x87
+registers, so it makes sense to assume the CPU is in MMX mode.
+Otherwise, any function that used the XMM registers would have to
+execute an EMMS instruction before calling another function or
+returning (this is the case in the SysV ABI).
+
+At calls, the MXCSR control bits are always set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| FZ | 15 | 0 | Do not flush to zero |
+| RC | 14/13 | 0 (RN) | Round to nearest |
+| PM | 12 | 1 | Precision masked |
+| UM | 11 | 1 | Underflow masked |
+| OM | 10 | 1 | Overflow masked |
+| ZM | 9 | 1 | Divide-by-zero masked |
+| DM | 8 | 1 | Denormal operations masked |
+| IM | 7 | 1 | Invalid operations masked |
+| DAZ | 6 | 0 | Do not zero denormals |
+
+The MXCSR status bits are callee-save.
+
+*Rationale*: Having a fixed MXCSR control configuration allows Go
+functions to use SSE operations without modifying or saving the MXCSR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+The above fixed configuration matches the process initialization
+control bits specified by the ELF AMD64 ABI.
+
+The x87 floating-point control word is not used by Go on amd64.
+
+## Future directions
+
+### Spill path improvements
+
+The ABI currently reserves spill space for argument registers so the
+compiler can statically generate an argument spill path before calling
+into `runtime.morestack` to grow the stack.
+This ensures there will be sufficient spill space even when the stack
+is nearly exhausted and keeps stack growth and stack scanning
+essentially unchanged from ABI0.
+
+However, this wastes stack space (the median wastage is 16 bytes per
+call), resulting in larger stacks and increased cache footprint.
+A better approach would be to reserve stack space only when spilling.
+One way to ensure enough space is available to spill would be for
+every function to ensure there is enough space for the function's own
+frame *as well as* the spill space of all functions it calls.
+For most functions, this would change the threshold for the prologue
+stack growth check.
+For `nosplit` functions, this would change the threshold used in the
+linker's static stack size check.
+
+Allocating spill space in the callee rather than the caller may also
+allow for faster reflection calls in the common case where a function
+takes only register arguments, since it would allow reflection to make
+these calls directly without allocating any frame.
+
+The statically-generated spill path also increases code size.
+It is possible to instead have a generic spill path in the runtime, as
+part of `morestack`.
+However, this complicates reserving the spill space, since spilling
+all possible register arguments would, in most cases, take
+significantly more space than spilling only those used by a particular
+function.
+Some options are to spill to a temporary space and copy back only the
+registers used by the function, or to grow the stack if necessary
+before spilling to it (using a temporary space if necessary), or to
+use a heap-allocated space if insufficient stack space is available.
+These options all add enough complexity that we will have to make this
+decision based on the actual code size growth caused by the static
+spill paths.
+
+### Clobber sets
+
+As defined, the ABI does not use callee-save registers.
+This significantly simplifies the garbage collector and the compiler's
+register allocator, but at some performance cost.
+A potentially better balance for Go code would be to use *clobber
+sets*: for each function, the compiler records the set of registers it
+clobbers (including those clobbered by functions it calls) and any
+register not clobbered by function F can remain live across calls to
+F.
+
+This is generally a good fit for Go because Go's package DAG allows
+function metadata like the clobber set to flow up the call graph, even
+across package boundaries.
+Clobber sets would require relatively little change to the garbage
+collector, unlike general callee-save registers.
+One disadvantage of clobber sets over callee-save registers is that
+they don't help with indirect function calls or interface method
+calls, since static information isn't available in these cases.
+
+### Large aggregates
+
+Go encourages passing composite values by value, and this simplifies
+reasoning about mutation and races.
+However, this comes at a performance cost for large composite values.
+It may be possible to instead transparently pass large composite
+values by reference and delay copying until it is actually necessary.
+
+## Appendix: Register usage analysis
+
+In order to understand the impacts of the above design on register
+usage, we
+[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
+impact of the above ABI on a large code base: cmd/kubelet from
+[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.
+
+The following table shows the impact of different numbers of available
+integer and floating-point registers on argument assignment:
+
+```
+|      |        |       |      stack args |          spills |     stack total |
+| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
+|    0 |      0 |  6.3% |  32 | 152 | 256 |   0 |   0 |   0 |  32 | 152 | 256 |
+|    0 |      8 |  6.4% |  32 | 152 | 256 |   0 |   0 |   0 |  32 | 152 | 256 |
+|    1 |      8 | 21.3% |  24 | 144 | 248 |   8 |   8 |   8 |  32 | 152 | 256 |
+|    2 |      8 | 38.9% |  16 | 128 | 224 |   8 |  16 |  16 |  24 | 136 | 240 |
+|    3 |      8 | 57.0% |   0 | 120 | 224 |  16 |  24 |  24 |  24 | 136 | 240 |
+|    4 |      8 | 73.0% |   0 | 120 | 216 |  16 |  32 |  32 |  24 | 136 | 232 |
+|    5 |      8 | 83.3% |   0 | 112 | 216 |  16 |  40 |  40 |  24 | 136 | 232 |
+|    6 |      8 | 87.5% |   0 | 112 | 208 |  16 |  48 |  48 |  24 | 136 | 232 |
+|    7 |      8 | 89.8% |   0 | 112 | 208 |  16 |  48 |  56 |  24 | 136 | 232 |
+|    8 |      8 | 91.3% |   0 | 112 | 200 |  16 |  56 |  64 |  24 | 136 | 232 |
+|    9 |      8 | 92.1% |   0 | 112 | 192 |  16 |  56 |  72 |  24 | 136 | 232 |
+|   10 |      8 | 92.6% |   0 | 104 | 192 |  16 |  56 |  72 |  24 | 136 | 232 |
+|   11 |      8 | 93.1% |   0 | 104 | 184 |  16 |  56 |  80 |  24 | 128 | 232 |
+|   12 |      8 | 93.4% |   0 | 104 | 176 |  16 |  56 |  88 |  24 | 128 | 232 |
+|   13 |      8 | 94.0% |   0 |  88 | 176 |  16 |  56 |  96 |  24 | 128 | 232 |
+|   14 |      8 | 94.4% |   0 |  80 | 152 |  16 |  64 | 104 |  24 | 128 | 232 |
+|   15 |      8 | 94.6% |   0 |  80 | 152 |  16 |  64 | 112 |  24 | 128 | 232 |
+|   16 |      8 | 94.9% |   0 |  16 | 152 |  16 |  64 | 112 |  24 | 128 | 232 |
+|    ∞ |      8 | 99.8% |   0 |   0 |   0 |  24 | 112 | 216 |  24 | 120 | 216 |
+```
+
+The first two columns show the number of available integer and
+floating-point registers.
+The first row shows the results for 0 integer and 0 floating-point
+registers, which is equivalent to ABI0.
+We found that any reasonable number of floating-point registers has
+the same effect, so we fixed it at 8 for all other rows.
+
+The “% fit” column gives the fraction of functions where all arguments
+and results are register-assigned and no arguments are passed on the
+stack.
+The three “stack args” columns give the median, 95th and 99th
+percentile number of bytes of stack arguments.
+The “spills” columns likewise summarize the number of bytes in
+on-stack spill space.
+And “stack total” summarizes the sum of stack arguments and on-stack
+spill slots.
+Note that these are three different distributions; for example,
+there’s no single function that takes 0 stack argument bytes, 16 spill
+bytes, and 24 total stack bytes.
+
+From this, we can see that the fraction of functions that fit entirely
+in registers grows very slowly once it reaches about 90%, though
+curiously there is a small minority of functions that could benefit
+from a huge number of registers.
+Making 9 integer registers available on amd64 puts the fit rate in
+this realm, at 92.1%.
+We also see that the stack space required for most functions is fairly
+small.
+While the increasing space required for spills largely balances out
+the decreasing space required for stack arguments as the number of
+available registers increases, there is a general reduction in the
+total stack space required with more available registers.
+This does, however, suggest that eliminating spill slots in the future
+would noticeably reduce stack requirements.
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
new file mode 100644
index 0000000..e935821
--- /dev/null
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -0,0 +1,461 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import (
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"fmt"
+	"sync"
+)
+
+//......................................................................
+//
+// Public/exported bits of the ABI utilities.
+//
+
+// ABIParamResultInfo stores the results of processing a given
+// function type to compute stack layout and register assignments. For
+// each input and output parameter we capture whether the param was
+// register-assigned (and to which register(s)) or the stack offset
+// for the param if it is not going to be passed in registers according
+// to the rules in the Go internal ABI specification (1.17).
+type ABIParamResultInfo struct {
+	inparams          []ABIParamAssignment // Includes receiver for method calls.  Does NOT include hidden closure pointer.
+	outparams         []ABIParamAssignment
+	offsetToSpillArea int64
+	spillAreaSize     int64
+	config            *ABIConfig // to enable String() method
+}
+
+func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
+	return a.inparams
+}
+
+func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
+	return a.outparams
+}
+
+func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
+	return a.inparams[i]
+}
+
+func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
+	return a.outparams[i]
+}
+
+func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
+	return a.offsetToSpillArea
+}
+
+func (a *ABIParamResultInfo) SpillAreaSize() int64 {
+	return a.spillAreaSize
+}
+
+// RegIndex stores the index into the set of machine registers used by
+// the ABI on a specific architecture for parameter passing.  RegIndex
+// values 0 through N-1 (where N is the number of integer registers
+// used for param passing according to the ABI rules) describe integer
+// registers; values N through N+M-1 (where M is the number of floating
+// point registers used) describe floating point registers.  Thus if the
+// ABI says there are 5 integer registers and 7 floating point registers,
+// then a RegIndex value of 4 indicates the 5th integer register, and a
+// RegIndex value of 11 indicates the 7th floating point register.
+type RegIndex uint8
+
+// ABIParamAssignment holds information about how a specific param or
+// result will be passed: in registers (in which case 'Registers' is
+// populated) or on the stack (in which case 'Offset' is set to a
+// non-negative stack offset). The values in 'Registers' are indices (as
+// described above), not architected registers.
+type ABIParamAssignment struct {
+	Type      *types.Type
+	Registers []RegIndex
+	offset    int32
+}
+
+// Offset returns the stack offset for addressing the parameter that "a" describes.
+// This will panic if "a" describes a register-allocated parameter.
+func (a *ABIParamAssignment) Offset() int32 {
+	if len(a.Registers) > 0 {
+		panic("Register allocated parameters have no offset")
+	}
+	return a.offset
+}
+
+// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes.
+// Registers will be spilled here; if a memory home is needed (e.g., for
+// a pointer method), then that will be the address.
+// This will panic if "a" describes a stack-allocated parameter.
+func (a *ABIParamAssignment) SpillOffset() int32 {
+	if len(a.Registers) == 0 {
+		panic("Stack-allocated parameters have no spill offset")
+	}
+	return a.offset
+}
+
+// RegAmounts holds a specified number of integer/float registers.
+type RegAmounts struct {
+	intRegs   int
+	floatRegs int
+}
+
+// ABIConfig captures the number of registers made available
+// by the ABI rules for parameter passing and result returning.
+type ABIConfig struct {
+	// Do we need anything more than this?
+	regAmounts       RegAmounts
+	regsForTypeCache map[*types.Type]int
+}
+
+// NewABIConfig returns a new ABI configuration for an architecture with
+// iRegsCount integer/pointer registers and fRegsCount floating point registers.
+func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig {
+	return &ABIConfig{regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
+}
+
+// NumParamRegs returns the number of parameter registers used for a given type,
+// without regard for the number available.
+func (a *ABIConfig) NumParamRegs(t *types.Type) int {
+	if n, ok := a.regsForTypeCache[t]; ok {
+		return n
+	}
+
+	if t.IsScalar() || t.IsPtrShaped() {
+		var n int
+		if t.IsComplex() {
+			n = 2
+		} else {
+			n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
+		}
+		a.regsForTypeCache[t] = n
+		return n
+	}
+	typ := t.Kind()
+	n := 0
+	switch typ {
+	case types.TARRAY:
+		n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
+	case types.TSTRUCT:
+		for _, f := range t.FieldSlice() {
+			n += a.NumParamRegs(f.Type)
+		}
+	case types.TSLICE:
+		n = a.NumParamRegs(synthSlice)
+	case types.TSTRING:
+		n = a.NumParamRegs(synthString)
+	case types.TINTER:
+		n = a.NumParamRegs(synthIface)
+	}
+	a.regsForTypeCache[t] = n
+	return n
+}
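+
+// For illustration (not exercised elsewhere in this file): with 8-byte
+// registers, NumParamRegs reports 2 for a string (data pointer plus
+// length), 3 for a slice (ptr/len/cap), 2 for a complex128, and, for
+// structs and arrays, the sum over their fields or elements.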
+
+// ABIAnalyze takes a function type 't' and an ABI rules description
+// 'config' and analyzes the function to determine how its parameters
+// and results will be passed (in registers or on the stack), returning
+// an ABIParamResultInfo object that holds the results of the analysis.
+func (config *ABIConfig) ABIAnalyze(t *types.Type) ABIParamResultInfo {
+	setup()
+	s := assignState{
+		rTotal: config.regAmounts,
+	}
+	result := ABIParamResultInfo{config: config}
+
+	// Receiver
+	ft := t.FuncType()
+	if t.NumRecvs() != 0 {
+		rfsl := ft.Receiver.FieldSlice()
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(rfsl[0].Type, false))
+	}
+
+	// Inputs
+	ifsl := ft.Params.FieldSlice()
+	for _, f := range ifsl {
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(f.Type, false))
+	}
+	s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
+
+	// Outputs
+	s.rUsed = RegAmounts{}
+	ofsl := ft.Results.FieldSlice()
+	for _, f := range ofsl {
+		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, true))
+	}
+	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+	// TODO in theory could align offset only to minimum required by spilled data types.
+	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+
+	return result
+}
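+
+// A sketch of the intended use of ABIAnalyze (illustrative only; this
+// change adds no callers yet, and the register counts are a
+// hypothetical amd64-like configuration):
+//
+//	config := NewABIConfig(9, 15)
+//	info := config.ABIAnalyze(fnType)
+//	for i := range info.InParams() {
+//		p := info.InParam(i)
+//		// p.Registers is non-empty for register-assigned params;
+//		// otherwise p.Offset() gives the stack location.
+//	}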
+
+//......................................................................
+//
+// Non-public portions.
+
+// regString produces a human-readable version of a RegIndex.
+func (c *RegAmounts) regString(r RegIndex) string {
+	if int(r) < c.intRegs {
+		return fmt.Sprintf("I%d", int(r))
+	} else if int(r) < c.intRegs+c.floatRegs {
+		return fmt.Sprintf("F%d", int(r)-c.intRegs)
+	}
+	return fmt.Sprintf("<?>%d", r)
+}
+
+// toString renders an ABIParamAssignment in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
+	regs := "R{"
+	offname := "spilloffset" // offset is for spill for register(s)
+	if len(ri.Registers) == 0 {
+		offname = "offset" // offset is for memory arg
+	}
+	for _, r := range ri.Registers {
+		regs += " " + config.regAmounts.regString(r)
+	}
+	return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
+}
+
+// String renders an ABIParamResultInfo in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamResultInfo) String() string {
+	res := ""
+	for k, p := range ri.inparams {
+		res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
+	}
+	for k, r := range ri.outparams {
+		res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
+	}
+	res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
+		ri.offsetToSpillArea, ri.spillAreaSize)
+	return res
+}
+
+// assignState holds intermediate state during the register assigning process
+// for a given function signature.
+type assignState struct {
+	rTotal      RegAmounts // total reg amounts from ABI rules
+	rUsed       RegAmounts // regs used by params completely assigned so far
+	pUsed       RegAmounts // regs used by the current param (or pieces therein)
+	stackOffset int64      // current stack offset
+	spillOffset int64      // current spill offset
+}
+
+// align returns 'a' rounded up to t's alignment.
+func align(a int64, t *types.Type) int64 {
+	return alignTo(a, int(t.Align))
+}
+
+// alignTo returns 'a' rounded up to t, where t must be 0 or a power of 2.
+func alignTo(a int64, t int) int64 {
+	if t == 0 {
+		return a
+	}
+	return types.Rnd(a, int64(t))
+}
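+
+// For example, alignTo(10, 8) == 16, alignTo(16, 8) == 16, and
+// alignTo(10, 0) == 10.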
+
+// stackSlot returns a stack offset for a param or result of the
+// specified type.
+func (state *assignState) stackSlot(t *types.Type) int64 {
+	rv := align(state.stackOffset, t)
+	state.stackOffset = rv + t.Width
+	return rv
+}
+
+// allocateRegs returns a set of register indices for a parameter or result
+// that we've just determined to be register-assignable. The number of registers
+// needed is assumed to be stored in state.pUsed.
+func (state *assignState) allocateRegs() []RegIndex {
+	regs := []RegIndex{}
+
+	// integer
+	for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
+		regs = append(regs, RegIndex(r))
+	}
+	state.rUsed.intRegs += state.pUsed.intRegs
+
+	// floating
+	for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
+		regs = append(regs, RegIndex(r+state.rTotal.intRegs))
+	}
+	state.rUsed.floatRegs += state.pUsed.floatRegs
+
+	return regs
+}
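+
+// For example (with illustrative amounts): given rTotal = {intRegs: 9,
+// floatRegs: 15} and nothing yet used, a parameter needing one integer
+// and one floating-point register is assigned indices {0, 9}, since
+// integer indices precede all floating-point indices.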
+
+// regAllocate creates a register ABIParamAssignment object for a param
+// or result with the specified type, as a final step (this assumes
+// that all of the safety/suitability analysis is complete).
+func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssignment {
+	spillLoc := int64(-1)
+	if !isReturn {
+		// Spill for register-resident t must be aligned for storage of a t.
+		spillLoc = align(state.spillOffset, t)
+		state.spillOffset = spillLoc + t.Size()
+	}
+	return ABIParamAssignment{
+		Type:      t,
+		Registers: state.allocateRegs(),
+		offset:    int32(spillLoc),
+	}
+}
+
+// stackAllocate creates a stack memory ABIParamAssignment object for
+// a param or result with the specified type, as a final step (this
+// assumes that all of the safety/suitability analysis is complete).
+func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment {
+	return ABIParamAssignment{
+		Type:   t,
+		offset: int32(state.stackSlot(t)),
+	}
+}
+
+// intUsed returns the number of integer registers consumed
+// at a given point within an assignment stage.
+func (state *assignState) intUsed() int {
+	return state.rUsed.intRegs + state.pUsed.intRegs
+}
+
+// floatUsed returns the number of floating point registers consumed at
+// a given point within an assignment stage.
+func (state *assignState) floatUsed() int {
+	return state.rUsed.floatRegs + state.pUsed.floatRegs
+}
+
+// regassignIntegral examines a param/result of integral type 't' to
+// determine whether it can be register-assigned. Returns TRUE (and
+// updates state accordingly) if we can register allocate, FALSE
+// otherwise.
+func (state *assignState) regassignIntegral(t *types.Type) bool {
+	regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
+	if t.IsComplex() {
+		regsNeeded = 2
+	}
+
+	// Floating point and complex.
+	if t.IsFloat() || t.IsComplex() {
+		if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
+			// not enough regs
+			return false
+		}
+		state.pUsed.floatRegs += regsNeeded
+		return true
+	}
+
+	// Non-floating point
+	if regsNeeded+state.intUsed() > state.rTotal.intRegs {
+		// not enough regs
+		return false
+	}
+	state.pUsed.intRegs += regsNeeded
+	return true
+}
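+
+// For example, an int64 consumes one integer register, while a complex
+// value of either width consumes two floating-point registers.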
+
+// regassignArray processes an array type (or array component within some
+// other enclosing type) to determine if it can be register assigned.
+// Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignArray(t *types.Type) bool {
+
+	nel := t.NumElem()
+	if nel == 0 {
+		return true
+	}
+	if nel > 1 {
+		// Not an array of length 1: stack assign
+		return false
+	}
+	// Visit element
+	return state.regassign(t.Elem())
+}
+
+// regassignStruct processes a struct type (or struct component within
+// some other enclosing type) to determine if it can be register
+// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignStruct(t *types.Type) bool {
+	for _, field := range t.FieldSlice() {
+		if !state.regassign(field.Type) {
+			return false
+		}
+	}
+	return true
+}
+
+// synthOnce ensures that we only create the synth* fake types once.
+var synthOnce sync.Once
+
+// synthSlice, synthString, and synthIface are synthesized struct types
+// meant to capture the underlying implementations of string/slice/interface.
+var synthSlice *types.Type
+var synthString *types.Type
+var synthIface *types.Type
+
+// setup performs setup for the register assignment utilities, manufacturing
+// a small set of synthesized types that we'll need along the way.
+func setup() {
+	synthOnce.Do(func() {
+		fname := types.BuiltinPkg.Lookup
+		nxp := src.NoXPos
+		unsp := types.Types[types.TUNSAFEPTR]
+		ui := types.Types[types.TUINTPTR]
+		synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(nxp, fname("ptr"), unsp),
+			types.NewField(nxp, fname("len"), ui),
+			types.NewField(nxp, fname("cap"), ui),
+		})
+		synthString = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(nxp, fname("data"), unsp),
+			types.NewField(nxp, fname("len"), ui),
+		})
+		synthIface = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(nxp, fname("f1"), unsp),
+			types.NewField(nxp, fname("f2"), unsp),
+		})
+	})
+}
+
+// regassign examines a given param type (or component within some
+// composite) to determine if it can be register assigned.  Returns
+// TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassign(pt *types.Type) bool {
+	typ := pt.Kind()
+	if pt.IsScalar() || pt.IsPtrShaped() {
+		return state.regassignIntegral(pt)
+	}
+	switch typ {
+	case types.TARRAY:
+		return state.regassignArray(pt)
+	case types.TSTRUCT:
+		return state.regassignStruct(pt)
+	case types.TSLICE:
+		return state.regassignStruct(synthSlice)
+	case types.TSTRING:
+		return state.regassignStruct(synthString)
+	case types.TINTER:
+		return state.regassignStruct(synthIface)
+	default:
+		panic("not expected")
+	}
+}
+
+// assignParamOrReturn processes a given receiver, param, or result
+// of type 'pt' to determine whether it can be register assigned,
+// returning an ABIParamAssignment that records the decision.
+func (state *assignState) assignParamOrReturn(pt *types.Type, isReturn bool) ABIParamAssignment {
+	state.pUsed = RegAmounts{}
+	if pt.Width == types.BADWIDTH {
+		panic("should never happen")
+	} else if pt.Width == 0 {
+		return state.stackAllocate(pt)
+	} else if state.regassign(pt) {
+		return state.regAllocate(pt, isReturn)
+	} else {
+		return state.stackAllocate(pt)
+	}
+}
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index af58440..ce1c402 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -5,13 +5,13 @@
 package amd64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/x86"
 )
 
 var leaptr = x86.ALEAQ
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &x86.Linkamd64
 	arch.REGSP = x86.REGSP
 	arch.MAXWIDTH = 1 << 50
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 0c1456f..aefdb14 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -5,7 +5,10 @@
 package amd64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 	"cmd/internal/objabi"
@@ -19,8 +22,8 @@
 const (
 	dzBlocks    = 16 // number of MOV/ADD blocks
 	dzBlockLen  = 4  // number of clears per block
-	dzBlockSize = 19 // size of instructions in a single block
-	dzMovSize   = 4  // size of single MOV instruction w/ offset
+	dzBlockSize = 23 // size of instructions in a single block
+	dzMovSize   = 5  // size of single MOV instruction w/ offset
 	dzLeaqSize  = 4  // size of single LEAQ instruction
 	dzClearStep = 16 // number of bytes cleared by each MOV instruction
 
@@ -51,7 +54,7 @@
 	return -dzClearStep * (dzBlockLen - tailSteps)
 }
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	const (
 		ax = 1 << iota
 		x0
@@ -61,67 +64,67 @@
 		return p
 	}
 
-	if cnt%int64(gc.Widthreg) != 0 {
+	if cnt%int64(types.RegSize) != 0 {
 		// should only happen with nacl
-		if cnt%int64(gc.Widthptr) != 0 {
-			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+		if cnt%int64(types.PtrSize) != 0 {
+			base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
 		}
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
-		off += int64(gc.Widthptr)
-		cnt -= int64(gc.Widthptr)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+		off += int64(types.PtrSize)
+		cnt -= int64(types.PtrSize)
 	}
 
 	if cnt == 8 {
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
-	} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
+		p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+	} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
 		if *state&x0 == 0 {
-			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
 
 		for i := int64(0); i < cnt/16; i++ {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
 		}
 
 		if cnt%16 != 0 {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
-	} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+	} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
 		if *state&x0 == 0 {
-			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
-		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
-		p.To.Sym = gc.Duffzero
+		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+		p.To.Sym = ir.Syms.Duffzero
 
 		if cnt%16 != 0 {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
 		}
 	} else {
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
 
-		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
-		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// This is a hardware nop (1-byte 0x90) instruction,
 	// even though we describe it as an explicit XCHGL here.
 	// Particularly, this does not zero the high 32 bits
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 5ff05a0..4938e4b 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -8,16 +8,18 @@
 	"fmt"
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 )
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 	flive := b.FlagsLiveAtEnd
 	for _, c := range b.ControlValues() {
 		flive = c.Type.IsFlags() || flive
@@ -110,7 +112,7 @@
 //     dest := dest(To) op src(From)
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
 	p := s.Prog(op)
 	p.From.Type = obj.TYPE_REG
 	p.To.Type = obj.TYPE_REG
@@ -164,7 +166,35 @@
 	return off, adj
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func getgFromTLS(s *ssagen.State, r int16) {
+	// See the comments in cmd/internal/obj/x86/obj6.go
+	// near CanUse1InsnTLS for a detailed explanation of these instructions.
+	if x86.CanUse1InsnTLS(base.Ctxt) {
+		// MOVQ (TLS), r
+		p := s.Prog(x86.AMOVQ)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = x86.REG_TLS
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = r
+	} else {
+		// MOVQ TLS, r
+		// MOVQ (r)(TLS*1), r
+		p := s.Prog(x86.AMOVQ)
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = x86.REG_TLS
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = r
+		q := s.Prog(x86.AMOVQ)
+		q.From.Type = obj.TYPE_MEM
+		q.From.Reg = r
+		q.From.Index = x86.REG_TLS
+		q.From.Scale = 1
+		q.To.Type = obj.TYPE_REG
+		q.To.Reg = r
+	}
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpAMD64VFMADD231SD:
 		p := s.Prog(v.Op.Asm())
@@ -630,12 +660,12 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = o
 		}
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 	case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -671,7 +701,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[1].Reg()
 	case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -679,20 +709,20 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.From, v, sc.Off())
+		ssagen.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[2].Reg()
 	case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
 		sc := v.AuxValAndOff()
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		gc.AddAux2(&p.From, v, sc.Off())
+		ssagen.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -732,14 +762,14 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
 		ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -751,7 +781,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
 		ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
 		ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -763,7 +793,7 @@
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[2].Reg()
 		memIdx(&p.To, v)
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
 		sc := v.AuxValAndOff()
 		off := sc.Off()
@@ -786,7 +816,7 @@
 			p := s.Prog(asm)
 			p.To.Type = obj.TYPE_MEM
 			p.To.Reg = v.Args[0].Reg()
-			gc.AddAux2(&p.To, v, off)
+			ssagen.AddAux2(&p.To, v, off)
 			break
 		}
 		fallthrough
@@ -801,7 +831,7 @@
 		p.From.Offset = val
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, off)
+		ssagen.AddAux2(&p.To, v, off)
 
 	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
 		p := s.Prog(v.Op.Asm())
@@ -810,7 +840,21 @@
 		p.From.Offset = sc.Val()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
+	case ssa.OpAMD64MOVOstorezero:
+		if s.ABI != obj.ABIInternal {
+			v.Fatalf("MOVOstorezero can be only used in ABIInternal functions")
+		}
+		if !base.Flag.ABIWrap {
+			// zero X15 manually if ABI wrappers are not used
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
+		p := s.Prog(v.Op.Asm())
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = x86.REG_X15
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = v.Args[0].Reg()
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
 		ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
 		ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -835,7 +879,7 @@
 			p.From.Type = obj.TYPE_NONE
 		}
 		memIdx(&p.To, v)
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
 		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
 		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -865,7 +909,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
@@ -891,13 +935,20 @@
 		p.From.Reg = r
 		p.From.Index = i
 
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
 			v.Fatalf("input[0] and output not in same register %s", v.LongString())
 		}
 	case ssa.OpAMD64DUFFZERO:
+		if s.ABI != obj.ABIInternal {
+			v.Fatalf("MOVOconst can be only used in ABIInternal functions")
+		}
+		if !base.Flag.ABIWrap {
+			// zero X15 manually if ABI wrappers are not used
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
 		off := duffStart(v.AuxInt)
 		adj := duffAdj(v.AuxInt)
 		var p *obj.Prog
@@ -911,18 +962,12 @@
 		}
 		p = s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = off
-	case ssa.OpAMD64MOVOconst:
-		if v.AuxInt != 0 {
-			v.Fatalf("MOVOconst can only do constant=0")
-		}
-		r := v.Reg()
-		opregreg(s, x86.AXORPS, r, r)
 	case ssa.OpAMD64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		if v.AuxInt%16 != 0 {
 			v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
 		}
@@ -949,7 +994,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
@@ -961,44 +1006,37 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpAMD64LoweredHasCPUFeature:
 		p := s.Prog(x86.AMOVBQZX)
 		p.From.Type = obj.TYPE_MEM
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64LoweredGetClosurePtr:
 		// Closure pointer is DX.
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpAMD64LoweredGetG:
-		r := v.Reg()
-		// See the comments in cmd/internal/obj/x86/obj6.go
-		// near CanUse1InsnTLS for a detailed explanation of these instructions.
-		if x86.CanUse1InsnTLS(gc.Ctxt) {
-			// MOVQ (TLS), r
-			p := s.Prog(x86.AMOVQ)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		} else {
-			// MOVQ TLS, r
-			// MOVQ (r)(TLS*1), r
-			p := s.Prog(x86.AMOVQ)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-			q := s.Prog(x86.AMOVQ)
-			q.From.Type = obj.TYPE_MEM
-			q.From.Reg = r
-			q.From.Index = x86.REG_TLS
-			q.From.Scale = 1
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = r
+		if base.Flag.ABIWrap {
+			v.Fatalf("LoweredGetG should not appear in new ABI")
 		}
-	case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+		r := v.Reg()
+		getgFromTLS(s, r)
+	case ssa.OpAMD64CALLstatic:
+		if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+			// set G register from TLS
+			getgFromTLS(s, x86.REG_R14)
+		}
+		s.Call(v)
+		if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+			// zero X15 when returning to ABIInternal from an ABI0 call
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+			// set G register from TLS
+			getgFromTLS(s, x86.REG_R14)
+		}
+	case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
 		s.Call(v)
 
 	case ssa.OpAMD64LoweredGetCallerPC:
@@ -1012,12 +1050,12 @@
 	case ssa.OpAMD64LoweredGetCallerSP:
 		// caller's SP is the address of the first arg
 		mov := x86.AMOVQ
-		if gc.Widthptr == 4 {
+		if types.PtrSize == 4 {
 			mov = x86.AMOVL
 		}
 		p := s.Prog(mov)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+		p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -1027,14 +1065,14 @@
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		// arg0 is in DI. Set sym to match where regalloc put arg1.
-		p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
+		p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
 
 	case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
-		s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+		s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
 
 	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
 		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
@@ -1115,7 +1153,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 
 	case ssa.OpAMD64SETNEF:
 		p := s.Prog(v.Op.Asm())
@@ -1164,14 +1202,14 @@
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 	case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1184,7 +1222,7 @@
 		p.From.Reg = r
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
 		r := v.Reg0()
 		if r != v.Args[0].Reg() {
@@ -1196,7 +1234,7 @@
 		p.From.Reg = r
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
 		if v.Args[1].Reg() != x86.REG_AX {
 			v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1207,7 +1245,7 @@
 		p.From.Reg = v.Args[2].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		p = s.Prog(x86.ASETEQ)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
@@ -1218,20 +1256,20 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpClobber:
 		p := s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		p = s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		p.To.Offset += 4
 	default:
 		v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1257,22 +1295,22 @@
 	ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
 }
 
-var eqfJumps = [2][2]gc.IndexJump{
+var eqfJumps = [2][2]ssagen.IndexJump{
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
 }
-var nefJumps = [2][2]gc.IndexJump{
+var nefJumps = [2][2]ssagen.IndexJump{
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockDefer:
 		// defer returns in rax:
@@ -1285,16 +1323,22 @@
 		p.To.Reg = x86.REG_AX
 		p = s.Prog(x86.AJNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
 		s.Prog(obj.ARET)
 	case ssa.BlockRetJmp:
+		if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+			// set G register from TLS
+			getgFromTLS(s, x86.REG_R14)
+		}
 		p := s.Prog(obj.ARET)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index 20e2f43..81959ae 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -5,13 +5,13 @@
 package arm
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/arm"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &arm.Linkarm
 	arch.REGSP = arm.REGSP
 	arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
 
-	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index bd8d7ff..f2c6763 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -5,49 +5,51 @@
 package arm
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if *r0 == 0 {
-		p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+		p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
 		*r0 = 1
 	}
 
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthptr) {
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+	} else if cnt <= int64(128*types.PtrSize) {
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
 	} else {
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
 		p.Reg = arm.REG_R1
-		p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+		p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
 		p1 := p
 		p.Scond |= arm.C_PBIT
-		p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm.REG_R2
-		p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(arm.AAND)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = arm.REG_R0
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 765a771..729d2da 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -9,9 +9,11 @@
 	"math"
 	"math/bits"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm"
@@ -91,7 +93,7 @@
 }
 
 // genshift generates a Prog for r = r0 op (r1 shifted by n)
-func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
 	p := s.Prog(as)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(makeshift(r1, typ, n))
@@ -109,7 +111,7 @@
 }
 
 // genregshift generates a Prog for r = r0 op (r1 shifted by r2)
-func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
 	p := s.Prog(as)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(makeregshift(r1, typ, r2))
@@ -143,7 +145,7 @@
 	return 0xffffffff, 0
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpARMMOVWreg:
 		if v.Type.IsMemory() {
@@ -181,7 +183,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpStoreReg:
@@ -192,7 +194,7 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpARMADD,
 		ssa.OpARMADC,
 		ssa.OpARMSUB,
@@ -543,10 +545,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVW $off(SP), R
 			wantreg = "SP"
@@ -566,7 +568,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpARMMOVBstore,
@@ -579,7 +581,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
 		// this is just shift 0 bits
 		fallthrough
@@ -700,7 +702,7 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Udiv
+		p.To.Sym = ir.Syms.Udiv
 	case ssa.OpARMLoweredWB:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
@@ -710,39 +712,39 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(8) // space used in callee args area by assembly stubs
 	case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
 		s.UseArgs(12) // space used in callee args area by assembly stubs
 	case ssa.OpARMDUFFZERO:
 		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.OpARMDUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 	case ssa.OpARMLoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(arm.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = arm.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpARMLoweredZero:
 		// MOVW.P	Rarg2, 4(R1)
@@ -777,7 +779,7 @@
 		p2.Reg = arm.REG_R1
 		p3 := s.Prog(arm.ABLE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARMLoweredMove:
 		// MOVW.P	4(R1), Rtmp
 		// MOVW.P	Rtmp, 4(R2)
@@ -818,7 +820,7 @@
 		p3.Reg = arm.REG_R1
 		p4 := s.Prog(arm.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 	case ssa.OpARMEqual,
 		ssa.OpARMNotEqual,
 		ssa.OpARMLessThan,
@@ -844,12 +846,12 @@
 		p.To.Reg = v.Reg()
 	case ssa.OpARMLoweredGetClosurePtr:
 		// Closure pointer is R7 (arm.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpARMLoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(arm.AMOVW)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -899,24 +901,24 @@
 }
 
 // To model a 'LEnoov' ('<=' without overflow checking) branching
-var leJumps = [2][2]gc.IndexJump{
+var leJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
 }
 
 // To model a 'GTnoov' ('>' without overflow checking) branching
-var gtJumps = [2][2]gc.IndexJump{
+var gtJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockDefer:
@@ -929,11 +931,11 @@
 		p.Reg = arm.REG_R0
 		p = s.Prog(arm.ABNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockExit:
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index 40d6e17..d3db37e 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -5,12 +5,12 @@
 package arm64
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/arm64"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &arm64.Linkarm64
 	arch.REGSP = arm64.REGSP
 	arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
 
-	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index f3fec03..8364535 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -5,7 +5,9 @@
 package arm64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
 	"cmd/internal/objabi"
@@ -22,52 +24,52 @@
 	return frame
 }
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
-		if cnt%(2*int64(gc.Widthptr)) != 0 {
-			p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
-			off += int64(gc.Widthptr)
-			cnt -= int64(gc.Widthptr)
+	} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+		if cnt%(2*int64(types.PtrSize)) != 0 {
+			p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+			off += int64(types.PtrSize)
+			cnt -= int64(types.PtrSize)
 		}
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
 		p.Reg = arm64.REG_R20
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
 	} else {
 		// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
 		// We are at the function entry, where no register is live, so it is okay to clobber
 		// other registers
 		const rtmp = arm64.REG_R20
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
 		p.Reg = arm64.REGRT1
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
 		p.Reg = arm64.REGRT1
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
 		p.Scond = arm64.C_XPRE
 		p1 := p
-		p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm64.REGRT2
-		p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(arm64.AHINT)
 	p.From.Type = obj.TYPE_CONST
 	return p
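
As a reading aid for the zerorange rewrite above: the function picks one of three zeroing strategies by size. Small ranges get unrolled MOVD stores, mid-size ranges (up to 128 pointer words, except on darwin) tail into the Duff's-device zeroer, and anything larger uses an explicit, async-preemptible store loop. A minimal standalone sketch of that selection, assuming arm64's 8-byte pointer size; the names here are illustrative, not compiler API:

// A standalone sketch of the size-based strategy selection in zerorange.
// Thresholds mirror the arm64 code (4 and 128 pointer words).
package main

import "fmt"

const ptrSize = 8 // types.PtrSize on arm64

func zeroStrategy(cnt int64, darwin bool) string {
	switch {
	case cnt == 0:
		return "nothing to emit"
	case cnt < 4*ptrSize:
		return "unrolled MOVD stores from REGZERO"
	case cnt <= 128*ptrSize && !darwin:
		return "DUFFZERO tail (darwin ld64 cannot handle the reloc)"
	default:
		return "explicit store loop, async-preemptible (avoids REGTMP)"
	}
}

func main() {
	for _, n := range []int64{0, 16, 512, 8192} {
		fmt.Printf("%5d bytes: %s\n", n, zeroStrategy(n, false))
	}
}
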
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 4358851..73e74e1 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -7,9 +7,11 @@
 import (
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
@@ -81,7 +83,7 @@
 }
 
 // genshift generates a Prog for r = r0 op (r1 shifted by n)
-func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
 	p := s.Prog(as)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = makeshift(r1, typ, n)
@@ -110,7 +112,7 @@
 	return mop
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpARM64MOVDreg:
 		if v.Type.IsMemory() {
@@ -148,7 +150,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpStoreReg:
@@ -159,7 +161,7 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpARM64ADD,
 		ssa.OpARM64SUB,
 		ssa.OpARM64AND,
@@ -393,10 +395,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVD $off(SP), R
 			wantreg = "SP"
@@ -417,7 +419,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpARM64MOVBloadidx,
@@ -444,7 +446,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 	case ssa.OpARM64MOVBstore,
@@ -461,7 +463,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64MOVBstoreidx,
 		ssa.OpARM64MOVHstoreidx,
 		ssa.OpARM64MOVWstoreidx,
@@ -482,7 +484,7 @@
 		p.From.Offset = int64(v.Args[2].Reg())
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64MOVBstorezero,
 		ssa.OpARM64MOVHstorezero,
 		ssa.OpARM64MOVWstorezero,
@@ -492,7 +494,7 @@
 		p.From.Reg = arm64.REGZERO
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64MOVBstorezeroidx,
 		ssa.OpARM64MOVHstorezeroidx,
 		ssa.OpARM64MOVWstorezeroidx,
@@ -511,7 +513,7 @@
 		p.From.Offset = int64(arm64.REGZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64BFI,
 		ssa.OpARM64BFXIL:
 		r := v.Reg()
@@ -580,7 +582,7 @@
 		p2.From.Type = obj.TYPE_REG
 		p2.From.Reg = arm64.REGTMP
 		p2.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p2, p)
+		p2.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicExchange64Variant,
 		ssa.OpARM64LoweredAtomicExchange32Variant:
 		swap := arm64.ASWPALD
@@ -634,7 +636,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = arm64.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicAdd64Variant,
 		ssa.OpARM64LoweredAtomicAdd32Variant:
 		// LDADDAL	Rarg1, (Rarg0), Rout
@@ -698,13 +700,13 @@
 		p4.From.Type = obj.TYPE_REG
 		p4.From.Reg = arm64.REGTMP
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 		p5 := s.Prog(arm64.ACSET)
 		p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
 		p5.From.Reg = arm64.COND_EQ
 		p5.To.Type = obj.TYPE_REG
 		p5.To.Reg = out
-		gc.Patch(p2, p5)
+		p2.To.SetTarget(p5)
 	case ssa.OpARM64LoweredAtomicCas64Variant,
 		ssa.OpARM64LoweredAtomicCas32Variant:
 		// Rarg0: ptr
@@ -792,7 +794,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = arm64.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicAnd8Variant,
 		ssa.OpARM64LoweredAtomicAnd32Variant:
 		atomic_clear := arm64.ALDCLRALW
@@ -959,7 +961,7 @@
 		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.OpARM64LoweredZero:
 		// STP.P	(ZR,ZR), 16(R16)
@@ -980,12 +982,12 @@
 		p2.Reg = arm64.REG_R16
 		p3 := s.Prog(arm64.ABLE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 	case ssa.OpARM64LoweredMove:
 		// MOVD.P	8(R16), Rtmp
@@ -1013,7 +1015,7 @@
 		p3.Reg = arm64.REG_R16
 		p4 := s.Prog(arm64.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 	case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
 		s.Call(v)
 	case ssa.OpARM64LoweredWB:
@@ -1025,21 +1027,21 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(16) // space used in callee args area by assembly stubs
 	case ssa.OpARM64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(arm64.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = arm64.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpARM64Equal,
 		ssa.OpARM64NotEqual,
@@ -1067,12 +1069,12 @@
 		p.To.Reg = v.Reg()
 	case ssa.OpARM64LoweredGetClosurePtr:
 		// Closure pointer is R26 (arm64.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpARM64LoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(arm64.AMOVD)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -1142,24 +1144,24 @@
 }
 
 // To model a 'LEnoov' ('<=' without overflow checking) branching
-var leJumps = [2][2]gc.IndexJump{
+var leJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
 }
 
 // To model a 'GTnoov' ('>' without overflow checking) branching
-var gtJumps = [2][2]gc.IndexJump{
+var gtJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockDefer:
@@ -1172,11 +1174,11 @@
 		p.Reg = arm64.REG_R0
 		p = s.Prog(arm64.ABNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockExit:
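
Most of this file is the mechanical gc -> ssagen/ir/base package split, plus the replacement of the old gc.Patch helper with the (*obj.Addr).SetTarget method for wiring branch targets. A self-contained sketch of that patching pattern, using hypothetical stand-in types rather than cmd/internal/obj:

// Hypothetical types illustrating the branch-patching pattern above:
// emit instructions, then point a backward branch at an earlier Prog.
package main

import "fmt"

type Prog struct {
	Op     string
	Target *Prog // plays the role of p.To's branch target
}

// SetTarget mirrors the (*obj.Addr).SetTarget call that replaced gc.Patch.
func (p *Prog) SetTarget(t *Prog) { p.Target = t }

func main() {
	loopTop := &Prog{Op: "MOVD.P"} // first instruction of the loop body
	branch := &Prog{Op: "BNE"}     // conditional branch back to the top
	branch.SetTarget(loopTop)      // formerly gc.Patch(branch, loopTop)
	fmt.Println(branch.Op, "->", branch.Target.Op)
}
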
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
new file mode 100644
index 0000000..3b9bc3a
--- /dev/null
+++ b/src/cmd/compile/internal/base/base.go
@@ -0,0 +1,75 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"os"
+)
+
+var atExitFuncs []func()
+
+func AtExit(f func()) {
+	atExitFuncs = append(atExitFuncs, f)
+}
+
+func Exit(code int) {
+	for i := len(atExitFuncs) - 1; i >= 0; i-- {
+		f := atExitFuncs[i]
+		atExitFuncs = atExitFuncs[:i]
+		f()
+	}
+	os.Exit(code)
+}
+
+// To enable tracing support (-t flag), set EnableTrace to true.
+const EnableTrace = false
+
+func Compiling(pkgs []string) bool {
+	if Ctxt.Pkgpath != "" {
+		for _, p := range pkgs {
+			if Ctxt.Pkgpath == p {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// The racewalk pass is currently handled in three parts.
+//
+// First, for flag_race, it inserts calls to racefuncenter and
+// racefuncexit at the start and end (respectively) of each
+// function. This is handled below.
+//
+// Second, during buildssa, it inserts appropriate instrumentation
+// calls immediately before each memory load or store. This is handled
+// by the (*state).instrument method in ssa.go, so here we just set
+// the Func.InstrumentBody flag as needed. For background on why this
+// is done during SSA construction rather than a separate SSA pass,
+// see issue #19054.
+//
+// Third, we remove calls to racefuncenter and racefuncexit for leaf
+// functions without instrumented operations. This is done as part of
+// the ssa opt pass via a special rewrite rule.
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var NoInstrumentPkgs = []string{
+	"runtime/internal/atomic",
+	"runtime/internal/sys",
+	"runtime/internal/math",
+	"runtime",
+	"runtime/race",
+	"runtime/msan",
+	"internal/cpu",
+}
+
+// Don't insert racefuncenterfp/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var NoRacePkgs = []string{"sync", "sync/atomic"}
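
Exit runs the registered handlers in last-in-first-out order, and pops each one before calling it, so a handler that itself triggers Exit cannot run earlier handlers twice. A runnable sketch of that behavior (a standalone copy, not the base package itself):

package main

import (
	"fmt"
	"os"
)

var atExitFuncs []func()

func AtExit(f func()) { atExitFuncs = append(atExitFuncs, f) }

func Exit(code int) {
	for i := len(atExitFuncs) - 1; i >= 0; i-- {
		f := atExitFuncs[i]
		atExitFuncs = atExitFuncs[:i] // pop before calling: re-entrant Exit is safe
		f()
	}
	os.Exit(code)
}

func main() {
	AtExit(func() { fmt.Println("first registered, runs last") })
	AtExit(func() { fmt.Println("last registered, runs first") })
	Exit(0)
}
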
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
new file mode 100644
index 0000000..164941b
--- /dev/null
+++ b/src/cmd/compile/internal/base/debug.go
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Debug arguments, set by -d flag.
+
+package base
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"cmd/internal/objabi"
+)
+
+// Debug holds the parsed debugging configuration values.
+var Debug = DebugFlags{
+	Fieldtrack: &objabi.Fieldtrack_enabled,
+}
+
+// DebugFlags defines the debugging configuration values (see var Debug).
+// Each struct field is a different value, named for the lower-case of the field name.
+// Each field must be an int or string and must have a `help` struct tag.
+//
+// The -d option takes a comma-separated list of settings.
+// Each setting is name=value; for ints, name is short for name=1.
+type DebugFlags struct {
+	Append        int    `help:"print information about append compilation"`
+	Checkptr      int    `help:"instrument unsafe pointer conversions"`
+	Closure       int    `help:"print information about closure compilation"`
+	DclStack      int    `help:"run internal dclstack check"`
+	Defer         int    `help:"print information about defer compilation"`
+	DisableNil    int    `help:"disable nil checks"`
+	DumpPtrs      int    `help:"show Node pointer values in dump output"`
+	DwarfInl      int    `help:"print information about DWARF inlined function creation"`
+	Export        int    `help:"print export data"`
+	Fieldtrack    *int   `help:"enable field tracking"`
+	GCProg        int    `help:"print dump of GC programs"`
+	Libfuzzer     int    `help:"enable coverage instrumentation for libfuzzer"`
+	LocationLists int    `help:"print information about DWARF location list creation"`
+	Nil           int    `help:"print information about nil checks"`
+	PCTab         string `help:"print named pc-value table"`
+	Panic         int    `help:"show all compiler panics"`
+	Slice         int    `help:"print information about slice compilation"`
+	SoftFloat     int    `help:"force compiler to emit soft-float code"`
+	TypeAssert    int    `help:"print information about type assertion inlining"`
+	TypecheckInl  int    `help:"eager typechecking of inline function bodies"`
+	WB            int    `help:"print information about write barriers"`
+	ABIWrap       int    `help:"print information about ABI wrapper generation"`
+
+	any bool // set when any of the values have been set
+}
+
+// Any reports whether any of the debug flags have been set.
+func (d *DebugFlags) Any() bool { return d.any }
+
+type debugField struct {
+	name string
+	help string
+	val  interface{} // *int or *string
+}
+
+var debugTab []debugField
+
+func init() {
+	v := reflect.ValueOf(&Debug).Elem()
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Name == "any" {
+			continue
+		}
+		name := strings.ToLower(f.Name)
+		help := f.Tag.Get("help")
+		if help == "" {
+			panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
+		}
+		ptr := v.Field(i).Addr().Interface()
+		switch ptr.(type) {
+		default:
+			panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
+		case *int, *string:
+			// ok
+		case **int:
+			ptr = *ptr.(**int) // record the *int itself
+		}
+		debugTab = append(debugTab, debugField{name, help, ptr})
+	}
+}
+
+// DebugSSA is called to set a -d ssa/... option.
+// If nil, those options are reported as invalid options.
+// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
+var DebugSSA func(phase, flag string, val int, valString string) string
+
+// parseDebug parses the -d debug string argument.
+func parseDebug(debugstr string) {
+	// parse -d argument
+	if debugstr == "" {
+		return
+	}
+	Debug.any = true
+Split:
+	for _, name := range strings.Split(debugstr, ",") {
+		if name == "" {
+			continue
+		}
+		// display help about the -d option itself and quit
+		if name == "help" {
+			fmt.Print(debugHelpHeader)
+			maxLen := len("ssa/help")
+			for _, t := range debugTab {
+				if len(t.name) > maxLen {
+					maxLen = len(t.name)
+				}
+			}
+			for _, t := range debugTab {
+				fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
+			}
+			// ssa options have their own help
+			fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
+			fmt.Print(debugHelpFooter)
+			os.Exit(0)
+		}
+		val, valstring, haveInt := 1, "", true
+		if i := strings.IndexAny(name, "=:"); i >= 0 {
+			var err error
+			name, valstring = name[:i], name[i+1:]
+			val, err = strconv.Atoi(valstring)
+			if err != nil {
+				val, haveInt = 1, false
+			}
+		}
+		for _, t := range debugTab {
+			if t.name != name {
+				continue
+			}
+			switch vp := t.val.(type) {
+			case nil:
+				// Ignore
+			case *string:
+				*vp = valstring
+			case *int:
+				if !haveInt {
+					log.Fatalf("invalid debug value %v", name)
+				}
+				*vp = val
+			default:
+				panic("bad debugtab type")
+			}
+			continue Split
+		}
+		// special case for ssa for now
+		if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
+			// expect form ssa/phase/flag
+			// e.g. -d=ssa/generic_cse/time
+			// _ in phase name also matches space
+			phase := name[4:]
+			flag := "debug" // default flag is debug
+			if i := strings.Index(phase, "/"); i >= 0 {
+				flag = phase[i+1:]
+				phase = phase[:i]
+			}
+			err := DebugSSA(phase, flag, val, valstring)
+			if err != "" {
+				log.Fatal(err)
+			}
+			continue Split
+		}
+		log.Fatalf("unknown debug key -d %s\n", name)
+	}
+}
+
+const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
+
+<key> is one of:
+
+`
+
+const debugHelpFooter = `
+<value> is key-specific.
+
+Key "checkptr" supports values:
+	"0": instrumentation disabled
+	"1": conversions involving unsafe.Pointer are instrumented
+	"2": conversions to unsafe.Pointer force heap allocation
+
+Key "pctab" supports values:
+	"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
+`
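
As parseDebug above implements, the -d argument is a comma-separated list of settings, each name or name=value, where a bare name is shorthand for name=1 and non-integer values (like pctab=pctoline) are kept as strings. A standalone sketch of just that grammar:

package main

import (
	"fmt"
	"strings"
)

// parse mirrors the -d value grammar handled by parseDebug; illustrative only.
func parse(s string) map[string]string {
	out := map[string]string{}
	for _, name := range strings.Split(s, ",") {
		if name == "" {
			continue
		}
		val := "1" // bare name is shorthand for name=1
		if i := strings.IndexAny(name, "=:"); i >= 0 {
			name, val = name[:i], name[i+1:]
		}
		out[name] = val
	}
	return out
}

func main() {
	fmt.Println(parse("checkptr=2,nil,pctab=pctoline"))
	// map[checkptr:2 nil:1 pctab:pctoline]
}
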
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
new file mode 100644
index 0000000..c38bbe6
--- /dev/null
+++ b/src/cmd/compile/internal/base/flag.go
@@ -0,0 +1,459 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"cmd/internal/objabi"
+	"cmd/internal/sys"
+)
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+	objabi.Flagprint(os.Stderr)
+	Exit(2)
+}
+
+// Flag holds the parsed command-line flags.
+// See ParseFlag for non-zero defaults.
+var Flag CmdFlags
+
+// A CountFlag is a counting integer flag.
+// It accepts -name=value to set the value directly,
+// but it also accepts -name with no =value to increment the count.
+type CountFlag int
+
+// CmdFlags defines the command-line flags (see var Flag).
+// Each struct field is a different flag, by default named for the lower-case of the field name.
+// If the flag name is a single letter, the default flag name is left upper-case.
+// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
+//
+// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
+// but this should be done only in exceptional circumstances: it helps everyone if the flag name
+// is obvious from the field name when the flag is used elsewhere in the compiler sources.
+// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
+//
+// Each field must have a `help` struct tag giving the flag help message.
+//
+// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
+// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
+type CmdFlags struct {
+	// Single letters
+	B CountFlag    "help:\"disable bounds checking\""
+	C CountFlag    "help:\"disable printing of columns in error messages\""
+	D string       "help:\"set relative `path` for local imports\""
+	E CountFlag    "help:\"debug symbol export\""
+	I func(string) "help:\"add `directory` to import search path\""
+	K CountFlag    "help:\"debug missing line numbers\""
+	L CountFlag    "help:\"show full file names in error messages\""
+	N CountFlag    "help:\"disable optimizations\""
+	S CountFlag    "help:\"print assembly listing\""
+	// V is added by objabi.AddVersionFlag
+	W CountFlag "help:\"debug parse tree after type checking\""
+
+	LowerC int          "help:\"concurrency during compilation (1 means no concurrency)\""
+	LowerD func(string) "help:\"enable debugging settings; try -d help\""
+	LowerE CountFlag    "help:\"no limit on number of errors reported\""
+	LowerH CountFlag    "help:\"halt on error\""
+	LowerJ CountFlag    "help:\"debug runtime-initialized variables\""
+	LowerL CountFlag    "help:\"disable inlining\""
+	LowerM CountFlag    "help:\"print optimization decisions\""
+	LowerO string       "help:\"write output to `file`\""
+	LowerP *string      "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
+	LowerR CountFlag    "help:\"debug generated wrappers\""
+	LowerT bool         "help:\"enable tracing for debugging the compiler\""
+	LowerW CountFlag    "help:\"debug type checking\""
+	LowerV *bool        "help:\"increase debug verbosity\""
+
+	// Special characters
+	Percent          int  "flag:\"%\" help:\"debug non-static initializers\""
+	CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
+
+	// Longer names
+	ABIWrap            bool         "help:\"enable generation of ABI wrappers\""
+	ABIWrapLimit       int          "help:\"emit at most N ABI wrappers (for debugging)\""
+	AsmHdr             string       "help:\"write assembly header to `file`\""
+	Bench              string       "help:\"append benchmark times to `file`\""
+	BlockProfile       string       "help:\"write block profile to `file`\""
+	BuildID            string       "help:\"record `id` as the build id in the export metadata\""
+	CPUProfile         string       "help:\"write cpu profile to `file`\""
+	Complete           bool         "help:\"compiling complete package (no C or assembly)\""
+	Dwarf              bool         "help:\"generate DWARF symbols\""
+	DwarfBASEntries    *bool        "help:\"use base address selection entries in DWARF\""                        // &Ctxt.UseBASEntries, set below
+	DwarfLocationLists *bool        "help:\"add location lists to DWARF in optimized mode\""                      // &Ctxt.Flag_locationlists, set below
+	Dynlink            *bool        "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
+	EmbedCfg           func(string) "help:\"read go:embed configuration from `file`\""
+	GenDwarfInl        int          "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
+	GoVersion          string       "help:\"required version of the runtime\""
+	ImportCfg          func(string) "help:\"read import configuration from `file`\""
+	ImportMap          func(string) "help:\"add `definition` of the form source=actual to import map\""
+	InstallSuffix      string       "help:\"set pkg directory `suffix`\""
+	JSON               string       "help:\"version,file for JSON compiler/optimizer detail output\""
+	Lang               string       "help:\"Go language version source code expects\""
+	LinkObj            string       "help:\"write linker-specific object to `file`\""
+	LinkShared         *bool        "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
+	Live               CountFlag    "help:\"debug liveness analysis\""
+	MSan               bool         "help:\"build code compatible with C/C++ memory sanitizer\""
+	MemProfile         string       "help:\"write memory profile to `file`\""
+	MemProfileRate     int64        "help:\"set runtime.MemProfileRate to `rate`\""
+	MutexProfile       string       "help:\"write mutex profile to `file`\""
+	NoLocalImports     bool         "help:\"reject local (relative) imports\""
+	Pack               bool         "help:\"write to file.a instead of file.o\""
+	Race               bool         "help:\"enable race detector\""
+	Shared             *bool        "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
+	SmallFrames        bool         "help:\"reduce the size limit for stack allocated objects\""      // small stacks, to diagnose GC latency; see golang.org/issue/27732
+	Spectre            string       "help:\"enable spectre mitigations in `list` (all, index, ret)\""
+	Std                bool         "help:\"compiling standard library\""
+	SymABIs            string       "help:\"read symbol ABIs from `file`\""
+	TraceProfile       string       "help:\"write an execution trace to `file`\""
+	TrimPath           string       "help:\"remove `prefix` from recorded source file paths\""
+	WB                 bool         "help:\"enable write barrier\"" // TODO: remove
+
+	// Configuration derived from flags; not a flag itself.
+	Cfg struct {
+		Embed struct { // set by -embedcfg
+			Patterns map[string][]string
+			Files    map[string]string
+		}
+		ImportDirs   []string          // appended to by -I
+		ImportMap    map[string]string // set by -importmap OR -importcfg
+		PackageFile  map[string]string // set by -importcfg; nil means not in use
+		SpectreIndex bool              // set by -spectre=index or -spectre=all
+		// Whether we are adding any sort of code instrumentation, such as
+		// when the race detector is enabled.
+		Instrumenting bool
+	}
+}
+
+// ParseFlags parses the command-line flags into Flag.
+func ParseFlags() {
+	Flag.I = addImportDir
+
+	Flag.LowerC = 1
+	Flag.LowerD = parseDebug
+	Flag.LowerP = &Ctxt.Pkgpath
+	Flag.LowerV = &Ctxt.Debugvlog
+
+	Flag.ABIWrap = objabi.Regabi_enabled != 0
+	Flag.Dwarf = objabi.GOARCH != "wasm"
+	Flag.DwarfBASEntries = &Ctxt.UseBASEntries
+	Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
+	*Flag.DwarfLocationLists = true
+	Flag.Dynlink = &Ctxt.Flag_dynlink
+	Flag.EmbedCfg = readEmbedCfg
+	Flag.GenDwarfInl = 2
+	Flag.ImportCfg = readImportCfg
+	Flag.ImportMap = addImportMap
+	Flag.LinkShared = &Ctxt.Flag_linkshared
+	Flag.Shared = &Ctxt.Flag_shared
+	Flag.WB = true
+
+	Flag.Cfg.ImportMap = make(map[string]string)
+
+	objabi.AddVersionFlag() // -V
+	registerFlags()
+	objabi.Flagparse(usage)
+
+	if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
+		log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
+	}
+	if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
+		log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
+	}
+	if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
+		log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
+	}
+	parseSpectre(Flag.Spectre) // left as string for RecordFlags
+
+	Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
+	Ctxt.Flag_optimize = Flag.N == 0
+	Ctxt.Debugasm = int(Flag.S)
+
+	if flag.NArg() < 1 {
+		usage()
+	}
+
+	if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
+		fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
+		Exit(2)
+	}
+
+	if Flag.LowerO == "" {
+		p := flag.Arg(0)
+		if i := strings.LastIndex(p, "/"); i >= 0 {
+			p = p[i+1:]
+		}
+		if runtime.GOOS == "windows" {
+			if i := strings.LastIndex(p, `\`); i >= 0 {
+				p = p[i+1:]
+			}
+		}
+		if i := strings.LastIndex(p, "."); i >= 0 {
+			p = p[:i]
+		}
+		suffix := ".o"
+		if Flag.Pack {
+			suffix = ".a"
+		}
+		Flag.LowerO = p + suffix
+	}
+
+	if Flag.Race && Flag.MSan {
+		log.Fatal("cannot use both -race and -msan")
+	}
+	if Flag.Race || Flag.MSan {
+		// -race and -msan imply -d=checkptr for now.
+		Debug.Checkptr = 1
+	}
+
+	if Flag.CompilingRuntime && Flag.N != 0 {
+		log.Fatal("cannot disable optimizations while compiling runtime")
+	}
+	if Flag.LowerC < 1 {
+		log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
+	}
+	if Flag.LowerC > 1 && !concurrentBackendAllowed() {
+		log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
+	}
+
+	if Flag.CompilingRuntime {
+		// Runtime can't use -d=checkptr, at least not yet.
+		Debug.Checkptr = 0
+
+		// Fuzzing the runtime isn't interesting either.
+		Debug.Libfuzzer = 0
+	}
+
+	// set via a -d flag
+	Ctxt.Debugpcln = Debug.PCTab
+}
+
+// registerFlags adds flag registrations for all the fields in Flag.
+// See the comment on type CmdFlags for the rules.
+func registerFlags() {
+	var (
+		boolType      = reflect.TypeOf(bool(false))
+		intType       = reflect.TypeOf(int(0))
+		stringType    = reflect.TypeOf(string(""))
+		ptrBoolType   = reflect.TypeOf(new(bool))
+		ptrIntType    = reflect.TypeOf(new(int))
+		ptrStringType = reflect.TypeOf(new(string))
+		countType     = reflect.TypeOf(CountFlag(0))
+		funcType      = reflect.TypeOf((func(string))(nil))
+	)
+
+	v := reflect.ValueOf(&Flag).Elem()
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Name == "Cfg" {
+			continue
+		}
+
+		var name string
+		if len(f.Name) == 1 {
+			name = f.Name
+		} else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
+			name = string(rune(f.Name[5] + 'a' - 'A'))
+		} else {
+			name = strings.ToLower(f.Name)
+		}
+		if tag := f.Tag.Get("flag"); tag != "" {
+			name = tag
+		}
+
+		help := f.Tag.Get("help")
+		if help == "" {
+			panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
+		}
+
+		if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
+			panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
+		}
+
+		switch f.Type {
+		case boolType:
+			p := v.Field(i).Addr().Interface().(*bool)
+			flag.BoolVar(p, name, *p, help)
+		case intType:
+			p := v.Field(i).Addr().Interface().(*int)
+			flag.IntVar(p, name, *p, help)
+		case stringType:
+			p := v.Field(i).Addr().Interface().(*string)
+			flag.StringVar(p, name, *p, help)
+		case ptrBoolType:
+			p := v.Field(i).Interface().(*bool)
+			flag.BoolVar(p, name, *p, help)
+		case ptrIntType:
+			p := v.Field(i).Interface().(*int)
+			flag.IntVar(p, name, *p, help)
+		case ptrStringType:
+			p := v.Field(i).Interface().(*string)
+			flag.StringVar(p, name, *p, help)
+		case countType:
+			p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
+			objabi.Flagcount(name, help, p)
+		case funcType:
+			f := v.Field(i).Interface().(func(string))
+			objabi.Flagfn1(name, help, f)
+		}
+	}
+}
+
+// concurrentFlagOk reports whether the current compiler flags
+// are compatible with concurrent compilation.
+func concurrentFlagOk() bool {
+	// TODO(rsc): Many of these are fine. Remove them.
+	return Flag.Percent == 0 &&
+		Flag.E == 0 &&
+		Flag.K == 0 &&
+		Flag.L == 0 &&
+		Flag.LowerH == 0 &&
+		Flag.LowerJ == 0 &&
+		Flag.LowerM == 0 &&
+		Flag.LowerR == 0
+}
+
+func concurrentBackendAllowed() bool {
+	if !concurrentFlagOk() {
+		return false
+	}
+
+	// Flag.S by itself is ok, because all printing occurs
+	// while writing the object file, and that is non-concurrent.
+	// Adding Ctxt.Debugvlog, however, causes Flag.S to also print
+	// while flushing the plist, which happens concurrently.
+	if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
+		return false
+	}
+	// TODO: Test and delete this condition.
+	if objabi.Fieldtrack_enabled != 0 {
+		return false
+	}
+	// TODO: fix races and enable the following flags
+	if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
+		return false
+	}
+	return true
+}
+
+func addImportDir(dir string) {
+	if dir != "" {
+		Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
+	}
+}
+
+func addImportMap(s string) {
+	if Flag.Cfg.ImportMap == nil {
+		Flag.Cfg.ImportMap = make(map[string]string)
+	}
+	if strings.Count(s, "=") != 1 {
+		log.Fatal("-importmap argument must be of the form source=actual")
+	}
+	i := strings.Index(s, "=")
+	source, actual := s[:i], s[i+1:]
+	if source == "" || actual == "" {
+		log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+	}
+	Flag.Cfg.ImportMap[source] = actual
+}
+
+func readImportCfg(file string) {
+	if Flag.Cfg.ImportMap == nil {
+		Flag.Cfg.ImportMap = make(map[string]string)
+	}
+	Flag.Cfg.PackageFile = map[string]string{}
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-importcfg: %v", err)
+	}
+
+	for lineNum, line := range strings.Split(string(data), "\n") {
+		lineNum++ // 1-based
+		line = strings.TrimSpace(line)
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		var verb, args string
+		if i := strings.Index(line, " "); i < 0 {
+			verb = line
+		} else {
+			verb, args = line[:i], strings.TrimSpace(line[i+1:])
+		}
+		var before, after string
+		if i := strings.Index(args, "="); i >= 0 {
+			before, after = args[:i], args[i+1:]
+		}
+		switch verb {
+		default:
+			log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+		case "importmap":
+			if before == "" || after == "" {
+				log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
+			}
+			Flag.Cfg.ImportMap[before] = after
+		case "packagefile":
+			if before == "" || after == "" {
+				log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+			}
+			Flag.Cfg.PackageFile[before] = after
+		}
+	}
+}
+
+func readEmbedCfg(file string) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-embedcfg: %v", err)
+	}
+	if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
+		log.Fatalf("%s: %v", file, err)
+	}
+	if Flag.Cfg.Embed.Patterns == nil {
+		log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+	}
+	if Flag.Cfg.Embed.Files == nil {
+		log.Fatalf("%s: invalid embedcfg: missing Files", file)
+	}
+}
+
+// parseSpectre parses the spectre configuration from the string s.
+func parseSpectre(s string) {
+	for _, f := range strings.Split(s, ",") {
+		f = strings.TrimSpace(f)
+		switch f {
+		default:
+			log.Fatalf("unknown setting -spectre=%s", f)
+		case "":
+			// nothing
+		case "all":
+			Flag.Cfg.SpectreIndex = true
+			Ctxt.Retpoline = true
+		case "index":
+			Flag.Cfg.SpectreIndex = true
+		case "ret":
+			Ctxt.Retpoline = true
+		}
+	}
+
+	if Flag.Cfg.SpectreIndex {
+		switch objabi.GOARCH {
+		case "amd64":
+			// ok
+		default:
+			log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
+		}
+	}
+}
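
registerFlags derives each flag's name from its field: single-letter fields keep their case, a "LowerX" field becomes the lower-case "x", and everything else is lower-cased whole; a `flag` struct tag (as with "%" and "+") overrides the derived name. A runnable sketch of that rule, with illustrative field names, not modeling the tag override:

package main

import (
	"fmt"
	"strings"
)

func flagName(field string) string {
	switch {
	case len(field) == 1:
		return field // single letters keep their case: -N, -S
	case len(field) == 6 && strings.HasPrefix(field, "Lower") && field[5] >= 'A' && field[5] <= 'Z':
		return string(field[5] + 'a' - 'A') // LowerM -> -m
	default:
		return strings.ToLower(field) // SymABIs -> -symabis
	}
}

func main() {
	for _, f := range []string{"N", "LowerM", "SymABIs", "Race"} {
		fmt.Printf("%-8s -> -%s\n", f, flagName(f))
	}
}
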
diff --git a/src/cmd/compile/internal/base/link.go b/src/cmd/compile/internal/base/link.go
new file mode 100644
index 0000000..49fe435
--- /dev/null
+++ b/src/cmd/compile/internal/base/link.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"cmd/internal/obj"
+)
+
+var Ctxt *obj.Link
+
+// TODO(mdempsky): These should probably be obj.Link methods.
+
+// PkgLinksym returns the linker symbol for name within the given
+// package prefix. For user packages, prefix should be the package
+// path encoded with objabi.PathToPrefix.
+func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
+	if name == "_" {
+		// TODO(mdempsky): Cleanup callers and Fatalf instead.
+		return linksym(prefix, "_", abi)
+	}
+	return linksym(prefix, prefix+"."+name, abi)
+}
+
+// Linkname returns the linker symbol for the given name as it might
+// appear within a //go:linkname directive.
+func Linkname(name string, abi obj.ABI) *obj.LSym {
+	return linksym("_", name, abi)
+}
+
+// linksym is an internal helper function for implementing the above
+// exported APIs.
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
+	return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
+}
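
The naming rule here is small: PkgLinksym produces prefix+"."+name, with "_" passed through unchanged for now (see the TODO). A standalone sketch of just that rule; the real functions also intern the symbol in the obj.Link context, which this demo omits:

package main

import "fmt"

// pkgLinksymName mirrors the naming done by PkgLinksym; illustrative only.
func pkgLinksymName(prefix, name string) string {
	if name == "_" {
		return "_" // blank names are passed through for now
	}
	return prefix + "." + name
}

func main() {
	fmt.Println(pkgLinksymName("runtime", "duffzero")) // runtime.duffzero
	fmt.Println(pkgLinksymName("main", "_"))           // _
}
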
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
new file mode 100644
index 0000000..668c600
--- /dev/null
+++ b/src/cmd/compile/internal/base/print.go
@@ -0,0 +1,264 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"fmt"
+	"os"
+	"runtime/debug"
+	"sort"
+	"strings"
+
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+// An errorMsg is a queued error message, waiting to be printed.
+type errorMsg struct {
+	pos src.XPos
+	msg string
+}
+
+// Pos is the current source position being processed,
+// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
+var Pos src.XPos
+
+var (
+	errorMsgs       []errorMsg
+	numErrors       int // number of entries in errorMsgs that are errors (as opposed to warnings)
+	numSyntaxErrors int
+)
+
+// Errors returns the number of errors reported.
+func Errors() int {
+	return numErrors
+}
+
+// SyntaxErrors returns the number of syntax errors reported.
+func SyntaxErrors() int {
+	return numSyntaxErrors
+}
+
+// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
+func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	// Only add the position if we know the position.
+	// See issue golang.org/issue/11361.
+	if pos.IsKnown() {
+		msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
+	}
+	errorMsgs = append(errorMsgs, errorMsg{
+		pos: pos,
+		msg: msg + "\n",
+	})
+}
+
+// FmtPos formats pos as a file:line string.
+func FmtPos(pos src.XPos) string {
+	if Ctxt == nil {
+		return "???"
+	}
+	return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
+}
+
+// byPos sorts errors by source position.
+type byPos []errorMsg
+
+func (x byPos) Len() int           { return len(x) }
+func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
+func (x byPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+
+// FlushErrors sorts errors seen so far by line number, prints them to stdout,
+// and empties the errors array.
+func FlushErrors() {
+	if Ctxt != nil && Ctxt.Bso != nil {
+		Ctxt.Bso.Flush()
+	}
+	if len(errorMsgs) == 0 {
+		return
+	}
+	sort.Stable(byPos(errorMsgs))
+	for i, err := range errorMsgs {
+		if i == 0 || err.msg != errorMsgs[i-1].msg {
+			fmt.Printf("%s", err.msg)
+		}
+	}
+	errorMsgs = errorMsgs[:0]
+}
+
+// lasterror keeps track of the most recently issued error,
+// to avoid printing multiple error messages on the same line.
+var lasterror struct {
+	syntax src.XPos // source position of last syntax error
+	other  src.XPos // source position of last non-syntax error
+	msg    string   // error message of last non-syntax error
+}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+	p := Ctxt.PosTable.Pos(a)
+	q := Ctxt.PosTable.Pos(b)
+	return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
+// Errorf reports a formatted error at the current line.
+func Errorf(format string, args ...interface{}) {
+	ErrorfAt(Pos, format, args...)
+}
+
+// ErrorfAt reports a formatted error message at pos.
+func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+
+	if strings.HasPrefix(msg, "syntax error") {
+		numSyntaxErrors++
+		// only one syntax error per line, no matter what error
+		if sameline(lasterror.syntax, pos) {
+			return
+		}
+		lasterror.syntax = pos
+	} else {
+		// Only one of multiple equal non-syntax errors per line.
+		// FlushErrors shows only one of them, so we filter duplicates
+		// here as best we can (they may not appear in order), so that
+		// we don't count them, exit early, and then have nothing to show.
+		if sameline(lasterror.other, pos) && lasterror.msg == msg {
+			return
+		}
+		lasterror.other = pos
+		lasterror.msg = msg
+	}
+
+	addErrorMsg(pos, "%s", msg)
+	numErrors++
+
+	hcrash()
+	if numErrors >= 10 && Flag.LowerE == 0 {
+		FlushErrors()
+		fmt.Printf("%v: too many errors\n", FmtPos(pos))
+		ErrorExit()
+	}
+}
+
+// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
+func ErrorfVers(lang string, format string, args ...interface{}) {
+	Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
+}
+
+// UpdateErrorDot is a clumsy hack that rewrites the last error,
+// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
+// It is used to give better error messages for dot (selector) expressions.
+func UpdateErrorDot(line string, name, expr string) {
+	if len(errorMsgs) == 0 {
+		return
+	}
+	e := &errorMsgs[len(errorMsgs)-1]
+	if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
+		e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
+	}
+}
+
+// Warn reports a formatted warning at the current line.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func Warn(format string, args ...interface{}) {
+	WarnfAt(Pos, format, args...)
+}
+
+// WarnfAt reports a formatted warning at pos.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func WarnfAt(pos src.XPos, format string, args ...interface{}) {
+	addErrorMsg(pos, format, args...)
+	if Flag.LowerM != 0 {
+		FlushErrors()
+	}
+}
+
+// Fatalf reports a fatal error - an internal problem - at the current line and exits.
+// If other errors have already been printed, then Fatalf just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// Fatalf prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, Fatalf panics to force the usual runtime info dump.
+func Fatalf(format string, args ...interface{}) {
+	FatalfAt(Pos, format, args...)
+}
+
+// FatalfAt reports a fatal error - an internal problem - at pos and exits.
+// If other errors have already been printed, then FatalfAt just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// FatalfAt prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
+func FatalfAt(pos src.XPos, format string, args ...interface{}) {
+	FlushErrors()
+
+	if Debug.Panic != 0 || numErrors == 0 {
+		fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
+		fmt.Printf(format, args...)
+		fmt.Printf("\n")
+
+		// If this is a released compiler version, ask for a bug report.
+		if strings.HasPrefix(objabi.Version, "go") {
+			fmt.Printf("\n")
+			fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+			fmt.Printf("https://golang.org/issue/new\n")
+		} else {
+			// Not a release; dump a stack trace, too.
+			fmt.Println()
+			os.Stdout.Write(debug.Stack())
+			fmt.Println()
+		}
+	}
+
+	hcrash()
+	ErrorExit()
+}
+
+// hcrash crashes the compiler when -h is set, to find out where a message is generated.
+func hcrash() {
+	if Flag.LowerH != 0 {
+		FlushErrors()
+		if Flag.LowerO != "" {
+			os.Remove(Flag.LowerO)
+		}
+		panic("-h")
+	}
+}
+
+// ErrorExit handles an error-status exit.
+// It flushes any pending errors, removes the output file, and exits.
+func ErrorExit() {
+	FlushErrors()
+	if Flag.LowerO != "" {
+		os.Remove(Flag.LowerO)
+	}
+	os.Exit(2)
+}
+
+// ExitIfErrors calls ErrorExit if any errors have been reported.
+func ExitIfErrors() {
+	if Errors() > 0 {
+		ErrorExit()
+	}
+}
+
+var AutogeneratedPos src.XPos
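
The queueing scheme above decouples reporting from printing: messages accumulate in errorMsgs, FlushErrors stably sorts them by source position, and exact consecutive duplicates print once. A standalone sketch of that flush logic, using plain ints for positions instead of src.XPos:

package main

import (
	"fmt"
	"sort"
)

type errorMsg struct {
	pos int
	msg string
}

// flush mirrors FlushErrors: stable sort by position, skip exact duplicates.
func flush(msgs []errorMsg) {
	sort.SliceStable(msgs, func(i, j int) bool { return msgs[i].pos < msgs[j].pos })
	for i, e := range msgs {
		if i == 0 || e.msg != msgs[i-1].msg {
			fmt.Print(e.msg)
		}
	}
}

func main() {
	flush([]errorMsg{
		{10, "b.go:10: undefined: x\n"},
		{3, "a.go:3: syntax error\n"},
		{10, "b.go:10: undefined: x\n"}, // duplicate, printed once
	})
}
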
diff --git a/src/cmd/compile/internal/gc/timings.go b/src/cmd/compile/internal/base/timings.go
similarity index 99%
rename from src/cmd/compile/internal/gc/timings.go
rename to src/cmd/compile/internal/base/timings.go
index 56b3899..f599f4e 100644
--- a/src/cmd/compile/internal/gc/timings.go
+++ b/src/cmd/compile/internal/base/timings.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package base
 
 import (
 	"fmt"
@@ -11,6 +11,8 @@
 	"time"
 )
 
+var Timer Timings
+
 // Timings collects the execution times of labeled phases
 // which are added through a sequence of Start/Stop calls.
 // Events may be associated with each phase via AddEvent.
diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go
new file mode 100644
index 0000000..bcac1fe
--- /dev/null
+++ b/src/cmd/compile/internal/bitvec/bv.go
@@ -0,0 +1,190 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bitvec
+
+import (
+	"math/bits"
+
+	"cmd/compile/internal/base"
+)
+
+const (
+	wordBits  = 32
+	wordMask  = wordBits - 1
+	wordShift = 5
+)
+
+// A BitVec is a bit vector.
+type BitVec struct {
+	N int32    // number of bits in vector
+	B []uint32 // words holding bits
+}
+
+func New(n int32) BitVec {
+	nword := (n + wordBits - 1) / wordBits
+	return BitVec{n, make([]uint32, nword)}
+}
+
+type Bulk struct {
+	words []uint32
+	nbit  int32
+	nword int32
+}
+
+func NewBulk(nbit int32, count int32) Bulk {
+	nword := (nbit + wordBits - 1) / wordBits
+	size := int64(nword) * int64(count)
+	if int64(int32(size*4)) != size*4 {
+		base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+	}
+	return Bulk{
+		words: make([]uint32, size),
+		nbit:  nbit,
+		nword: nword,
+	}
+}
+
+func (b *Bulk) Next() BitVec {
+	out := BitVec{b.nbit, b.words[:b.nword]}
+	b.words = b.words[b.nword:]
+	return out
+}
+
+func (bv1 BitVec) Eq(bv2 BitVec) bool {
+	if bv1.N != bv2.N {
+		base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
+	}
+	for i, x := range bv1.B {
+		if x != bv2.B[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (dst BitVec) Copy(src BitVec) {
+	copy(dst.B, src.B)
+}
+
+func (bv BitVec) Get(i int32) bool {
+	if i < 0 || i >= bv.N {
+		base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
+	}
+	mask := uint32(1 << uint(i%wordBits))
+	return bv.B[i>>wordShift]&mask != 0
+}
+
+func (bv BitVec) Set(i int32) {
+	if i < 0 || i >= bv.N {
+		base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
+	}
+	mask := uint32(1 << uint(i%wordBits))
+	bv.B[i/wordBits] |= mask
+}
+
+func (bv BitVec) Unset(i int32) {
+	if i < 0 || i >= bv.N {
+		base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
+	}
+	mask := uint32(1 << uint(i%wordBits))
+	bv.B[i/wordBits] &^= mask
+}
+
+// Next returns the smallest index >= i for which bv.Get(i) reports true.
+// If there is no such index, Next returns -1.
+func (bv BitVec) Next(i int32) int32 {
+	if i >= bv.N {
+		return -1
+	}
+
+	// Jump i ahead to next word with bits.
+	if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
+		i &^= wordMask
+		i += wordBits
+		for i < bv.N && bv.B[i>>wordShift] == 0 {
+			i += wordBits
+		}
+	}
+
+	if i >= bv.N {
+		return -1
+	}
+
+	// Find 1 bit.
+	w := bv.B[i>>wordShift] >> uint(i&wordMask)
+	i += int32(bits.TrailingZeros32(w))
+
+	return i
+}
+
+func (bv BitVec) IsEmpty() bool {
+	for _, x := range bv.B {
+		if x != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (bv BitVec) Not() {
+	for i, x := range bv.B {
+		bv.B[i] = ^x
+	}
+}
+
+// union
+func (dst BitVec) Or(src1, src2 BitVec) {
+	if len(src1.B) == 0 {
+		return
+	}
+	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+	for i, x := range src1.B {
+		dst.B[i] = x | src2.B[i]
+	}
+}
+
+// intersection
+func (dst BitVec) And(src1, src2 BitVec) {
+	if len(src1.B) == 0 {
+		return
+	}
+	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+	for i, x := range src1.B {
+		dst.B[i] = x & src2.B[i]
+	}
+}
+
+// difference
+func (dst BitVec) AndNot(src1, src2 BitVec) {
+	if len(src1.B) == 0 {
+		return
+	}
+	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+	for i, x := range src1.B {
+		dst.B[i] = x &^ src2.B[i]
+	}
+}
+
+func (bv BitVec) String() string {
+	s := make([]byte, 2+bv.N)
+	copy(s, "#*")
+	for i := int32(0); i < bv.N; i++ {
+		ch := byte('0')
+		if bv.Get(i) {
+			ch = '1'
+		}
+		s[2+i] = ch
+	}
+	return string(s)
+}
+
+func (bv BitVec) Clear() {
+	for i := range bv.B {
+		bv.B[i] = 0
+	}
+}
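
The vector stores bits in 32-bit words, and Next scans word at a time, using bits.TrailingZeros32 to locate the first set bit within a word; this is how liveness code iterates live variables. A runnable standalone sketch of the same layout, exercising Set and Next:

package main

import (
	"fmt"
	"math/bits"
)

const wordBits = 32

type BitVec struct {
	N int32    // number of bits in vector
	B []uint32 // words holding bits
}

func New(n int32) BitVec { return BitVec{n, make([]uint32, (n+wordBits-1)/wordBits)} }

func (bv BitVec) Set(i int32)      { bv.B[i/wordBits] |= 1 << uint(i%wordBits) }
func (bv BitVec) Get(i int32) bool { return bv.B[i/wordBits]&(1<<uint(i%wordBits)) != 0 }

// Next returns the smallest set index >= i, or -1 (same contract as above).
func (bv BitVec) Next(i int32) int32 {
	for ; i < bv.N; i += wordBits - i%wordBits { // jump to next word boundary
		if w := bv.B[i/wordBits] >> uint(i%wordBits); w != 0 {
			return i + int32(bits.TrailingZeros32(w))
		}
	}
	return -1
}

func main() {
	bv := New(100)
	for _, i := range []int32{3, 40, 99} {
		bv.Set(i)
	}
	for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
		fmt.Print(i, " ") // 3 40 99
	}
	fmt.Println()
}
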
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
new file mode 100644
index 0000000..5202037
--- /dev/null
+++ b/src/cmd/compile/internal/deadcode/deadcode.go
@@ -0,0 +1,152 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deadcode
+
+import (
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+)
+
+func Func(fn *ir.Func) {
+	stmts(&fn.Body)
+
+	if len(fn.Body) == 0 {
+		return
+	}
+
+	for _, n := range fn.Body {
+		if len(n.Init()) > 0 {
+			return
+		}
+		switch n.Op() {
+		case ir.OIF:
+			n := n.(*ir.IfStmt)
+			if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
+				return
+			}
+		case ir.OFOR:
+			n := n.(*ir.ForStmt)
+			if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
+				return
+			}
+		default:
+			return
+		}
+	}
+
+	fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
+}
+
+func stmts(nn *ir.Nodes) {
+	var lastLabel = -1
+	for i, n := range *nn {
+		if n != nil && n.Op() == ir.OLABEL {
+			lastLabel = i
+		}
+	}
+	for i, n := range *nn {
+		// cut is set to true when all nodes after the i'th position
+		// should be removed.
+		// In other words, it marks the whole slice "tail" as dead.
+		cut := false
+		if n == nil {
+			continue
+		}
+		if n.Op() == ir.OIF {
+			n := n.(*ir.IfStmt)
+			n.Cond = expr(n.Cond)
+			if ir.IsConst(n.Cond, constant.Bool) {
+				var body ir.Nodes
+				if ir.BoolVal(n.Cond) {
+					n.Else = ir.Nodes{}
+					body = n.Body
+				} else {
+					n.Body = ir.Nodes{}
+					body = n.Else
+				}
+				// If "then" or "else" branch ends with panic or return statement,
+				// it is safe to remove all statements after this node.
+				// isterminating is not used to avoid goto-related complications.
+				// We must be careful not to deadcode-remove labels, as they
+				// might be the target of a goto. See issue 28616.
+				if len(body) != 0 {
+					switch body[len(body)-1].Op() {
+					case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
+						if i > lastLabel {
+							cut = true
+						}
+					}
+				}
+			}
+		}
+
+		if len(n.Init()) != 0 {
+			stmts(n.(ir.InitNode).PtrInit())
+		}
+		switch n.Op() {
+		case ir.OBLOCK:
+			n := n.(*ir.BlockStmt)
+			stmts(&n.List)
+		case ir.OFOR:
+			n := n.(*ir.ForStmt)
+			stmts(&n.Body)
+		case ir.OIF:
+			n := n.(*ir.IfStmt)
+			stmts(&n.Body)
+			stmts(&n.Else)
+		case ir.ORANGE:
+			n := n.(*ir.RangeStmt)
+			stmts(&n.Body)
+		case ir.OSELECT:
+			n := n.(*ir.SelectStmt)
+			for _, cas := range n.Cases {
+				stmts(&cas.Body)
+			}
+		case ir.OSWITCH:
+			n := n.(*ir.SwitchStmt)
+			for _, cas := range n.Cases {
+				stmts(&cas.Body)
+			}
+		}
+
+		if cut {
+			*nn = (*nn)[:i+1]
+			break
+		}
+	}
+}
+
+func expr(n ir.Node) ir.Node {
+	// Perform dead-code elimination on short-circuited boolean
+	// expressions involving constants with the intent of
+	// producing a constant 'if' condition.
+	switch n.Op() {
+	case ir.OANDAND:
+		n := n.(*ir.LogicalExpr)
+		n.X = expr(n.X)
+		n.Y = expr(n.Y)
+		if ir.IsConst(n.X, constant.Bool) {
+			if ir.BoolVal(n.X) {
+				return n.Y // true && x => x
+			} else {
+				return n.X // false && x => false
+			}
+		}
+	case ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		n.X = expr(n.X)
+		n.Y = expr(n.Y)
+		if ir.IsConst(n.X, constant.Bool) {
+			if ir.BoolVal(n.X) {
+				return n.X // true || x => true
+			} else {
+				return n.Y // false || x => x
+			}
+		}
+	}
+	return n
+}
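
expr above folds short-circuit boolean operators against constants so that stmts can then see a constant if condition, keep the live branch, and cut the dead tail after a terminating statement (unless a label follows). A standalone sketch of the folding on a toy AST; the real pass operates on *ir.LogicalExpr nodes:

package main

import "fmt"

type node interface{}

type lit bool                // constant true/false
type and struct{ x, y node } // OANDAND
type or struct{ x, y node }  // OOROR
type opaque string           // anything non-constant

// fold mirrors the short-circuit rewrites in expr; illustrative only.
func fold(n node) node {
	switch n := n.(type) {
	case and:
		x, y := fold(n.x), fold(n.y)
		if b, ok := x.(lit); ok {
			if bool(b) {
				return y // true && y => y
			}
			return x // false && y => false
		}
		return and{x, y}
	case or:
		x, y := fold(n.x), fold(n.y)
		if b, ok := x.(lit); ok {
			if bool(b) {
				return x // true || y => true
			}
			return y // false || y => y
		}
		return or{x, y}
	}
	return n
}

func main() {
	fmt.Println(fold(and{lit(false), opaque("expensiveCheck()")})) // false
	fmt.Println(fold(or{lit(false), opaque("x")}))                 // x
}
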
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
new file mode 100644
index 0000000..60ba208
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -0,0 +1,85 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package devirtualize implements a simple "devirtualization"
+// optimization pass, which replaces interface method calls with
+// direct concrete-type method calls where possible.
+package devirtualize
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+)
+
+// Func devirtualizes calls within fn where possible.
+func Func(fn *ir.Func) {
+	ir.CurFunc = fn
+	ir.VisitList(fn.Body, func(n ir.Node) {
+		if call, ok := n.(*ir.CallExpr); ok {
+			Call(call)
+		}
+	})
+}
+
+// Call devirtualizes the given call if possible.
+func Call(call *ir.CallExpr) {
+	if call.Op() != ir.OCALLINTER {
+		return
+	}
+	sel := call.X.(*ir.SelectorExpr)
+	r := ir.StaticValue(sel.X)
+	if r.Op() != ir.OCONVIFACE {
+		return
+	}
+	recv := r.(*ir.ConvExpr)
+
+	typ := recv.X.Type()
+	if typ.IsInterface() {
+		return
+	}
+
+	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
+	dt.SetType(typ)
+	x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
+	switch x.Op() {
+	case ir.ODOTMETH:
+		x := x.(*ir.SelectorExpr)
+		if base.Flag.LowerM != 0 {
+			base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
+		}
+		call.SetOp(ir.OCALLMETH)
+		call.X = x
+	case ir.ODOTINTER:
+		// Promoted method from embedded interface-typed field (#42279).
+		x := x.(*ir.SelectorExpr)
+		if base.Flag.LowerM != 0 {
+			base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
+		}
+		call.SetOp(ir.OCALLINTER)
+		call.X = x
+	default:
+		// TODO(mdempsky): Turn back into Fatalf after more testing.
+		if base.Flag.LowerM != 0 {
+			base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
+		}
+		return
+	}
+
+	// Duplicated logic from typecheck for function call return
+	// value types.
+	//
+	// Receiver parameter size may have changed; need to update
+	// call.Type to get correct stack offsets for result
+	// parameters.
+	types.CheckSize(x.Type())
+	switch ft := x.Type(); ft.NumResults() {
+	case 0:
+	case 1:
+		call.SetType(ft.Results().Field(0).Type)
+	default:
+		call.SetType(ft.Results())
+	}
+}
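
A source-level view of what Call rewrites: when StaticValue can see the concrete type behind an interface value, the OCALLINTER becomes a direct OCALLMETH. The program below shows the pattern the pass recognizes; building it with -gcflags=-m should print the "devirtualizing" note emitted above (the rewrite itself happens inside the compiler):

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	var w io.Writer = new(bytes.Buffer) // OCONVIFACE with a known concrete type
	w.Write([]byte("hi"))               // interface call, devirtualizable to (*bytes.Buffer).Write
	fmt.Println(w.(*bytes.Buffer).String())
}
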
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
new file mode 100644
index 0000000..dd22c03
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -0,0 +1,458 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/types"
+	"cmd/internal/dwarf"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
+	fn := curfn.(*ir.Func)
+
+	if fn.Nname != nil {
+		expect := fn.Linksym()
+		if fnsym.ABI() == obj.ABI0 {
+			expect = fn.LinksymABI(obj.ABI0)
+		}
+		if fnsym != expect {
+			base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+		}
+	}
+
+	// Back when there were two different *Funcs for a function, this code
+	// was not consistent about whether a particular *Node being processed
+	// was an ODCLFUNC or ONAME node. Partly this is because inlined function
+// bodies have no ODCLFUNC node, which was its own inconsistency.
+	// In any event, the handling of the two different nodes for DWARF purposes
+	// was subtly different, likely in unintended ways. CL 272253 merged the
+	// two nodes' Func fields, so that code sees the same *Func whether it is
+	// holding the ODCLFUNC or the ONAME. This resulted in changes in the
+	// DWARF output. To preserve the existing DWARF output and leave an
+	// intentional change for a future CL, this code does the following when
+	// fn.Op == ONAME:
+	//
+	// 1. Disallow use of createComplexVars in createDwarfVars.
+	//    It was not possible to reach that code for an ONAME before,
+	//    because the DebugInfo was set only on the ODCLFUNC Func.
+	//    Calling into it in the ONAME case causes an index out of bounds panic.
+	//
+	// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+	//    not the ONAME Func. Populating apdecls for the ONAME case results
+	//    in selected being populated after createSimpleVars is called in
+	//    createDwarfVars, and then that causes the loop to skip all the entries
+	//    in dcl, meaning that the RecordAutoType calls don't happen.
+	//
+	// These two adjustments keep toolstash -cmp working for now.
+	// Deciding the right answer is, as they say, future work.
+	//
+	// We can tell the difference between the old ODCLFUNC and ONAME
+	// cases by looking at the infosym.Name. If it's empty, DebugInfo is
+	// being called from (*obj.Link).populateDWARF, which used to use
+	// the ODCLFUNC. If it's non-empty (the name will end in $abstract),
+	// DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
+	// which used to use the ONAME form.
+	isODCLFUNC := infosym.Name == ""
+
+	var apdecls []*ir.Name
+	// Populate decls for fn.
+	if isODCLFUNC {
+		for _, n := range fn.Dcl {
+			if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+				continue
+			}
+			switch n.Class {
+			case ir.PAUTO:
+				if !n.Used() {
+					// Text == nil -> generating abstract function
+					if fnsym.Func().Text != nil {
+						base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+					}
+					continue
+				}
+			case ir.PPARAM, ir.PPARAMOUT:
+			default:
+				continue
+			}
+			apdecls = append(apdecls, n)
+			fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+		}
+	}
+
+	decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
+
+	// For each type referenced by the function's auto vars but not
+	// already referenced by a DWARF var, attach an R_USETYPE relocation to
+	// the function symbol to ensure that the type is included in DWARF
+	// processing during linking.
+	typesyms := []*obj.LSym{}
+	for t := range fnsym.Func().Autot {
+		typesyms = append(typesyms, t)
+	}
+	sort.Sort(obj.BySymName(typesyms))
+	for _, sym := range typesyms {
+		r := obj.Addrel(infosym)
+		r.Sym = sym
+		r.Type = objabi.R_USETYPE
+	}
+	fnsym.Func().Autot = nil
+
+	var varScopes []ir.ScopeID
+	for _, decl := range decls {
+		pos := declPos(decl)
+		varScopes = append(varScopes, findScope(fn.Marks, pos))
+	}
+
+	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
+	var inlcalls dwarf.InlCalls
+	if base.Flag.GenDwarfInl > 0 {
+		inlcalls = assembleInlines(fnsym, dwarfVars)
+	}
+	return scopes, inlcalls
+}
+
+func declPos(decl *ir.Name) src.XPos {
+	return decl.Canonical().Pos()
+}
+
+// createDwarfVars processes fn, returning a list of DWARF variables and the
+// Nodes they represent.
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
+	// Collect a raw list of DWARF vars.
+	var vars []*dwarf.Var
+	var decls []*ir.Name
+	var selected ir.NameSet
+	if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
+		decls, vars, selected = createComplexVars(fnsym, fn)
+	} else {
+		decls, vars, selected = createSimpleVars(fnsym, apDecls)
+	}
+
+	dcl := apDecls
+	if fnsym.WasInlined() {
+		dcl = preInliningDcls(fnsym)
+	}
+
+	// If optimization is enabled, the list above will typically be
+	// missing some of the original pre-optimization variables in the
+	// function (they may have been promoted to registers, folded into
+	// constants, dead-code eliminated, etc.).  Input arguments not eligible
+	// for SSA optimization are also missing.  Here we add back in entries
+	// for selected missing vars. Note that the recipe below creates a
+	// conservative location. The idea here is that we want to
+	// communicate to the user that "yes, there is a variable named X
+	// in this function, but no, I don't have enough information to
+	// reliably report its contents."
+	// For non-SSA-able arguments, however, the correct information
+	// is known -- they have a single home on the stack.
+	for _, n := range dcl {
+		if selected.Has(n) {
+			continue
+		}
+		c := n.Sym().Name[0]
+		if c == '.' || n.Type().IsUntyped() {
+			continue
+		}
+		if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
+			// SSA-able args get location lists, and may move in and
+			// out of registers, so those are handled elsewhere.
+			// Autos and named output params seem to get handled
+			// with VARDEF, which creates location lists.
+			// Args not of SSA-able type are treated here; they
+			// are homed on the stack in a single place for the
+			// entire call.
+			vars = append(vars, createSimpleVar(fnsym, n))
+			decls = append(decls, n)
+			continue
+		}
+		typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+		decls = append(decls, n)
+		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
+		isReturnValue := (n.Class == ir.PPARAMOUT)
+		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+		}
+		if n.Esc() == ir.EscHeap {
+			// The variable in question has been promoted to the heap.
+			// Its address is in n.Heapaddr.
+			// TODO(thanm): generate a better location expression
+		}
+		inlIndex := 0
+		if base.Flag.GenDwarfInl > 1 {
+			if n.InlFormal() || n.InlLocal() {
+				inlIndex = posInlIndex(n.Pos()) + 1
+				if n.InlFormal() {
+					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+				}
+			}
+		}
+		declpos := base.Ctxt.InnermostPos(n.Pos())
+		vars = append(vars, &dwarf.Var{
+			Name:          n.Sym().Name,
+			IsReturnValue: isReturnValue,
+			Abbrev:        abbrev,
+			StackOffset:   int32(n.FrameOffset()),
+			Type:          base.Ctxt.Lookup(typename),
+			DeclFile:      declpos.RelFilename(),
+			DeclLine:      declpos.RelLine(),
+			DeclCol:       declpos.Col(),
+			InlIndex:      int32(inlIndex),
+			ChildIndex:    -1,
+		})
+		// Record the Go type of n to ensure that it gets emitted by the linker.
+		fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+	}
+
+	return decls, vars
+}
+
+// Given a function that was inlined at some point during the
+// compilation, return a sorted list of nodes corresponding to the
+// autos/locals in that function prior to inlining. If this is a
+// function that is not local to the package being compiled, then the
+// names of the variables may have been "versioned" to avoid conflicts
+// with local vars; disregard this versioning when sorting.
+func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
+	fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
+	var rdcl []*ir.Name
+	for _, n := range fn.Inl.Dcl {
+		c := n.Sym().Name[0]
+		// Avoid reporting "_" parameters, since if there are more than
+		// one, it can result in a collision later on, as in #23179.
+		if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
+			continue
+		}
+		rdcl = append(rdcl, n)
+	}
+	return rdcl
+}
+
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+	var vars []*dwarf.Var
+	var decls []*ir.Name
+	var selected ir.NameSet
+	for _, n := range apDecls {
+		if ir.IsAutoTmp(n) {
+			continue
+		}
+
+		decls = append(decls, n)
+		vars = append(vars, createSimpleVar(fnsym, n))
+		selected.Add(n)
+	}
+	return decls, vars, selected
+}
+
+func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
+	var abbrev int
+	var offs int64
+
+	switch n.Class {
+	case ir.PAUTO:
+		offs = n.FrameOffset()
+		abbrev = dwarf.DW_ABRV_AUTO
+		if base.Ctxt.FixedFrameSize() == 0 {
+			offs -= int64(types.PtrSize)
+		}
+		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
+			// There is a word space for FP on ARM64 even if the frame pointer is disabled
+			offs -= int64(types.PtrSize)
+		}
+
+	case ir.PPARAM, ir.PPARAMOUT:
+		abbrev = dwarf.DW_ABRV_PARAM
+		offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+	default:
+		base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
+	}
+
+	typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+	delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
+	inlIndex := 0
+	if base.Flag.GenDwarfInl > 1 {
+		if n.InlFormal() || n.InlLocal() {
+			inlIndex = posInlIndex(n.Pos()) + 1
+			if n.InlFormal() {
+				abbrev = dwarf.DW_ABRV_PARAM
+			}
+		}
+	}
+	declpos := base.Ctxt.InnermostPos(declPos(n))
+	return &dwarf.Var{
+		Name:          n.Sym().Name,
+		IsReturnValue: n.Class == ir.PPARAMOUT,
+		IsInlFormal:   n.InlFormal(),
+		Abbrev:        abbrev,
+		StackOffset:   int32(offs),
+		Type:          base.Ctxt.Lookup(typename),
+		DeclFile:      declpos.RelFilename(),
+		DeclLine:      declpos.RelLine(),
+		DeclCol:       declpos.Col(),
+		InlIndex:      int32(inlIndex),
+		ChildIndex:    -1,
+	}
+}
+
+// createComplexVars creates recomposed DWARF vars with location lists,
+// suitable for describing optimized code.
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+	debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+
+	// Produce a DWARF variable entry for each user variable.
+	var decls []*ir.Name
+	var vars []*dwarf.Var
+	var ssaVars ir.NameSet
+
+	for varID, dvar := range debugInfo.Vars {
+		n := dvar
+		ssaVars.Add(n)
+		for _, slot := range debugInfo.VarSlots[varID] {
+			ssaVars.Add(debugInfo.Slots[slot].N)
+		}
+
+		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
+			decls = append(decls, n)
+			vars = append(vars, dvar)
+		}
+	}
+
+	return decls, vars, ssaVars
+}
+
+// createComplexVar builds a single DWARF variable entry and location list.
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+	debug := fn.DebugInfo.(*ssa.FuncDebug)
+	n := debug.Vars[varID]
+
+	var abbrev int
+	switch n.Class {
+	case ir.PAUTO:
+		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
+	case ir.PPARAM, ir.PPARAMOUT:
+		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+	default:
+		return nil
+	}
+
+	gotype := reflectdata.TypeLinksym(n.Type())
+	delete(fnsym.Func().Autot, gotype)
+	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
+	inlIndex := 0
+	if base.Flag.GenDwarfInl > 1 {
+		if n.InlFormal() || n.InlLocal() {
+			inlIndex = posInlIndex(n.Pos()) + 1
+			if n.InlFormal() {
+				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+			}
+		}
+	}
+	declpos := base.Ctxt.InnermostPos(n.Pos())
+	dvar := &dwarf.Var{
+		Name:          n.Sym().Name,
+		IsReturnValue: n.Class == ir.PPARAMOUT,
+		IsInlFormal:   n.InlFormal(),
+		Abbrev:        abbrev,
+		Type:          base.Ctxt.Lookup(typename),
+		// The stack offset is used as a sorting key, so for decomposed
+		// variables just give it the first one. It's not used otherwise.
+		// This won't work well if the first slot hasn't been assigned a stack
+		// location, but it's not obvious how to do better.
+		StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
+		DeclFile:    declpos.RelFilename(),
+		DeclLine:    declpos.RelLine(),
+		DeclCol:     declpos.Col(),
+		InlIndex:    int32(inlIndex),
+		ChildIndex:  -1,
+	}
+	list := debug.LocationLists[varID]
+	if len(list) != 0 {
+		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
+			debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+		}
+	}
+	return dvar
+}
+
+// RecordFlags records the specified command-line flags to be placed
+// in the DWARF info.
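+//
+// For example (a sketch; the exact set depends on each flag's
+// default), compiling with -N -l records the producer suffix
+// "-N -l", while flags still at their default values are omitted.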
+func RecordFlags(flags ...string) {
+	if base.Ctxt.Pkgpath == "" {
+		// We can't record the flags if we don't know what the
+		// package name is.
+		return
+	}
+
+	type BoolFlag interface {
+		IsBoolFlag() bool
+	}
+	type CountFlag interface {
+		IsCountFlag() bool
+	}
+	var cmd bytes.Buffer
+	for _, name := range flags {
+		f := flag.Lookup(name)
+		if f == nil {
+			continue
+		}
+		getter := f.Value.(flag.Getter)
+		if getter.String() == f.DefValue {
+			// Flag has default value, so omit it.
+			continue
+		}
+		if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+			val, ok := getter.Get().(bool)
+			if ok && val {
+				fmt.Fprintf(&cmd, " -%s", f.Name)
+				continue
+			}
+		}
+		if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+			val, ok := getter.Get().(int)
+			if ok && val == 1 {
+				fmt.Fprintf(&cmd, " -%s", f.Name)
+				continue
+			}
+		}
+		fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
+	}
+
+	if cmd.Len() == 0 {
+		return
+	}
+	s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
+	s.Type = objabi.SDWARFCUINFO
+	// Sometimes (for example when building tests) we can link
+	// together two package main archives. So allow dups.
+	s.Set(obj.AttrDuplicateOK, true)
+	base.Ctxt.Data = append(base.Ctxt.Data, s)
+	s.P = cmd.Bytes()[1:]
+}
+
+// RecordPackageName records the name of the package being
+// compiled, so that the linker can save it in the compile unit's DIE.
+func RecordPackageName() {
+	s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
+	s.Type = objabi.SDWARFCUINFO
+	// Sometimes (for example when building tests) we can link
+	// together two package main archives. So allow dups.
+	s.Set(obj.AttrDuplicateOK, true)
+	base.Ctxt.Data = append(base.Ctxt.Data, s)
+	s.P = []byte(types.LocalPkg.Name)
+}
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go
similarity index 85%
rename from src/cmd/compile/internal/gc/dwinl.go
rename to src/cmd/compile/internal/dwarfgen/dwinl.go
index bb5ae61..d5687cb 100644
--- a/src/cmd/compile/internal/gc/dwinl.go
+++ b/src/cmd/compile/internal/dwarfgen/dwinl.go
@@ -2,14 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package dwarfgen
 
 import (
+	"fmt"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/internal/dwarf"
 	"cmd/internal/obj"
 	"cmd/internal/src"
-	"fmt"
-	"strings"
 )
 
 // To identify variables by original source position.
@@ -26,8 +29,8 @@
 func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
 	var inlcalls dwarf.InlCalls
 
-	if Debug_gendwarfinl != 0 {
-		Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+	if base.Debug.DwarfInl != 0 {
+		base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
 	}
 
 	// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@@ -106,7 +109,7 @@
 			}
 			m = makePreinlineDclMap(fnsym)
 		} else {
-			ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
+			ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
 			m = makePreinlineDclMap(ifnlsym)
 		}
 
@@ -181,7 +184,7 @@
 	}
 
 	// Debugging
-	if Debug_gendwarfinl != 0 {
+	if base.Debug.DwarfInl != 0 {
 		dumpInlCalls(inlcalls)
 		dumpInlVars(dwVars)
 	}
@@ -204,16 +207,17 @@
 // late in the compilation when it is determined that we need an
 // abstract function DIE for an inlined routine imported from a
 // previously compiled package.
-func genAbstractFunc(fn *obj.LSym) {
-	ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
+func AbstractFunc(fn *obj.LSym) {
+	ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
 	if ifn == nil {
-		Ctxt.Diag("failed to locate precursor fn for %v", fn)
+		base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
 		return
 	}
-	if Debug_gendwarfinl != 0 {
-		Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+	_ = ifn.(*ir.Func)
+	if base.Debug.DwarfInl != 0 {
+		base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
 	}
-	Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
+	base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
 }
 
 // Undo any versioning performed when a name was written
@@ -235,15 +239,15 @@
 	dcl := preInliningDcls(fnsym)
 	m := make(map[varPos]int)
 	for i, n := range dcl {
-		pos := Ctxt.InnermostPos(n.Pos)
+		pos := base.Ctxt.InnermostPos(n.Pos())
 		vp := varPos{
-			DeclName: unversion(n.Sym.Name),
+			DeclName: unversion(n.Sym().Name),
 			DeclFile: pos.RelFilename(),
 			DeclLine: pos.RelLine(),
 			DeclCol:  pos.Col(),
 		}
 		if _, found := m[vp]; found {
-			Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
+			base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
 		}
 		m[vp] = i
 	}
@@ -260,17 +264,17 @@
 	// is one. We do this first so that parents appear before their
 	// children in the resulting table.
 	parCallIdx := -1
-	parInlIdx := Ctxt.InlTree.Parent(inlIdx)
+	parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
 	if parInlIdx >= 0 {
 		parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
 	}
 
 	// Create new entry for this inline
-	inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
-	callXPos := Ctxt.InlTree.CallPos(inlIdx)
-	absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
-	pb := Ctxt.PosTable.Pos(callXPos).Base()
-	callFileSym := Ctxt.Lookup(pb.SymFilename())
+	inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
+	callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
+	absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+	pb := base.Ctxt.PosTable.Pos(callXPos).Base()
+	callFileSym := base.Ctxt.Lookup(pb.SymFilename())
 	ic := dwarf.InlCall{
 		InlIndex:  inlIdx,
 		CallFile:  callFileSym,
@@ -298,7 +302,7 @@
 // the index for a node from the inlined body of D will refer to the
 // call to D from C. Whew.
 func posInlIndex(xpos src.XPos) int {
-	pos := Ctxt.PosTable.Pos(xpos)
+	pos := base.Ctxt.PosTable.Pos(xpos)
 	if b := pos.Base(); b != nil {
 		ii := b.InliningIndex()
 		if ii >= 0 {
@@ -324,7 +328,7 @@
 	// Append range to correct inlined call
 	callIdx, found := imap[ii]
 	if !found {
-		Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+		base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
 	}
 	call := &calls[callIdx]
 	call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@@ -332,23 +336,23 @@
 
 func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
 	for i := 0; i < ilevel; i++ {
-		Ctxt.Logf("  ")
+		base.Ctxt.Logf("  ")
 	}
 	ic := inlcalls.Calls[idx]
-	callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
-	Ctxt.Logf("  %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+	callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+	base.Ctxt.Logf("  %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
 	for _, f := range ic.InlVars {
-		Ctxt.Logf(" %v", f.Name)
+		base.Ctxt.Logf(" %v", f.Name)
 	}
-	Ctxt.Logf(" ) C: (")
+	base.Ctxt.Logf(" ) C: (")
 	for _, k := range ic.Children {
-		Ctxt.Logf(" %v", k)
+		base.Ctxt.Logf(" %v", k)
 	}
-	Ctxt.Logf(" ) R:")
+	base.Ctxt.Logf(" ) R:")
 	for _, r := range ic.Ranges {
-		Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+		base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
 	}
-	Ctxt.Logf("\n")
+	base.Ctxt.Logf("\n")
 	for _, k := range ic.Children {
 		dumpInlCall(inlcalls, k, ilevel+1)
 	}
@@ -373,7 +377,7 @@
 		if dwv.IsInAbstract {
 			ia = 1
 		}
-		Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+		base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
 	}
 }
 
@@ -410,7 +414,7 @@
 
 	// Callee
 	ic := inlCalls.Calls[idx]
-	callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+	callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
 	calleeRanges := ic.Ranges
 
 	// Caller
@@ -418,14 +422,14 @@
 	parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
 	if parentIdx != -1 {
 		pic := inlCalls.Calls[parentIdx]
-		caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+		caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
 		parentRanges = pic.Ranges
 	}
 
 	// Callee ranges contained in caller ranges?
 	c, m := rangesContainsAll(parentRanges, calleeRanges)
 	if !c {
-		Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+		base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
 	}
 
 	// Now visit kids
diff --git a/src/cmd/compile/internal/dwarfgen/marker.go b/src/cmd/compile/internal/dwarfgen/marker.go
new file mode 100644
index 0000000..ec6ce45
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/marker.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/internal/src"
+)
+
+// A ScopeMarker tracks scope nesting and boundaries for later use
+// during DWARF generation.
+type ScopeMarker struct {
+	parents []ir.ScopeID
+	marks   []ir.Mark
+}
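+
+// A typical usage sketch (caller names are illustrative): push a
+// scope at each opening brace, pop it at the matching closing brace,
+// and write the result out once the function body has been walked:
+//
+//     var m ScopeMarker
+//     m.Push(openPos)  // at "{"
+//     m.Pop(closePos)  // at the matching "}"
+//     m.WriteTo(fn)    // record parents and marks on fn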
+
+// checkPos validates the given position and returns the current scope.
+func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
+	if !pos.IsKnown() {
+		base.Fatalf("unknown scope position")
+	}
+
+	if len(m.marks) == 0 {
+		return 0
+	}
+
+	last := &m.marks[len(m.marks)-1]
+	if xposBefore(pos, last.Pos) {
+		base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
+	}
+	return last.Scope
+}
+
+// Push records a transition to a new child scope of the current scope.
+func (m *ScopeMarker) Push(pos src.XPos) {
+	current := m.checkPos(pos)
+
+	m.parents = append(m.parents, current)
+	child := ir.ScopeID(len(m.parents))
+
+	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
+}
+
+// Pop records a transition back to the current scope's parent.
+func (m *ScopeMarker) Pop(pos src.XPos) {
+	current := m.checkPos(pos)
+
+	parent := m.parents[current-1]
+
+	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
+}
+
+// Unpush removes the current scope, which must be empty.
+func (m *ScopeMarker) Unpush() {
+	i := len(m.marks) - 1
+	current := m.marks[i].Scope
+
+	if current != ir.ScopeID(len(m.parents)) {
+		base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
+	}
+
+	m.parents = m.parents[:current-1]
+	m.marks = m.marks[:i]
+}
+
+// WriteTo writes the recorded scope marks to the given function,
+// and resets the marker for reuse.
+func (m *ScopeMarker) WriteTo(fn *ir.Func) {
+	m.compactMarks()
+
+	fn.Parents = make([]ir.ScopeID, len(m.parents))
+	copy(fn.Parents, m.parents)
+	m.parents = m.parents[:0]
+
+	fn.Marks = make([]ir.Mark, len(m.marks))
+	copy(fn.Marks, m.marks)
+	m.marks = m.marks[:0]
+}
+
+func (m *ScopeMarker) compactMarks() {
+	n := 0
+	for _, next := range m.marks {
+		if n > 0 && next.Pos == m.marks[n-1].Pos {
+			m.marks[n-1].Scope = next.Scope
+			continue
+		}
+		m.marks[n] = next
+		n++
+	}
+	m.marks = m.marks[:n]
+}
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/dwarfgen/scope.go
similarity index 78%
rename from src/cmd/compile/internal/gc/scope.go
rename to src/cmd/compile/internal/dwarfgen/scope.go
index e66b859..1c040ed 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/dwarfgen/scope.go
@@ -2,21 +2,24 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package dwarfgen
 
 import (
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/internal/dwarf"
 	"cmd/internal/obj"
 	"cmd/internal/src"
-	"sort"
 )
 
 // See golang.org/issue/20390.
 func xposBefore(p, q src.XPos) bool {
-	return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
+	return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
 }
 
-func findScope(marks []Mark, pos src.XPos) ScopeID {
+func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
 	i := sort.Search(len(marks), func(i int) bool {
 		return xposBefore(pos, marks[i].Pos)
 	})
@@ -26,20 +29,20 @@
 	return marks[i-1].Scope
 }
 
-func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
+func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
 	// Initialize the DWARF scope tree based on lexical scopes.
-	dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
-	for i, parent := range fn.Func.Parents {
+	dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
+	for i, parent := range fn.Parents {
 		dwarfScopes[i+1].Parent = int32(parent)
 	}
 
 	scopeVariables(dwarfVars, varScopes, dwarfScopes)
-	scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
+	scopePCs(fnsym, fn.Marks, dwarfScopes)
 	return compactScopes(dwarfScopes)
 }
 
 // scopeVariables assigns DWARF variable records to their scopes.
-func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) {
 	sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
 
 	i0 := 0
@@ -56,7 +59,7 @@
 }
 
 // scopePCs assigns PC ranges to their scopes.
-func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
+func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
 	// If there aren't any child scopes (in particular, when scope
 	// tracking is disabled), we can skip a whole lot of work.
 	if len(marks) == 0 {
@@ -89,7 +92,7 @@
 
 type varsByScopeAndOffset struct {
 	vars   []*dwarf.Var
-	scopes []ScopeID
+	scopes []ir.ScopeID
 }
 
 func (v varsByScopeAndOffset) Len() int {
diff --git a/src/cmd/compile/internal/gc/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/scope_test.go
rename to src/cmd/compile/internal/dwarfgen/scope_test.go
index b0e038d..fcfcf85 100644
--- a/src/cmd/compile/internal/gc/scope_test.go
+++ b/src/cmd/compile/internal/dwarfgen/scope_test.go
@@ -2,10 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc_test
+package dwarfgen
 
 import (
-	"cmd/internal/objfile"
 	"debug/dwarf"
 	"fmt"
 	"internal/testenv"
@@ -18,6 +17,8 @@
 	"strconv"
 	"strings"
 	"testing"
+
+	"cmd/internal/objfile"
 )
 
 type testline struct {
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
new file mode 100644
index 0000000..58cad73
--- /dev/null
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -0,0 +1,2137 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// Escape analysis.
+//
+// Here we analyze functions to determine which Go variables
+// (including implicit allocations such as calls to "new" or "make",
+// composite literals, etc.) can be allocated on the stack. The two
+// key invariants we have to ensure are: (1) pointers to stack objects
+// cannot be stored in the heap, and (2) pointers to a stack object
+// cannot outlive that object (e.g., because the declaring function
+// returned and destroyed the object's stack frame, or its space is
+// reused across loop iterations for logically distinct variables).
+//
+// We implement this with a static data-flow analysis of the AST.
+// First, we construct a directed weighted graph where vertices
+// (termed "locations") represent variables allocated by statements
+// and expressions, and edges represent assignments between variables
+// (with weights representing addressing/dereference counts).
+//
+// Next we walk the graph looking for assignment paths that might
+// violate the invariants stated above. If a variable v's address is
+// stored in the heap or elsewhere that may outlive it, then v is
+// marked as requiring heap allocation.
+//
+// To support interprocedural analysis, we also record data-flow from
+// each function's parameters to the heap and to its result
+// parameters. This information is summarized as "parameter tags",
+// which are used at static call sites to improve escape analysis of
+// function arguments.
+
+// Constructing the location graph.
+//
+// Every allocating statement (e.g., variable declaration) or
+// expression (e.g., "new" or "make") is first mapped to a unique
+// "location."
+//
+// We also model every Go assignment as a directed edge between
+// locations. The number of dereference operations minus the number of
+// addressing operations is recorded as the edge's weight (termed
+// "derefs"). For example:
+//
+//     p = &q    // -1
+//     p = q     //  0
+//     p = *q    //  1
+//     p = **q   //  2
+//
+//     p = **&**&q  // 2
+//
+// Note that the & operator can only be applied to addressable
+// expressions, and the expression &x itself is not addressable, so
+// derefs cannot go below -1.
+//
+// Every Go language construct is lowered into this representation,
+// generally without sensitivity to flow, path, or context; and
+// without distinguishing elements within a compound variable. For
+// example:
+//
+//     var x struct { f, g *int }
+//     var u []*int
+//
+//     x.f = u[0]
+//
+// is modeled simply as
+//
+//     x = *u
+//
+// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
+// u[2], etc. However, we do record the implicit dereference involved
+// in indexing a slice.
+
+// A batch holds escape analysis state that's shared across an entire
+// batch of functions being analyzed at once.
+type batch struct {
+	allLocs  []*location
+	closures []closure
+
+	heapLoc  location
+	blankLoc location
+}
+
+// A closure holds a closure expression and its spill hole (i.e.,
+// the hole representing storing into its closure record).
+type closure struct {
+	k   hole
+	clo *ir.ClosureExpr
+}
+
+// An escape holds state specific to a single function being analyzed
+// within a batch.
+type escape struct {
+	*batch
+
+	curfn *ir.Func // function being analyzed
+
+	labels map[*types.Sym]labelState // known labels
+
+	// loopDepth counts the current loop nesting depth within
+	// curfn. It increments within each "for" loop and at each
+	// label with a corresponding backwards "goto" (i.e.,
+	// unstructured loop).
+	loopDepth int
+}
+
+// A location represents an abstract location that stores a Go
+// variable.
+type location struct {
+	n         ir.Node  // represented variable or expression, if any
+	curfn     *ir.Func // enclosing function
+	edges     []edge   // incoming edges
+	loopDepth int      // loopDepth at declaration
+
+	// resultIndex records the tuple index (starting at 1) for
+	// PPARAMOUT variables within their function's result type.
+	// For non-PPARAMOUT variables it's 0.
+	resultIndex int
+
+	// derefs and walkgen are used during walkOne to track the
+	// minimal dereferences from the walk root.
+	derefs  int // >= -1
+	walkgen uint32
+
+	// dst and dstEdgeIdx track the next immediate assignment
+	// destination location during walkOne, along with the index
+	// of the edge pointing back to this location.
+	dst        *location
+	dstEdgeIdx int
+
+	// queued is used by walkAll to track whether this location is
+	// in the walk queue.
+	queued bool
+
+	// escapes reports whether the represented variable's address
+	// escapes; that is, whether the variable must be heap
+	// allocated.
+	escapes bool
+
+	// transient reports whether the represented expression's
+	// address does not outlive the statement; that is, whether
+	// its storage can be immediately reused.
+	transient bool
+
+	// paramEsc records the represented parameter's leak set.
+	paramEsc leaks
+
+	captured   bool // has a closure captured this variable?
+	reassigned bool // has this variable been reassigned?
+	addrtaken  bool // has this variable's address been taken?
+}
+
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+	src    *location
+	derefs int // >= -1
+	notes  *note
+}
+
+// Fmt is called from node printing to print information about escape analysis results.
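+//
+// For example (sketch): a heap-escaping node renders as "esc(h)" and
+// a proven-non-escaping one as "esc(no)".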
+func Fmt(n ir.Node) string {
+	text := ""
+	switch n.Esc() {
+	case ir.EscUnknown:
+		break
+
+	case ir.EscHeap:
+		text = "esc(h)"
+
+	case ir.EscNone:
+		text = "esc(no)"
+
+	case ir.EscNever:
+		text = "esc(N)"
+
+	default:
+		text = fmt.Sprintf("esc(%d)", n.Esc())
+	}
+
+	if n.Op() == ir.ONAME {
+		n := n.(*ir.Name)
+		if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
+			if text != "" {
+				text += " "
+			}
+			text += fmt.Sprintf("ld(%d)", loc.loopDepth)
+		}
+	}
+
+	return text
+}
+
+// Batch performs escape analysis on a minimal batch of
+// functions.
+func Batch(fns []*ir.Func, recursive bool) {
+	for _, fn := range fns {
+		if fn.Op() != ir.ODCLFUNC {
+			base.Fatalf("unexpected node: %v", fn)
+		}
+	}
+
+	var b batch
+	b.heapLoc.escapes = true
+
+	// Construct data-flow graph from syntax trees.
+	for _, fn := range fns {
+		if base.Flag.W > 1 {
+			s := fmt.Sprintf("\nbefore escape %v", fn)
+			ir.Dump(s, fn)
+		}
+		b.initFunc(fn)
+	}
+	for _, fn := range fns {
+		if !fn.IsHiddenClosure() {
+			b.walkFunc(fn)
+		}
+	}
+
+	// We've walked the function bodies, so we've seen everywhere a
+// variable might be reassigned or have its address taken. Now we
+	// can decide whether closures should capture their free variables
+	// by value or reference.
+	for _, closure := range b.closures {
+		b.flowClosure(closure.k, closure.clo)
+	}
+	b.closures = nil
+
+	for _, loc := range b.allLocs {
+		if why := HeapAllocReason(loc.n); why != "" {
+			b.flow(b.heapHole().addr(loc.n, why), loc)
+		}
+	}
+
+	b.walkAll()
+	b.finish(fns)
+}
+
+func (b *batch) with(fn *ir.Func) *escape {
+	return &escape{
+		batch:     b,
+		curfn:     fn,
+		loopDepth: 1,
+	}
+}
+
+func (b *batch) initFunc(fn *ir.Func) {
+	e := b.with(fn)
+	if fn.Esc() != escFuncUnknown {
+		base.Fatalf("unexpected node: %v", fn)
+	}
+	fn.SetEsc(escFuncPlanned)
+	if base.Flag.LowerM > 3 {
+		ir.Dump("escAnalyze", fn)
+	}
+
+	// Allocate locations for local variables.
+	for _, n := range fn.Dcl {
+		if n.Op() == ir.ONAME {
+			e.newLoc(n, false)
+		}
+	}
+
+	// Initialize resultIndex for result parameters.
+	for i, f := range fn.Type().Results().FieldSlice() {
+		e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
+	}
+}
+
+func (b *batch) walkFunc(fn *ir.Func) {
+	e := b.with(fn)
+	fn.SetEsc(escFuncStarted)
+
+	// Identify labels that mark the head of an unstructured loop.
+	ir.Visit(fn, func(n ir.Node) {
+		switch n.Op() {
+		case ir.OLABEL:
+			n := n.(*ir.LabelStmt)
+			if e.labels == nil {
+				e.labels = make(map[*types.Sym]labelState)
+			}
+			e.labels[n.Label] = nonlooping
+
+		case ir.OGOTO:
+			// If we visited the label before the goto,
+			// then this is a looping label.
+			n := n.(*ir.BranchStmt)
+			if e.labels[n.Label] == nonlooping {
+				e.labels[n.Label] = looping
+			}
+		}
+	})
+
+	e.block(fn.Body)
+
+	if len(e.labels) != 0 {
+		base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
+	}
+}
+
+func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
+	for _, cv := range clo.Func.ClosureVars {
+		n := cv.Canonical()
+		loc := b.oldLoc(cv)
+		if !loc.captured {
+			base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv)
+		}
+
+		// Capture by value for variables <= 128 bytes that are never reassigned.
+		n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
+		if !n.Byval() {
+			n.SetAddrtaken(true)
+		}
+
+		if base.Flag.LowerM > 1 {
+			how := "ref"
+			if n.Byval() {
+				how = "value"
+			}
+			base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size())
+		}
+
+		// Flow captured variables to closure.
+		k := k
+		if !cv.Byval() {
+			k = k.addr(cv, "reference")
+		}
+		b.flow(k.note(cv, "captured by a closure"), loc)
+	}
+}
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+//     if x {
+//         e.discard(n.Left)
+//     } else {
+//         e.value(k, n.Left)
+//     }
+//
+// or
+//
+//     if x {
+//         k = e.discardHole()
+//     }
+//     e.value(k, n.Left)
+//
+// Do NOT write:
+//
+//    // BAD: possibly loses side-effects within n.Left
+//    if !x {
+//        e.value(k, n.Left)
+//    }
+
+// stmt evaluates a single Go statement.
+func (e *escape) stmt(n ir.Node) {
+	if n == nil {
+		return
+	}
+
+	lno := ir.SetPos(n)
+	defer func() {
+		base.Pos = lno
+	}()
+
+	if base.Flag.LowerM > 2 {
+		fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
+	}
+
+	e.stmts(n.Init())
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected stmt: %v", n)
+
+	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+		// nop
+
+	case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+		// TODO(mdempsky): Handle dead code?
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		e.stmts(n.List)
+
+	case ir.ODCL:
+		// Record loop depth at declaration.
+		n := n.(*ir.Decl)
+		if !ir.IsBlank(n.X) {
+			e.dcl(n.X)
+		}
+
+	case ir.OLABEL:
+		n := n.(*ir.LabelStmt)
+		switch e.labels[n.Label] {
+		case nonlooping:
+			if base.Flag.LowerM > 2 {
+				fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
+			}
+		case looping:
+			if base.Flag.LowerM > 2 {
+				fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
+			}
+			e.loopDepth++
+		default:
+			base.Fatalf("label missing tag")
+		}
+		delete(e.labels, n.Label)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		e.discard(n.Cond)
+		e.block(n.Body)
+		e.block(n.Else)
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		e.loopDepth++
+		e.discard(n.Cond)
+		e.stmt(n.Post)
+		e.block(n.Body)
+		e.loopDepth--
+
+	case ir.ORANGE:
+		// for Key, Value = range X { Body }
+		n := n.(*ir.RangeStmt)
+
+		// X is evaluated outside the loop.
+		tmp := e.newLoc(nil, false)
+		e.expr(tmp.asHole(), n.X)
+
+		e.loopDepth++
+		ks := e.addrs([]ir.Node{n.Key, n.Value})
+		if n.X.Type().IsArray() {
+			e.flow(ks[1].note(n, "range"), tmp)
+		} else {
+			e.flow(ks[1].deref(n, "range-deref"), tmp)
+		}
+		e.reassigned(ks, n)
+
+		e.block(n.Body)
+		e.loopDepth--
+
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+
+		if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+			var ks []hole
+			if guard.Tag != nil {
+				for _, cas := range n.Cases {
+					cv := cas.Var
+					k := e.dcl(cv) // type switch variables have no ODCL.
+					if cv.Type().HasPointers() {
+						ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+					}
+				}
+			}
+			e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
+		} else {
+			e.discard(n.Tag)
+		}
+
+		for _, cas := range n.Cases {
+			e.discards(cas.List)
+			e.block(cas.Body)
+		}
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		for _, cas := range n.Cases {
+			e.stmt(cas.Comm)
+			e.block(cas.Body)
+		}
+	case ir.ORECV:
+		// TODO(mdempsky): Consider e.discard(n.Left).
+		n := n.(*ir.UnaryExpr)
+		e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		e.discard(n.Chan)
+		e.assignHeap(n.Value, "send", n)
+
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		// TODO(mdempsky): Worry about OLSH/ORSH?
+		e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+	case ir.OAS2:
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
+
+	case ir.OAS2DOTTYPE: // v, ok = x.(type)
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
+	case ir.OAS2MAPR: // v, ok = m[k]
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
+	case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
+
+	case ir.OAS2FUNC:
+		n := n.(*ir.AssignListStmt)
+		e.stmts(n.Rhs[0].Init())
+		ks := e.addrs(n.Lhs)
+		e.call(ks, n.Rhs[0], nil)
+		e.reassigned(ks, n)
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		results := e.curfn.Type().Results().FieldSlice()
+		dsts := make([]ir.Node, len(results))
+		for i, res := range results {
+			dsts[i] = res.Nname.(*ir.Name)
+		}
+		e.assignList(dsts, n.Results, "return", n)
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+		e.call(nil, n, nil)
+	case ir.OGO, ir.ODEFER:
+		n := n.(*ir.GoDeferStmt)
+		e.stmts(n.Call.Init())
+		e.call(nil, n.Call, n)
+
+	case ir.OTAILCALL:
+		// TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
+	}
+}
+
+func (e *escape) stmts(l ir.Nodes) {
+	for _, n := range l {
+		e.stmt(n)
+	}
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *escape) block(l ir.Nodes) {
+	old := e.loopDepth
+	e.stmts(l)
+	e.loopDepth = old
+}
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *escape) expr(k hole, n ir.Node) {
+	if n == nil {
+		return
+	}
+	e.stmts(n.Init())
+	e.exprSkipInit(k, n)
+}
+
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
+	if n == nil {
+		return
+	}
+
+	lno := ir.SetPos(n)
+	defer func() {
+		base.Pos = lno
+	}()
+
+	uintptrEscapesHack := k.uintptrEscapesHack
+	k.uintptrEscapesHack = false
+
+	if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
+		// nop
+	} else if k.derefs >= 0 && !n.Type().HasPointers() {
+		k.dst = &e.blankLoc
+	}
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected expr: %v", n)
+
+	case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
+		// nop
+
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
+			return
+		}
+		if n.IsClosureVar() && n.Defn == nil {
+			return // ".this" from method value wrapper
+		}
+		e.flow(k, e.oldLoc(n))
+
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+		n := n.(*ir.UnaryExpr)
+		e.discard(n.X)
+	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		e.discard(n.X)
+		e.discard(n.Y)
+	case ir.OANDAND, ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		e.discard(n.X)
+		e.discard(n.Y)
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		e.expr(k.addr(n, "address-of"), n.X) // "address-of"
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		e.expr(k.deref(n, "indirection"), n.X) // "indirection"
+	case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+		n := n.(*ir.SelectorExpr)
+		e.expr(k.note(n, "dot"), n.X)
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
+	case ir.ODOTTYPE, ir.ODOTTYPE2:
+		n := n.(*ir.TypeAssertExpr)
+		e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		if n.X.Type().IsArray() {
+			e.expr(k.note(n, "fixed-array-index-of"), n.X)
+		} else {
+			// TODO(mdempsky): Fix the "why" reason text.
+			e.expr(k.deref(n, "dot of pointer"), n.X)
+		}
+		e.discard(n.Index)
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		e.discard(n.X)
+		e.discard(n.Index)
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+		n := n.(*ir.SliceExpr)
+		e.expr(k.note(n, "slice"), n.X)
+		e.discard(n.Low)
+		e.discard(n.High)
+		e.discard(n.Max)
+
+	case ir.OCONV, ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
+			// When -d=checkptr=2 is enabled, treat
+			// conversions to unsafe.Pointer as an
+			// escaping operation. This allows better
+			// runtime instrumentation, since we can more
+			// easily detect object boundaries on the heap
+			// than the stack.
+			e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+		} else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+			e.unsafeValue(k, n.X)
+		} else {
+			e.expr(k, n.X)
+		}
+	case ir.OCONVIFACE:
+		n := n.(*ir.ConvExpr)
+		if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
+			k = e.spill(k, n)
+		}
+		e.expr(k.note(n, "interface-converted"), n.X)
+
+	case ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		e.discard(n.X)
+
+	case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
+		e.call([]hole{k}, n, nil)
+
+	case ir.ONEW:
+		n := n.(*ir.UnaryExpr)
+		e.spill(k, n)
+
+	case ir.OMAKESLICE:
+		n := n.(*ir.MakeExpr)
+		e.spill(k, n)
+		e.discard(n.Len)
+		e.discard(n.Cap)
+	case ir.OMAKECHAN:
+		n := n.(*ir.MakeExpr)
+		e.discard(n.Len)
+	case ir.OMAKEMAP:
+		n := n.(*ir.MakeExpr)
+		e.spill(k, n)
+		e.discard(n.Len)
+
+	case ir.ORECOVER:
+		// nop
+
+	case ir.OCALLPART:
+		// Flow the receiver argument to both the closure and
+		// to the receiver parameter.
+
+		n := n.(*ir.SelectorExpr)
+		closureK := e.spill(k, n)
+
+		m := n.Selection
+
+		// We don't know how the method value will be called
+		// later, so conservatively assume the result
+		// parameters all flow to the heap.
+		//
+		// TODO(mdempsky): Change ks into a callback, so that
+		// we don't have to create this slice?
+		var ks []hole
+		for i := m.Type.NumResults(); i > 0; i-- {
+			ks = append(ks, e.heapHole())
+		}
+		name, _ := m.Nname.(*ir.Name)
+		paramK := e.tagHole(ks, name, m.Type.Recv())
+
+		e.expr(e.teeHole(paramK, closureK), n.X)
+
+	case ir.OPTRLIT:
+		n := n.(*ir.AddrExpr)
+		e.expr(e.spill(k, n), n.X)
+
+	case ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, elt := range n.List {
+			if elt.Op() == ir.OKEY {
+				elt = elt.(*ir.KeyExpr).Value
+			}
+			e.expr(k.note(n, "array literal element"), elt)
+		}
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		k = e.spill(k, n)
+		k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
+
+		for _, elt := range n.List {
+			if elt.Op() == ir.OKEY {
+				elt = elt.(*ir.KeyExpr).Value
+			}
+			e.expr(k.note(n, "slice-literal-element"), elt)
+		}
+
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, elt := range n.List {
+			e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+		}
+
+	case ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		e.spill(k, n)
+
+		// Map keys and values are always stored in the heap.
+		for _, elt := range n.List {
+			elt := elt.(*ir.KeyExpr)
+			e.assignHeap(elt.Key, "map literal key", n)
+			e.assignHeap(elt.Value, "map literal value", n)
+		}
+
+	case ir.OCLOSURE:
+		n := n.(*ir.ClosureExpr)
+		k = e.spill(k, n)
+		e.closures = append(e.closures, closure{k, n})
+
+		if fn := n.Func; fn.IsHiddenClosure() {
+			for _, cv := range fn.ClosureVars {
+				if loc := e.oldLoc(cv); !loc.captured {
+					loc.captured = true
+
+					// Ignore reassignments to the variable in straightline code
+					// preceding the first capture by a closure.
+					if loc.loopDepth == e.loopDepth {
+						loc.reassigned = false
+					}
+				}
+			}
+
+			for _, n := range fn.Dcl {
+				// Add locations for local variables of the
+				// closure, if needed, in case we're not including
+				// the closure func in the batch for escape
+				// analysis (happens for escape analysis called
+				// from reflectdata.methodWrapper)
+				if n.Op() == ir.ONAME && n.Opt == nil {
+					e.with(fn).newLoc(n, false)
+				}
+			}
+			e.walkFunc(fn)
+		}
+
+	case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		e.spill(k, n)
+		e.discard(n.X)
+
+	case ir.OADDSTR:
+		n := n.(*ir.AddStringExpr)
+		e.spill(k, n)
+
+		// Arguments of OADDSTR never escape;
+		// runtime.concatstrings makes sure of that.
+		e.discards(n.List)
+	}
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
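+//
+// For example (sketch): in uintptr(unsafe.Pointer(p)) + n, the
+// converted pointer p flows to k, while the plain uintptr operand n
+// is merely discarded.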
+func (e *escape) unsafeValue(k hole, n ir.Node) {
+	if n.Type().Kind() != types.TUINTPTR {
+		base.Fatalf("unexpected type %v for %v", n.Type(), n)
+	}
+	if k.addrtaken {
+		base.Fatalf("unexpected addrtaken")
+	}
+
+	e.stmts(n.Init())
+
+	switch n.Op() {
+	case ir.OCONV, ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		if n.X.Type().IsUnsafePtr() {
+			e.expr(k, n.X)
+		} else {
+			e.discard(n.X)
+		}
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		if ir.IsReflectHeaderDataField(n) {
+			e.expr(k.deref(n, "reflect.Header.Data"), n.X)
+		} else {
+			e.discard(n.X)
+		}
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+		n := n.(*ir.UnaryExpr)
+		e.unsafeValue(k, n.X)
+	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+		n := n.(*ir.BinaryExpr)
+		e.unsafeValue(k, n.X)
+		e.unsafeValue(k, n.Y)
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		e.unsafeValue(k, n.X)
+		// RHS need not be uintptr-typed (#32959) and can't meaningfully
+		// flow pointers anyway.
+		e.discard(n.Y)
+	default:
+		e.exprSkipInit(e.discardHole(), n)
+	}
+}
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *escape) discard(n ir.Node) {
+	e.expr(e.discardHole(), n)
+}
+
+func (e *escape) discards(l ir.Nodes) {
+	for _, n := range l {
+		e.discard(n)
+	}
+}
+
+// addr evaluates an addressable expression n and returns a hole
+// that represents storing into the represented location.
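+//
+// For example (sketch): for the assignment x.f = y, addr(x.f) returns
+// x's own hole, since escape analysis does not distinguish fields
+// within a variable.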
+func (e *escape) addr(n ir.Node) hole {
+	if n == nil || ir.IsBlank(n) {
+		// Can happen in select case, range, maybe others.
+		return e.discardHole()
+	}
+
+	k := e.heapHole()
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected addr: %v", n)
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PEXTERN {
+			break
+		}
+		k = e.oldLoc(n).asHole()
+	case ir.OLINKSYMOFFSET:
+		break
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		k = e.addr(n.X)
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		e.discard(n.Index)
+		if n.X.Type().IsArray() {
+			k = e.addr(n.X)
+		} else {
+			e.discard(n.X)
+		}
+	case ir.ODEREF, ir.ODOTPTR:
+		e.discard(n)
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		e.discard(n.X)
+		e.assignHeap(n.Index, "key of map put", n)
+	}
+
+	return k
+}
+
+func (e *escape) addrs(l ir.Nodes) []hole {
+	var ks []hole
+	for _, n := range l {
+		ks = append(ks, e.addr(n))
+	}
+	return ks
+}
+
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
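+//
+// For example (sketch): in "x := f(); x = g()", the second statement
+// marks x's location as reassigned, but the first (x's defining
+// assignment) does not.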
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+	if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+		if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+			// Zero-value assignment for variable declared without an
+			// explicit initial value. Assume this is its initialization
+			// statement.
+			return
+		}
+	}
+
+	for _, k := range ks {
+		loc := k.dst
+		// Variables declared by range statements are assigned on every iteration.
+		if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+			continue
+		}
+		loc.reassigned = true
+	}
+}
+
+// assignList evaluates the assignment dsts... = srcs....
+func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
+	ks := e.addrs(dsts)
+	for i, k := range ks {
+		var src ir.Node
+		if i < len(srcs) {
+			src = srcs[i]
+		}
+
+		if dst := dsts[i]; dst != nil {
+			// Detect implicit conversion of uintptr to unsafe.Pointer when
+			// storing into reflect.{Slice,String}Header.
+			if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+				e.unsafeValue(e.heapHole().note(where, why), src)
+				continue
+			}
+
+			// Filter out some no-op assignments for escape analysis.
+			if src != nil && isSelfAssign(dst, src) {
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+				}
+				k = e.discardHole()
+			}
+		}
+
+		e.expr(k.note(where, why), src)
+	}
+
+	e.reassigned(ks, where)
+}
+
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
+	e.expr(e.heapHole().note(where, why), src)
+}
+
+// call evaluates a call expression, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flow; where is the OGO/ODEFER context of the call, if any.
+func (e *escape) call(ks []hole, call, where ir.Node) {
+	topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
+	if topLevelDefer {
+		// force stack allocation of defer record, unless
+		// open-coded defers are used (see ssa.go)
+		where.SetEsc(ir.EscNever)
+	}
+
+	argument := func(k hole, arg ir.Node) {
+		if topLevelDefer {
+			// Top-level defer arguments don't escape to
+			// the heap, but they do need to last until
+			// the end of the function.
+			k = e.later(k)
+		} else if where != nil {
+			k = e.heapHole()
+		}
+
+		e.expr(k.note(call, "call parameter"), arg)
+	}
+
+	switch call.Op() {
+	default:
+		ir.Dump("esc", call)
+		base.Fatalf("unexpected call op: %v", call.Op())
+
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+		call := call.(*ir.CallExpr)
+		typecheck.FixVariadicCall(call)
+
+		// Pick out the function callee, if statically known.
+		var fn *ir.Name
+		switch call.Op() {
+		case ir.OCALLFUNC:
+			switch v := ir.StaticValue(call.X); {
+			case v.Op() == ir.ONAME && v.(*ir.Name).Class == ir.PFUNC:
+				fn = v.(*ir.Name)
+			case v.Op() == ir.OCLOSURE:
+				fn = v.(*ir.ClosureExpr).Func.Nname
+			}
+		case ir.OCALLMETH:
+			fn = ir.MethodExprName(call.X)
+		}
+
+		fntype := call.X.Type()
+		if fn != nil {
+			fntype = fn.Type()
+		}
+
+		if ks != nil && fn != nil && e.inMutualBatch(fn) {
+			for i, result := range fn.Type().Results().FieldSlice() {
+				e.expr(ks[i], ir.AsNode(result.Nname))
+			}
+		}
+
+		if r := fntype.Recv(); r != nil {
+			argument(e.tagHole(ks, fn, r), call.X.(*ir.SelectorExpr).X)
+		} else {
+			// Evaluate callee function expression.
+			argument(e.discardHole(), call.X)
+		}
+
+		args := call.Args
+		for i, param := range fntype.Params().FieldSlice() {
+			argument(e.tagHole(ks, fn, param), args[i])
+		}
+
+	case ir.OAPPEND:
+		call := call.(*ir.CallExpr)
+		args := call.Args
+
+		// Appendee slice may flow directly to the result, if
+		// it has enough capacity. Alternatively, a new heap
+		// slice might be allocated, and all slice elements
+		// might flow to heap.
+		appendeeK := ks[0]
+		if args[0].Type().Elem().HasPointers() {
+			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+		}
+		argument(appendeeK, args[0])
+
+		if call.IsDDD {
+			appendedK := e.discardHole()
+			if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
+				appendedK = e.heapHole().deref(call, "appended slice...")
+			}
+			argument(appendedK, args[1])
+		} else {
+			for _, arg := range args[1:] {
+				argument(e.heapHole(), arg)
+			}
+		}
+
+	case ir.OCOPY:
+		call := call.(*ir.BinaryExpr)
+		argument(e.discardHole(), call.X)
+
+		copiedK := e.discardHole()
+		if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
+			copiedK = e.heapHole().deref(call, "copied slice")
+		}
+		argument(copiedK, call.Y)
+
+	case ir.OPANIC:
+		call := call.(*ir.UnaryExpr)
+		argument(e.heapHole(), call.X)
+
+	case ir.OCOMPLEX:
+		call := call.(*ir.BinaryExpr)
+		argument(e.discardHole(), call.X)
+		argument(e.discardHole(), call.Y)
+	case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+		call := call.(*ir.CallExpr)
+		for _, arg := range call.Args {
+			argument(e.discardHole(), arg)
+		}
+	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+		call := call.(*ir.UnaryExpr)
+		argument(e.discardHole(), call.X)
+	}
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flow. fn is the statically-known callee function,
+// if any.
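+//
+// As a rough illustration (not actual caller code): a parameter whose
+// tag says its pointee leaks to the heap yields e.heapHole().shift(1),
+// i.e. a hole for "heap = *_".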
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
+	// If this is a dynamic call, we can't rely on param.Note.
+	if fn == nil {
+		return e.heapHole()
+	}
+
+	if e.inMutualBatch(fn) {
+		return e.addr(ir.AsNode(param.Nname))
+	}
+
+	// Call to previously tagged function.
+
+	if param.Note == UintptrEscapesNote {
+		k := e.heapHole()
+		k.uintptrEscapesHack = true
+		return k
+	}
+
+	var tagKs []hole
+
+	esc := parseLeaks(param.Note)
+	if x := esc.Heap(); x >= 0 {
+		tagKs = append(tagKs, e.heapHole().shift(x))
+	}
+
+	if ks != nil {
+		for i := 0; i < numEscResults; i++ {
+			if x := esc.Result(i); x >= 0 {
+				tagKs = append(tagKs, ks[i].shift(x))
+			}
+		}
+	}
+
+	return e.teeHole(tagKs...)
+}
+
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (e *escape) inMutualBatch(fn *ir.Name) bool {
+	if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+		if fn.Defn.Esc() == escFuncUnknown {
+			base.Fatalf("graph inconsistency: %v", fn)
+		}
+		return true
+	}
+	return false
+}
+
+// A hole represents a context for evaluating a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
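+//
+// A sketch of how such a hole might be built, using the helpers
+// defined below (the exact call sites vary):
+//
+//	k := x.asHole()        // "x = _"   (derefs == 0)
+//	k = k.deref(n, "star") // "x = *_"  (derefs == 1)
+//	k = k.deref(n, "star") // "x = **_" (derefs == 2)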
+type hole struct {
+	dst    *location
+	derefs int // >= -1
+	notes  *note
+
+	// addrtaken indicates whether this context is taking the address of
+	// the expression, independent of whether the address will actually
+	// be stored into a variable.
+	addrtaken bool
+
+	// uintptrEscapesHack indicates this context is evaluating an
+	// argument for a //go:uintptrescapes function.
+	uintptrEscapesHack bool
+}
+
+type note struct {
+	next  *note
+	where ir.Node
+	why   string
+}
+
+func (k hole) note(where ir.Node, why string) hole {
+	if where == nil || why == "" {
+		base.Fatalf("note: missing where/why")
+	}
+	if base.Flag.LowerM >= 2 || logopt.Enabled() {
+		k.notes = &note{
+			next:  k.notes,
+			where: where,
+			why:   why,
+		}
+	}
+	return k
+}
+
+func (k hole) shift(delta int) hole {
+	k.derefs += delta
+	if k.derefs < -1 {
+		base.Fatalf("derefs underflow: %v", k.derefs)
+	}
+	k.addrtaken = delta < 0
+	return k
+}
+
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole  { return k.shift(-1).note(where, why) }
+
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+	if !t.IsInterface() && !types.IsDirectIface(t) {
+		k = k.shift(1)
+	}
+	return k.note(where, why)
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
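+//
+// For example (sketch), tagHole above uses it to route one argument
+// both to the heap and to a result:
+//
+//	e.teeHole(e.heapHole().shift(x), ks[i].shift(y))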
+func (e *escape) teeHole(ks ...hole) hole {
+	if len(ks) == 0 {
+		return e.discardHole()
+	}
+	if len(ks) == 1 {
+		return ks[0]
+	}
+	// TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+	// new temporary location ltmp, wire it into place, and return
+	// a hole for "ltmp = _".
+	loc := e.newLoc(nil, true)
+	for _, k := range ks {
+		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
+		// semantically equivalent. To combine holes like "l1
+		// = _" and "l2 = &_", we'd need to wire them as "l1 =
+		// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+		// instead.
+		if k.derefs < 0 {
+			base.Fatalf("teeHole: negative derefs")
+		}
+
+		e.flow(k, loc)
+	}
+	return loc.asHole()
+}
+
+func (e *escape) dcl(n *ir.Name) hole {
+	if n.Curfn != e.curfn || n.IsClosureVar() {
+		base.Fatalf("bad declaration of %v", n)
+	}
+	loc := e.oldLoc(n)
+	loc.loopDepth = e.loopDepth
+	return loc.asHole()
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *escape) spill(k hole, n ir.Node) hole {
+	loc := e.newLoc(n, true)
+	e.flow(k.addr(n, "spill"), loc)
+	return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *escape) later(k hole) hole {
+	loc := e.newLoc(nil, false)
+	e.flow(k, loc)
+	return loc.asHole()
+}
+
+func (e *escape) newLoc(n ir.Node, transient bool) *location {
+	if e.curfn == nil {
+		base.Fatalf("e.curfn isn't set")
+	}
+	if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+		base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
+	}
+
+	if n != nil && n.Op() == ir.ONAME {
+		n = n.(*ir.Name).Canonical()
+	}
+	loc := &location{
+		n:         n,
+		curfn:     e.curfn,
+		loopDepth: e.loopDepth,
+		transient: transient,
+	}
+	e.allLocs = append(e.allLocs, loc)
+	if n != nil {
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			if n.Curfn != e.curfn {
+				base.Fatalf("curfn mismatch: %v != %v", n.Curfn, e.curfn)
+			}
+
+			if n.Opt != nil {
+				base.Fatalf("%v already has a location", n)
+			}
+			n.Opt = loc
+		}
+	}
+	return loc
+}
+
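+// oldLoc returns the location previously allocated for n by newLoc.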
+func (b *batch) oldLoc(n *ir.Name) *location {
+	return n.Canonical().Opt.(*location)
+}
+
+func (l *location) asHole() hole {
+	return hole{dst: l}
+}
+
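+// flow records that the value of src flows to the hole k: it either
+// marks src as escaping (when k takes src's address and k's
+// destination already escapes) or records a graph edge from k.dst
+// back to src.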
+func (b *batch) flow(k hole, src *location) {
+	if k.addrtaken {
+		src.addrtaken = true
+	}
+
+	dst := k.dst
+	if dst == &b.blankLoc {
+		return
+	}
+	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+		return
+	}
+	if dst.escapes && k.derefs < 0 { // dst = &src
+		if base.Flag.LowerM >= 2 || logopt.Enabled() {
+			pos := base.FmtPos(src.n.Pos())
+			if base.Flag.LowerM >= 2 {
+				fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+			}
+			explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+			if logopt.Enabled() {
+				var e_curfn *ir.Func // TODO(mdempsky): Fix.
+				logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+			}
+
+		}
+		src.escapes = true
+		return
+	}
+
+	// TODO(mdempsky): Deduplicate edges?
+	dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
+func (b *batch) heapHole() hole    { return b.heapLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (b *batch) walkAll() {
+	// We use a work queue to keep track of locations that we need
+	// to visit, and repeatedly walk until we reach a fixed point.
+	//
+	// We walk once from each location (including the heap), and
+	// then re-enqueue each location on its transition from
+	// transient->!transient and !escapes->escapes, which can each
+	// happen at most once. So we take Θ(len(b.allLocs)) walks.
+
+	// LIFO queue; it has enough room for b.allLocs and b.heapLoc.
+	todo := make([]*location, 0, len(b.allLocs)+1)
+	enqueue := func(loc *location) {
+		if !loc.queued {
+			todo = append(todo, loc)
+			loc.queued = true
+		}
+	}
+
+	for _, loc := range b.allLocs {
+		enqueue(loc)
+	}
+	enqueue(&b.heapLoc)
+
+	var walkgen uint32
+	for len(todo) > 0 {
+		root := todo[len(todo)-1]
+		todo = todo[:len(todo)-1]
+		root.queued = false
+
+		walkgen++
+		b.walkOne(root, walkgen, enqueue)
+	}
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
+	// The data flow graph has negative edges (from addressing
+	// operations), so we use the Bellman-Ford algorithm. However,
+	// we don't have to worry about infinite negative cycles since
+	// we bound intermediate dereference counts to 0.
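+	//
+	// For example, an assignment "p = &x" contributes an edge with
+	// derefs == -1, while "p = *q" contributes derefs == +1; the
+	// walk sums these counts along each path and clamps
+	// intermediate values at 0.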
+
+	root.walkgen = walkgen
+	root.derefs = 0
+	root.dst = nil
+
+	todo := []*location{root} // LIFO queue
+	for len(todo) > 0 {
+		l := todo[len(todo)-1]
+		todo = todo[:len(todo)-1]
+
+		derefs := l.derefs
+
+		// If l.derefs < 0, then l's address flows to root.
+		addressOf := derefs < 0
+		if addressOf {
+			// For a flow path like "root = &l; l = x",
+			// l's address flows to root, but x's does
+			// not. We recognize this by lower bounding
+			// derefs at 0.
+			derefs = 0
+
+			// If l's address flows to a non-transient
+			// location, then l can't be transiently
+			// allocated.
+			if !root.transient && l.transient {
+				l.transient = false
+				enqueue(l)
+			}
+		}
+
+		if b.outlives(root, l) {
+			// l's value flows to root. If l is a function
+			// parameter and root is the heap or a
+			// corresponding result parameter, then record
+			// that value flow for tagging the function
+			// later.
+			if l.isName(ir.PPARAM) {
+				if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+					if base.Flag.LowerM >= 2 {
+						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+					}
+					explanation := b.explainPath(root, l)
+					if logopt.Enabled() {
+						var e_curfn *ir.Func // TODO(mdempsky): Fix.
+						logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+					}
+				}
+				l.leakTo(root, derefs)
+			}
+
+			// If l's address flows somewhere that
+			// outlives it, then l needs to be heap
+			// allocated.
+			if addressOf && !l.escapes {
+				if logopt.Enabled() || base.Flag.LowerM >= 2 {
+					if base.Flag.LowerM >= 2 {
+						fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+					}
+					explanation := b.explainPath(root, l)
+					if logopt.Enabled() {
+						var e_curfn *ir.Func // TODO(mdempsky): Fix.
+						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+					}
+				}
+				l.escapes = true
+				enqueue(l)
+				continue
+			}
+		}
+
+		for i, edge := range l.edges {
+			if edge.src.escapes {
+				continue
+			}
+			d := derefs + edge.derefs
+			if edge.src.walkgen != walkgen || edge.src.derefs > d {
+				edge.src.walkgen = walkgen
+				edge.src.derefs = d
+				edge.src.dst = l
+				edge.src.dstEdgeIdx = i
+				todo = append(todo, edge.src)
+			}
+		}
+	}
+}
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
+	visited := make(map[*location]bool)
+	pos := base.FmtPos(src.n.Pos())
+	var explanation []*logopt.LoggedOpt
+	for {
+		// Prevent infinite loop.
+		if visited[src] {
+			if base.Flag.LowerM >= 2 {
+				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+			}
+			break
+		}
+		visited[src] = true
+		dst := src.dst
+		edge := &dst.edges[src.dstEdgeIdx]
+		if edge.src != src {
+			base.Fatalf("path inconsistency: %v != %v", edge.src, src)
+		}
+
+		explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+		if dst == root {
+			break
+		}
+		src = dst
+	}
+
+	return explanation
+}
+
+func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+	ops := "&"
+	if derefs >= 0 {
+		ops = strings.Repeat("*", derefs)
+	}
+	print := base.Flag.LowerM >= 2
+
+	flow := fmt.Sprintf("   flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
+	if print {
+		fmt.Printf("%s:%s\n", pos, flow)
+	}
+	if logopt.Enabled() {
+		var epos src.XPos
+		if notes != nil {
+			epos = notes.where.Pos()
+		} else if srcloc != nil && srcloc.n != nil {
+			epos = srcloc.n.Pos()
+		}
+		var e_curfn *ir.Func // TODO(mdempsky): Fix.
+		explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
+	}
+
+	for note := notes; note != nil; note = note.next {
+		if print {
+			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
+		}
+		if logopt.Enabled() {
+			var e_curfn *ir.Func // TODO(mdempsky): Fix.
+			explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
+				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
+		}
+	}
+	return explanation
+}
+
+func (b *batch) explainLoc(l *location) string {
+	if l == &b.heapLoc {
+		return "{heap}"
+	}
+	if l.n == nil {
+		// TODO(mdempsky): Omit entirely.
+		return "{temp}"
+	}
+	if l.n.Op() == ir.ONAME {
+		return fmt.Sprintf("%v", l.n)
+	}
+	return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (b *batch) outlives(l, other *location) bool {
+	// The heap outlives everything.
+	if l.escapes {
+		return true
+	}
+
+	// We don't know what callers do with returned values, so
+	// pessimistically we need to assume they flow to the heap and
+	// outlive everything too.
+	if l.isName(ir.PPARAMOUT) {
+		// Exception: Directly called closures can return
+		// locations allocated outside of them without forcing
+		// them to the heap. For example:
+		//
+		//    var u int  // okay to stack allocate
+		//    *(func() *int { return &u }()) = 42
+		if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+			return false
+		}
+
+		return true
+	}
+
+	// If l and other are within the same function, then l
+	// outlives other if it was declared outside other's loop
+	// scope. For example:
+	//
+	//    var l *int
+	//    for {
+	//        l = new(int)
+	//    }
+	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+		return true
+	}
+
+	// If other is declared within a child closure of where l is
+	// declared, then l outlives it. For example:
+	//
+	//    var l *int
+	//    func() {
+	//        l = new(int)
+	//    }
+	if containsClosure(l.curfn, other.curfn) {
+		return true
+	}
+
+	return false
+}
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *ir.Func) bool {
+	// Common case.
+	if f == c {
+		return false
+	}
+
+	// Closures within function Foo are named like "Foo.funcN..."
+	// TODO(mdempsky): Better way to recognize this.
+	fn := f.Sym().Name
+	cn := c.Sym().Name
+	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
+
+// leakTo records that parameter l leaks to sink.
+func (l *location) leakTo(sink *location, derefs int) {
+	// If sink is a result parameter and we can fit return bits
+	// into the escape analysis tag, then record a return leak.
+	if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+		ri := sink.resultIndex - 1
+		if ri < numEscResults {
+			// Leak to result parameter.
+			l.paramEsc.AddResult(ri, derefs)
+			return
+		}
+	}
+
+	// Otherwise, record as heap leak.
+	l.paramEsc.AddHeap(derefs)
+}
+
+func (b *batch) finish(fns []*ir.Func) {
+	// Record parameter tags for package export data.
+	for _, fn := range fns {
+		fn.SetEsc(escFuncTagged)
+
+		narg := 0
+		for _, fs := range &types.RecvsParams {
+			for _, f := range fs(fn.Type()).Fields().Slice() {
+				narg++
+				f.Note = b.paramTag(fn, narg, f)
+			}
+		}
+	}
+
+	for _, loc := range b.allLocs {
+		n := loc.n
+		if n == nil {
+			continue
+		}
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			n.Opt = nil
+		}
+
+		// Update n.Esc based on escape analysis results.
+
+		if loc.escapes {
+			if n.Op() == ir.ONAME {
+				if base.Flag.CompilingRuntime {
+					base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
+				}
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+				}
+			} else {
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(n.Pos(), "%v escapes to heap", n)
+				}
+				if logopt.Enabled() {
+					var e_curfn *ir.Func // TODO(mdempsky): Fix.
+					logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
+				}
+			}
+			n.SetEsc(ir.EscHeap)
+		} else {
+			if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+				base.WarnfAt(n.Pos(), "%v does not escape", n)
+			}
+			n.SetEsc(ir.EscNone)
+			if loc.transient {
+				switch n.Op() {
+				case ir.OCLOSURE:
+					n := n.(*ir.ClosureExpr)
+					n.SetTransient(true)
+				case ir.OCALLPART:
+					n := n.(*ir.SelectorExpr)
+					n.SetTransient(true)
+				case ir.OSLICELIT:
+					n := n.(*ir.CompLitExpr)
+					n.SetTransient(true)
+				}
+			}
+		}
+	}
+}
+
+func (l *location) isName(c ir.Class) bool {
+	return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
+}
+
+const numEscResults = 7
+
+// A leaks value represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
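+//
+// Each element stores a derefs count d as d+1 so that the zero byte
+// means "no flow". For example, AddHeap(0) sets l[0] = 1, and
+// AddResult(0, 1) sets l[1] = 2.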
+type leaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l leaks) Empty() bool { return l == leaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l leaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l leaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *leaks) add(i, derefs int) {
+	if old := l.get(i); old < 0 || derefs < old {
+		l.set(i, derefs)
+	}
+}
+
+func (l *leaks) set(i, derefs int) {
+	v := derefs + 1
+	if v < 0 {
+		base.Fatalf("invalid derefs count: %v", derefs)
+	}
+	if v > math.MaxUint8 {
+		v = math.MaxUint8
+	}
+
+	l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *leaks) Optimize() {
+	// If we have a path to the heap, then there's no use in
+	// keeping equal or longer paths elsewhere.
+	if x := l.Heap(); x >= 0 {
+		for i := 0; i < numEscResults; i++ {
+			if l.Result(i) >= x {
+				l.setResult(i, -1)
+			}
+		}
+	}
+}
+
+var leakTagCache = map[leaks]string{}
+
+// Encode converts l into a binary string for export data.
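+//
+// For example (sketch): a leaks value with only AddResult(0, 0) set
+// encodes as "esc:\x00\x01", since trailing zero bytes are trimmed.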
+func (l leaks) Encode() string {
+	if l.Heap() == 0 {
+		// Space optimization: empty string encodes more
+		// efficiently in export data.
+		return ""
+	}
+	if s, ok := leakTagCache[l]; ok {
+		return s
+	}
+
+	n := len(l)
+	for n > 0 && l[n-1] == 0 {
+		n--
+	}
+	s := "esc:" + string(l[:n])
+	leakTagCache[l] = s
+	return s
+}
+
+// parseLeaks parses a binary string representing a leaks value.
+// Strings without the "esc:" prefix are conservatively treated as
+// leaking to the heap.
+func parseLeaks(s string) leaks {
+	var l leaks
+	if !strings.HasPrefix(s, "esc:") {
+		l.AddHeap(0)
+		return l
+	}
+	copy(l[:], s[4:])
+	return l
+}
+
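+// Funcs performs escape analysis on the given functions, visiting
+// groups of mutually recursive functions in bottom-up order.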
+func Funcs(all []ir.Node) {
+	ir.VisitFuncsBottomUp(all, Batch)
+}
+
+const (
+	escFuncUnknown = 0 + iota
+	escFuncPlanned
+	escFuncStarted
+	escFuncTagged
+)
+
+// A labelState records whether a label has backjumps to it; labels
+// without backjumps do not increase e.loopDepth.
+type labelState int
+
+const (
+	looping labelState = 1 + iota
+	nonlooping
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+	// Detect the following special case.
+	//
+	//	func (b *Buffer) Foo() {
+	//		n, m := ...
+	//		b.buf = b.buf[n:m]
+	//	}
+	//
+	// This assignment is a no-op for escape analysis:
+	// it does not store any new pointers into b that were not already there.
+	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+	// Here we assume that the statement will not contain calls,
+	// that is, that order will move any calls to init.
+	// Otherwise the base ONAME value could change between the moments
+	// when we evaluate it for dst and for src.
+
+	// dst is ONAME dereference.
+	var dstX ir.Node
+	switch dst.Op() {
+	default:
+		return false
+	case ir.ODEREF:
+		dst := dst.(*ir.StarExpr)
+		dstX = dst.X
+	case ir.ODOTPTR:
+		dst := dst.(*ir.SelectorExpr)
+		dstX = dst.X
+	}
+	if dstX.Op() != ir.ONAME {
+		return false
+	}
+	// src is a slice operation.
+	switch src.Op() {
+	case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+		// OK.
+	case ir.OSLICEARR, ir.OSLICE3ARR:
+		// Since arrays are embedded into their containing object,
+		// slicing a non-pointer array will introduce a new pointer into b that was not already there
+		// (a pointer to b itself). After such an assignment, if b's contents escape,
+		// b escapes as well. If we ignored such OSLICEARR, we would conclude
+		// that b does not escape when its contents do.
+		//
+		// Pointer to an array is OK since it's not stored inside b directly.
+		// For slicing an array (not pointer to array), there is an implicit OADDR.
+		// We check that to determine non-pointer array slicing.
+		src := src.(*ir.SliceExpr)
+		if src.X.Op() == ir.OADDR {
+			return false
+		}
+	default:
+		return false
+	}
+	// slice is applied to ONAME dereference.
+	var baseX ir.Node
+	switch base := src.(*ir.SliceExpr).X; base.Op() {
+	default:
+		return false
+	case ir.ODEREF:
+		base := base.(*ir.StarExpr)
+		baseX = base.X
+	case ir.ODOTPTR:
+		base := base.(*ir.SelectorExpr)
+		baseX = base.X
+	}
+	if baseX.Op() != ir.ONAME {
+		return false
+	}
+	// dst and src reference the same base ONAME.
+	return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+	if isSliceSelfAssign(dst, src) {
+		return true
+	}
+
+	// Detect trivial assignments that assign back to the same object.
+	//
+	// It covers these cases:
+	//	val.x = val.y
+	//	val.x[i] = val.y[j]
+	//	val.x1.x2 = val.x1.y2
+	//	... etc
+	//
+	// These assignments do not change assigned object lifetime.
+
+	if dst == nil || src == nil || dst.Op() != src.Op() {
+		return false
+	}
+
+	// The expression prefix must be both "safe" and identical.
+	switch dst.Op() {
+	case ir.ODOT, ir.ODOTPTR:
+		// Safe trailing accessors that are permitted to differ.
+		dst := dst.(*ir.SelectorExpr)
+		src := src.(*ir.SelectorExpr)
+		return ir.SameSafeExpr(dst.X, src.X)
+	case ir.OINDEX:
+		dst := dst.(*ir.IndexExpr)
+		src := src.(*ir.IndexExpr)
+		if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
+			return false
+		}
+		return ir.SameSafeExpr(dst.X, src.X)
+	default:
+		return false
+	}
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+	// We may want to use a list of "memory safe" ops instead of generally
+	// "side-effect free", which would include all calls and other ops that can
+	// allocate or change global state. For now, it's safer to start with the latter.
+	//
+	// We're ignoring things like division by zero, index out of range,
+	// and nil pointer dereference here.
+
+	// TODO(rsc): It seems like it should be possible to replace this with
+	// an ir.Any looking for any op that's not the ones in the case statement.
+	// But that produces changes in the compiled output detected by buildall.
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return false
+
+	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
+
+	case ir.OCONVNOP, ir.OCONV:
+		n := n.(*ir.ConvExpr)
+		return mayAffectMemory(n.X)
+
+	case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+		n := n.(*ir.UnaryExpr)
+		return mayAffectMemory(n.X)
+
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		return mayAffectMemory(n.X)
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		return mayAffectMemory(n.X)
+
+	default:
+		return true
+	}
+}
+
+// HeapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
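+//
+// For example, make([]int, n) with a non-constant n yields
+// "non-constant size", and new([1 << 20]byte) yields
+// "too large for stack".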
+func HeapAllocReason(n ir.Node) string {
+	if n == nil || n.Type() == nil {
+		return ""
+	}
+
+	// Parameters are always passed via the stack.
+	if n.Op() == ir.ONAME {
+		n := n.(*ir.Name)
+		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+			return ""
+		}
+	}
+
+	if n.Type().Width > ir.MaxStackVarSize {
+		return "too large for stack"
+	}
+
+	if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
+		return "too large for stack"
+	}
+
+	if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize {
+		return "too large for stack"
+	}
+	if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.SelectorExpr)).Size() >= ir.MaxImplicitStackVarSize {
+		return "too large for stack"
+	}
+
+	if n.Op() == ir.OMAKESLICE {
+		n := n.(*ir.MakeExpr)
+		r := n.Cap
+		if r == nil {
+			r = n.Len
+		}
+		if !ir.IsSmallIntConst(r) {
+			return "non-constant size"
+		}
+		if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= ir.MaxImplicitStackVarSize/t.Elem().Width {
+			return "too large for stack"
+		}
+	}
+
+	return ""
+}
+
+// UnsafeUintptrNote is a special tag applied to uintptr variables
+// that we believe may hold unsafe.Pointers for calls into assembly
+// functions.
+const UnsafeUintptrNote = "unsafe-uintptr"
+
+// UintptrEscapesNote is a special tag applied to uintptr parameters
+// of functions marked go:uintptrescapes.
+const UintptrEscapesNote = "uintptr-escapes"
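+
+// For example (sketch), given a declaration like
+//
+//	//go:uintptrescapes
+//	func f(p uintptr)
+//
+// paramTag returns UintptrEscapesNote for p, and tagHole then treats
+// arguments passed to p as escaping to the heap.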
+
+func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+	name := func() string {
+		if f.Sym != nil {
+			return f.Sym.Name
+		}
+		return fmt.Sprintf("arg#%d", narg)
+	}
+
+	if len(fn.Body) == 0 {
+		// Assume that uintptr arguments must be held live across the call.
+		// This is most important for syscall.Syscall.
+		// See golang.org/issue/13372.
+		// This really doesn't have much to do with escape analysis per se,
+		// but we are reusing the ability to annotate an individual function
+		// argument and pass those annotations along to importing code.
+		if f.Type.IsUintptr() {
+			if base.Flag.LowerM != 0 {
+				base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+			}
+			return UnsafeUintptrNote
+		}
+
+		if !f.Type.HasPointers() { // don't bother tagging for scalars
+			return ""
+		}
+
+		var esc leaks
+
+		// External functions are assumed unsafe, unless
+		// //go:noescape is given before the declaration.
+		if fn.Pragma&ir.Noescape != 0 {
+			if base.Flag.LowerM != 0 && f.Sym != nil {
+				base.WarnfAt(f.Pos, "%v does not escape", name())
+			}
+		} else {
+			if base.Flag.LowerM != 0 && f.Sym != nil {
+				base.WarnfAt(f.Pos, "leaking param: %v", name())
+			}
+			esc.AddHeap(0)
+		}
+
+		return esc.Encode()
+	}
+
+	if fn.Pragma&ir.UintptrEscapes != 0 {
+		if f.Type.IsUintptr() {
+			if base.Flag.LowerM != 0 {
+				base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
+			}
+			return UintptrEscapesNote
+		}
+		if f.IsDDD() && f.Type.Elem().IsUintptr() {
+			// final argument is ...uintptr.
+			if base.Flag.LowerM != 0 {
+				base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
+			}
+			return UintptrEscapesNote
+		}
+	}
+
+	if !f.Type.HasPointers() { // don't bother tagging for scalars
+		return ""
+	}
+
+	// Unnamed parameters are unused and therefore do not escape.
+	if f.Sym == nil || f.Sym.IsBlank() {
+		var esc leaks
+		return esc.Encode()
+	}
+
+	n := f.Nname.(*ir.Name)
+	loc := b.oldLoc(n)
+	esc := loc.paramEsc
+	esc.Optimize()
+
+	if base.Flag.LowerM != 0 && !loc.escapes {
+		if esc.Empty() {
+			base.WarnfAt(f.Pos, "%v does not escape", name())
+		}
+		if x := esc.Heap(); x >= 0 {
+			if x == 0 {
+				base.WarnfAt(f.Pos, "leaking param: %v", name())
+			} else {
+				// TODO(mdempsky): Mention level=x like below?
+				base.WarnfAt(f.Pos, "leaking param content: %v", name())
+			}
+		}
+		for i := 0; i < numEscResults; i++ {
+			if x := esc.Result(i); x >= 0 {
+				res := fn.Type().Results().Field(i).Sym
+				base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
+			}
+		}
+	}
+
+	return esc.Encode()
+}
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
deleted file mode 100644
index 2f7fa27..0000000
--- a/src/cmd/compile/internal/gc/alg.go
+++ /dev/null
@@ -1,959 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"fmt"
-	"sort"
-)
-
-// AlgKind describes the kind of algorithms used for comparing and
-// hashing a Type.
-type AlgKind int
-
-//go:generate stringer -type AlgKind -trimprefix A
-
-const (
-	// These values are known by runtime.
-	ANOEQ AlgKind = iota
-	AMEM0
-	AMEM8
-	AMEM16
-	AMEM32
-	AMEM64
-	AMEM128
-	ASTRING
-	AINTER
-	ANILINTER
-	AFLOAT32
-	AFLOAT64
-	ACPLX64
-	ACPLX128
-
-	// Type can be compared/hashed as regular memory.
-	AMEM AlgKind = 100
-
-	// Type needs special comparison/hashing functions.
-	ASPECIAL AlgKind = -1
-)
-
-// IsComparable reports whether t is a comparable type.
-func IsComparable(t *types.Type) bool {
-	a, _ := algtype1(t)
-	return a != ANOEQ
-}
-
-// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func IsRegularMemory(t *types.Type) bool {
-	a, _ := algtype1(t)
-	return a == AMEM
-}
-
-// IncomparableField returns an incomparable Field of struct Type t, if any.
-func IncomparableField(t *types.Type) *types.Field {
-	for _, f := range t.FieldSlice() {
-		if !IsComparable(f.Type) {
-			return f
-		}
-	}
-	return nil
-}
-
-// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
-// t must be comparable.
-func EqCanPanic(t *types.Type) bool {
-	switch t.Etype {
-	default:
-		return false
-	case TINTER:
-		return true
-	case TARRAY:
-		return EqCanPanic(t.Elem())
-	case TSTRUCT:
-		for _, f := range t.FieldSlice() {
-			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
-				return true
-			}
-		}
-		return false
-	}
-}
-
-// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
-// instead of the general AMEM kind when possible.
-func algtype(t *types.Type) AlgKind {
-	a, _ := algtype1(t)
-	if a == AMEM {
-		switch t.Width {
-		case 0:
-			return AMEM0
-		case 1:
-			return AMEM8
-		case 2:
-			return AMEM16
-		case 4:
-			return AMEM32
-		case 8:
-			return AMEM64
-		case 16:
-			return AMEM128
-		}
-	}
-
-	return a
-}
-
-// algtype1 returns the AlgKind used for comparing and hashing Type t.
-// If it returns ANOEQ, it also returns the component type of t that
-// makes it incomparable.
-func algtype1(t *types.Type) (AlgKind, *types.Type) {
-	if t.Broke() {
-		return AMEM, nil
-	}
-	if t.Noalg() {
-		return ANOEQ, t
-	}
-
-	switch t.Etype {
-	case TANY, TFORW:
-		// will be defined later.
-		return ANOEQ, t
-
-	case TINT8, TUINT8, TINT16, TUINT16,
-		TINT32, TUINT32, TINT64, TUINT64,
-		TINT, TUINT, TUINTPTR,
-		TBOOL, TPTR,
-		TCHAN, TUNSAFEPTR:
-		return AMEM, nil
-
-	case TFUNC, TMAP:
-		return ANOEQ, t
-
-	case TFLOAT32:
-		return AFLOAT32, nil
-
-	case TFLOAT64:
-		return AFLOAT64, nil
-
-	case TCOMPLEX64:
-		return ACPLX64, nil
-
-	case TCOMPLEX128:
-		return ACPLX128, nil
-
-	case TSTRING:
-		return ASTRING, nil
-
-	case TINTER:
-		if t.IsEmptyInterface() {
-			return ANILINTER, nil
-		}
-		return AINTER, nil
-
-	case TSLICE:
-		return ANOEQ, t
-
-	case TARRAY:
-		a, bad := algtype1(t.Elem())
-		switch a {
-		case AMEM:
-			return AMEM, nil
-		case ANOEQ:
-			return ANOEQ, bad
-		}
-
-		switch t.NumElem() {
-		case 0:
-			// We checked above that the element type is comparable.
-			return AMEM, nil
-		case 1:
-			// Single-element array is same as its lone element.
-			return a, nil
-		}
-
-		return ASPECIAL, nil
-
-	case TSTRUCT:
-		fields := t.FieldSlice()
-
-		// One-field struct is same as that one field alone.
-		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
-			return algtype1(fields[0].Type)
-		}
-
-		ret := AMEM
-		for i, f := range fields {
-			// All fields must be comparable.
-			a, bad := algtype1(f.Type)
-			if a == ANOEQ {
-				return ANOEQ, bad
-			}
-
-			// Blank fields, padded fields, fields with non-memory
-			// equality need special compare.
-			if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
-				ret = ASPECIAL
-			}
-		}
-
-		return ret, nil
-	}
-
-	Fatalf("algtype1: unexpected type %v", t)
-	return 0, nil
-}
-
-// genhash returns a symbol which is the closure used to compute
-// the hash of a value of type t.
-// Note: the generated function must match runtime.typehash exactly.
-func genhash(t *types.Type) *obj.LSym {
-	switch algtype(t) {
-	default:
-		// genhash is only called for types that have equality
-		Fatalf("genhash %v", t)
-	case AMEM0:
-		return sysClosure("memhash0")
-	case AMEM8:
-		return sysClosure("memhash8")
-	case AMEM16:
-		return sysClosure("memhash16")
-	case AMEM32:
-		return sysClosure("memhash32")
-	case AMEM64:
-		return sysClosure("memhash64")
-	case AMEM128:
-		return sysClosure("memhash128")
-	case ASTRING:
-		return sysClosure("strhash")
-	case AINTER:
-		return sysClosure("interhash")
-	case ANILINTER:
-		return sysClosure("nilinterhash")
-	case AFLOAT32:
-		return sysClosure("f32hash")
-	case AFLOAT64:
-		return sysClosure("f64hash")
-	case ACPLX64:
-		return sysClosure("c64hash")
-	case ACPLX128:
-		return sysClosure("c128hash")
-	case AMEM:
-		// For other sizes of plain memory, we build a closure
-		// that calls memhash_varlen. The size of the memory is
-		// encoded in the first slot of the closure.
-		closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
-		if len(closure.P) > 0 { // already generated
-			return closure
-		}
-		if memhashvarlen == nil {
-			memhashvarlen = sysfunc("memhash_varlen")
-		}
-		ot := 0
-		ot = dsymptr(closure, ot, memhashvarlen, 0)
-		ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
-		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
-		return closure
-	case ASPECIAL:
-		break
-	}
-
-	closure := typesymprefix(".hashfunc", t).Linksym()
-	if len(closure.P) > 0 { // already generated
-		return closure
-	}
-
-	// Generate hash functions for subtypes.
-	// There are cases where we might not use these hashes,
-	// but in that case they will get dead-code eliminated.
-	// (And the closure generated by genhash will also get
-	// dead-code eliminated, as we call the subtype hashers
-	// directly.)
-	switch t.Etype {
-	case types.TARRAY:
-		genhash(t.Elem())
-	case types.TSTRUCT:
-		for _, f := range t.FieldSlice() {
-			genhash(f.Type)
-		}
-	}
-
-	sym := typesymprefix(".hash", t)
-	if Debug.r != 0 {
-		fmt.Printf("genhash %v %v %v\n", closure, sym, t)
-	}
-
-	lineno = autogeneratedPos // less confusing than end of input
-	dclcontext = PEXTERN
-
-	// func sym(p *T, h uintptr) uintptr
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Set2(
-		namedfield("p", types.NewPtr(t)),
-		namedfield("h", types.Types[TUINTPTR]),
-	)
-	tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
-
-	fn := dclfunc(sym, tfn)
-	np := asNode(tfn.Type.Params().Field(0).Nname)
-	nh := asNode(tfn.Type.Params().Field(1).Nname)
-
-	switch t.Etype {
-	case types.TARRAY:
-		// An array of pure memory would be handled by the
-		// standard algorithm, so the element type must not be
-		// pure memory.
-		hashel := hashfor(t.Elem())
-
-		n := nod(ORANGE, nil, nod(ODEREF, np, nil))
-		ni := newname(lookup("i"))
-		ni.Type = types.Types[TINT]
-		n.List.Set1(ni)
-		n.SetColas(true)
-		colasdefn(n.List.Slice(), n)
-		ni = n.List.First()
-
-		// h = hashel(&p[i], h)
-		call := nod(OCALL, hashel, nil)
-
-		nx := nod(OINDEX, np, ni)
-		nx.SetBounded(true)
-		na := nod(OADDR, nx, nil)
-		call.List.Append(na)
-		call.List.Append(nh)
-		n.Nbody.Append(nod(OAS, nh, call))
-
-		fn.Nbody.Append(n)
-
-	case types.TSTRUCT:
-		// Walk the struct using memhash for runs of AMEM
-		// and calling specific hash functions for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
-			f := fields[i]
-
-			// Skip blank fields.
-			if f.Sym.IsBlank() {
-				i++
-				continue
-			}
-
-			// Hash non-memory fields with appropriate hash function.
-			if !IsRegularMemory(f.Type) {
-				hashel := hashfor(f.Type)
-				call := nod(OCALL, hashel, nil)
-				nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
-				na := nod(OADDR, nx, nil)
-				call.List.Append(na)
-				call.List.Append(nh)
-				fn.Nbody.Append(nod(OAS, nh, call))
-				i++
-				continue
-			}
-
-			// Otherwise, hash a maximal length run of raw memory.
-			size, next := memrun(t, i)
-
-			// h = hashel(&p.first, size, h)
-			hashel := hashmem(f.Type)
-			call := nod(OCALL, hashel, nil)
-			nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
-			na := nod(OADDR, nx, nil)
-			call.List.Append(na)
-			call.List.Append(nh)
-			call.List.Append(nodintconst(size))
-			fn.Nbody.Append(nod(OAS, nh, call))
-
-			i = next
-		}
-	}
-
-	r := nod(ORETURN, nil, nil)
-	r.List.Append(nh)
-	fn.Nbody.Append(r)
-
-	if Debug.r != 0 {
-		dumplist("genhash body", fn.Nbody)
-	}
-
-	funcbody()
-
-	fn.Func.SetDupok(true)
-	fn = typecheck(fn, ctxStmt)
-
-	Curfn = fn
-	typecheckslice(fn.Nbody.Slice(), ctxStmt)
-	Curfn = nil
-
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	fn.Func.SetNilCheckDisabled(true)
-	xtop = append(xtop, fn)
-
-	// Build closure. It doesn't close over any variables, so
-	// it contains just the function pointer.
-	dsymptr(closure, 0, sym.Linksym(), 0)
-	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
-
-	return closure
-}
-
-func hashfor(t *types.Type) *Node {
-	var sym *types.Sym
-
-	switch a, _ := algtype1(t); a {
-	case AMEM:
-		Fatalf("hashfor with AMEM type")
-	case AINTER:
-		sym = Runtimepkg.Lookup("interhash")
-	case ANILINTER:
-		sym = Runtimepkg.Lookup("nilinterhash")
-	case ASTRING:
-		sym = Runtimepkg.Lookup("strhash")
-	case AFLOAT32:
-		sym = Runtimepkg.Lookup("f32hash")
-	case AFLOAT64:
-		sym = Runtimepkg.Lookup("f64hash")
-	case ACPLX64:
-		sym = Runtimepkg.Lookup("c64hash")
-	case ACPLX128:
-		sym = Runtimepkg.Lookup("c128hash")
-	default:
-		// Note: the caller of hashfor ensured that this symbol
-		// exists and has a body by calling genhash for t.
-		sym = typesymprefix(".hash", t)
-	}
-
-	n := newname(sym)
-	setNodeNameFunc(n)
-	n.Type = functype(nil, []*Node{
-		anonfield(types.NewPtr(t)),
-		anonfield(types.Types[TUINTPTR]),
-	}, []*Node{
-		anonfield(types.Types[TUINTPTR]),
-	})
-	return n
-}
-
-// sysClosure returns a closure which will call the
-// given runtime function (with no closed-over variables).
-func sysClosure(name string) *obj.LSym {
-	s := sysvar(name + "·f")
-	if len(s.P) == 0 {
-		f := sysfunc(name)
-		dsymptr(s, 0, f, 0)
-		ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	}
-	return s
-}
-
-// geneq returns a symbol which is the closure used to compute
-// equality for two objects of type t.
-func geneq(t *types.Type) *obj.LSym {
-	switch algtype(t) {
-	case ANOEQ:
-		// The runtime will panic if it tries to compare
-		// a type with a nil equality function.
-		return nil
-	case AMEM0:
-		return sysClosure("memequal0")
-	case AMEM8:
-		return sysClosure("memequal8")
-	case AMEM16:
-		return sysClosure("memequal16")
-	case AMEM32:
-		return sysClosure("memequal32")
-	case AMEM64:
-		return sysClosure("memequal64")
-	case AMEM128:
-		return sysClosure("memequal128")
-	case ASTRING:
-		return sysClosure("strequal")
-	case AINTER:
-		return sysClosure("interequal")
-	case ANILINTER:
-		return sysClosure("nilinterequal")
-	case AFLOAT32:
-		return sysClosure("f32equal")
-	case AFLOAT64:
-		return sysClosure("f64equal")
-	case ACPLX64:
-		return sysClosure("c64equal")
-	case ACPLX128:
-		return sysClosure("c128equal")
-	case AMEM:
-		// make equality closure. The size of the type
-		// is encoded in the closure.
-		closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
-		if len(closure.P) != 0 {
-			return closure
-		}
-		if memequalvarlen == nil {
-			memequalvarlen = sysvar("memequal_varlen") // asm func
-		}
-		ot := 0
-		ot = dsymptr(closure, ot, memequalvarlen, 0)
-		ot = duintptr(closure, ot, uint64(t.Width))
-		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
-		return closure
-	case ASPECIAL:
-		break
-	}
-
-	closure := typesymprefix(".eqfunc", t).Linksym()
-	if len(closure.P) > 0 { // already generated
-		return closure
-	}
-	sym := typesymprefix(".eq", t)
-	if Debug.r != 0 {
-		fmt.Printf("geneq %v\n", t)
-	}
-
-	// Autogenerate code for equality of structs and arrays.
-
-	lineno = autogeneratedPos // less confusing than end of input
-	dclcontext = PEXTERN
-
-	// func sym(p, q *T) bool
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Set2(
-		namedfield("p", types.NewPtr(t)),
-		namedfield("q", types.NewPtr(t)),
-	)
-	tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
-
-	fn := dclfunc(sym, tfn)
-	np := asNode(tfn.Type.Params().Field(0).Nname)
-	nq := asNode(tfn.Type.Params().Field(1).Nname)
-	nr := asNode(tfn.Type.Results().Field(0).Nname)
-
-	// Label to jump to if an equality test fails.
-	neq := autolabel(".neq")
-
-	// We reach here only for types that have equality but
-	// cannot be handled by the standard algorithms,
-	// so t must be either an array or a struct.
-	switch t.Etype {
-	default:
-		Fatalf("geneq %v", t)
-
-	case TARRAY:
-		nelem := t.NumElem()
-
-		// checkAll generates code to check the equality of all array elements.
-		// If unroll is greater than nelem, checkAll generates:
-		//
-		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
-		// } else {
-		//   return
-		// }
-		//
-		// And so on.
-		//
-		// Otherwise it generates:
-		//
-		// for i := 0; i < nelem; i++ {
-		//   if eq(p[i], q[i]) {
-		//   } else {
-		//     goto neq
-		//   }
-		// }
-		//
-		// TODO(josharian): consider doing some loop unrolling
-		// for larger nelem as well, processing a few elements at a time in a loop.
-		checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
-			// checkIdx generates a node to check for equality at index i.
-			checkIdx := func(i *Node) *Node {
-				// pi := p[i]
-				pi := nod(OINDEX, np, i)
-				pi.SetBounded(true)
-				pi.Type = t.Elem()
-				// qi := q[i]
-				qi := nod(OINDEX, nq, i)
-				qi.SetBounded(true)
-				qi.Type = t.Elem()
-				return eq(pi, qi)
-			}
-
-			if nelem <= unroll {
-				if last {
-					// Do last comparison in a different manner.
-					nelem--
-				}
-				// Generate a series of checks.
-				for i := int64(0); i < nelem; i++ {
-					// if check {} else { goto neq }
-					nif := nod(OIF, checkIdx(nodintconst(i)), nil)
-					nif.Rlist.Append(nodSym(OGOTO, nil, neq))
-					fn.Nbody.Append(nif)
-				}
-				if last {
-					fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
-				}
-			} else {
-				// Generate a for loop.
-				// for i := 0; i < nelem; i++
-				i := temp(types.Types[TINT])
-				init := nod(OAS, i, nodintconst(0))
-				cond := nod(OLT, i, nodintconst(nelem))
-				post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
-				loop := nod(OFOR, cond, post)
-				loop.Ninit.Append(init)
-				// if eq(pi, qi) {} else { goto neq }
-				nif := nod(OIF, checkIdx(i), nil)
-				nif.Rlist.Append(nodSym(OGOTO, nil, neq))
-				loop.Nbody.Append(nif)
-				fn.Nbody.Append(loop)
-				if last {
-					fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
-				}
-			}
-		}
-
-		switch t.Elem().Etype {
-		case TSTRING:
-			// Do two loops. First, check that all the lengths match (cheap).
-			// Second, check that all the contents match (expensive).
-			// TODO: when the array size is small, unroll the length match checks.
-			checkAll(3, false, func(pi, qi *Node) *Node {
-				// Compare lengths.
-				eqlen, _ := eqstring(pi, qi)
-				return eqlen
-			})
-			checkAll(1, true, func(pi, qi *Node) *Node {
-				// Compare contents.
-				_, eqmem := eqstring(pi, qi)
-				return eqmem
-			})
-		case TFLOAT32, TFLOAT64:
-			checkAll(2, true, func(pi, qi *Node) *Node {
-				// p[i] == q[i]
-				return nod(OEQ, pi, qi)
-			})
-		// TODO: pick apart structs, do them piecemeal too
-		default:
-			checkAll(1, true, func(pi, qi *Node) *Node {
-				// p[i] == q[i]
-				return nod(OEQ, pi, qi)
-			})
-		}
-
-	case TSTRUCT:
-		// Build a list of conditions to satisfy.
-		// The conditions are a list-of-lists. Conditions are reorderable
-		// within each inner list. The outer lists must be evaluated in order.
-		var conds [][]*Node
-		conds = append(conds, []*Node{})
-		and := func(n *Node) {
-			i := len(conds) - 1
-			conds[i] = append(conds[i], n)
-		}
-
-		// Walk the struct using memequal for runs of AMEM
-		// and calling specific equality tests for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
-			f := fields[i]
-
-			// Skip blank-named fields.
-			if f.Sym.IsBlank() {
-				i++
-				continue
-			}
-
-			// Compare non-memory fields with field equality.
-			if !IsRegularMemory(f.Type) {
-				if EqCanPanic(f.Type) {
-					// Enforce ordering by starting a new set of reorderable conditions.
-					conds = append(conds, []*Node{})
-				}
-				p := nodSym(OXDOT, np, f.Sym)
-				q := nodSym(OXDOT, nq, f.Sym)
-				switch {
-				case f.Type.IsString():
-					eqlen, eqmem := eqstring(p, q)
-					and(eqlen)
-					and(eqmem)
-				default:
-					and(nod(OEQ, p, q))
-				}
-				if EqCanPanic(f.Type) {
-					// Also enforce ordering after something that can panic.
-					conds = append(conds, []*Node{})
-				}
-				i++
-				continue
-			}
-
-			// Find maximal length run of memory-only fields.
-			size, next := memrun(t, i)
-
-			// TODO(rsc): All the calls to newname are wrong for
-			// cross-package unexported fields.
-			if s := fields[i:next]; len(s) <= 2 {
-				// Two or fewer fields: use plain field equality.
-				for _, f := range s {
-					and(eqfield(np, nq, f.Sym))
-				}
-			} else {
-				// More than two fields: use memequal.
-				and(eqmem(np, nq, f.Sym, size))
-			}
-			i = next
-		}
-
-		// Sort conditions to put runtime calls last.
-		// Preserve the rest of the ordering.
-		var flatConds []*Node
-		for _, c := range conds {
-			isCall := func(n *Node) bool {
-				return n.Op == OCALL || n.Op == OCALLFUNC
-			}
-			sort.SliceStable(c, func(i, j int) bool {
-				return !isCall(c[i]) && isCall(c[j])
-			})
-			flatConds = append(flatConds, c...)
-		}
-
-		if len(flatConds) == 0 {
-			fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
-		} else {
-			for _, c := range flatConds[:len(flatConds)-1] {
-				// if cond {} else { goto neq }
-				n := nod(OIF, c, nil)
-				n.Rlist.Append(nodSym(OGOTO, nil, neq))
-				fn.Nbody.Append(n)
-			}
-			fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
-		}
-	}
-
-	// ret:
-	//   return
-	ret := autolabel(".ret")
-	fn.Nbody.Append(nodSym(OLABEL, nil, ret))
-	fn.Nbody.Append(nod(ORETURN, nil, nil))
-
-	// neq:
-	//   r = false
-	//   return (or goto ret)
-	fn.Nbody.Append(nodSym(OLABEL, nil, neq))
-	fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
-	if EqCanPanic(t) || hasCall(fn) {
-		// Epilogue is large, so share it with the equal case.
-		fn.Nbody.Append(nodSym(OGOTO, nil, ret))
-	} else {
-		// Epilogue is small, so don't bother sharing.
-		fn.Nbody.Append(nod(ORETURN, nil, nil))
-	}
-	// TODO(khr): the epilogue size detection condition above isn't perfect.
-	// We should really do a generic CL that shares epilogues across
-	// the board. See #24936.
-
-	if Debug.r != 0 {
-		dumplist("geneq body", fn.Nbody)
-	}
-
-	funcbody()
-
-	fn.Func.SetDupok(true)
-	fn = typecheck(fn, ctxStmt)
-
-	Curfn = fn
-	typecheckslice(fn.Nbody.Slice(), ctxStmt)
-	Curfn = nil
-
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	// Disable checknils while compiling this code.
-	// We are comparing a struct or an array,
-	// neither of which can be nil, and our comparisons
-	// are shallow.
-	fn.Func.SetNilCheckDisabled(true)
-	xtop = append(xtop, fn)
-
-	// Generate a closure which points at the function we just generated.
-	dsymptr(closure, 0, sym.Linksym(), 0)
-	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	return closure
-}
-
-func hasCall(n *Node) bool {
-	if n.Op == OCALL || n.Op == OCALLFUNC {
-		return true
-	}
-	if n.Left != nil && hasCall(n.Left) {
-		return true
-	}
-	if n.Right != nil && hasCall(n.Right) {
-		return true
-	}
-	for _, x := range n.Ninit.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	for _, x := range n.Nbody.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	for _, x := range n.List.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	for _, x := range n.Rlist.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	return false
-}
-
-// eqfield returns the node
-// 	p.field == q.field
-func eqfield(p *Node, q *Node, field *types.Sym) *Node {
-	nx := nodSym(OXDOT, p, field)
-	ny := nodSym(OXDOT, q, field)
-	ne := nod(OEQ, nx, ny)
-	return ne
-}
-
-// eqstring returns the nodes
-//   len(s) == len(t)
-// and
-//   memequal(s.ptr, t.ptr, len(s))
-// which can be used to construct string equality comparison.
-// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t *Node) (eqlen, eqmem *Node) {
-	s = conv(s, types.Types[TSTRING])
-	t = conv(t, types.Types[TSTRING])
-	sptr := nod(OSPTR, s, nil)
-	tptr := nod(OSPTR, t, nil)
-	slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
-	tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
-
-	fn := syslook("memequal")
-	fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
-	call := nod(OCALL, fn, nil)
-	call.List.Append(sptr, tptr, slen.copy())
-	call = typecheck(call, ctxExpr|ctxMultiOK)
-
-	cmp := nod(OEQ, slen, tlen)
-	cmp = typecheck(cmp, ctxExpr)
-	cmp.Type = types.Types[TBOOL]
-	return cmp, call
-}
-
-// eqinterface returns the nodes
-//   s.tab == t.tab (or s.typ == t.typ, as appropriate)
-// and
-//   ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
-// which can be used to construct interface equality comparison.
-// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
-	if !types.Identical(s.Type, t.Type) {
-		Fatalf("eqinterface %v %v", s.Type, t.Type)
-	}
-	// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
-	// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
-	var fn *Node
-	if s.Type.IsEmptyInterface() {
-		fn = syslook("efaceeq")
-	} else {
-		fn = syslook("ifaceeq")
-	}
-
-	stab := nod(OITAB, s, nil)
-	ttab := nod(OITAB, t, nil)
-	sdata := nod(OIDATA, s, nil)
-	tdata := nod(OIDATA, t, nil)
-	sdata.Type = types.Types[TUNSAFEPTR]
-	tdata.Type = types.Types[TUNSAFEPTR]
-	sdata.SetTypecheck(1)
-	tdata.SetTypecheck(1)
-
-	call := nod(OCALL, fn, nil)
-	call.List.Append(stab, sdata, tdata)
-	call = typecheck(call, ctxExpr|ctxMultiOK)
-
-	cmp := nod(OEQ, stab, ttab)
-	cmp = typecheck(cmp, ctxExpr)
-	cmp.Type = types.Types[TBOOL]
-	return cmp, call
-}
-
-// eqmem returns the node
-// 	memequal(&p.field, &q.field [, size])
-func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
-	nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
-	ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
-	nx = typecheck(nx, ctxExpr)
-	ny = typecheck(ny, ctxExpr)
-
-	fn, needsize := eqmemfunc(size, nx.Type.Elem())
-	call := nod(OCALL, fn, nil)
-	call.List.Append(nx)
-	call.List.Append(ny)
-	if needsize {
-		call.List.Append(nodintconst(size))
-	}
-
-	return call
-}
-
-func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
-	switch size {
-	default:
-		fn = syslook("memequal")
-		needsize = true
-	case 1, 2, 4, 8, 16:
-		buf := fmt.Sprintf("memequal%d", int(size)*8)
-		fn = syslook(buf)
-	}
-
-	fn = substArgTypes(fn, t, t)
-	return fn, needsize
-}
-
-// memrun finds runs of struct fields for which memory-only algs are appropriate.
-// t is the parent struct type, and start is the field index at which to start the run.
-// size is the length in bytes of the memory included in the run.
-// next is the index just after the end of the memory run.
-func memrun(t *types.Type, start int) (size int64, next int) {
-	next = start
-	for {
-		next++
-		if next == t.NumFields() {
-			break
-		}
-		// Stop run after a padded field.
-		if ispaddedfield(t, next-1) {
-			break
-		}
-		// Also, stop before a blank or non-memory field.
-		if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
-			break
-		}
-	}
-	return t.Field(next-1).End() - t.Field(start).Offset, next
-}
-
-// ispaddedfield reports whether the i'th field of struct type t is followed
-// by padding.
-func ispaddedfield(t *types.Type, i int) bool {
-	if !t.IsStruct() {
-		Fatalf("ispaddedfield called non-struct %v", t)
-	}
-	end := t.Width
-	if i+1 < t.NumFields() {
-		end = t.Field(i + 1).Offset
-	}
-	return t.Field(i).End() != end
-}
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
deleted file mode 100644
index a3a0c8f..0000000
--- a/src/cmd/compile/internal/gc/align.go
+++ /dev/null
@@ -1,531 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/compile/internal/types"
-	"fmt"
-	"sort"
-)
-
-// sizeCalculationDisabled indicates whether it is safe
-// to calculate Types' widths and alignments. See dowidth.
-var sizeCalculationDisabled bool
-
-// machine size and rounding alignment is dictated around
-// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
-var defercalc int
-
-func Rnd(o int64, r int64) int64 {
-	if r < 1 || r > 8 || r&(r-1) != 0 {
-		Fatalf("rnd %d", r)
-	}
-	return (o + r - 1) &^ (r - 1)
-}
-
-// expandiface computes the method set for interface type t by
-// expanding embedded interfaces.
-func expandiface(t *types.Type) {
-	seen := make(map[*types.Sym]*types.Field)
-	var methods []*types.Field
-
-	addMethod := func(m *types.Field, explicit bool) {
-		switch prev := seen[m.Sym]; {
-		case prev == nil:
-			seen[m.Sym] = m
-		case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
-			return
-		default:
-			yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
-		}
-		methods = append(methods, m)
-	}
-
-	for _, m := range t.Methods().Slice() {
-		if m.Sym == nil {
-			continue
-		}
-
-		checkwidth(m.Type)
-		addMethod(m, true)
-	}
-
-	for _, m := range t.Methods().Slice() {
-		if m.Sym != nil {
-			continue
-		}
-
-		if !m.Type.IsInterface() {
-			yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
-			m.SetBroke(true)
-			t.SetBroke(true)
-			// Add to fields so that error messages
-			// include the broken embedded type when
-			// printing t.
-			// TODO(mdempsky): Revisit this.
-			methods = append(methods, m)
-			continue
-		}
-
-		// Embedded interface: duplicate all methods
-		// (including broken ones, if any) and add to t's
-		// method set.
-		for _, t1 := range m.Type.Fields().Slice() {
-			f := types.NewField()
-			f.Pos = m.Pos // preserve embedding position
-			f.Sym = t1.Sym
-			f.Type = t1.Type
-			f.SetBroke(t1.Broke())
-			addMethod(f, false)
-		}
-	}
-
-	sort.Sort(methcmp(methods))
-
-	if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
-		yyerrorl(typePos(t), "interface too large")
-	}
-	for i, m := range methods {
-		m.Offset = int64(i) * int64(Widthptr)
-	}
-
-	// Access fields directly to avoid recursively calling dowidth
-	// within Type.Fields().
-	t.Extra.(*types.Interface).Fields.Set(methods)
-}
-
-func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
-	starto := o
-	maxalign := int32(flag)
-	if maxalign < 1 {
-		maxalign = 1
-	}
-	lastzero := int64(0)
-	for _, f := range t.Fields().Slice() {
-		if f.Type == nil {
-			// broken field, just skip it so that other valid fields
-			// get a width.
-			continue
-		}
-
-		dowidth(f.Type)
-		if int32(f.Type.Align) > maxalign {
-			maxalign = int32(f.Type.Align)
-		}
-		if f.Type.Align > 0 {
-			o = Rnd(o, int64(f.Type.Align))
-		}
-		f.Offset = o
-		if n := asNode(f.Nname); n != nil {
-			// addrescapes has similar code to update these offsets.
-			// Usually addrescapes runs after widstruct,
-			// in which case we could drop this,
-			// but closure functions are the exception.
-			// NOTE(rsc): This comment may be stale.
-			// It's possible the ordering has changed and this is
-			// now the common case. I'm not sure.
-			if n.Name.Param.Stackcopy != nil {
-				n.Name.Param.Stackcopy.Xoffset = o
-				n.Xoffset = 0
-			} else {
-				n.Xoffset = o
-			}
-		}
-
-		w := f.Type.Width
-		if w < 0 {
-			Fatalf("invalid width %d", f.Type.Width)
-		}
-		if w == 0 {
-			lastzero = o
-		}
-		o += w
-		maxwidth := thearch.MAXWIDTH
-		// On 32-bit systems, reflect tables impose an additional constraint
-		// that each field start offset must fit in 31 bits.
-		if maxwidth < 1<<32 {
-			maxwidth = 1<<31 - 1
-		}
-		if o >= maxwidth {
-			yyerrorl(typePos(errtype), "type %L too large", errtype)
-			o = 8 // small but nonzero
-		}
-	}
-
-	// For nonzero-sized structs which end in a zero-sized thing, we add
-	// an extra byte of padding to the type. This padding ensures that
-	// taking the address of the zero-sized thing can't manufacture a
-	// pointer to the next object in the heap. See issue 9401.
-	if flag == 1 && o > starto && o == lastzero {
-		o++
-	}
-
-	// final width is rounded
-	if flag != 0 {
-		o = Rnd(o, int64(maxalign))
-	}
-	t.Align = uint8(maxalign)
-
-	// type width only includes back to first field's offset
-	t.Width = o - starto
-
-	return o
-}
-
-// findTypeLoop searches for an invalid type declaration loop involving
-// type t and reports whether one is found. If so, path contains the
-// loop.
-//
-// path points to a slice used for tracking the sequence of types
-// visited. Using a pointer to a slice allows the slice capacity to
-// grow, limiting reallocations.
-func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
-	// We implement a simple DFS loop-finding algorithm. This
-	// could be faster, but type cycles are rare.
-
-	if t.Sym != nil {
-		// Declared type. Check for loops and otherwise
-		// recurse on the type expression used in the type
-		// declaration.
-
-		for i, x := range *path {
-			if x == t {
-				*path = (*path)[i:]
-				return true
-			}
-		}
-
-		*path = append(*path, t)
-		if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
-			return true
-		}
-		*path = (*path)[:len(*path)-1]
-	} else {
-		// Anonymous type. Recurse on contained types.
-
-		switch t.Etype {
-		case TARRAY:
-			if findTypeLoop(t.Elem(), path) {
-				return true
-			}
-		case TSTRUCT:
-			for _, f := range t.Fields().Slice() {
-				if findTypeLoop(f.Type, path) {
-					return true
-				}
-			}
-		case TINTER:
-			for _, m := range t.Methods().Slice() {
-				if m.Type.IsInterface() { // embedded interface
-					if findTypeLoop(m.Type, path) {
-						return true
-					}
-				}
-			}
-		}
-	}
-
-	return false
-}
-
-func reportTypeLoop(t *types.Type) {
-	if t.Broke() {
-		return
-	}
-
-	var l []*types.Type
-	if !findTypeLoop(t, &l) {
-		Fatalf("failed to find type loop for: %v", t)
-	}
-
-	// Rotate loop so that the earliest type declaration is first.
-	i := 0
-	for j, t := range l[1:] {
-		if typePos(t).Before(typePos(l[i])) {
-			i = j + 1
-		}
-	}
-	l = append(l[i:], l[:i]...)
-
-	var msg bytes.Buffer
-	fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
-	for _, t := range l {
-		fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
-		t.SetBroke(true)
-	}
-	fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
-	yyerrorl(typePos(l[0]), msg.String())
-}
-
-// dowidth calculates and stores the size and alignment for t.
-// If sizeCalculationDisabled is set, and the size/alignment
-// have not already been calculated, it calls Fatal.
-// This is used to prevent data races in the back end.
-func dowidth(t *types.Type) {
-	// Calling dowidth with typecheck tracing enabled is not safe.
-	// See issue #33658.
-	if enableTrace && skipDowidthForTracing {
-		return
-	}
-	if Widthptr == 0 {
-		Fatalf("dowidth without betypeinit")
-	}
-
-	if t == nil {
-		return
-	}
-
-	if t.Width == -2 {
-		reportTypeLoop(t)
-		t.Width = 0
-		t.Align = 1
-		return
-	}
-
-	if t.WidthCalculated() {
-		return
-	}
-
-	if sizeCalculationDisabled {
-		if t.Broke() {
-			// break infinite recursion from Fatal call below
-			return
-		}
-		t.SetBroke(true)
-		Fatalf("width not calculated: %v", t)
-	}
-
-	// break infinite recursion if the broken recursive type
-	// is referenced again
-	if t.Broke() && t.Width == 0 {
-		return
-	}
-
-	// defer checkwidth calls until after we're done
-	defercheckwidth()
-
-	lno := lineno
-	if asNode(t.Nod) != nil {
-		lineno = asNode(t.Nod).Pos
-	}
-
-	t.Width = -2
-	t.Align = 0 // 0 means use t.Width, below
-
-	et := t.Etype
-	switch et {
-	case TFUNC, TCHAN, TMAP, TSTRING:
-		break
-
-	// simtype == 0 during bootstrap
-	default:
-		if simtype[t.Etype] != 0 {
-			et = simtype[t.Etype]
-		}
-	}
-
-	var w int64
-	switch et {
-	default:
-		Fatalf("dowidth: unknown type: %v", t)
-
-	// compiler-specific stuff
-	case TINT8, TUINT8, TBOOL:
-		// bool is int8
-		w = 1
-
-	case TINT16, TUINT16:
-		w = 2
-
-	case TINT32, TUINT32, TFLOAT32:
-		w = 4
-
-	case TINT64, TUINT64, TFLOAT64:
-		w = 8
-		t.Align = uint8(Widthreg)
-
-	case TCOMPLEX64:
-		w = 8
-		t.Align = 4
-
-	case TCOMPLEX128:
-		w = 16
-		t.Align = uint8(Widthreg)
-
-	case TPTR:
-		w = int64(Widthptr)
-		checkwidth(t.Elem())
-
-	case TUNSAFEPTR:
-		w = int64(Widthptr)
-
-	case TINTER: // implemented as 2 pointers
-		w = 2 * int64(Widthptr)
-		t.Align = uint8(Widthptr)
-		expandiface(t)
-
-	case TCHAN: // implemented as pointer
-		w = int64(Widthptr)
-
-		checkwidth(t.Elem())
-
-		// make a fake type to check later, to
-		// trigger the channel argument check.
-		t1 := types.NewChanArgs(t)
-		checkwidth(t1)
-
-	case TCHANARGS:
-		t1 := t.ChanArgs()
-		dowidth(t1) // just in case
-		if t1.Elem().Width >= 1<<16 {
-			yyerrorl(typePos(t1), "channel element type too large (>64kB)")
-		}
-		w = 1 // anything will do
-
-	case TMAP: // implemented as pointer
-		w = int64(Widthptr)
-		checkwidth(t.Elem())
-		checkwidth(t.Key())
-
-	case TFORW: // should have been filled in
-		reportTypeLoop(t)
-		w = 1 // anything will do
-
-	case TANY:
-		// dummy type; should be replaced before use.
-		Fatalf("dowidth any")
-
-	case TSTRING:
-		if sizeofString == 0 {
-			Fatalf("early dowidth string")
-		}
-		w = sizeofString
-		t.Align = uint8(Widthptr)
-
-	case TARRAY:
-		if t.Elem() == nil {
-			break
-		}
-
-		dowidth(t.Elem())
-		if t.Elem().Width != 0 {
-			cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
-			if uint64(t.NumElem()) > cap {
-				yyerrorl(typePos(t), "type %L larger than address space", t)
-			}
-		}
-		w = t.NumElem() * t.Elem().Width
-		t.Align = t.Elem().Align
-
-	case TSLICE:
-		if t.Elem() == nil {
-			break
-		}
-		w = sizeofSlice
-		checkwidth(t.Elem())
-		t.Align = uint8(Widthptr)
-
-	case TSTRUCT:
-		if t.IsFuncArgStruct() {
-			Fatalf("dowidth fn struct %v", t)
-		}
-		w = widstruct(t, t, 0, 1)
-
-	// make a fake type to check later, to
-	// trigger function argument computation.
-	case TFUNC:
-		t1 := types.NewFuncArgs(t)
-		checkwidth(t1)
-		w = int64(Widthptr) // width of func type is pointer
-
-	// function is 3 concatenated structures;
-	// compute their widths as a side effect.
-	case TFUNCARGS:
-		t1 := t.FuncArgs()
-		w = widstruct(t1, t1.Recvs(), 0, 0)
-		w = widstruct(t1, t1.Params(), w, Widthreg)
-		w = widstruct(t1, t1.Results(), w, Widthreg)
-		t1.Extra.(*types.Func).Argwid = w
-		if w%int64(Widthreg) != 0 {
-			Warn("bad type %v %d\n", t1, w)
-		}
-		t.Align = 1
-	}
-
-	if Widthptr == 4 && w != int64(int32(w)) {
-		yyerrorl(typePos(t), "type %v too large", t)
-	}
-
-	t.Width = w
-	if t.Align == 0 {
-		if w == 0 || w > 8 || w&(w-1) != 0 {
-			Fatalf("invalid alignment for %v", t)
-		}
-		t.Align = uint8(w)
-	}
-
-	lineno = lno
-
-	resumecheckwidth()
-}
-
-// when a type's width should be known, we call checkwidth
-// to compute it.  during a declaration like
-//
-//	type T *struct { next T }
-//
-// it is necessary to defer the calculation of the struct width
-// until after T has been initialized to be a pointer to that struct.
-// similarly, during import processing structs may be used
-// before their definition.  in those situations, calling
-// defercheckwidth() stops width calculations until
-// resumecheckwidth() is called, at which point all the
-// checkwidths that were deferred are executed.
-// dowidth should only be called when the type's size
-// is needed immediately.  checkwidth makes sure the
-// size is evaluated eventually.
-
-var deferredTypeStack []*types.Type
-
-func checkwidth(t *types.Type) {
-	if t == nil {
-		return
-	}
-
-	// function arg structs should not be checked
-	// outside of the enclosing function.
-	if t.IsFuncArgStruct() {
-		Fatalf("checkwidth %v", t)
-	}
-
-	if defercalc == 0 {
-		dowidth(t)
-		return
-	}
-
-	// if type has not been pushed on deferredTypeStack yet, do it now
-	if !t.Deferwidth() {
-		t.SetDeferwidth(true)
-		deferredTypeStack = append(deferredTypeStack, t)
-	}
-}
-
-func defercheckwidth() {
-	defercalc++
-}
-
-func resumecheckwidth() {
-	if defercalc == 1 {
-		for len(deferredTypeStack) > 0 {
-			t := deferredTypeStack[len(deferredTypeStack)-1]
-			deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
-			t.SetDeferwidth(false)
-			dowidth(t)
-		}
-	}
-
-	defercalc--
-}
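
Two pieces of arithmetic carried most of align.go: Rnd rounds an offset up to a power-of-two alignment, and widstruct applies it per field before rounding the final size up to the struct's maximum alignment. A minimal sketch of that layout loop, with invented field sizes rather than real compiler types:

package main

import "fmt"

// rnd rounds o up to a multiple of r, where r is a power of two,
// using the same bit trick as Rnd above.
func rnd(o, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

func main() {
	// Lay out struct{ a bool; b int64; c int32 }: round each offset up
	// to the field's alignment, then round the total size up to the
	// struct's maximum alignment, as widstruct does.
	type field struct {
		name        string
		size, align int64
	}
	fields := []field{{"a", 1, 1}, {"b", 8, 8}, {"c", 4, 4}}

	var o, maxalign int64 = 0, 1
	for _, f := range fields {
		if f.align > maxalign {
			maxalign = f.align
		}
		o = rnd(o, f.align)
		fmt.Printf("%s at offset %d\n", f.name, o)
		o += f.size
	}
	fmt.Println("size:", rnd(o, maxalign))
}

On a 64-bit layout this prints offsets 0, 8, 16 and size 24: the bool is padded out to the int64's alignment, and the trailing int32 is padded so the struct's size is a multiple of its 8-byte alignment.
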
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
deleted file mode 100644
index 10f21f8..0000000
--- a/src/cmd/compile/internal/gc/bexport.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-)
-
-type exporter struct {
-	marked map[*types.Type]bool // types already seen by markType
-}
-
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
-func (p *exporter) markType(t *types.Type) {
-	if p.marked[t] {
-		return
-	}
-	p.marked[t] = true
-
-	// If this is a named type, mark all of its associated
-	// methods. Skip interface types because t.Methods contains
-	// only their unexpanded method set (i.e., exclusive of
-	// interface embeddings), and the switch statement below
-	// handles their full method set.
-	if t.Sym != nil && t.Etype != TINTER {
-		for _, m := range t.Methods().Slice() {
-			if types.IsExported(m.Sym.Name) {
-				p.markType(m.Type)
-			}
-		}
-	}
-
-	// Recursively mark any types that can be produced given a
-	// value of type t: dereferencing a pointer; indexing or
-	// iterating over an array, slice, or map; receiving from a
-	// channel; accessing a struct field or interface method; or
-	// calling a function.
-	//
-	// Notably, we don't mark function parameter types, because
-	// the user already needs some way to construct values of
-	// those types.
-	switch t.Etype {
-	case TPTR, TARRAY, TSLICE:
-		p.markType(t.Elem())
-
-	case TCHAN:
-		if t.ChanDir().CanRecv() {
-			p.markType(t.Elem())
-		}
-
-	case TMAP:
-		p.markType(t.Key())
-		p.markType(t.Elem())
-
-	case TSTRUCT:
-		for _, f := range t.FieldSlice() {
-			if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
-				p.markType(f.Type)
-			}
-		}
-
-	case TFUNC:
-		// If t is the type of a function or method, then
-		// t.Nname() is its ONAME. Mark its inline body and
-		// any recursively called functions for export.
-		inlFlood(asNode(t.Nname()))
-
-		for _, f := range t.Results().FieldSlice() {
-			p.markType(f.Type)
-		}
-
-	case TINTER:
-		for _, f := range t.FieldSlice() {
-			if types.IsExported(f.Sym.Name) {
-				p.markType(f.Type)
-			}
-		}
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
-	// Objects
-	packageTag = -(iota + 1)
-	constTag
-	typeTag
-	varTag
-	funcTag
-	endTag
-
-	// Types
-	namedTag
-	arrayTag
-	sliceTag
-	dddTag
-	structTag
-	pointerTag
-	signatureTag
-	interfaceTag
-	mapTag
-	chanTag
-
-	// Values
-	falseTag
-	trueTag
-	int64Tag
-	floatTag
-	fractionTag // not used by gc
-	complexTag
-	stringTag
-	nilTag
-	unknownTag // not used by gc (only appears in packages with errors)
-
-	// Type aliases
-	aliasTag
-)
-
-var predecl []*types.Type // initialized lazily
-
-func predeclared() []*types.Type {
-	if predecl == nil {
-		// initialize lazily to be sure that all
-		// elements have been initialized beforehand
-		predecl = []*types.Type{
-			// basic types
-			types.Types[TBOOL],
-			types.Types[TINT],
-			types.Types[TINT8],
-			types.Types[TINT16],
-			types.Types[TINT32],
-			types.Types[TINT64],
-			types.Types[TUINT],
-			types.Types[TUINT8],
-			types.Types[TUINT16],
-			types.Types[TUINT32],
-			types.Types[TUINT64],
-			types.Types[TUINTPTR],
-			types.Types[TFLOAT32],
-			types.Types[TFLOAT64],
-			types.Types[TCOMPLEX64],
-			types.Types[TCOMPLEX128],
-			types.Types[TSTRING],
-
-			// basic type aliases
-			types.Bytetype,
-			types.Runetype,
-
-			// error
-			types.Errortype,
-
-			// untyped types
-			types.UntypedBool,
-			types.UntypedInt,
-			types.UntypedRune,
-			types.UntypedFloat,
-			types.UntypedComplex,
-			types.UntypedString,
-			types.Types[TNIL],
-
-			// package unsafe
-			types.Types[TUNSAFEPTR],
-
-			// invalid type (package contains errors)
-			types.Types[Txxx],
-
-			// any type, for builtin export data
-			types.Types[TANY],
-		}
-	}
-	return predecl
-}
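
markType's walk is a depth-first traversal of the "reachable from a value of this type" relation, with the marked map cutting cycles and shared subgraphs. A toy version over a string-keyed graph; the type names and edges below are invented, not compiler data:

package main

import "fmt"

// reachable lists, for each type, the types obtainable from a value
// of it (element types, field types, method results, and so on).
var reachable = map[string][]string{
	"Server": {"*Conn", "Config"},
	"*Conn":  {"Conn"},
	"Conn":   {"[]byte"},
	"Config": {},
	"[]byte": {"byte"},
	"byte":   {},
	"Orphan": {"Server"}, // reaches Server, but nothing reaches it
}

func mark(t string, marked map[string]bool) {
	if marked[t] {
		return // already visited: cuts cycles and shared types
	}
	marked[t] = true
	for _, u := range reachable[t] {
		mark(u, marked)
	}
}

func main() {
	marked := map[string]bool{}
	mark("Server", marked)
	fmt.Println(len(marked), marked["Orphan"]) // 6 false
}

Marking starts only from exported roots, so a type like Orphan that no root reaches is never marked, and its methods' inline bodies need not be exported.
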
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
deleted file mode 100644
index 911ac4c0..0000000
--- a/src/cmd/compile/internal/gc/bimport.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/internal/src"
-)
-
-// numImport tracks how often a package with a given name is imported.
-// It is used to provide a better error message (by using the package
-// path to disambiguate) when a package whose name occurs multiple
-// times is mentioned in an error message.
-var numImport = make(map[string]int)
-
-func npos(pos src.XPos, n *Node) *Node {
-	n.Pos = pos
-	return n
-}
-
-func builtinCall(op Op) *Node {
-	return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
-}
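
The numImport counting above boils down to: tally imports per package name, and qualify a name with its import path only when the bare name is ambiguous. A sketch with invented import paths:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Count how often each package name is imported, as numImport does.
	numImport := map[string]int{}
	for _, p := range []string{"a/util", "b/util", "fmt"} { // invented
		numImport[path.Base(p)]++
	}

	// Qualify a name with its path only when the name alone is ambiguous.
	label := func(name, importPath string) string {
		if numImport[name] > 1 {
			return fmt.Sprintf("%s (%q)", name, importPath)
		}
		return name
	}
	fmt.Println(label("util", "a/util")) // util ("a/util")
	fmt.Println(label("fmt", "fmt"))     // fmt
}
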
diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go
index 967f75a..2e13d6b 100644
--- a/src/cmd/compile/internal/gc/bootstrap.go
+++ b/src/cmd/compile/internal/gc/bootstrap.go
@@ -6,8 +6,11 @@
 
 package gc
 
-import "runtime"
+import (
+	"cmd/compile/internal/base"
+	"runtime"
+)
 
 func startMutexProfiling() {
-	Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+	base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
 }
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
deleted file mode 100644
index e04f23e..0000000
--- a/src/cmd/compile/internal/gc/builtin.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Code generated by mkbuiltin.go. DO NOT EDIT.
-
-package gc
-
-import "cmd/compile/internal/types"
-
-var runtimeDecls = [...]struct {
-	name string
-	tag  int
-	typ  int
-}{
-	{"newobject", funcTag, 4},
-	{"mallocgc", funcTag, 8},
-	{"panicdivide", funcTag, 9},
-	{"panicshift", funcTag, 9},
-	{"panicmakeslicelen", funcTag, 9},
-	{"panicmakeslicecap", funcTag, 9},
-	{"throwinit", funcTag, 9},
-	{"panicwrap", funcTag, 9},
-	{"gopanic", funcTag, 11},
-	{"gorecover", funcTag, 14},
-	{"goschedguarded", funcTag, 9},
-	{"goPanicIndex", funcTag, 16},
-	{"goPanicIndexU", funcTag, 18},
-	{"goPanicSliceAlen", funcTag, 16},
-	{"goPanicSliceAlenU", funcTag, 18},
-	{"goPanicSliceAcap", funcTag, 16},
-	{"goPanicSliceAcapU", funcTag, 18},
-	{"goPanicSliceB", funcTag, 16},
-	{"goPanicSliceBU", funcTag, 18},
-	{"goPanicSlice3Alen", funcTag, 16},
-	{"goPanicSlice3AlenU", funcTag, 18},
-	{"goPanicSlice3Acap", funcTag, 16},
-	{"goPanicSlice3AcapU", funcTag, 18},
-	{"goPanicSlice3B", funcTag, 16},
-	{"goPanicSlice3BU", funcTag, 18},
-	{"goPanicSlice3C", funcTag, 16},
-	{"goPanicSlice3CU", funcTag, 18},
-	{"printbool", funcTag, 19},
-	{"printfloat", funcTag, 21},
-	{"printint", funcTag, 23},
-	{"printhex", funcTag, 25},
-	{"printuint", funcTag, 25},
-	{"printcomplex", funcTag, 27},
-	{"printstring", funcTag, 29},
-	{"printpointer", funcTag, 30},
-	{"printuintptr", funcTag, 31},
-	{"printiface", funcTag, 30},
-	{"printeface", funcTag, 30},
-	{"printslice", funcTag, 30},
-	{"printnl", funcTag, 9},
-	{"printsp", funcTag, 9},
-	{"printlock", funcTag, 9},
-	{"printunlock", funcTag, 9},
-	{"concatstring2", funcTag, 34},
-	{"concatstring3", funcTag, 35},
-	{"concatstring4", funcTag, 36},
-	{"concatstring5", funcTag, 37},
-	{"concatstrings", funcTag, 39},
-	{"cmpstring", funcTag, 40},
-	{"intstring", funcTag, 43},
-	{"slicebytetostring", funcTag, 44},
-	{"slicebytetostringtmp", funcTag, 45},
-	{"slicerunetostring", funcTag, 48},
-	{"stringtoslicebyte", funcTag, 50},
-	{"stringtoslicerune", funcTag, 53},
-	{"slicecopy", funcTag, 54},
-	{"decoderune", funcTag, 55},
-	{"countrunes", funcTag, 56},
-	{"convI2I", funcTag, 57},
-	{"convT16", funcTag, 58},
-	{"convT32", funcTag, 58},
-	{"convT64", funcTag, 58},
-	{"convTstring", funcTag, 58},
-	{"convTslice", funcTag, 58},
-	{"convT2E", funcTag, 59},
-	{"convT2Enoptr", funcTag, 59},
-	{"convT2I", funcTag, 59},
-	{"convT2Inoptr", funcTag, 59},
-	{"assertE2I", funcTag, 57},
-	{"assertE2I2", funcTag, 60},
-	{"assertI2I", funcTag, 57},
-	{"assertI2I2", funcTag, 60},
-	{"panicdottypeE", funcTag, 61},
-	{"panicdottypeI", funcTag, 61},
-	{"panicnildottype", funcTag, 62},
-	{"ifaceeq", funcTag, 64},
-	{"efaceeq", funcTag, 64},
-	{"fastrand", funcTag, 66},
-	{"makemap64", funcTag, 68},
-	{"makemap", funcTag, 69},
-	{"makemap_small", funcTag, 70},
-	{"mapaccess1", funcTag, 71},
-	{"mapaccess1_fast32", funcTag, 72},
-	{"mapaccess1_fast64", funcTag, 72},
-	{"mapaccess1_faststr", funcTag, 72},
-	{"mapaccess1_fat", funcTag, 73},
-	{"mapaccess2", funcTag, 74},
-	{"mapaccess2_fast32", funcTag, 75},
-	{"mapaccess2_fast64", funcTag, 75},
-	{"mapaccess2_faststr", funcTag, 75},
-	{"mapaccess2_fat", funcTag, 76},
-	{"mapassign", funcTag, 71},
-	{"mapassign_fast32", funcTag, 72},
-	{"mapassign_fast32ptr", funcTag, 72},
-	{"mapassign_fast64", funcTag, 72},
-	{"mapassign_fast64ptr", funcTag, 72},
-	{"mapassign_faststr", funcTag, 72},
-	{"mapiterinit", funcTag, 77},
-	{"mapdelete", funcTag, 77},
-	{"mapdelete_fast32", funcTag, 78},
-	{"mapdelete_fast64", funcTag, 78},
-	{"mapdelete_faststr", funcTag, 78},
-	{"mapiternext", funcTag, 79},
-	{"mapclear", funcTag, 80},
-	{"makechan64", funcTag, 82},
-	{"makechan", funcTag, 83},
-	{"chanrecv1", funcTag, 85},
-	{"chanrecv2", funcTag, 86},
-	{"chansend1", funcTag, 88},
-	{"closechan", funcTag, 30},
-	{"writeBarrier", varTag, 90},
-	{"typedmemmove", funcTag, 91},
-	{"typedmemclr", funcTag, 92},
-	{"typedslicecopy", funcTag, 93},
-	{"selectnbsend", funcTag, 94},
-	{"selectnbrecv", funcTag, 95},
-	{"selectnbrecv2", funcTag, 97},
-	{"selectsetpc", funcTag, 98},
-	{"selectgo", funcTag, 99},
-	{"block", funcTag, 9},
-	{"makeslice", funcTag, 100},
-	{"makeslice64", funcTag, 101},
-	{"makeslicecopy", funcTag, 102},
-	{"growslice", funcTag, 104},
-	{"memmove", funcTag, 105},
-	{"memclrNoHeapPointers", funcTag, 106},
-	{"memclrHasPointers", funcTag, 106},
-	{"memequal", funcTag, 107},
-	{"memequal0", funcTag, 108},
-	{"memequal8", funcTag, 108},
-	{"memequal16", funcTag, 108},
-	{"memequal32", funcTag, 108},
-	{"memequal64", funcTag, 108},
-	{"memequal128", funcTag, 108},
-	{"f32equal", funcTag, 109},
-	{"f64equal", funcTag, 109},
-	{"c64equal", funcTag, 109},
-	{"c128equal", funcTag, 109},
-	{"strequal", funcTag, 109},
-	{"interequal", funcTag, 109},
-	{"nilinterequal", funcTag, 109},
-	{"memhash", funcTag, 110},
-	{"memhash0", funcTag, 111},
-	{"memhash8", funcTag, 111},
-	{"memhash16", funcTag, 111},
-	{"memhash32", funcTag, 111},
-	{"memhash64", funcTag, 111},
-	{"memhash128", funcTag, 111},
-	{"f32hash", funcTag, 111},
-	{"f64hash", funcTag, 111},
-	{"c64hash", funcTag, 111},
-	{"c128hash", funcTag, 111},
-	{"strhash", funcTag, 111},
-	{"interhash", funcTag, 111},
-	{"nilinterhash", funcTag, 111},
-	{"int64div", funcTag, 112},
-	{"uint64div", funcTag, 113},
-	{"int64mod", funcTag, 112},
-	{"uint64mod", funcTag, 113},
-	{"float64toint64", funcTag, 114},
-	{"float64touint64", funcTag, 115},
-	{"float64touint32", funcTag, 116},
-	{"int64tofloat64", funcTag, 117},
-	{"uint64tofloat64", funcTag, 118},
-	{"uint32tofloat64", funcTag, 119},
-	{"complex128div", funcTag, 120},
-	{"racefuncenter", funcTag, 31},
-	{"racefuncenterfp", funcTag, 9},
-	{"racefuncexit", funcTag, 9},
-	{"raceread", funcTag, 31},
-	{"racewrite", funcTag, 31},
-	{"racereadrange", funcTag, 121},
-	{"racewriterange", funcTag, 121},
-	{"msanread", funcTag, 121},
-	{"msanwrite", funcTag, 121},
-	{"msanmove", funcTag, 122},
-	{"checkptrAlignment", funcTag, 123},
-	{"checkptrArithmetic", funcTag, 125},
-	{"libfuzzerTraceCmp1", funcTag, 127},
-	{"libfuzzerTraceCmp2", funcTag, 129},
-	{"libfuzzerTraceCmp4", funcTag, 130},
-	{"libfuzzerTraceCmp8", funcTag, 131},
-	{"libfuzzerTraceConstCmp1", funcTag, 127},
-	{"libfuzzerTraceConstCmp2", funcTag, 129},
-	{"libfuzzerTraceConstCmp4", funcTag, 130},
-	{"libfuzzerTraceConstCmp8", funcTag, 131},
-	{"x86HasPOPCNT", varTag, 6},
-	{"x86HasSSE41", varTag, 6},
-	{"x86HasFMA", varTag, 6},
-	{"armHasVFPv4", varTag, 6},
-	{"arm64HasATOMICS", varTag, 6},
-}
-
-func runtimeTypes() []*types.Type {
-	var typs [132]*types.Type
-	typs[0] = types.Bytetype
-	typs[1] = types.NewPtr(typs[0])
-	typs[2] = types.Types[TANY]
-	typs[3] = types.NewPtr(typs[2])
-	typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
-	typs[5] = types.Types[TUINTPTR]
-	typs[6] = types.Types[TBOOL]
-	typs[7] = types.Types[TUNSAFEPTR]
-	typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
-	typs[9] = functype(nil, nil, nil)
-	typs[10] = types.Types[TINTER]
-	typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
-	typs[12] = types.Types[TINT32]
-	typs[13] = types.NewPtr(typs[12])
-	typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
-	typs[15] = types.Types[TINT]
-	typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
-	typs[17] = types.Types[TUINT]
-	typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
-	typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
-	typs[20] = types.Types[TFLOAT64]
-	typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
-	typs[22] = types.Types[TINT64]
-	typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
-	typs[24] = types.Types[TUINT64]
-	typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
-	typs[26] = types.Types[TCOMPLEX128]
-	typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
-	typs[28] = types.Types[TSTRING]
-	typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
-	typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
-	typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
-	typs[32] = types.NewArray(typs[0], 32)
-	typs[33] = types.NewPtr(typs[32])
-	typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[38] = types.NewSlice(typs[28])
-	typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
-	typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
-	typs[41] = types.NewArray(typs[0], 4)
-	typs[42] = types.NewPtr(typs[41])
-	typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
-	typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
-	typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
-	typs[46] = types.Runetype
-	typs[47] = types.NewSlice(typs[46])
-	typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
-	typs[49] = types.NewSlice(typs[0])
-	typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
-	typs[51] = types.NewArray(typs[46], 32)
-	typs[52] = types.NewPtr(typs[51])
-	typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
-	typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
-	typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
-	typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
-	typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
-	typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
-	typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
-	typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
-	typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
-	typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
-	typs[63] = types.NewPtr(typs[5])
-	typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
-	typs[65] = types.Types[TUINT32]
-	typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
-	typs[67] = types.NewMap(typs[2], typs[2])
-	typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
-	typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
-	typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
-	typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
-	typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
-	typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
-	typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
-	typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
-	typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
-	typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
-	typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
-	typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
-	typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
-	typs[81] = types.NewChan(typs[2], types.Cboth)
-	typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
-	typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
-	typs[84] = types.NewChan(typs[2], types.Crecv)
-	typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
-	typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
-	typs[87] = types.NewChan(typs[2], types.Csend)
-	typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
-	typs[89] = types.NewArray(typs[0], 3)
-	typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
-	typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
-	typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
-	typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
-	typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
-	typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
-	typs[96] = types.NewPtr(typs[6])
-	typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
-	typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
-	typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
-	typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
-	typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
-	typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
-	typs[103] = types.NewSlice(typs[2])
-	typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
-	typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
-	typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
-	typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
-	typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
-	typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
-	typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
-	typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
-	typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
-	typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
-	typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
-	typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
-	typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
-	typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
-	typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
-	typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
-	typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
-	typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
-	typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
-	typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
-	typs[124] = types.NewSlice(typs[7])
-	typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
-	typs[126] = types.Types[TUINT8]
-	typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
-	typs[128] = types.Types[TUINT16]
-	typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
-	typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
-	typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
-	return typs[:]
-}
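
The two generated tables are parallel: each runtimeDecls entry's typ field is an index into the slice built by runtimeTypes, so a type expression is constructed once and shared by every declaration that uses it. A sketch of the pairing, with strings standing in for *types.Type and the indices compressed (the signatures do match the real entries for these three names):

package main

import "fmt"

type decl struct {
	name string
	typ  int // index into the shared type table
}

func main() {
	typs := []string{"func()", "func() uint32", "func(string) int"}
	decls := []decl{
		{"printnl", 0},
		{"fastrand", 1},
		{"countrunes", 2},
	}
	for _, d := range decls {
		fmt.Printf("runtime.%s has type %s\n", d.name, typs[d.typ])
	}
}
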
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
deleted file mode 100644
index e32ab97..0000000
--- a/src/cmd/compile/internal/gc/bv.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"math/bits"
-)
-
-const (
-	wordBits  = 32
-	wordMask  = wordBits - 1
-	wordShift = 5
-)
-
-// A bvec is a bit vector.
-type bvec struct {
-	n int32    // number of bits in vector
-	b []uint32 // words holding bits
-}
-
-func bvalloc(n int32) bvec {
-	nword := (n + wordBits - 1) / wordBits
-	return bvec{n, make([]uint32, nword)}
-}
-
-type bulkBvec struct {
-	words []uint32
-	nbit  int32
-	nword int32
-}
-
-func bvbulkalloc(nbit int32, count int32) bulkBvec {
-	nword := (nbit + wordBits - 1) / wordBits
-	size := int64(nword) * int64(count)
-	if int64(int32(size*4)) != size*4 {
-		Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
-	}
-	return bulkBvec{
-		words: make([]uint32, size),
-		nbit:  nbit,
-		nword: nword,
-	}
-}
-
-func (b *bulkBvec) next() bvec {
-	out := bvec{b.nbit, b.words[:b.nword]}
-	b.words = b.words[b.nword:]
-	return out
-}
-
-func (bv1 bvec) Eq(bv2 bvec) bool {
-	if bv1.n != bv2.n {
-		Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
-	}
-	for i, x := range bv1.b {
-		if x != bv2.b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func (dst bvec) Copy(src bvec) {
-	copy(dst.b, src.b)
-}
-
-func (bv bvec) Get(i int32) bool {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%wordBits))
-	return bv.b[i>>wordShift]&mask != 0
-}
-
-func (bv bvec) Set(i int32) {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%wordBits))
-	bv.b[i/wordBits] |= mask
-}
-
-func (bv bvec) Unset(i int32) {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%wordBits))
-	bv.b[i/wordBits] &^= mask
-}
-
-// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
-// If there is no such index, bvnext returns -1.
-func (bv bvec) Next(i int32) int32 {
-	if i >= bv.n {
-		return -1
-	}
-
-	// Jump i ahead to next word with bits.
-	if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
-		i &^= wordMask
-		i += wordBits
-		for i < bv.n && bv.b[i>>wordShift] == 0 {
-			i += wordBits
-		}
-	}
-
-	if i >= bv.n {
-		return -1
-	}
-
-	// Find 1 bit.
-	w := bv.b[i>>wordShift] >> uint(i&wordMask)
-	i += int32(bits.TrailingZeros32(w))
-
-	return i
-}
-
-func (bv bvec) IsEmpty() bool {
-	for _, x := range bv.b {
-		if x != 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (bv bvec) Not() {
-	for i, x := range bv.b {
-		bv.b[i] = ^x
-	}
-}
-
-// union
-func (dst bvec) Or(src1, src2 bvec) {
-	if len(src1.b) == 0 {
-		return
-	}
-	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
-	for i, x := range src1.b {
-		dst.b[i] = x | src2.b[i]
-	}
-}
-
-// intersection
-func (dst bvec) And(src1, src2 bvec) {
-	if len(src1.b) == 0 {
-		return
-	}
-	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
-	for i, x := range src1.b {
-		dst.b[i] = x & src2.b[i]
-	}
-}
-
-// difference
-func (dst bvec) AndNot(src1, src2 bvec) {
-	if len(src1.b) == 0 {
-		return
-	}
-	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
-	for i, x := range src1.b {
-		dst.b[i] = x &^ src2.b[i]
-	}
-}
-
-func (bv bvec) String() string {
-	s := make([]byte, 2+bv.n)
-	copy(s, "#*")
-	for i := int32(0); i < bv.n; i++ {
-		ch := byte('0')
-		if bv.Get(i) {
-			ch = '1'
-		}
-		s[2+i] = ch
-	}
-	return string(s)
-}
-
-func (bv bvec) Clear() {
-	for i := range bv.b {
-		bv.b[i] = 0
-	}
-}
-
-// FNV-1 hash function constants.
-const (
-	H0 = 2166136261
-	Hp = 16777619
-)
-
-func hashbitmap(h uint32, bv bvec) uint32 {
-	n := int((bv.n + 31) / 32)
-	for i := 0; i < n; i++ {
-		w := bv.b[i]
-		h = (h * Hp) ^ (w & 0xff)
-		h = (h * Hp) ^ ((w >> 8) & 0xff)
-		h = (h * Hp) ^ ((w >> 16) & 0xff)
-		h = (h * Hp) ^ ((w >> 24) & 0xff)
-	}
-
-	return h
-}
-
-// bvecSet is a set of bvecs, in initial insertion order.
-type bvecSet struct {
-	index []int  // hash -> uniq index. -1 indicates empty slot.
-	uniq  []bvec // unique bvecs, in insertion order
-}
-
-func (m *bvecSet) grow() {
-	// Allocate new index.
-	n := len(m.index) * 2
-	if n == 0 {
-		n = 32
-	}
-	newIndex := make([]int, n)
-	for i := range newIndex {
-		newIndex[i] = -1
-	}
-
-	// Rehash into newIndex.
-	for i, bv := range m.uniq {
-		h := hashbitmap(H0, bv) % uint32(len(newIndex))
-		for {
-			j := newIndex[h]
-			if j < 0 {
-				newIndex[h] = i
-				break
-			}
-			h++
-			if h == uint32(len(newIndex)) {
-				h = 0
-			}
-		}
-	}
-	m.index = newIndex
-}
-
-// add adds bv to the set and returns its index in the result of
-// m.extractUniqe. The caller must not modify bv after this.
-func (m *bvecSet) add(bv bvec) int {
-	if len(m.uniq)*4 >= len(m.index) {
-		m.grow()
-	}
-
-	index := m.index
-	h := hashbitmap(H0, bv) % uint32(len(index))
-	for {
-		j := index[h]
-		if j < 0 {
-			// New bvec.
-			index[h] = len(m.uniq)
-			m.uniq = append(m.uniq, bv)
-			return len(m.uniq) - 1
-		}
-		jlive := m.uniq[j]
-		if bv.Eq(jlive) {
-			// Existing bvec.
-			return j
-		}
-
-		h++
-		if h == uint32(len(index)) {
-			h = 0
-		}
-	}
-}
-
-// extractUniqe returns the slice of unique bit vectors in m, as
-// indexed by the result of bvecSet.add.
-func (m *bvecSet) extractUniqe() []bvec {
-	return m.uniq
-}
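
bvec's Next supports the usual sparse-iteration idiom: skip whole empty words, then use TrailingZeros32 to land on the next set bit. A stripped-down standalone version (names shortened; the word size matches the constants above):

package main

import (
	"fmt"
	"math/bits"
)

type bv struct {
	n int32    // number of bits
	b []uint32 // words holding bits
}

func newbv(n int32) bv { return bv{n, make([]uint32, (n+31)/32)} }

func (v bv) set(i int32) { v.b[i/32] |= 1 << uint(i%32) }

// next returns the smallest set index >= i, or -1 if there is none.
func (v bv) next(i int32) int32 {
	for ; i < v.n; i += 32 - i%32 { // advance to the next word boundary
		if w := v.b[i/32] >> uint(i%32); w != 0 {
			return i + int32(bits.TrailingZeros32(w))
		}
	}
	return -1
}

func main() {
	v := newbv(100)
	v.set(3)
	v.set(64)
	v.set(99)
	for i := v.next(0); i >= 0; i = v.next(i + 1) {
		fmt.Println(i) // 3, then 64, then 99
	}
}

The loop in main has the same shape the liveness code uses to walk its bitmaps: each step costs one word scan rather than one probe per bit.
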
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
deleted file mode 100644
index bd350f6..0000000
--- a/src/cmd/compile/internal/gc/closure.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/syntax"
-	"cmd/compile/internal/types"
-	"fmt"
-)
-
-func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
-	xtype := p.typeExpr(expr.Type)
-	ntype := p.typeExpr(expr.Type)
-
-	xfunc := p.nod(expr, ODCLFUNC, nil, nil)
-	xfunc.Func.SetIsHiddenClosure(Curfn != nil)
-	xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
-	xfunc.Func.Nname.Name.Param.Ntype = xtype
-	xfunc.Func.Nname.Name.Defn = xfunc
-
-	clo := p.nod(expr, OCLOSURE, nil, nil)
-	clo.Func.Ntype = ntype
-
-	xfunc.Func.Closure = clo
-	clo.Func.Closure = xfunc
-
-	p.funcBody(xfunc, expr.Body)
-
-	// Closure-specific variables are hanging off the
-	// ordinary ones in the symbol table; see oldname.
-	// Unhook them, and make the list of pointers
-	// for the closure call.
-	for _, v := range xfunc.Func.Cvars.Slice() {
-		// Unlink from v1; see comment in syntax.go type Param for these fields.
-		v1 := v.Name.Defn
-		v1.Name.Param.Innermost = v.Name.Param.Outer
-
-		// If the closure usage of v is not dense,
-		// we need to make it dense; now that we're out
-		// of the function in which v appeared,
-		// look up v.Sym in the enclosing function
-		// and keep it around for use in the compiled code.
-		//
-		// That is, suppose we just finished parsing the innermost
-		// closure f4 in this code:
-		//
-		//	func f() {
-		//		v := 1
-		//		func() { // f2
-		//			use(v)
-		//			func() { // f3
-		//				func() { // f4
-		//					use(v)
-		//				}()
-		//			}()
-		//		}()
-		//	}
-		//
-		// At this point v.Outer is f2's v; there is no f3's v.
-		// To construct the closure f4 from within f3,
-		// we need to use f3's v and in this case we need to create f3's v.
-		// We are now in the context of f3, so calling oldname(v.Sym)
-		// obtains f3's v, creating it if necessary (as it is in the example).
-		//
-		// capturevars will decide whether to use v directly or &v.
-		v.Name.Param.Outer = oldname(v.Sym)
-	}
-
-	return clo
-}
-
-// typecheckclosure typechecks an OCLOSURE node. It also creates the named
-// function associated with the closure.
-// TODO: This creation of the named function should probably really be done in a
-// separate pass from type-checking.
-func typecheckclosure(clo *Node, top int) {
-	xfunc := clo.Func.Closure
-	// Set current associated iota value, so iota can be used inside
-	// function in ConstSpec, see issue #22344
-	if x := getIotaValue(); x >= 0 {
-		xfunc.SetIota(x)
-	}
-
-	clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
-	clo.Type = clo.Func.Ntype.Type
-	clo.Func.Top = top
-
-	// Do not typecheck xfunc twice; otherwise we will end up pushing
-	// xfunc to xtop multiple times, causing initLSym to be called twice.
-	// See #30709.
-	if xfunc.Typecheck() == 1 {
-		return
-	}
-
-	for _, ln := range xfunc.Func.Cvars.Slice() {
-		n := ln.Name.Defn
-		if !n.Name.Captured() {
-			n.Name.SetCaptured(true)
-			if n.Name.Decldepth == 0 {
-				Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
-			}
-
-			// Ignore assignments to the variable in straightline code
-			// preceding the first capture by a closure.
-			if n.Name.Decldepth == decldepth {
-				n.Name.SetAssigned(false)
-			}
-		}
-	}
-
-	xfunc.Func.Nname.Sym = closurename(Curfn)
-	setNodeNameFunc(xfunc.Func.Nname)
-	xfunc = typecheck(xfunc, ctxStmt)
-
-	// Type check the body now, but only if we're inside a function.
-	// At top level (in a variable initialization: curfn==nil) we're not
-	// ready to type check code yet; we'll check it later, because the
-	// underlying closure function we create is added to xtop.
-	if Curfn != nil && clo.Type != nil {
-		oldfn := Curfn
-		Curfn = xfunc
-		olddd := decldepth
-		decldepth = 1
-		typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
-		decldepth = olddd
-		Curfn = oldfn
-	}
-
-	xtop = append(xtop, xfunc)
-}
-
-// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int
-
-// closurename generates a new unique name for a closure within
-// outerfunc.
-func closurename(outerfunc *Node) *types.Sym {
-	outer := "glob."
-	prefix := "func"
-	gen := &globClosgen
-
-	if outerfunc != nil {
-		if outerfunc.Func.Closure != nil {
-			prefix = ""
-		}
-
-		outer = outerfunc.funcname()
-
-		// There may be multiple functions named "_". In those
-		// cases, we can't use their individual Closgens as it
-		// would lead to name clashes.
-		if !outerfunc.Func.Nname.isBlank() {
-			gen = &outerfunc.Func.Closgen
-		}
-	}
-
-	*gen++
-	return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
-}
-
-// capturevarscomplete is set to true when the capturevars phase is done.
-var capturevarscomplete bool
-
-// capturevars is called in a separate phase after all typechecking is done.
-// It decides whether each variable captured by a closure should be captured
-// by value or by reference.
-// We use value capturing for values <= 128 bytes that are never reassigned
-// after capturing (effectively constant).
-func capturevars(xfunc *Node) {
-	lno := lineno
-	lineno = xfunc.Pos
-
-	clo := xfunc.Func.Closure
-	cvars := xfunc.Func.Cvars.Slice()
-	out := cvars[:0]
-	for _, v := range cvars {
-		if v.Type == nil {
-			// If v.Type is nil, it means v looked like it
-			// was going to be used in the closure, but
-			// isn't. This happens in struct literals like
-			// s{f: x} where we can't distinguish whether
-			// f is a field identifier or expression until
-			// resolving s.
-			continue
-		}
-		out = append(out, v)
-
-		// type check the & of closed variables outside the closure,
-		// so that the outer frame also grabs them and knows they escape.
-		dowidth(v.Type)
-
-		outer := v.Name.Param.Outer
-		outermost := v.Name.Defn
-
-		// out parameters will be assigned to implicitly upon return.
-		if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
-			v.Name.SetByval(true)
-		} else {
-			outermost.Name.SetAddrtaken(true)
-			outer = nod(OADDR, outer, nil)
-		}
-
-		if Debug.m > 1 {
-			var name *types.Sym
-			if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
-				name = v.Name.Curfn.Func.Nname.Sym
-			}
-			how := "ref"
-			if v.Name.Byval() {
-				how = "value"
-			}
-			Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
-		}
-
-		outer = typecheck(outer, ctxExpr)
-		clo.Func.Enter.Append(outer)
-	}
-
-	xfunc.Func.Cvars.Set(out)
-	lineno = lno
-}
-
-// transformclosure is called in a separate phase after escape analysis.
-// It transforms closure bodies to properly reference captured variables.
-func transformclosure(xfunc *Node) {
-	lno := lineno
-	lineno = xfunc.Pos
-	clo := xfunc.Func.Closure
-
-	if clo.Func.Top&ctxCallee != 0 {
-		// If the closure is directly called, we transform it to a plain function call
-		// with variables passed as args. This avoids allocation of a closure object.
-		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
-		// will complete the transformation later.
-		// For illustration, the following closure:
-		//	func(a int) {
-		//		println(byval)
-		//		byref++
-		//	}(42)
-		// becomes:
-		//	func(byval int, &byref *int, a int) {
-		//		println(byval)
-		//		(*&byref)++
-		//	}(byval, &byref, 42)
-
-		// f is ONAME of the actual function.
-		f := xfunc.Func.Nname
-
-		// We are going to insert captured variables before input args.
-		var params []*types.Field
-		var decls []*Node
-		for _, v := range xfunc.Func.Cvars.Slice() {
-			if !v.Name.Byval() {
-				// If v of type T is captured by reference,
-				// we introduce function param &v *T
-				// and v remains PAUTOHEAP with &v heapaddr
-				// (accesses will implicitly deref &v).
-				addr := newname(lookup("&" + v.Sym.Name))
-				addr.Type = types.NewPtr(v.Type)
-				v.Name.Param.Heapaddr = addr
-				v = addr
-			}
-
-			v.SetClass(PPARAM)
-			decls = append(decls, v)
-
-			fld := types.NewField()
-			fld.Nname = asTypesNode(v)
-			fld.Type = v.Type
-			fld.Sym = v.Sym
-			params = append(params, fld)
-		}
-
-		if len(params) > 0 {
-			// Prepend params and decls.
-			f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
-			xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
-		}
-
-		dowidth(f.Type)
-		xfunc.Type = f.Type // update type of ODCLFUNC
-	} else {
-		// The closure is not called, so it will remain a closure.
-		var body []*Node
-		offset := int64(Widthptr)
-		for _, v := range xfunc.Func.Cvars.Slice() {
-			// cv refers to the field inside of closure OSTRUCTLIT.
-			cv := nod(OCLOSUREVAR, nil, nil)
-
-			cv.Type = v.Type
-			if !v.Name.Byval() {
-				cv.Type = types.NewPtr(v.Type)
-			}
-			offset = Rnd(offset, int64(cv.Type.Align))
-			cv.Xoffset = offset
-			offset += cv.Type.Width
-
-			if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
-				// If it is a small variable captured by value, downgrade it to PAUTO.
-				v.SetClass(PAUTO)
-				xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
-				body = append(body, nod(OAS, v, cv))
-			} else {
-				// Declare variable holding addresses taken from closure
-				// and initialize in entry prologue.
-				addr := newname(lookup("&" + v.Sym.Name))
-				addr.Type = types.NewPtr(v.Type)
-				addr.SetClass(PAUTO)
-				addr.Name.SetUsed(true)
-				addr.Name.Curfn = xfunc
-				xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
-				v.Name.Param.Heapaddr = addr
-				if v.Name.Byval() {
-					cv = nod(OADDR, cv, nil)
-				}
-				body = append(body, nod(OAS, addr, cv))
-			}
-		}
-
-		if len(body) > 0 {
-			typecheckslice(body, ctxStmt)
-			xfunc.Func.Enter.Set(body)
-			xfunc.Func.SetNeedctxt(true)
-		}
-	}
-
-	lineno = lno
-}
-
-// hasemptycvars reports whether closure clo has an
-// empty list of captured vars.
-func hasemptycvars(clo *Node) bool {
-	xfunc := clo.Func.Closure
-	return xfunc.Func.Cvars.Len() == 0
-}
-
-// closuredebugruntimecheck applies boilerplate checks for debug flags
-// and compiling runtime
-func closuredebugruntimecheck(clo *Node) {
-	if Debug_closure > 0 {
-		xfunc := clo.Func.Closure
-		if clo.Esc == EscHeap {
-			Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
-		} else {
-			Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
-		}
-	}
-	if compiling_runtime && clo.Esc == EscHeap {
-		yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
-	}
-}
-
-// closureType returns the struct type used to hold all the information
-// needed in the closure for clo (clo must be an OCLOSURE node).
-// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *Node) *types.Type {
-	// Create closure in the form of a composite literal.
-	// supposing the closure captures an int i and a string s
-	// and has one float64 argument and no results,
-	// the generated code looks like:
-	//
-	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
-	//
-	// The use of the struct provides type information to the garbage
-	// collector so that it can walk the closure. We could use (in this case)
-	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
-	// The information appears in the binary in the form of type descriptors;
-	// the struct is unnamed so that closures in multiple packages with the
-	// same struct type can share the descriptor.
-	fields := []*Node{
-		namedfield(".F", types.Types[TUINTPTR]),
-	}
-	for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
-		typ := v.Type
-		if !v.Name.Byval() {
-			typ = types.NewPtr(typ)
-		}
-		fields = append(fields, symfield(v.Sym, typ))
-	}
-	typ := tostruct(fields)
-	typ.SetNoalg(true)
-	return typ
-}
-
-func walkclosure(clo *Node, init *Nodes) *Node {
-	xfunc := clo.Func.Closure
-
-	// If no closure vars, don't bother wrapping.
-	if hasemptycvars(clo) {
-		if Debug_closure > 0 {
-			Warnl(clo.Pos, "closure converted to global")
-		}
-		return xfunc.Func.Nname
-	}
-	closuredebugruntimecheck(clo)
-
-	typ := closureType(clo)
-
-	clos := nod(OCOMPLIT, nil, typenod(typ))
-	clos.Esc = clo.Esc
-	clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
-
-	clos = nod(OADDR, clos, nil)
-	clos.Esc = clo.Esc
-
-	// Force type conversion from *struct to the func type.
-	clos = convnop(clos, clo.Type)
-
-	// non-escaping temp to use, if any.
-	if x := prealloc[clo]; x != nil {
-		if !types.Identical(typ, x.Type) {
-			panic("closure type does not match order's assigned type")
-		}
-		clos.Left.Right = x
-		delete(prealloc, clo)
-	}
-
-	return walkexpr(clos, init)
-}
-
-func typecheckpartialcall(fn *Node, sym *types.Sym) {
-	switch fn.Op {
-	case ODOTINTER, ODOTMETH:
-		break
-
-	default:
-		Fatalf("invalid typecheckpartialcall")
-	}
-
-	// Create top-level function.
-	xfunc := makepartialcall(fn, fn.Type, sym)
-	fn.Func = xfunc.Func
-	fn.Func.SetWrapper(true)
-	fn.Right = newname(sym)
-	fn.Op = OCALLPART
-	fn.Type = xfunc.Type
-}
-
-// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
-// for partial calls.
-func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
-	rcvrtype := fn.Left.Type
-	sym := methodSymSuffix(rcvrtype, meth, "-fm")
-
-	if sym.Uniq() {
-		return asNode(sym.Def)
-	}
-	sym.SetUniq(true)
-
-	savecurfn := Curfn
-	saveLineNo := lineno
-	Curfn = nil
-
-	// Set line number equal to the line number where the method is declared.
-	var m *types.Field
-	if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
-		lineno = m.Pos
-	}
-	// Note: !m.Pos.IsKnown() happens for method expressions where
-	// the method is implicitly declared. The Error method of the
-	// built-in error type is one such method.  We leave the line
-	// number at the use of the method expression in this
-	// case. See issue 29389.
-
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Set(structargs(t0.Params(), true))
-	tfn.Rlist.Set(structargs(t0.Results(), false))
-
-	xfunc := dclfunc(sym, tfn)
-	xfunc.Func.SetDupok(true)
-	xfunc.Func.SetNeedctxt(true)
-
-	tfn.Type.SetPkg(t0.Pkg())
-
-	// Declare and initialize variable holding receiver.
-
-	cv := nod(OCLOSUREVAR, nil, nil)
-	cv.Type = rcvrtype
-	cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
-
-	ptr := newname(lookup(".this"))
-	declare(ptr, PAUTO)
-	ptr.Name.SetUsed(true)
-	var body []*Node
-	if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
-		ptr.Type = rcvrtype
-		body = append(body, nod(OAS, ptr, cv))
-	} else {
-		ptr.Type = types.NewPtr(rcvrtype)
-		body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
-	}
-
-	call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
-	call.List.Set(paramNnames(tfn.Type))
-	call.SetIsDDD(tfn.Type.IsVariadic())
-	if t0.NumResults() != 0 {
-		n := nod(ORETURN, nil, nil)
-		n.List.Set1(call)
-		call = n
-	}
-	body = append(body, call)
-
-	xfunc.Nbody.Set(body)
-	funcbody()
-
-	xfunc = typecheck(xfunc, ctxStmt)
-	// Need to typecheck the body of the just-generated wrapper.
-	// typecheckslice() requires that Curfn is set when processing an ORETURN.
-	Curfn = xfunc
-	typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
-	sym.Def = asTypesNode(xfunc)
-	xtop = append(xtop, xfunc)
-	Curfn = savecurfn
-	lineno = saveLineNo
-
-	return xfunc
-}
-
-// partialCallType returns the struct type used to hold all the information
-// needed in the closure for n (n must be an OCALLPART node).
-// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *Node) *types.Type {
-	t := tostruct([]*Node{
-		namedfield("F", types.Types[TUINTPTR]),
-		namedfield("R", n.Left.Type),
-	})
-	t.SetNoalg(true)
-	return t
-}
-
-func walkpartialcall(n *Node, init *Nodes) *Node {
-	// Create closure in the form of a composite literal.
-	// For x.M with receiver (x) type T, the generated code looks like:
-	//
-	//	clos = &struct{F uintptr; R T}{T.M·f, x}
-	//
-	// Like walkclosure above.
-
-	if n.Left.Type.IsInterface() {
-		// Trigger panic for method on nil interface now.
-		// Otherwise it happens in the wrapper and is confusing.
-		n.Left = cheapexpr(n.Left, init)
-		n.Left = walkexpr(n.Left, nil)
-
-		tab := nod(OITAB, n.Left, nil)
-		tab = typecheck(tab, ctxExpr)
-
-		c := nod(OCHECKNIL, tab, nil)
-		c.SetTypecheck(1)
-		init.Append(c)
-	}
-
-	typ := partialCallType(n)
-
-	clos := nod(OCOMPLIT, nil, typenod(typ))
-	clos.Esc = n.Esc
-	clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
-
-	clos = nod(OADDR, clos, nil)
-	clos.Esc = n.Esc
-
-	// Force type conversion from *struct to the func type.
-	clos = convnop(clos, n.Type)
-
-	// non-escaping temp to use, if any.
-	if x := prealloc[n]; x != nil {
-		if !types.Identical(typ, x.Type) {
-			panic("partial call type does not match order's assigned type")
-		}
-		clos.Left.Right = x
-		delete(prealloc, n)
-	}
-
-	return walkexpr(clos, init)
-}
-
-// callpartMethod returns the *types.Field representing the method
-// referenced by method value n.
-func callpartMethod(n *Node) *types.Field {
-	if n.Op != OCALLPART {
-		Fatalf("expected OCALLPART, got %v", n)
-	}
-
-	// TODO(mdempsky): Optimize this. If necessary,
-	// makepartialcall could save m for us somewhere.
-	var m *types.Field
-	if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
-		Fatalf("failed to find field for OCALLPART")
-	}
-
-	return m
-}
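
capturevars' rule above (capture by value only for variables of width <= 128 bytes that are never reassigned after capture) is an optimization that must be invisible at the language level. A small program showing the observable side of it: reassignment is exactly what forces reference capture. The compiler's actual by-value/by-reference choice cannot be distinguished from Go code:

package main

import "fmt"

func main() {
	x := 1
	fx := func() int { return x } // x never reassigned: a by-value candidate

	y := 1
	fy := func() int { return y } // y reassigned below: the closure must
	y = 2                         // observe the variable, not a snapshot

	fmt.Println(fx(), fy()) // prints: 1 2
}
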
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
new file mode 100644
index 0000000..ba67c58
--- /dev/null
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -0,0 +1,147 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"internal/race"
+	"math/rand"
+	"sort"
+	"sync"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/liveness"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/compile/internal/walk"
+)
+
+// "Portable" code generation.
+
+var (
+	compilequeue []*ir.Func // functions waiting to be compiled
+)
+
+func enqueueFunc(fn *ir.Func) {
+	if ir.CurFunc != nil {
+		base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
+	}
+
+	if ir.FuncName(fn) == "_" {
+		// Skip compiling blank functions.
+		// Frontend already reported any spec-mandated errors (#29870).
+		return
+	}
+
+	if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
+		return // we'll get this as part of its enclosing function
+	}
+
+	if len(fn.Body) == 0 {
+		// Initialize ABI wrappers if necessary.
+		ssagen.InitLSym(fn, false)
+		liveness.WriteFuncMap(fn)
+		return
+	}
+
+	errorsBefore := base.Errors()
+
+	todo := []*ir.Func{fn}
+	for len(todo) > 0 {
+		next := todo[len(todo)-1]
+		todo = todo[:len(todo)-1]
+
+		prepareFunc(next)
+		todo = append(todo, next.Closures...)
+	}
+
+	if base.Errors() > errorsBefore {
+		return
+	}
+
+	// Enqueue just fn itself. compileFunctions will handle
+	// scheduling compilation of its closures after it's done.
+	compilequeue = append(compilequeue, fn)
+}
+
+// prepareFunc handles any remaining frontend compilation tasks that
+// aren't yet safe to perform concurrently.
+func prepareFunc(fn *ir.Func) {
+	// Set up the function's LSym early to avoid data races with the assemblers.
+	// Do this before walk, as walk needs the LSym to set attributes/relocations
+	// (e.g. in MarkTypeUsedInInterface).
+	ssagen.InitLSym(fn, true)
+
+	// Calculate parameter offsets.
+	types.CalcSize(fn.Type())
+
+	typecheck.DeclContext = ir.PAUTO
+	ir.CurFunc = fn
+	walk.Walk(fn)
+	ir.CurFunc = nil // enforce no further uses of CurFunc
+	typecheck.DeclContext = ir.PEXTERN
+}
+
+// compileFunctions compiles all functions in compilequeue.
+// It fans out the work to base.Flag.LowerC (-c) worker goroutines
+// and waits for them to complete.
+func compileFunctions() {
+	if len(compilequeue) == 0 {
+		return
+	}
+
+	if race.Enabled {
+		// Randomize compilation order to try to shake out races.
+		tmp := make([]*ir.Func, len(compilequeue))
+		perm := rand.Perm(len(compilequeue))
+		for i, v := range perm {
+			tmp[v] = compilequeue[i]
+		}
+		copy(compilequeue, tmp)
+	} else {
+		// Compile the longest functions first,
+		// since they're most likely to be the slowest.
+		// This helps avoid stragglers.
+		sort.Slice(compilequeue, func(i, j int) bool {
+			return len(compilequeue[i].Body) > len(compilequeue[j].Body)
+		})
+	}
+
+	// We queue up a goroutine per function that needs to be
+	// compiled, but to limit parallelism each goroutine must grab
+	// an available worker ID before doing any substantial work.
+	workerIDs := make(chan int, base.Flag.LowerC)
+	for i := 0; i < base.Flag.LowerC; i++ {
+		workerIDs <- i
+	}
+
+	var wg sync.WaitGroup
+	var asyncCompile func(*ir.Func)
+	asyncCompile = func(fn *ir.Func) {
+		wg.Add(1)
+		go func() {
+			worker := <-workerIDs
+			ssagen.Compile(fn, worker)
+			workerIDs <- worker
+
+			// Done compiling fn. Schedule its closures for compilation.
+			for _, closure := range fn.Closures {
+				asyncCompile(closure)
+			}
+			wg.Done()
+		}()
+	}
+
+	types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
+	base.Ctxt.InParallel = true
+	for _, fn := range compilequeue {
+		asyncCompile(fn)
+	}
+	compilequeue = nil
+	wg.Wait()
+	base.Ctxt.InParallel = false
+	types.CalcSizeDisabled = false
+}
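The worker-ID channel in compileFunctions is a standard Go pattern for bounding parallelism: a buffered channel holds one token per worker, and each goroutine must take a token before doing substantial work. A minimal self-contained sketch of the same idea (illustrative only; constants and names are hypothetical):

	// Standalone sketch: bounding parallelism with a channel of worker IDs.
	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		const nWorkers = 4
		workerIDs := make(chan int, nWorkers)
		for i := 0; i < nWorkers; i++ {
			workerIDs <- i
		}

		var wg sync.WaitGroup
		for task := 0; task < 10; task++ {
			wg.Add(1)
			go func(task int) {
				defer wg.Done()
				id := <-workerIDs // blocks until a worker slot is free
				fmt.Printf("task %d ran on worker %d\n", task, id)
				workerIDs <- id // release the slot
			}(task)
		}
		wg.Wait()
	}
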
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
deleted file mode 100644
index b92c8d6..0000000
--- a/src/cmd/compile/internal/gc/const.go
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-	"math/big"
-	"strings"
-)
-
-// Ctype describes the constant kind of an "ideal" (untyped) constant.
-type Ctype uint8
-
-const (
-	CTxxx Ctype = iota
-
-	CTINT
-	CTRUNE
-	CTFLT
-	CTCPLX
-	CTSTR
-	CTBOOL
-	CTNIL
-)
-
-type Val struct {
-	// U contains one of:
-	// bool     bool when Ctype() == CTBOOL
-	// *Mpint   int when Ctype() == CTINT, rune when Ctype() == CTRUNE
-	// *Mpflt   float when Ctype() == CTFLT
-	// *Mpcplx  pair of floats when Ctype() == CTCPLX
-	// string   string when Ctype() == CTSTR
-	// *Nilval  when Ctype() == CTNIL
-	U interface{}
-}
-
-func (v Val) Ctype() Ctype {
-	switch x := v.U.(type) {
-	default:
-		Fatalf("unexpected Ctype for %T", v.U)
-		panic("unreachable")
-	case nil:
-		return CTxxx
-	case *NilVal:
-		return CTNIL
-	case bool:
-		return CTBOOL
-	case *Mpint:
-		if x.Rune {
-			return CTRUNE
-		}
-		return CTINT
-	case *Mpflt:
-		return CTFLT
-	case *Mpcplx:
-		return CTCPLX
-	case string:
-		return CTSTR
-	}
-}
-
-func eqval(a, b Val) bool {
-	if a.Ctype() != b.Ctype() {
-		return false
-	}
-	switch x := a.U.(type) {
-	default:
-		Fatalf("unexpected Ctype for %T", a.U)
-		panic("unreachable")
-	case *NilVal:
-		return true
-	case bool:
-		y := b.U.(bool)
-		return x == y
-	case *Mpint:
-		y := b.U.(*Mpint)
-		return x.Cmp(y) == 0
-	case *Mpflt:
-		y := b.U.(*Mpflt)
-		return x.Cmp(y) == 0
-	case *Mpcplx:
-		y := b.U.(*Mpcplx)
-		return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
-	case string:
-		y := b.U.(string)
-		return x == y
-	}
-}
-
-// Interface returns the constant value stored in v as an interface{}.
-// It returns int64s for ints and runes, float64s for floats,
-// complex128s for complex values, and nil for constant nils.
-func (v Val) Interface() interface{} {
-	switch x := v.U.(type) {
-	default:
-		Fatalf("unexpected Interface for %T", v.U)
-		panic("unreachable")
-	case *NilVal:
-		return nil
-	case bool, string:
-		return x
-	case *Mpint:
-		return x.Int64()
-	case *Mpflt:
-		return x.Float64()
-	case *Mpcplx:
-		return complex(x.Real.Float64(), x.Imag.Float64())
-	}
-}
-
-type NilVal struct{}
-
-// Int64Val returns n as an int64.
-// n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
-	if !Isconst(n, CTINT) {
-		Fatalf("Int64Val(%v)", n)
-	}
-	return n.Val().U.(*Mpint).Int64()
-}
-
-// CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
-	if !Isconst(n, CTINT) {
-		return false
-	}
-
-	// If the value inside n cannot be represented as an int64, the
-	// return value of Int64Val is undefined.
-	return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
-}
-
-// BoolVal returns n as a bool.
-// n must be a boolean constant.
-func (n *Node) BoolVal() bool {
-	if !Isconst(n, CTBOOL) {
-		Fatalf("BoolVal(%v)", n)
-	}
-	return n.Val().U.(bool)
-}
-
-// StringVal returns the value of a literal string Node as a string.
-// n must be a string constant.
-func (n *Node) StringVal() string {
-	if !Isconst(n, CTSTR) {
-		Fatalf("StringVal(%v)", n)
-	}
-	return n.Val().U.(string)
-}
-
-// truncate float literal fv to 32-bit or 64-bit precision
-// according to type; return truncated value.
-func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
-	if t == nil {
-		return oldv
-	}
-
-	if overflow(Val{oldv}, t) {
-		// If there was overflow, simply continuing would set the
-		// value to Inf which in turn would lead to spurious follow-on
-		// errors. Avoid this by returning the existing value.
-		return oldv
-	}
-
-	fv := newMpflt()
-
-	// convert large precision literal floating
-	// into limited precision (float64 or float32)
-	switch t.Etype {
-	case types.TFLOAT32:
-		fv.SetFloat64(oldv.Float32())
-	case types.TFLOAT64:
-		fv.SetFloat64(oldv.Float64())
-	default:
-		Fatalf("truncfltlit: unexpected Etype %v", t.Etype)
-	}
-
-	return fv
-}
-
-// truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit
-// precision, according to type; return truncated value. In case of
-// overflow, calls yyerror but does not truncate the input value.
-func trunccmplxlit(oldv *Mpcplx, t *types.Type) *Mpcplx {
-	if t == nil {
-		return oldv
-	}
-
-	if overflow(Val{oldv}, t) {
-		// If there was overflow, simply continuing would set the
-		// value to Inf which in turn would lead to spurious follow-on
-		// errors. Avoid this by returning the existing value.
-		return oldv
-	}
-
-	cv := newMpcmplx()
-
-	switch t.Etype {
-	case types.TCOMPLEX64:
-		cv.Real.SetFloat64(oldv.Real.Float32())
-		cv.Imag.SetFloat64(oldv.Imag.Float32())
-	case types.TCOMPLEX128:
-		cv.Real.SetFloat64(oldv.Real.Float64())
-		cv.Imag.SetFloat64(oldv.Imag.Float64())
-	default:
-		Fatalf("trunccplxlit: unexpected Etype %v", t.Etype)
-	}
-
-	return cv
-}
-
-// TODO(mdempsky): Replace these with better APIs.
-func convlit(n *Node, t *types.Type) *Node    { return convlit1(n, t, false, nil) }
-func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
-
-// convlit1 converts an untyped expression n to type t. If n already
-// has a type, convlit1 has no effect.
-//
-// For explicit conversions, t must be non-nil, and integer-to-string
-// conversions are allowed.
-//
-// For implicit conversions (e.g., assignments), t may be nil; if so,
-// n is converted to its default type.
-//
-// If there's an error converting n to t, context is used in the error
-// message.
-func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node {
-	if explicit && t == nil {
-		Fatalf("explicit conversion missing type")
-	}
-	if t != nil && t.IsUntyped() {
-		Fatalf("bad conversion to untyped: %v", t)
-	}
-
-	if n == nil || n.Type == nil {
-		// Allow sloppy callers.
-		return n
-	}
-	if !n.Type.IsUntyped() {
-		// Already typed; nothing to do.
-		return n
-	}
-
-	if n.Op == OLITERAL {
-		// Can't always set n.Type directly on OLITERAL nodes.
-		// See discussion on CL 20813.
-		n = n.rawcopy()
-	}
-
-	// Nil is technically not a constant, so handle it specially.
-	if n.Type.Etype == TNIL {
-		if t == nil {
-			yyerror("use of untyped nil")
-			n.SetDiag(true)
-			n.Type = nil
-			return n
-		}
-
-		if !t.HasNil() {
-			// Leave for caller to handle.
-			return n
-		}
-
-		n.Type = t
-		return n
-	}
-
-	if t == nil || !okforconst[t.Etype] {
-		t = defaultType(n.Type)
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected untyped expression: %v", n)
-
-	case OLITERAL:
-		v := convertVal(n.Val(), t, explicit)
-		if v.U == nil {
-			break
-		}
-		n.SetVal(v)
-		n.Type = t
-		return n
-
-	case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG:
-		ot := operandType(n.Op, t)
-		if ot == nil {
-			n = defaultlit(n, nil)
-			break
-		}
-
-		n.Left = convlit(n.Left, ot)
-		if n.Left.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Type = t
-		return n
-
-	case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND, OCOMPLEX:
-		ot := operandType(n.Op, t)
-		if ot == nil {
-			n = defaultlit(n, nil)
-			break
-		}
-
-		n.Left = convlit(n.Left, ot)
-		n.Right = convlit(n.Right, ot)
-		if n.Left.Type == nil || n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if !types.Identical(n.Left.Type, n.Right.Type) {
-			yyerror("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type)
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-		return n
-
-	case OEQ, ONE, OLT, OLE, OGT, OGE:
-		if !t.IsBoolean() {
-			break
-		}
-		n.Type = t
-		return n
-
-	case OLSH, ORSH:
-		n.Left = convlit1(n.Left, t, explicit, nil)
-		n.Type = n.Left.Type
-		if n.Type != nil && !n.Type.IsInteger() {
-			yyerror("invalid operation: %v (shift of type %v)", n, n.Type)
-			n.Type = nil
-		}
-		return n
-	}
-
-	if !n.Diag() {
-		if !t.Broke() {
-			if explicit {
-				yyerror("cannot convert %L to type %v", n, t)
-			} else if context != nil {
-				yyerror("cannot use %L as type %v in %s", n, t, context())
-			} else {
-				yyerror("cannot use %L as type %v", n, t)
-			}
-		}
-		n.SetDiag(true)
-	}
-	n.Type = nil
-	return n
-}
-
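Seen from ordinary Go code, convlit1 is what converts an untyped constant to the type demanded by context, or to its default type when there is none. A small self-contained example (user code, not compiler code):

	// Standalone sketch: untyped constants adapt to their context.
	package main

	import "fmt"

	func main() {
		const c = 1 << 10 // untyped integer constant
		var f float64 = c // implicit conversion to float64 (t != nil)
		x := c            // no target type: c gets its default type, int
		fmt.Printf("%T %v, %T %v\n", f, f, x, x) // float64 1024, int 1024
	}
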
-func operandType(op Op, t *types.Type) *types.Type {
-	switch op {
-	case OCOMPLEX:
-		if t.IsComplex() {
-			return floatForComplex(t)
-		}
-	case OREAL, OIMAG:
-		if t.IsFloat() {
-			return complexForFloat(t)
-		}
-	default:
-		if okfor[op][t.Etype] {
-			return t
-		}
-	}
-	return nil
-}
-
-// convertVal converts v into a representation appropriate for t. If
-// no such representation exists, it returns Val{} instead.
-//
-// If explicit is true, then conversions from integer to string are
-// also allowed.
-func convertVal(v Val, t *types.Type, explicit bool) Val {
-	switch ct := v.Ctype(); ct {
-	case CTBOOL:
-		if t.IsBoolean() {
-			return v
-		}
-
-	case CTSTR:
-		if t.IsString() {
-			return v
-		}
-
-	case CTINT, CTRUNE:
-		if explicit && t.IsString() {
-			return tostr(v)
-		}
-		fallthrough
-	case CTFLT, CTCPLX:
-		switch {
-		case t.IsInteger():
-			v = toint(v)
-			overflow(v, t)
-			return v
-		case t.IsFloat():
-			v = toflt(v)
-			v = Val{truncfltlit(v.U.(*Mpflt), t)}
-			return v
-		case t.IsComplex():
-			v = tocplx(v)
-			v = Val{trunccmplxlit(v.U.(*Mpcplx), t)}
-			return v
-		}
-	}
-
-	return Val{}
-}
-
-func tocplx(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		c := newMpcmplx()
-		c.Real.SetInt(u)
-		c.Imag.SetFloat64(0.0)
-		v.U = c
-
-	case *Mpflt:
-		c := newMpcmplx()
-		c.Real.Set(u)
-		c.Imag.SetFloat64(0.0)
-		v.U = c
-	}
-
-	return v
-}
-
-func toflt(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		f := newMpflt()
-		f.SetInt(u)
-		v.U = f
-
-	case *Mpcplx:
-		f := newMpflt()
-		f.Set(&u.Real)
-		if u.Imag.CmpFloat64(0) != 0 {
-			yyerror("constant %v truncated to real", u.GoString())
-		}
-		v.U = f
-	}
-
-	return v
-}
-
-func toint(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if u.Rune {
-			i := new(Mpint)
-			i.Set(u)
-			v.U = i
-		}
-
-	case *Mpflt:
-		i := new(Mpint)
-		if !i.SetFloat(u) {
-			if i.checkOverflow(0) {
-				yyerror("integer too large")
-			} else {
-				// The value of u cannot be represented as an integer,
-				// so we need to print an error message.
-				// Unfortunately some float values cannot be
-				// reasonably formatted for inclusion in an error
-				// message (example: 1 + 1e-100), so first we try to
-				// format the float; if the truncation resulted in
-				// something that looks like an integer we omit the
-				// value from the error message.
-				// (See issue #11371).
-				var t big.Float
-				t.Parse(u.GoString(), 10)
-				if t.IsInt() {
-					yyerror("constant truncated to integer")
-				} else {
-					yyerror("constant %v truncated to integer", u.GoString())
-				}
-			}
-		}
-		v.U = i
-
-	case *Mpcplx:
-		i := new(Mpint)
-		if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
-			yyerror("constant %v truncated to integer", u.GoString())
-		}
-
-		v.U = i
-	}
-
-	return v
-}
-
-func doesoverflow(v Val, t *types.Type) bool {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if !t.IsInteger() {
-			Fatalf("overflow: %v integer constant", t)
-		}
-		return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0
-
-	case *Mpflt:
-		if !t.IsFloat() {
-			Fatalf("overflow: %v floating-point constant", t)
-		}
-		return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0
-
-	case *Mpcplx:
-		if !t.IsComplex() {
-			Fatalf("overflow: %v complex constant", t)
-		}
-		return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 ||
-			u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0
-	}
-
-	return false
-}
-
-func overflow(v Val, t *types.Type) bool {
-	// v has already been converted
-	// to appropriate form for t.
-	if t == nil || t.Etype == TIDEAL {
-		return false
-	}
-
-	// Only uintptrs may be converted to pointers, which cannot overflow.
-	if t.IsPtr() || t.IsUnsafePtr() {
-		return false
-	}
-
-	if doesoverflow(v, t) {
-		yyerror("constant %v overflows %v", v, t)
-		return true
-	}
-
-	return false
-}
-
-func tostr(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		var r rune = 0xFFFD
-		if u.Cmp(minintval[TINT32]) >= 0 && u.Cmp(maxintval[TINT32]) <= 0 {
-			r = rune(u.Int64())
-		}
-		v.U = string(r)
-	}
-
-	return v
-}
-
-func consttype(n *Node) Ctype {
-	if n == nil || n.Op != OLITERAL {
-		return CTxxx
-	}
-	return n.Val().Ctype()
-}
-
-func Isconst(n *Node, ct Ctype) bool {
-	t := consttype(n)
-
-	// If the caller is asking for CTINT, allow CTRUNE too.
-	// Makes life easier for back ends.
-	return t == ct || (ct == CTINT && t == CTRUNE)
-}
-
-// evconst rewrites constant expressions into OLITERAL nodes.
-func evconst(n *Node) {
-	nl, nr := n.Left, n.Right
-
-	// Pick off just the opcodes that can be constant evaluated.
-	switch op := n.Op; op {
-	case OPLUS, ONEG, OBITNOT, ONOT:
-		if nl.Op == OLITERAL {
-			setconst(n, unaryOp(op, nl.Val(), n.Type))
-		}
-
-	case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			setconst(n, binaryOp(nl.Val(), op, nr.Val()))
-		}
-
-	case OEQ, ONE, OLT, OLE, OGT, OGE:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			setboolconst(n, compareOp(nl.Val(), op, nr.Val()))
-		}
-
-	case OLSH, ORSH:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			setconst(n, shiftOp(nl.Val(), op, nr.Val()))
-		}
-
-	case OCONV, ORUNESTR:
-		if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
-			setconst(n, convertVal(nl.Val(), n.Type, true))
-		}
-
-	case OCONVNOP:
-		if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
-			// set so n.Orig gets OCONV instead of OCONVNOP
-			n.Op = OCONV
-			setconst(n, nl.Val())
-		}
-
-	case OADDSTR:
-		// Merge adjacent constants in the argument list.
-		s := n.List.Slice()
-		for i1 := 0; i1 < len(s); i1++ {
-			if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
-				// merge from i1 up to but not including i2
-				var strs []string
-				i2 := i1
-				for i2 < len(s) && Isconst(s[i2], CTSTR) {
-					strs = append(strs, s[i2].StringVal())
-					i2++
-				}
-
-				nl := *s[i1]
-				nl.Orig = &nl
-				nl.SetVal(Val{strings.Join(strs, "")})
-				s[i1] = &nl
-				s = append(s[:i1+1], s[i2:]...)
-			}
-		}
-
-		if len(s) == 1 && Isconst(s[0], CTSTR) {
-			n.Op = OLITERAL
-			n.SetVal(s[0].Val())
-		} else {
-			n.List.Set(s)
-		}
-
-	case OCAP, OLEN:
-		switch nl.Type.Etype {
-		case TSTRING:
-			if Isconst(nl, CTSTR) {
-				setintconst(n, int64(len(nl.StringVal())))
-			}
-		case TARRAY:
-			if !hascallchan(nl) {
-				setintconst(n, nl.Type.NumElem())
-			}
-		}
-
-	case OALIGNOF, OOFFSETOF, OSIZEOF:
-		setintconst(n, evalunsafe(n))
-
-	case OREAL, OIMAG:
-		if nl.Op == OLITERAL {
-			var re, im *Mpflt
-			switch u := nl.Val().U.(type) {
-			case *Mpint:
-				re = newMpflt()
-				re.SetInt(u)
-				// im = 0
-			case *Mpflt:
-				re = u
-				// im = 0
-			case *Mpcplx:
-				re = &u.Real
-				im = &u.Imag
-			default:
-				Fatalf("impossible")
-			}
-			if n.Op == OIMAG {
-				if im == nil {
-					im = newMpflt()
-				}
-				re = im
-			}
-			setconst(n, Val{re})
-		}
-
-	case OCOMPLEX:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			// make it a complex literal
-			c := newMpcmplx()
-			c.Real.Set(toflt(nl.Val()).U.(*Mpflt))
-			c.Imag.Set(toflt(nr.Val()).U.(*Mpflt))
-			setconst(n, Val{c})
-		}
-	}
-}
-
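At the source level, evconst's folding is why expressions like the following are fully evaluated at compile time (a standalone illustration in user code):

	// Standalone sketch: constant expressions folded at compile time.
	package main

	import "fmt"

	const (
		a = 3*7 + 1       // arithmetic folded to 22
		b = "go" + "pher" // adjacent string constants merged (OADDSTR)
		c = len(b)        // len of a constant string is itself constant
	)

	func main() { fmt.Println(a, b, c) } // 22 gopher 6
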
-func match(x, y Val) (Val, Val) {
-	switch {
-	case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX:
-		return tocplx(x), tocplx(y)
-	case x.Ctype() == CTFLT || y.Ctype() == CTFLT:
-		return toflt(x), toflt(y)
-	}
-
-	// Mixed int/rune are fine.
-	return x, y
-}
-
-func compareOp(x Val, op Op, y Val) bool {
-	x, y = match(x, y)
-
-	switch x.Ctype() {
-	case CTBOOL:
-		x, y := x.U.(bool), y.U.(bool)
-		switch op {
-		case OEQ:
-			return x == y
-		case ONE:
-			return x != y
-		}
-
-	case CTINT, CTRUNE:
-		x, y := x.U.(*Mpint), y.U.(*Mpint)
-		return cmpZero(x.Cmp(y), op)
-
-	case CTFLT:
-		x, y := x.U.(*Mpflt), y.U.(*Mpflt)
-		return cmpZero(x.Cmp(y), op)
-
-	case CTCPLX:
-		x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
-		eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
-		switch op {
-		case OEQ:
-			return eq
-		case ONE:
-			return !eq
-		}
-
-	case CTSTR:
-		x, y := x.U.(string), y.U.(string)
-		switch op {
-		case OEQ:
-			return x == y
-		case ONE:
-			return x != y
-		case OLT:
-			return x < y
-		case OLE:
-			return x <= y
-		case OGT:
-			return x > y
-		case OGE:
-			return x >= y
-		}
-	}
-
-	Fatalf("compareOp: bad comparison: %v %v %v", x, op, y)
-	panic("unreachable")
-}
-
-func cmpZero(x int, op Op) bool {
-	switch op {
-	case OEQ:
-		return x == 0
-	case ONE:
-		return x != 0
-	case OLT:
-		return x < 0
-	case OLE:
-		return x <= 0
-	case OGT:
-		return x > 0
-	case OGE:
-		return x >= 0
-	}
-
-	Fatalf("cmpZero: want comparison operator, got %v", op)
-	panic("unreachable")
-}
-
-func binaryOp(x Val, op Op, y Val) Val {
-	x, y = match(x, y)
-
-Outer:
-	switch x.Ctype() {
-	case CTBOOL:
-		x, y := x.U.(bool), y.U.(bool)
-		switch op {
-		case OANDAND:
-			return Val{U: x && y}
-		case OOROR:
-			return Val{U: x || y}
-		}
-
-	case CTINT, CTRUNE:
-		x, y := x.U.(*Mpint), y.U.(*Mpint)
-
-		u := new(Mpint)
-		u.Rune = x.Rune || y.Rune
-		u.Set(x)
-		switch op {
-		case OADD:
-			u.Add(y)
-		case OSUB:
-			u.Sub(y)
-		case OMUL:
-			u.Mul(y)
-		case ODIV:
-			if y.CmpInt64(0) == 0 {
-				yyerror("division by zero")
-				return Val{}
-			}
-			u.Quo(y)
-		case OMOD:
-			if y.CmpInt64(0) == 0 {
-				yyerror("division by zero")
-				return Val{}
-			}
-			u.Rem(y)
-		case OOR:
-			u.Or(y)
-		case OAND:
-			u.And(y)
-		case OANDNOT:
-			u.AndNot(y)
-		case OXOR:
-			u.Xor(y)
-		default:
-			break Outer
-		}
-		return Val{U: u}
-
-	case CTFLT:
-		x, y := x.U.(*Mpflt), y.U.(*Mpflt)
-
-		u := newMpflt()
-		u.Set(x)
-		switch op {
-		case OADD:
-			u.Add(y)
-		case OSUB:
-			u.Sub(y)
-		case OMUL:
-			u.Mul(y)
-		case ODIV:
-			if y.CmpFloat64(0) == 0 {
-				yyerror("division by zero")
-				return Val{}
-			}
-			u.Quo(y)
-		default:
-			break Outer
-		}
-		return Val{U: u}
-
-	case CTCPLX:
-		x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
-
-		u := newMpcmplx()
-		u.Real.Set(&x.Real)
-		u.Imag.Set(&x.Imag)
-		switch op {
-		case OADD:
-			u.Real.Add(&y.Real)
-			u.Imag.Add(&y.Imag)
-		case OSUB:
-			u.Real.Sub(&y.Real)
-			u.Imag.Sub(&y.Imag)
-		case OMUL:
-			u.Mul(y)
-		case ODIV:
-			if !u.Div(y) {
-				yyerror("complex division by zero")
-				return Val{}
-			}
-		default:
-			break Outer
-		}
-		return Val{U: u}
-	}
-
-	Fatalf("binaryOp: bad operation: %v %v %v", x, op, y)
-	panic("unreachable")
-}
-
-func unaryOp(op Op, x Val, t *types.Type) Val {
-	switch op {
-	case OPLUS:
-		switch x.Ctype() {
-		case CTINT, CTRUNE, CTFLT, CTCPLX:
-			return x
-		}
-
-	case ONEG:
-		switch x.Ctype() {
-		case CTINT, CTRUNE:
-			x := x.U.(*Mpint)
-			u := new(Mpint)
-			u.Rune = x.Rune
-			u.Set(x)
-			u.Neg()
-			return Val{U: u}
-
-		case CTFLT:
-			x := x.U.(*Mpflt)
-			u := newMpflt()
-			u.Set(x)
-			u.Neg()
-			return Val{U: u}
-
-		case CTCPLX:
-			x := x.U.(*Mpcplx)
-			u := newMpcmplx()
-			u.Real.Set(&x.Real)
-			u.Imag.Set(&x.Imag)
-			u.Real.Neg()
-			u.Imag.Neg()
-			return Val{U: u}
-		}
-
-	case OBITNOT:
-		switch x.Ctype() {
-		case CTINT, CTRUNE:
-			x := x.U.(*Mpint)
-
-			u := new(Mpint)
-			u.Rune = x.Rune
-			if t.IsSigned() || t.IsUntyped() {
-				// Signed values change sign.
-				u.SetInt64(-1)
-			} else {
-				// Unsigned values invert their bits.
-				u.Set(maxintval[t.Etype])
-			}
-			u.Xor(x)
-			return Val{U: u}
-		}
-
-	case ONOT:
-		return Val{U: !x.U.(bool)}
-	}
-
-	Fatalf("unaryOp: bad operation: %v %v", op, x)
-	panic("unreachable")
-}
-
-func shiftOp(x Val, op Op, y Val) Val {
-	if x.Ctype() != CTRUNE {
-		x = toint(x)
-	}
-	y = toint(y)
-
-	u := new(Mpint)
-	u.Set(x.U.(*Mpint))
-	u.Rune = x.U.(*Mpint).Rune
-	switch op {
-	case OLSH:
-		u.Lsh(y.U.(*Mpint))
-	case ORSH:
-		u.Rsh(y.U.(*Mpint))
-	default:
-		Fatalf("shiftOp: bad operator: %v", op)
-		panic("unreachable")
-	}
-	return Val{U: u}
-}
-
-// setconst rewrites n as an OLITERAL with value v.
-func setconst(n *Node, v Val) {
-	// If constant folding failed, mark n as broken and give up.
-	if v.U == nil {
-		n.Type = nil
-		return
-	}
-
-	// Ensure n.Orig still points to a semantically-equivalent
-	// expression after we rewrite n into a constant.
-	if n.Orig == n {
-		n.Orig = n.sepcopy()
-	}
-
-	*n = Node{
-		Op:      OLITERAL,
-		Pos:     n.Pos,
-		Orig:    n.Orig,
-		Type:    n.Type,
-		Xoffset: BADWIDTH,
-	}
-	n.SetVal(v)
-	if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt {
-		Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt)
-	}
-
-	// Check range.
-	lno := setlineno(n)
-	overflow(v, n.Type)
-	lineno = lno
-
-	if !n.Type.IsUntyped() {
-		switch v.Ctype() {
-		// Truncate precision for non-ideal float.
-		case CTFLT:
-			n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)})
-		// Truncate precision for non-ideal complex.
-		case CTCPLX:
-			n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)})
-		}
-	}
-}
-
-func setboolconst(n *Node, v bool) {
-	setconst(n, Val{U: v})
-}
-
-func setintconst(n *Node, v int64) {
-	u := new(Mpint)
-	u.SetInt64(v)
-	setconst(n, Val{u})
-}
-
-// nodlit returns a new untyped constant with value v.
-func nodlit(v Val) *Node {
-	n := nod(OLITERAL, nil, nil)
-	n.SetVal(v)
-	n.Type = idealType(v.Ctype())
-	return n
-}
-
-func idealType(ct Ctype) *types.Type {
-	switch ct {
-	case CTSTR:
-		return types.UntypedString
-	case CTBOOL:
-		return types.UntypedBool
-	case CTINT:
-		return types.UntypedInt
-	case CTRUNE:
-		return types.UntypedRune
-	case CTFLT:
-		return types.UntypedFloat
-	case CTCPLX:
-		return types.UntypedComplex
-	case CTNIL:
-		return types.Types[TNIL]
-	}
-	Fatalf("unexpected Ctype: %v", ct)
-	return nil
-}
-
-// defaultlit on both nodes simultaneously;
-// if they're both ideal going in, they had better
-// get the same type going out.
-// force means must assign concrete (non-ideal) type.
-// The results of defaultlit2 MUST be assigned back to l and r, e.g.
-// 	n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
-	if l.Type == nil || r.Type == nil {
-		return l, r
-	}
-	if !l.Type.IsUntyped() {
-		r = convlit(r, l.Type)
-		return l, r
-	}
-
-	if !r.Type.IsUntyped() {
-		l = convlit(l, r.Type)
-		return l, r
-	}
-
-	if !force {
-		return l, r
-	}
-
-	// Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
-	if l.Type.IsBoolean() != r.Type.IsBoolean() {
-		return l, r
-	}
-	if l.Type.IsString() != r.Type.IsString() {
-		return l, r
-	}
-	if l.isNil() || r.isNil() {
-		return l, r
-	}
-
-	t := defaultType(mixUntyped(l.Type, r.Type))
-	l = convlit(l, t)
-	r = convlit(r, t)
-	return l, r
-}
-
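defaultlit2 is what gives mixed untyped operands a single concrete type. In user terms (self-contained example):

	// Standalone sketch: mixed untyped operands take one default type.
	package main

	import "fmt"

	func main() {
		x := 1 + 2.5 // untyped int + untyped float mix to untyped float,
		             // which defaults to float64
		fmt.Printf("%T %v\n", x, x) // float64 3.5
	}
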
-func ctype(t *types.Type) Ctype {
-	switch t {
-	case types.UntypedBool:
-		return CTBOOL
-	case types.UntypedString:
-		return CTSTR
-	case types.UntypedInt:
-		return CTINT
-	case types.UntypedRune:
-		return CTRUNE
-	case types.UntypedFloat:
-		return CTFLT
-	case types.UntypedComplex:
-		return CTCPLX
-	}
-	Fatalf("bad type %v", t)
-	panic("unreachable")
-}
-
-func mixUntyped(t1, t2 *types.Type) *types.Type {
-	t := t1
-	if ctype(t2) > ctype(t1) {
-		t = t2
-	}
-	return t
-}
-
-func defaultType(t *types.Type) *types.Type {
-	if !t.IsUntyped() || t.Etype == TNIL {
-		return t
-	}
-
-	switch t {
-	case types.UntypedBool:
-		return types.Types[TBOOL]
-	case types.UntypedString:
-		return types.Types[TSTRING]
-	case types.UntypedInt:
-		return types.Types[TINT]
-	case types.UntypedRune:
-		return types.Runetype
-	case types.UntypedFloat:
-		return types.Types[TFLOAT64]
-	case types.UntypedComplex:
-		return types.Types[TCOMPLEX128]
-	}
-
-	Fatalf("bad type %v", t)
-	return nil
-}
-
-func smallintconst(n *Node) bool {
-	if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
-		switch simtype[n.Type.Etype] {
-		case TINT8,
-			TUINT8,
-			TINT16,
-			TUINT16,
-			TINT32,
-			TUINT32,
-			TBOOL:
-			return true
-
-		case TIDEAL, TINT64, TUINT64, TPTR:
-			v, ok := n.Val().U.(*Mpint)
-			if ok && v.Cmp(minintval[TINT32]) >= 0 && v.Cmp(maxintval[TINT32]) <= 0 {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// indexconst checks if Node n contains a constant expression
-// representable as a non-negative int and returns its value.
-// If n is not a constant expression, not representable as an
-// integer, or negative, it returns -1. If n is too large, it
-// returns -2.
-func indexconst(n *Node) int64 {
-	if n.Op != OLITERAL {
-		return -1
-	}
-
-	v := toint(n.Val()) // toint returns argument unchanged if not representable as an *Mpint
-	vi, ok := v.U.(*Mpint)
-	if !ok || vi.CmpInt64(0) < 0 {
-		return -1
-	}
-	if vi.Cmp(maxintval[TINT]) > 0 {
-		return -2
-	}
-
-	return vi.Int64()
-}
-
-// isGoConst reports whether n is a Go language constant (as opposed to a
-// compile-time constant).
-//
-// Expressions derived from nil, like string([]byte(nil)), while they
-// may be known at compile time, are not Go language constants.
-func (n *Node) isGoConst() bool {
-	return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
-}
-
-func hascallchan(n *Node) bool {
-	if n == nil {
-		return false
-	}
-	switch n.Op {
-	case OAPPEND,
-		OCALL,
-		OCALLFUNC,
-		OCALLINTER,
-		OCALLMETH,
-		OCAP,
-		OCLOSE,
-		OCOMPLEX,
-		OCOPY,
-		ODELETE,
-		OIMAG,
-		OLEN,
-		OMAKE,
-		ONEW,
-		OPANIC,
-		OPRINT,
-		OPRINTN,
-		OREAL,
-		ORECOVER,
-		ORECV:
-		return true
-	}
-
-	if hascallchan(n.Left) || hascallchan(n.Right) {
-		return true
-	}
-	for _, n1 := range n.List.Slice() {
-		if hascallchan(n1) {
-			return true
-		}
-	}
-	for _, n2 := range n.Rlist.Slice() {
-		if hascallchan(n2) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// A constSet represents a set of Go constant expressions.
-type constSet struct {
-	m map[constSetKey]src.XPos
-}
-
-type constSetKey struct {
-	typ *types.Type
-	val interface{}
-}
-
-// add adds constant expression n to s. If a constant expression of
-// equal value and identical type has already been added, then add
-// reports an error about the duplicate value.
-//
-// pos provides position information for where expression n occurred
-// (in case n does not have its own position information). what and
-// where are used in the error message.
-//
-// n must not be an untyped constant.
-func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
-	if n.Op == OCONVIFACE && n.Implicit() {
-		n = n.Left
-	}
-
-	if !n.isGoConst() {
-		return
-	}
-	if n.Type.IsUntyped() {
-		Fatalf("%v is untyped", n)
-	}
-
-	// Consts are only duplicates if they have the same value and
-	// identical types.
-	//
-	// In general, we have to use types.Identical to test type
-	// identity, because == gives false negatives for anonymous
-	// types and the byte/uint8 and rune/int32 builtin type
-	// aliases.  However, this is not a problem here, because
-	// constant expressions are always untyped or have a named
-	// type, and we explicitly handle the builtin type aliases
-	// below.
-	//
-	// This approach may need to be revisited though if we fix
-	// #21866 by treating all type aliases like byte/uint8 and
-	// rune/int32.
-
-	typ := n.Type
-	switch typ {
-	case types.Bytetype:
-		typ = types.Types[TUINT8]
-	case types.Runetype:
-		typ = types.Types[TINT32]
-	}
-	k := constSetKey{typ, n.Val().Interface()}
-
-	if hasUniquePos(n) {
-		pos = n.Pos
-	}
-
-	if s.m == nil {
-		s.m = make(map[constSetKey]src.XPos)
-	}
-
-	if prevPos, isDup := s.m[k]; isDup {
-		yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
-			what, nodeAndVal(n), where,
-			what, linestr(prevPos))
-	} else {
-		s.m[k] = pos
-	}
-}
-
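constSet is the machinery behind duplicate-constant errors in contexts such as switch cases and map literals. From the user's point of view (self-contained example; the commented-out case shows what would be rejected):

	// Standalone sketch: duplicate constant cases are a compile error.
	package main

	import "fmt"

	func classify(b byte) int {
		switch b {
		case 'a':
			return 0
		// case 97: // rejected: duplicate case; 'a' and 97 are the
		//          // same value with identical type byte
		case 'b':
			return 1
		}
		return -1
	}

	func main() { fmt.Println(classify('a')) } // 0
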
-// nodeAndVal reports both an expression and its constant value, if
-// the latter is non-obvious.
-//
-// TODO(mdempsky): This could probably be a fmt.go flag.
-func nodeAndVal(n *Node) string {
-	show := n.String()
-	val := n.Val().Interface()
-	if s := fmt.Sprintf("%#v", val); show != s {
-		show += " (value " + s + ")"
-	}
-	return show
-}
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
deleted file mode 100644
index 6e90eb4..0000000
--- a/src/cmd/compile/internal/gc/dcl.go
+++ /dev/null
@@ -1,1185 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/src"
-	"fmt"
-	"strings"
-)
-
-// Declaration stack & operations
-
-var externdcl []*Node
-
-func testdclstack() {
-	if !types.IsDclstackValid() {
-		if nerrors != 0 {
-			errorexit()
-		}
-		Fatalf("mark left on the dclstack")
-	}
-}
-
-// redeclare emits a diagnostic about symbol s being redeclared at pos.
-func redeclare(pos src.XPos, s *types.Sym, where string) {
-	if !s.Lastlineno.IsKnown() {
-		pkg := s.Origpkg
-		if pkg == nil {
-			pkg = s.Pkg
-		}
-		yyerrorl(pos, "%v redeclared %s\n"+
-			"\tprevious declaration during import %q", s, where, pkg.Path)
-	} else {
-		prevPos := s.Lastlineno
-
-		// When an import and a declaration collide in separate files,
-		// present the import as the "redeclared", because the declaration
-		// is visible where the import is, but not vice versa.
-		// See issue 4510.
-		if s.Def == nil {
-			pos, prevPos = prevPos, pos
-		}
-
-		yyerrorl(pos, "%v redeclared %s\n"+
-			"\tprevious declaration at %v", s, where, linestr(prevPos))
-	}
-}
-
-var vargen int
-
-// declare individual names - var, typ, const
-
-var declare_typegen int
-
-// declare records that Node n declares symbol n.Sym in the specified
-// declaration context.
-func declare(n *Node, ctxt Class) {
-	if n.isBlank() {
-		return
-	}
-
-	if n.Name == nil {
-		// named OLITERAL needs Name; most OLITERALs don't.
-		n.Name = new(Name)
-	}
-
-	s := n.Sym
-
-	// kludgy: typecheckok means we're past parsing. E.g., genwrapper may declare out-of-package names later.
-	if !inimport && !typecheckok && s.Pkg != localpkg {
-		yyerrorl(n.Pos, "cannot declare name %v", s)
-	}
-
-	gen := 0
-	if ctxt == PEXTERN {
-		if s.Name == "init" {
-			yyerrorl(n.Pos, "cannot declare init - must be func")
-		}
-		if s.Name == "main" && s.Pkg.Name == "main" {
-			yyerrorl(n.Pos, "cannot declare main - must be func")
-		}
-		externdcl = append(externdcl, n)
-	} else {
-		if Curfn == nil && ctxt == PAUTO {
-			lineno = n.Pos
-			Fatalf("automatic outside function")
-		}
-		if Curfn != nil && ctxt != PFUNC {
-			Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-		}
-		if n.Op == OTYPE {
-			declare_typegen++
-			gen = declare_typegen
-		} else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
-			vargen++
-			gen = vargen
-		}
-		types.Pushdcl(s)
-		n.Name.Curfn = Curfn
-	}
-
-	if ctxt == PAUTO {
-		n.Xoffset = 0
-	}
-
-	if s.Block == types.Block {
-		// functype will print errors about duplicate function arguments.
-		// Don't repeat the error here.
-		if ctxt != PPARAM && ctxt != PPARAMOUT {
-			redeclare(n.Pos, s, "in this block")
-		}
-	}
-
-	s.Block = types.Block
-	s.Lastlineno = lineno
-	s.Def = asTypesNode(n)
-	n.Name.Vargen = int32(gen)
-	n.SetClass(ctxt)
-	if ctxt == PFUNC {
-		n.Sym.SetFunc(true)
-	}
-
-	autoexport(n, ctxt)
-}
-
-func addvar(n *Node, t *types.Type, ctxt Class) {
-	if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
-		Fatalf("addvar: n=%v t=%v nil", n, t)
-	}
-
-	n.Op = ONAME
-	declare(n, ctxt)
-	n.Type = t
-}
-
-// declare variables from grammar
-// new_name_list (type | [type] = expr_list)
-func variter(vl []*Node, t *Node, el []*Node) []*Node {
-	var init []*Node
-	doexpr := len(el) > 0
-
-	if len(el) == 1 && len(vl) > 1 {
-		e := el[0]
-		as2 := nod(OAS2, nil, nil)
-		as2.List.Set(vl)
-		as2.Rlist.Set1(e)
-		for _, v := range vl {
-			v.Op = ONAME
-			declare(v, dclcontext)
-			v.Name.Param.Ntype = t
-			v.Name.Defn = as2
-			if Curfn != nil {
-				init = append(init, nod(ODCL, v, nil))
-			}
-		}
-
-		return append(init, as2)
-	}
-
-	nel := len(el)
-	for _, v := range vl {
-		var e *Node
-		if doexpr {
-			if len(el) == 0 {
-				yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
-				break
-			}
-			e = el[0]
-			el = el[1:]
-		}
-
-		v.Op = ONAME
-		declare(v, dclcontext)
-		v.Name.Param.Ntype = t
-
-		if e != nil || Curfn != nil || v.isBlank() {
-			if Curfn != nil {
-				init = append(init, nod(ODCL, v, nil))
-			}
-			e = nod(OAS, v, e)
-			init = append(init, e)
-			if e.Right != nil {
-				v.Name.Defn = e
-			}
-		}
-	}
-
-	if len(el) != 0 {
-		yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
-	}
-	return init
-}
-
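For reference, the declaration shapes variter handles look like this in source form (standalone user code):

	// Standalone sketch: the var-declaration forms handled by variter.
	package main

	import "fmt"

	func pair() (int, int) { return 1, 2 }

	var x, y = pair() // one expression for several variables (the OAS2 case)
	var a, b = 3, 4   // one expression per variable
	var c, d int      // type only; zero-valued

	func main() { fmt.Println(x, y, a, b, c, d) } // 1 2 3 4 0 0
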
-// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *types.Sym) *Node {
-	if s == nil {
-		Fatalf("newnoname nil")
-	}
-	n := nod(ONONAME, nil, nil)
-	n.Sym = s
-	n.Xoffset = 0
-	return n
-}
-
-// newfuncnamel generates a new name node for a function or method.
-// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
-func newfuncnamel(pos src.XPos, s *types.Sym) *Node {
-	n := newnamel(pos, s)
-	n.Func = new(Func)
-	n.Func.SetIsHiddenClosure(Curfn != nil)
-	return n
-}
-
-// this generates a new name node for a name
-// being declared.
-func dclname(s *types.Sym) *Node {
-	n := newname(s)
-	n.Op = ONONAME // caller will correct it
-	return n
-}
-
-func typenod(t *types.Type) *Node {
-	return typenodl(src.NoXPos, t)
-}
-
-func typenodl(pos src.XPos, t *types.Type) *Node {
-	// if we copied another type with *t = *u
-	// then t->nod might be out of date, so
-	// check t->nod->type too
-	if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
-		t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil))
-		asNode(t.Nod).Type = t
-		asNode(t.Nod).Sym = t.Sym
-	}
-
-	return asNode(t.Nod)
-}
-
-func anonfield(typ *types.Type) *Node {
-	return symfield(nil, typ)
-}
-
-func namedfield(s string, typ *types.Type) *Node {
-	return symfield(lookup(s), typ)
-}
-
-func symfield(s *types.Sym, typ *types.Type) *Node {
-	n := nodSym(ODCLFIELD, nil, s)
-	n.Type = typ
-	return n
-}
-
-// oldname returns the Node that declares symbol s in the current scope.
-// If no such Node currently exists, an ONONAME Node is returned instead.
-// Automatically creates a new closure variable if the referenced symbol was
-// declared in a different (containing) function.
-func oldname(s *types.Sym) *Node {
-	n := asNode(s.Def)
-	if n == nil {
-		// Maybe a top-level declaration will come along later to
-		// define s. resolve will check s.Def again once all input
-		// source has been processed.
-		return newnoname(s)
-	}
-
-	if Curfn != nil && n.Op == ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn {
-		// Inner func is referring to var in outer func.
-		//
-		// TODO(rsc): If there is an outer variable x and we
-		// are parsing x := 5 inside the closure, until we get to
-		// the := it looks like a reference to the outer x so we'll
-		// make x a closure variable unnecessarily.
-		c := n.Name.Param.Innermost
-		if c == nil || c.Name.Curfn != Curfn {
-			// Do not have a closure var for the active closure yet; make one.
-			c = newname(s)
-			c.SetClass(PAUTOHEAP)
-			c.Name.SetIsClosureVar(true)
-			c.SetIsDDD(n.IsDDD())
-			c.Name.Defn = n
-
-			// Link into list of active closure variables.
-			// Popped from list in func funcLit.
-			c.Name.Param.Outer = n.Name.Param.Innermost
-			n.Name.Param.Innermost = c
-
-			Curfn.Func.Cvars.Append(c)
-		}
-
-		// return ref to closure var, not original
-		return c
-	}
-
-	return n
-}
-
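The closure-variable bookkeeping in oldname is what makes ordinary variable capture work. A minimal standalone example of the behavior it enables:

	// Standalone sketch: an inner func referring to an outer variable.
	package main

	import "fmt"

	func main() {
		x := 1
		inc := func() { x++ } // x becomes a closure variable of inc
		inc()
		inc()
		fmt.Println(x) // 3
	}
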
-// importName is like oldname, but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) *Node {
-	n := oldname(sym)
-	if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
-		n.SetDiag(true)
-		yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
-	}
-	return n
-}
-
-// := declarations
-func colasname(n *Node) bool {
-	switch n.Op {
-	case ONAME,
-		ONONAME,
-		OPACK,
-		OTYPE,
-		OLITERAL:
-		return n.Sym != nil
-	}
-
-	return false
-}
-
-func colasdefn(left []*Node, defn *Node) {
-	for _, n := range left {
-		if n.Sym != nil {
-			n.Sym.SetUniq(true)
-		}
-	}
-
-	var nnew, nerr int
-	for i, n := range left {
-		if n.isBlank() {
-			continue
-		}
-		if !colasname(n) {
-			yyerrorl(defn.Pos, "non-name %v on left side of :=", n)
-			nerr++
-			continue
-		}
-
-		if !n.Sym.Uniq() {
-			yyerrorl(defn.Pos, "%v repeated on left side of :=", n.Sym)
-			n.SetDiag(true)
-			nerr++
-			continue
-		}
-
-		n.Sym.SetUniq(false)
-		if n.Sym.Block == types.Block {
-			continue
-		}
-
-		nnew++
-		n = newname(n.Sym)
-		declare(n, dclcontext)
-		n.Name.Defn = defn
-		defn.Ninit.Append(nod(ODCL, n, nil))
-		left[i] = n
-	}
-
-	if nnew == 0 && nerr == 0 {
-		yyerrorl(defn.Pos, "no new variables on left side of :=")
-	}
-}
-
-// declare the arguments in an
-// interface field declaration.
-func ifacedcl(n *Node) {
-	if n.Op != ODCLFIELD || n.Left == nil {
-		Fatalf("ifacedcl")
-	}
-
-	if n.Sym.IsBlank() {
-		yyerror("methods must have a unique non-blank name")
-	}
-}
-
-// declare the function proper
-// and declare the arguments.
-// called in extern-declaration context;
-// returns in auto-declaration context.
-func funchdr(n *Node) {
-	// change the declaration context from extern to auto
-	funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
-	Curfn = n
-	dclcontext = PAUTO
-
-	types.Markdcl()
-
-	if n.Func.Nname != nil {
-		funcargs(n.Func.Nname.Name.Param.Ntype)
-	} else if n.Func.Ntype != nil {
-		funcargs(n.Func.Ntype)
-	} else {
-		funcargs2(n.Type)
-	}
-}
-
-func funcargs(nt *Node) {
-	if nt.Op != OTFUNC {
-		Fatalf("funcargs %v", nt.Op)
-	}
-
-	// re-start the variable generation number
-	// we want to use small numbers for the return variables,
-	// so let them have the chunk starting at 1.
-	//
-	// TODO(mdempsky): This is ugly, and only necessary because
-	// esc.go uses Vargen to figure out result parameters' index
-	// within the result tuple.
-	vargen = nt.Rlist.Len()
-
-	// declare the receiver and in arguments.
-	if nt.Left != nil {
-		funcarg(nt.Left, PPARAM)
-	}
-	for _, n := range nt.List.Slice() {
-		funcarg(n, PPARAM)
-	}
-
-	oldvargen := vargen
-	vargen = 0
-
-	// declare the out arguments.
-	gen := nt.List.Len()
-	for _, n := range nt.Rlist.Slice() {
-		if n.Sym == nil {
-			// Name so that escape analysis can track it. ~r stands for 'result'.
-			n.Sym = lookupN("~r", gen)
-			gen++
-		}
-		if n.Sym.IsBlank() {
-			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
-			// The name must be different from ~r above because if you have
-			//	func f() (_ int)
-			//	func g() int
-			// f is allowed to use a plain 'return' with no arguments, while g is not.
-			// So the two cases must be distinguished.
-			n.Sym = lookupN("~b", gen)
-			gen++
-		}
-
-		funcarg(n, PPARAMOUT)
-	}
-
-	vargen = oldvargen
-}
-
-func funcarg(n *Node, ctxt Class) {
-	if n.Op != ODCLFIELD {
-		Fatalf("funcarg %v", n.Op)
-	}
-	if n.Sym == nil {
-		return
-	}
-
-	n.Right = newnamel(n.Pos, n.Sym)
-	n.Right.Name.Param.Ntype = n.Left
-	n.Right.SetIsDDD(n.IsDDD())
-	declare(n.Right, ctxt)
-
-	vargen++
-	n.Right.Name.Vargen = int32(vargen)
-}
-
-// Same as funcargs, except run over an already constructed TFUNC.
-// This happens during import, where the hidden_fndcl rule has
-// used functype directly to parse the function's type.
-func funcargs2(t *types.Type) {
-	if t.Etype != TFUNC {
-		Fatalf("funcargs2 %v", t)
-	}
-
-	for _, f := range t.Recvs().Fields().Slice() {
-		funcarg2(f, PPARAM)
-	}
-	for _, f := range t.Params().Fields().Slice() {
-		funcarg2(f, PPARAM)
-	}
-	for _, f := range t.Results().Fields().Slice() {
-		funcarg2(f, PPARAMOUT)
-	}
-}
-
-func funcarg2(f *types.Field, ctxt Class) {
-	if f.Sym == nil {
-		return
-	}
-	n := newnamel(f.Pos, f.Sym)
-	f.Nname = asTypesNode(n)
-	n.Type = f.Type
-	n.SetIsDDD(f.IsDDD())
-	declare(n, ctxt)
-}
-
-var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
-
-type funcStackEnt struct {
-	curfn      *Node
-	dclcontext Class
-}
-
-// finish the body.
-// called in auto-declaration context.
-// returns in extern-declaration context.
-func funcbody() {
-	// change the declaration context from auto to previous context
-	types.Popdcl()
-	var e funcStackEnt
-	funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
-	Curfn, dclcontext = e.curfn, e.dclcontext
-}
-
-// structs, functions, and methods.
-// they don't belong here, but where do they belong?
-func checkembeddedtype(t *types.Type) {
-	if t == nil {
-		return
-	}
-
-	if t.Sym == nil && t.IsPtr() {
-		t = t.Elem()
-		if t.IsInterface() {
-			yyerror("embedded type cannot be a pointer to interface")
-		}
-	}
-
-	if t.IsPtr() || t.IsUnsafePtr() {
-		yyerror("embedded type cannot be a pointer")
-	} else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() {
-		t.ForwardType().Embedlineno = lineno
-	}
-}
-
-func structfield(n *Node) *types.Field {
-	lno := lineno
-	lineno = n.Pos
-
-	if n.Op != ODCLFIELD {
-		Fatalf("structfield: oops %v\n", n)
-	}
-
-	f := types.NewField()
-	f.Pos = n.Pos
-	f.Sym = n.Sym
-
-	if n.Left != nil {
-		n.Left = typecheck(n.Left, ctxType)
-		n.Type = n.Left.Type
-		n.Left = nil
-	}
-
-	f.Type = n.Type
-	if f.Type == nil {
-		f.SetBroke(true)
-	}
-
-	if n.Embedded() {
-		checkembeddedtype(n.Type)
-		f.Embedded = 1
-	} else {
-		f.Embedded = 0
-	}
-
-	switch u := n.Val().U.(type) {
-	case string:
-		f.Note = u
-	default:
-		yyerror("field tag must be a string")
-	case nil:
-		// no-op
-	}
-
-	lineno = lno
-	return f
-}
-
-// checkdupfields emits errors for duplicately named fields or methods in
-// a list of struct or interface types.
-func checkdupfields(what string, fss ...[]*types.Field) {
-	seen := make(map[*types.Sym]bool)
-	for _, fs := range fss {
-		for _, f := range fs {
-			if f.Sym == nil || f.Sym.IsBlank() {
-				continue
-			}
-			if seen[f.Sym] {
-				yyerrorl(f.Pos, "duplicate %s %s", what, f.Sym.Name)
-				continue
-			}
-			seen[f.Sym] = true
-		}
-	}
-}
-
-// convert a parsed id/type list into
-// a type for struct/interface/arglist
-func tostruct(l []*Node) *types.Type {
-	t := types.New(TSTRUCT)
-
-	fields := make([]*types.Field, len(l))
-	for i, n := range l {
-		f := structfield(n)
-		if f.Broke() {
-			t.SetBroke(true)
-		}
-		fields[i] = f
-	}
-	t.SetFields(fields)
-
-	checkdupfields("field", t.FieldSlice())
-
-	if !t.Broke() {
-		checkwidth(t)
-	}
-
-	return t
-}
-
-func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
-	t := types.New(TSTRUCT)
-	t.StructType().Funarg = funarg
-
-	fields := make([]*types.Field, len(l))
-	for i, n := range l {
-		f := structfield(n)
-		f.SetIsDDD(n.IsDDD())
-		if n.Right != nil {
-			n.Right.Type = f.Type
-			f.Nname = asTypesNode(n.Right)
-		}
-		if f.Broke() {
-			t.SetBroke(true)
-		}
-		fields[i] = f
-	}
-	t.SetFields(fields)
-	return t
-}
-
-func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
-	t := types.New(TSTRUCT)
-	t.StructType().Funarg = funarg
-	t.SetFields(fields)
-	return t
-}
-
-func interfacefield(n *Node) *types.Field {
-	lno := lineno
-	lineno = n.Pos
-
-	if n.Op != ODCLFIELD {
-		Fatalf("interfacefield: oops %v\n", n)
-	}
-
-	if n.Val().Ctype() != CTxxx {
-		yyerror("interface method cannot have annotation")
-	}
-
-	// MethodSpec = MethodName Signature | InterfaceTypeName .
-	//
-	// If Sym != nil, then Sym is MethodName and Left is Signature.
-	// Otherwise, Left is InterfaceTypeName.
-
-	if n.Left != nil {
-		n.Left = typecheck(n.Left, ctxType)
-		n.Type = n.Left.Type
-		n.Left = nil
-	}
-
-	f := types.NewField()
-	f.Pos = n.Pos
-	f.Sym = n.Sym
-	f.Type = n.Type
-	if f.Type == nil {
-		f.SetBroke(true)
-	}
-
-	lineno = lno
-	return f
-}
-
-func tointerface(l []*Node) *types.Type {
-	if len(l) == 0 {
-		return types.Types[TINTER]
-	}
-	t := types.New(TINTER)
-	var fields []*types.Field
-	for _, n := range l {
-		f := interfacefield(n)
-		if f.Broke() {
-			t.SetBroke(true)
-		}
-		fields = append(fields, f)
-	}
-	t.SetInterface(fields)
-	return t
-}
-
-func fakeRecv() *Node {
-	return anonfield(types.FakeRecvType())
-}
-
-func fakeRecvField() *types.Field {
-	f := types.NewField()
-	f.Type = types.FakeRecvType()
-	return f
-}
-
-// isifacemethod reports whether the function type f is the type
-// of an interface method. Such methods have the
-// special receiver type types.FakeRecvType().
-func isifacemethod(f *types.Type) bool {
-	return f.Recv().Type == types.FakeRecvType()
-}
-
-// turn a parsed function declaration into a type
-func functype(this *Node, in, out []*Node) *types.Type {
-	t := types.New(TFUNC)
-
-	var rcvr []*Node
-	if this != nil {
-		rcvr = []*Node{this}
-	}
-	t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
-	t.FuncType().Params = tofunargs(in, types.FunargParams)
-	t.FuncType().Results = tofunargs(out, types.FunargResults)
-
-	checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
-
-	if t.Recvs().Broke() || t.Results().Broke() || t.Params().Broke() {
-		t.SetBroke(true)
-	}
-
-	t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
-
-	return t
-}
-
-func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
-	t := types.New(TFUNC)
-
-	var rcvr []*types.Field
-	if this != nil {
-		rcvr = []*types.Field{this}
-	}
-	t.FuncType().Receiver = tofunargsfield(rcvr, types.FunargRcvr)
-	t.FuncType().Params = tofunargsfield(in, types.FunargParams)
-	t.FuncType().Results = tofunargsfield(out, types.FunargResults)
-
-	t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
-
-	return t
-}
-
-// origSym returns the original symbol written by the user.
-func origSym(s *types.Sym) *types.Sym {
-	if s == nil {
-		return nil
-	}
-
-	if len(s.Name) > 1 && s.Name[0] == '~' {
-		switch s.Name[1] {
-		case 'r': // originally an unnamed result
-			return nil
-		case 'b': // originally the blank identifier _
-			// TODO(mdempsky): Does s.Pkg matter here?
-			return nblank.Sym
-		}
-		return s
-	}
-
-	if strings.HasPrefix(s.Name, ".anon") {
-		// originally an unnamed or _ name (see subr.go: structargs)
-		return nil
-	}
-
-	return s
-}
-
-// methodSym returns the method symbol representing a method name
-// associated with a specific receiver type.
-//
-// Method symbols can be used to distinguish the same method appearing
-// in different method sets. For example, T.M and (*T).M have distinct
-// method symbols.
-//
-// The returned symbol will be marked as a function.
-func methodSym(recv *types.Type, msym *types.Sym) *types.Sym {
-	sym := methodSymSuffix(recv, msym, "")
-	sym.SetFunc(true)
-	return sym
-}
-
-// methodSymSuffix is like methodSym, but allows attaching a
-// distinguisher suffix. To avoid collisions, the suffix must not
-// start with a letter, number, or period.
-func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
-	if msym.IsBlank() {
-		Fatalf("blank method name")
-	}
-
-	rsym := recv.Sym
-	if recv.IsPtr() {
-		if rsym != nil {
-			Fatalf("declared pointer receiver type: %v", recv)
-		}
-		rsym = recv.Elem().Sym
-	}
-
-	// Find the package the receiver type appeared in. For
-	// anonymous receiver types (i.e., anonymous structs with
-	// embedded fields), use the "go" pseudo-package instead.
-	rpkg := gopkg
-	if rsym != nil {
-		rpkg = rsym.Pkg
-	}
-
-	var b bytes.Buffer
-	if recv.IsPtr() {
-		// The parentheses aren't really necessary, but
-		// they're pretty traditional at this point.
-		fmt.Fprintf(&b, "(%-S)", recv)
-	} else {
-		fmt.Fprintf(&b, "%-S", recv)
-	}
-
-	// A particular receiver type may have multiple non-exported
-	// methods with the same name. To disambiguate them, include a
-	// package qualifier for names that came from a different
-	// package than the receiver type.
-	if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
-		b.WriteString(".")
-		b.WriteString(msym.Pkg.Prefix)
-	}
-
-	b.WriteString(".")
-	b.WriteString(msym.Name)
-	b.WriteString(suffix)
-
-	return rpkg.LookupBytes(b.Bytes())
-}
-
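The distinct method symbols produced here (T.M versus (*T).M) surface in the language as method expressions. A self-contained illustration:

	// Standalone sketch: method expressions name both method symbols.
	package main

	import "fmt"

	type T struct{}

	func (T) M() string { return "hello" }

	func main() {
		f := T.M    // func(T) string, corresponding to the T.M symbol
		g := (*T).M // func(*T) string, the compiler-generated wrapper
		t := T{}
		fmt.Println(f(t), g(&t)) // hello hello
	}
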
-// Add a method, declared as a function.
-// - msym is the method symbol
-// - t is function type (with receiver)
-// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
-	if msym == nil {
-		Fatalf("no method symbol")
-	}
-
-	// get parent type sym
-	rf := t.Recv() // ptr to this structure
-	if rf == nil {
-		yyerror("missing receiver")
-		return nil
-	}
-
-	mt := methtype(rf.Type)
-	if mt == nil || mt.Sym == nil {
-		pa := rf.Type
-		t := pa
-		if t != nil && t.IsPtr() {
-			if t.Sym != nil {
-				yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
-				return nil
-			}
-			t = t.Elem()
-		}
-
-		switch {
-		case t == nil || t.Broke():
-			// rely on typecheck having complained before
-		case t.Sym == nil:
-			yyerror("invalid receiver type %v (%v is not a defined type)", pa, t)
-		case t.IsPtr():
-			yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
-		case t.IsInterface():
-			yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
-		default:
-			// Should have picked off all the reasons above,
-			// but just in case, fall back to generic error.
-			yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
-		}
-		return nil
-	}
-
-	if local && mt.Sym.Pkg != localpkg {
-		yyerror("cannot define new methods on non-local type %v", mt)
-		return nil
-	}
-
-	if msym.IsBlank() {
-		return nil
-	}
-
-	if mt.IsStruct() {
-		for _, f := range mt.Fields().Slice() {
-			if f.Sym == msym {
-				yyerror("type %v has both field and method named %v", mt, msym)
-				f.SetBroke(true)
-				return nil
-			}
-		}
-	}
-
-	for _, f := range mt.Methods().Slice() {
-		if msym.Name != f.Sym.Name {
-			continue
-		}
-		// types.Identical only checks that incoming and result parameters match,
-		// so explicitly check that the receiver parameters match too.
-		if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
-			yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
-		}
-		return f
-	}
-
-	f := types.NewField()
-	f.Pos = lineno
-	f.Sym = msym
-	f.Type = t
-	f.SetNointerface(nointerface)
-
-	mt.Methods().Append(f)
-	return f
-}
-
-func funcsymname(s *types.Sym) string {
-	return s.Name + "·f"
-}
-
-// funcsym returns s·f.
-func funcsym(s *types.Sym) *types.Sym {
-	// funcsymsmu here serves to protect not just mutations of funcsyms (below),
-	// but also the package lookup of the func sym name,
-	// since this function gets called concurrently from the backend.
-	// There are no other concurrent package lookups in the backend,
-	// except for the types package, which is protected separately.
-	// Reusing funcsymsmu to also cover this package lookup
-	// avoids a general, broader, expensive package lookup mutex.
-	// Note that makefuncsym also does package lookup of func sym names,
-	// but it is only called serially, from the front end.
-	funcsymsmu.Lock()
-	sf, existed := s.Pkg.LookupOK(funcsymname(s))
-	// Don't export s·f when compiling for dynamic linking.
-	// When dynamically linking, the necessary function
-	// symbols will be created explicitly with makefuncsym.
-	// See the makefuncsym comment for details.
-	if !Ctxt.Flag_dynlink && !existed {
-		funcsyms = append(funcsyms, s)
-	}
-	funcsymsmu.Unlock()
-	return sf
-}
-
-// makefuncsym ensures that s·f is exported.
-// It is only used with -dynlink.
-// When not compiling for dynamic linking,
-// the funcsyms are created as needed by
-// the packages that use them.
-// Normally we emit the s·f stubs as DUPOK syms,
-// but DUPOK doesn't work across shared library boundaries.
-// So instead, when dynamic linking, we only create
-// the s·f stubs in s's package.
-func makefuncsym(s *types.Sym) {
-	if !Ctxt.Flag_dynlink {
-		Fatalf("makefuncsym dynlink")
-	}
-	if s.IsBlank() {
-		return
-	}
-	if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
-		// runtime.getg(), getclosureptr(), getcallerpc(), and
-		// getcallersp() are not real functions and so do not
-		// get funcsyms.
-		return
-	}
-	if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
-		funcsyms = append(funcsyms, s)
-	}
-}
-
-// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *Node) {
-	if n.Op != ONAME || n.Class() != Pxxx {
-		Fatalf("expected ONAME/Pxxx node, got %v", n)
-	}
-
-	n.SetClass(PFUNC)
-	n.Sym.SetFunc(true)
-}
-
-func dclfunc(sym *types.Sym, tfn *Node) *Node {
-	if tfn.Op != OTFUNC {
-		Fatalf("expected OTFUNC node, got %v", tfn)
-	}
-
-	fn := nod(ODCLFUNC, nil, nil)
-	fn.Func.Nname = newfuncnamel(lineno, sym)
-	fn.Func.Nname.Name.Defn = fn
-	fn.Func.Nname.Name.Param.Ntype = tfn
-	setNodeNameFunc(fn.Func.Nname)
-	funchdr(fn)
-	fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
-	return fn
-}
-
-type nowritebarrierrecChecker struct {
-	// extraCalls contains extra function calls that may not be
-	// visible during later analysis. It maps from the ODCLFUNC of
-	// the caller to a list of callees.
-	extraCalls map[*Node][]nowritebarrierrecCall
-
-	// curfn is the current function during AST walks.
-	curfn *Node
-}
-
-type nowritebarrierrecCall struct {
-	target *Node    // ODCLFUNC of caller or callee
-	lineno src.XPos // line of call
-}
-
-type nowritebarrierrecCallSym struct {
-	target *obj.LSym // LSym of callee
-	lineno src.XPos  // line of call
-}
-
-// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
-// must be called before transformclosure and walk.
-func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
-	c := &nowritebarrierrecChecker{
-		extraCalls: make(map[*Node][]nowritebarrierrecCall),
-	}
-
-	// Find all systemstack calls and record their targets. In
-	// general, flow analysis can't see into systemstack, but it's
-	// important to handle it for this check, so we model it
-	// directly. This has to happen before transformclosure since
-	// it's a lot harder to work out the argument afterwards.
-	for _, n := range xtop {
-		if n.Op != ODCLFUNC {
-			continue
-		}
-		c.curfn = n
-		inspect(n, c.findExtraCalls)
-	}
-	c.curfn = nil
-	return c
-}
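-
-// For illustration (an expository addition): a call such as
-//
-//	systemstack(func() {
-//		// ... code that must still obey nowritebarrierrec ...
-//	})
-//
-// is invisible to the later LSym-based call graph walk, so findExtraCalls
-// records an explicit edge from the enclosing function to the closure.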
-
-func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
-	if n.Op != OCALLFUNC {
-		return true
-	}
-	fn := n.Left
-	if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil {
-		return true
-	}
-	if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
-		return true
-	}
-
-	var callee *Node
-	arg := n.List.First()
-	switch arg.Op {
-	case ONAME:
-		callee = arg.Name.Defn
-	case OCLOSURE:
-		callee = arg.Func.Closure
-	default:
-		Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
-	}
-	if callee.Op != ODCLFUNC {
-		Fatalf("expected ODCLFUNC node, got %+v", callee)
-	}
-	c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
-	return true
-}
-
-// recordCall records a call from the ODCLFUNC node "from" to the
-// function symbol "to" at position pos.
-//
-// This should be done as late as possible during compilation to
-// capture precise call graphs. The target of the call is an LSym
-// because that's all we know after we start SSA.
-//
-// This can be called concurrently for different "from" Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) {
-	if from.Op != ODCLFUNC {
-		Fatalf("expected ODCLFUNC, got %v", from)
-	}
-	// We record this information on the *Func so this is
-	// concurrent-safe.
-	fn := from.Func
-	if fn.nwbrCalls == nil {
-		fn.nwbrCalls = new([]nowritebarrierrecCallSym)
-	}
-	*fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos})
-}
-
-func (c *nowritebarrierrecChecker) check() {
-	// We walk the call graph as late as possible so we can
-	// capture all calls created by lowering, but this means we
-	// only get to see the obj.LSyms of calls. symToFunc lets us
-	// get back to the ODCLFUNCs.
-	symToFunc := make(map[*obj.LSym]*Node)
-	// funcs records the back-edges of the BFS call graph walk. It
-	// maps from the ODCLFUNC of each function that must not have
-	// write barriers to the call that inhibits them. Functions
-	// that are directly marked go:nowritebarrierrec are in this
-	// map with a zero-valued nowritebarrierrecCall. This also
-	// acts as the set of marks for the BFS of the call graph.
-	funcs := make(map[*Node]nowritebarrierrecCall)
-	// q is the queue of ODCLFUNC Nodes to visit in BFS order.
-	var q nodeQueue
-
-	for _, n := range xtop {
-		if n.Op != ODCLFUNC {
-			continue
-		}
-
-		symToFunc[n.Func.lsym] = n
-
-		// Make nowritebarrierrec functions BFS roots.
-		if n.Func.Pragma&Nowritebarrierrec != 0 {
-			funcs[n] = nowritebarrierrecCall{}
-			q.pushRight(n)
-		}
-		// Check go:nowritebarrier functions.
-		if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
-			yyerrorl(n.Func.WBPos, "write barrier prohibited")
-		}
-	}
-
-	// Perform a BFS of the call graph from all
-	// go:nowritebarrierrec functions.
-	enqueue := func(src, target *Node, pos src.XPos) {
-		if target.Func.Pragma&Yeswritebarrierrec != 0 {
-			// Don't flow into this function.
-			return
-		}
-		if _, ok := funcs[target]; ok {
-			// Already found a path to target.
-			return
-		}
-
-		// Record the path.
-		funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
-		q.pushRight(target)
-	}
-	for !q.empty() {
-		fn := q.popLeft()
-
-		// Check fn.
-		if fn.Func.WBPos.IsKnown() {
-			var err bytes.Buffer
-			call := funcs[fn]
-			for call.target != nil {
-				fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname)
-				call = funcs[call.target]
-			}
-			yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
-			continue
-		}
-
-		// Enqueue fn's calls.
-		for _, callee := range c.extraCalls[fn] {
-			enqueue(fn, callee.target, callee.lineno)
-		}
-		if fn.Func.nwbrCalls == nil {
-			continue
-		}
-		for _, callee := range *fn.Func.nwbrCalls {
-			target := symToFunc[callee.target]
-			if target != nil {
-				enqueue(fn, target, callee.lineno)
-			}
-		}
-	}
-}
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
deleted file mode 100644
index f45796c..0000000
--- a/src/cmd/compile/internal/gc/embed.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/syntax"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"encoding/json"
-	"io/ioutil"
-	"log"
-	"path"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-var embedlist []*Node
-
-var embedCfg struct {
-	Patterns map[string][]string
-	Files    map[string]string
-}
-
-func readEmbedCfg(file string) {
-	data, err := ioutil.ReadFile(file)
-	if err != nil {
-		log.Fatalf("-embedcfg: %v", err)
-	}
-	if err := json.Unmarshal(data, &embedCfg); err != nil {
-		log.Fatalf("%s: %v", file, err)
-	}
-	if embedCfg.Patterns == nil {
-		log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
-	}
-	if embedCfg.Files == nil {
-		log.Fatalf("%s: invalid embedcfg: missing Files", file)
-	}
-}
-
-const (
-	embedUnknown = iota
-	embedBytes
-	embedString
-	embedFiles
-)
-
-func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) {
-	haveEmbed := false
-	for _, decl := range p.file.DeclList {
-		imp, ok := decl.(*syntax.ImportDecl)
-		if !ok {
-			// imports always come first
-			break
-		}
-		path, _ := strconv.Unquote(imp.Path.Value)
-		if path == "embed" {
-			haveEmbed = true
-			break
-		}
-	}
-
-	pos := embeds[0].Pos
-	if !haveEmbed {
-		p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
-		return
-	}
-	if len(names) > 1 {
-		p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
-		return
-	}
-	if len(exprs) > 0 {
-		p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
-		return
-	}
-	if typ == nil {
-		// Should not happen, since len(exprs) == 0 now.
-		p.yyerrorpos(pos, "go:embed cannot apply to var without type")
-		return
-	}
-	if dclcontext != PEXTERN {
-		p.yyerrorpos(pos, "go:embed cannot apply to var inside func")
-		return
-	}
-
-	var list []irEmbed
-	for _, e := range embeds {
-		list = append(list, irEmbed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
-	}
-	v := names[0]
-	v.Name.Param.SetEmbedList(list)
-	embedlist = append(embedlist, v)
-}
-
-func embedFileList(v *Node, kind int) []string {
-	// Build list of files to store.
-	have := make(map[string]bool)
-	var list []string
-	for _, e := range v.Name.Param.EmbedList() {
-		for _, pattern := range e.Patterns {
-			files, ok := embedCfg.Patterns[pattern]
-			if !ok {
-				yyerrorl(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
-			}
-			for _, file := range files {
-				if embedCfg.Files[file] == "" {
-					yyerrorl(e.Pos, "invalid go:embed: build system did not map file: %s", file)
-					continue
-				}
-				if !have[file] {
-					have[file] = true
-					list = append(list, file)
-				}
-				if kind == embedFiles {
-					for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
-						have[dir] = true
-						list = append(list, dir+"/")
-					}
-				}
-			}
-		}
-	}
-	sort.Slice(list, func(i, j int) bool {
-		return embedFileLess(list[i], list[j])
-	})
-
-	if kind == embedString || kind == embedBytes {
-		if len(list) > 1 {
-			yyerrorl(v.Pos, "invalid go:embed: multiple files for type %v", v.Type)
-			return nil
-		}
-	}
-
-	return list
-}
-
-// embedKind determines the kind of embedding variable.
-func embedKind(typ *types.Type) int {
-	if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
-		return embedFiles
-	}
-	if typ.Etype == types.TSTRING {
-		return embedString
-	}
-	if typ.Etype == types.TSLICE && typ.Elem().Etype == types.TUINT8 {
-		return embedBytes
-	}
-	return embedUnknown
-}
-
-func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
-	if name[len(name)-1] == '/' {
-		isDir = true
-		name = name[:len(name)-1]
-	}
-	i := len(name) - 1
-	for i >= 0 && name[i] != '/' {
-		i--
-	}
-	if i < 0 {
-		return ".", name, isDir
-	}
-	return name[:i], name[i+1:], isDir
-}
-
-// embedFileLess implements the sort order for a list of embedded files.
-// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
-func embedFileLess(x, y string) bool {
-	xdir, xelem, _ := embedFileNameSplit(x)
-	ydir, yelem, _ := embedFileNameSplit(y)
-	return xdir < ydir || xdir == ydir && xelem < yelem
-}
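-
-// For example (illustrative), embedFileLess orders the list
-//
-//	{"a/b/x.txt", "a/y.txt", "z.txt"}
-//
-// as z.txt, a/y.txt, a/b/x.txt, because the directories compare
-// "." < "a" < "a/b" before element names are considered.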
-
-func dumpembeds() {
-	for _, v := range embedlist {
-		initEmbed(v)
-	}
-}
-
-// initEmbed emits the init data for a //go:embed variable,
-// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *Node) {
-	commentPos := v.Name.Param.EmbedList()[0].Pos
-	if !langSupported(1, 16, localpkg) {
-		lno := lineno
-		lineno = commentPos
-		yyerrorv("go1.16", "go:embed")
-		lineno = lno
-		return
-	}
-	if embedCfg.Patterns == nil {
-		yyerrorl(commentPos, "invalid go:embed: build system did not supply embed configuration")
-		return
-	}
-	kind := embedKind(v.Type)
-	if kind == embedUnknown {
-		yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
-		return
-	}
-
-	files := embedFileList(v, kind)
-	switch kind {
-	case embedString, embedBytes:
-		file := files[0]
-		fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
-		if err != nil {
-			yyerrorl(v.Pos, "embed %s: %v", file, err)
-		}
-		sym := v.Sym.Linksym()
-		off := 0
-		off = dsymptr(sym, off, fsym, 0)       // data string
-		off = duintptr(sym, off, uint64(size)) // len
-		if kind == embedBytes {
-			duintptr(sym, off, uint64(size)) // cap for slice
-		}
-
-	case embedFiles:
-		slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
-		off := 0
-		// []files pointed at by Files
-		off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
-		off = duintptr(slicedata, off, uint64(len(files)))
-		off = duintptr(slicedata, off, uint64(len(files)))
-
-		// embed/embed.go type file is:
-		//	name string
-		//	data string
-		//	hash [16]byte
-		// Emit one of these per file in the set.
-		const hashSize = 16
-		hash := make([]byte, hashSize)
-		for _, file := range files {
-			off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
-			off = duintptr(slicedata, off, uint64(len(file)))
-			if strings.HasSuffix(file, "/") {
-				// entry for directory - no data
-				off = duintptr(slicedata, off, 0)
-				off = duintptr(slicedata, off, 0)
-				off += hashSize
-			} else {
-				fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
-				if err != nil {
-					yyerrorl(v.Pos, "embed %s: %v", file, err)
-				}
-				off = dsymptr(slicedata, off, fsym, 0) // data string
-				off = duintptr(slicedata, off, uint64(size))
-				off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
-			}
-		}
-		ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
-		sym := v.Sym.Linksym()
-		dsymptr(sym, 0, slicedata, 0)
-	}
-}
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
deleted file mode 100644
index 6f328ab..0000000
--- a/src/cmd/compile/internal/gc/esc.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"fmt"
-)
-
-func escapes(all []*Node) {
-	visitBottomUp(all, escapeFuncs)
-}
-
-const (
-	EscFuncUnknown = 0 + iota
-	EscFuncPlanned
-	EscFuncStarted
-	EscFuncTagged
-)
-
-func min8(a, b int8) int8 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func max8(a, b int8) int8 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-const (
-	EscUnknown = iota
-	EscNone    // Does not escape to heap, result, or parameters.
-	EscHeap    // Reachable from the heap
-	EscNever   // By construction will not escape.
-)
-
-// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *Node) *types.Sym {
-	if fn == nil || fn.Func.Nname == nil {
-		return nil
-	}
-	return fn.Func.Nname.Sym
-}
-
-// Mark labels that have no backjumps to them as not increasing e.loopdepth.
-// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
-// and set it to one of the following two. Then in esc we'll clear it again.
-var (
-	looping    Node
-	nonlooping Node
-)
-
-func isSliceSelfAssign(dst, src *Node) bool {
-	// Detect the following special case.
-	//
-	//	func (b *Buffer) Foo() {
-	//		n, m := ...
-	//		b.buf = b.buf[n:m]
-	//	}
-	//
-	// This assignment is a no-op for escape analysis,
-	// it does not store any new pointers into b that were not already there.
-	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
-	// Here we assume that the statement will not contain calls,
-	// that is, that order will move any calls to init.
-	// Otherwise base ONAME value could change between the moments
-	// when we evaluate it for dst and for src.
-
-	// dst is ONAME dereference.
-	if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
-		return false
-	}
-	// src is a slice operation.
-	switch src.Op {
-	case OSLICE, OSLICE3, OSLICESTR:
-		// OK.
-	case OSLICEARR, OSLICE3ARR:
-		// Since arrays are embedded into their containing object,
-		// slicing a non-pointer array will introduce a new pointer into b that was not already there
-		// (pointer to b itself). After such assignment, if b contents escape,
-		// b escapes as well. If we ignore such OSLICEARR, we will conclude
-		// that b does not escape when b contents do.
-		//
-		// Pointer to an array is OK since it's not stored inside b directly.
-		// For slicing an array (not pointer to array), there is an implicit OADDR.
-		// We check that to determine non-pointer array slicing.
-		if src.Left.Op == OADDR {
-			return false
-		}
-	default:
-		return false
-	}
-	// slice is applied to ONAME dereference.
-	if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
-		return false
-	}
-	// dst and src reference the same base ONAME.
-	return dst.Left == src.Left.Left
-}
-
-// isSelfAssign reports whether assignment from src to dst can
-// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src *Node) bool {
-	if isSliceSelfAssign(dst, src) {
-		return true
-	}
-
-	// Detect trivial assignments that assign back to the same object.
-	//
-	// It covers these cases:
-	//	val.x = val.y
-	//	val.x[i] = val.y[j]
-	//	val.x1.x2 = val.x1.y2
-	//	... etc
-	//
-	// These assignments do not change assigned object lifetime.
-
-	if dst == nil || src == nil || dst.Op != src.Op {
-		return false
-	}
-
-	switch dst.Op {
-	case ODOT, ODOTPTR:
-		// Safe trailing accessors that are permitted to differ.
-	case OINDEX:
-		if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
-			return false
-		}
-	default:
-		return false
-	}
-
-	// The expression prefix must be both "safe" and identical.
-	return samesafeexpr(dst.Left, src.Left)
-}
-
-// mayAffectMemory reports whether evaluation of n may affect the program's
-// memory state. If the expression can't affect memory state, then it can be
-// safely ignored by the escape analysis.
-func mayAffectMemory(n *Node) bool {
-	// We may want to use a list of "memory safe" ops instead of generally
-	// "side-effect free", which would include all calls and other ops that can
-	// allocate or change global state. For now, it's safer to start with the latter.
-	//
-	// We're ignoring things like division by zero, index out of range,
-	// and nil pointer dereference here.
-	switch n.Op {
-	case ONAME, OCLOSUREVAR, OLITERAL:
-		return false
-
-	// Left+Right group.
-	case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
-		return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
-
-	// Left group.
-	case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
-		ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
-		return mayAffectMemory(n.Left)
-
-	default:
-		return true
-	}
-}
-
-// heapAllocReason returns the reason the given Node must be heap
-// allocated, or the empty string if it doesn't.
-func heapAllocReason(n *Node) string {
-	if n.Type == nil {
-		return ""
-	}
-
-	// Parameters are always passed via the stack.
-	if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
-		return ""
-	}
-
-	if n.Type.Width > maxStackVarSize {
-		return "too large for stack"
-	}
-
-	if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
-		return "too large for stack"
-	}
-
-	if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
-		return "too large for stack"
-	}
-	if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
-		return "too large for stack"
-	}
-
-	if n.Op == OMAKESLICE {
-		r := n.Right
-		if r == nil {
-			r = n.Left
-		}
-		if !smallintconst(r) {
-			return "non-constant size"
-		}
-		if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
-			return "too large for stack"
-		}
-	}
-
-	return ""
-}
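-
-// For example (illustrative, assuming the default size limits):
-//
-//	var big [64 << 20]byte // "too large for stack"
-//	buf := make([]byte, n) // "non-constant size" when n is not a constant
-//
-// heapAllocReason reports both as requiring heap allocation.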
-
-// addrescapes tags node n as having had its address taken
-// by "increasing" the "value" of n.Esc to EscHeap.
-// Storage is allocated as necessary to allow the address
-// to be taken.
-func addrescapes(n *Node) {
-	switch n.Op {
-	default:
-		// Unexpected Op, probably due to a previous type error. Ignore.
-
-	case ODEREF, ODOTPTR:
-		// Nothing to do.
-
-	case ONAME:
-		if n == nodfp {
-			break
-		}
-
-		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
-		// on PPARAM it means something different.
-		if n.Class() == PAUTO && n.Esc == EscNever {
-			break
-		}
-
-		// If a closure reference escapes, mark the outer variable as escaping.
-		if n.Name.IsClosureVar() {
-			addrescapes(n.Name.Defn)
-			break
-		}
-
-		if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
-			break
-		}
-
-		// This is a plain parameter or local variable that needs to move to the heap,
-		// but possibly for a function enclosing the one we're compiling.
-		// That is, if we have:
-		//
-		//	func f(x int) {
-		//		func() {
-		//			global = &x
-		//		}
-		//	}
-		//
-		// then we're analyzing the inner closure but we need to move x to the
-		// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
-		oldfn := Curfn
-		Curfn = n.Name.Curfn
-		if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
-			Curfn = Curfn.Func.Closure
-		}
-		ln := lineno
-		lineno = Curfn.Pos
-		moveToHeap(n)
-		Curfn = oldfn
-		lineno = ln
-
-	// ODOTPTR has already been introduced,
-	// so these are the non-pointer ODOT and OINDEX.
-	// In &x[0], if x is a slice, then x does not
-	// escape--the pointer inside x does, but that
-	// is always a heap pointer anyway.
-	case ODOT, OINDEX, OPAREN, OCONVNOP:
-		if !n.Left.Type.IsSlice() {
-			addrescapes(n.Left)
-		}
-	}
-}
-
-// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *Node) {
-	if Debug.r != 0 {
-		Dump("MOVE", n)
-	}
-	if compiling_runtime {
-		yyerror("%v escapes to heap, not allowed in runtime", n)
-	}
-	if n.Class() == PAUTOHEAP {
-		Dump("n", n)
-		Fatalf("double move to heap")
-	}
-
-	// Allocate a local stack variable to hold the pointer to the heap copy.
-	// temp will add it to the function declaration list automatically.
-	heapaddr := temp(types.NewPtr(n.Type))
-	heapaddr.Sym = lookup("&" + n.Sym.Name)
-	heapaddr.Orig.Sym = heapaddr.Sym
-	heapaddr.Pos = n.Pos
-
-	// Unset AutoTemp to persist the &foo variable name through SSA to
-	// liveness analysis.
-	// TODO(mdempsky/drchase): Cleaner solution?
-	heapaddr.Name.SetAutoTemp(false)
-
-	// Parameters have a local stack copy used at function start/end
-	// in addition to the copy in the heap that may live longer than
-	// the function.
-	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
-		if n.Xoffset == BADWIDTH {
-			Fatalf("addrescapes before param assignment")
-		}
-
-		// We rewrite n below to be a heap variable (indirection of heapaddr).
-		// Preserve a copy so we can still write code referring to the original,
-		// and substitute that copy into the function declaration list
-		// so that analyses of the local (on-stack) variables use it.
-		stackcopy := newname(n.Sym)
-		stackcopy.Type = n.Type
-		stackcopy.Xoffset = n.Xoffset
-		stackcopy.SetClass(n.Class())
-		stackcopy.Name.Param.Heapaddr = heapaddr
-		if n.Class() == PPARAMOUT {
-			// Make sure the pointer to the heap copy is kept live throughout the function.
-			// The function could panic at any point, and then a defer could recover.
-			// Thus, we need the pointer to the heap copy always available so the
-			// post-deferreturn code can copy the return value back to the stack.
-			// See issue 16095.
-			heapaddr.Name.SetIsOutputParamHeapAddr(true)
-		}
-		n.Name.Param.Stackcopy = stackcopy
-
-		// Substitute the stackcopy into the function variable list so that
-		// liveness and other analyses use the underlying stack slot
-		// and not the now-pseudo-variable n.
-		found := false
-		for i, d := range Curfn.Func.Dcl {
-			if d == n {
-				Curfn.Func.Dcl[i] = stackcopy
-				found = true
-				break
-			}
-			// Parameters are before locals, so we can stop early.
-			// This limits the search even in functions with many local variables.
-			if d.Class() == PAUTO {
-				break
-			}
-		}
-		if !found {
-			Fatalf("cannot find %v in local variable list", n)
-		}
-		Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	}
-
-	// Modify n in place so that uses of n now mean indirection of the heapaddr.
-	n.SetClass(PAUTOHEAP)
-	n.Xoffset = 0
-	n.Name.Param.Heapaddr = heapaddr
-	n.Esc = EscHeap
-	if Debug.m != 0 {
-		Warnl(n.Pos, "moved to heap: %v", n)
-	}
-}
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-const unsafeUintptrTag = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const uintptrEscapesTag = "uintptr-escapes"
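-
-// For illustration (an expository addition): a body-less declaration like
-//
-//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) // implemented in assembly
-//
-// gets each uintptr parameter tagged unsafe-uintptr by paramTag below,
-// telling callers to keep any pointer converted to uintptr for such an
-// argument alive across the call.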
-
-func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
-	name := func() string {
-		if f.Sym != nil {
-			return f.Sym.Name
-		}
-		return fmt.Sprintf("arg#%d", narg)
-	}
-
-	if fn.Nbody.Len() == 0 {
-		// Assume that uintptr arguments must be held live across the call.
-		// This is most important for syscall.Syscall.
-		// See golang.org/issue/13372.
-		// This really doesn't have much to do with escape analysis per se,
-		// but we are reusing the ability to annotate an individual function
-		// argument and pass those annotations along to importing code.
-		if f.Type.IsUintptr() {
-			if Debug.m != 0 {
-				Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
-			}
-			return unsafeUintptrTag
-		}
-
-		if !f.Type.HasPointers() { // don't bother tagging for scalars
-			return ""
-		}
-
-		var esc EscLeaks
-
-		// External functions are assumed unsafe, unless
-		// //go:noescape is given before the declaration.
-		if fn.Func.Pragma&Noescape != 0 {
-			if Debug.m != 0 && f.Sym != nil {
-				Warnl(f.Pos, "%v does not escape", name())
-			}
-		} else {
-			if Debug.m != 0 && f.Sym != nil {
-				Warnl(f.Pos, "leaking param: %v", name())
-			}
-			esc.AddHeap(0)
-		}
-
-		return esc.Encode()
-	}
-
-	if fn.Func.Pragma&UintptrEscapes != 0 {
-		if f.Type.IsUintptr() {
-			if Debug.m != 0 {
-				Warnl(f.Pos, "marking %v as escaping uintptr", name())
-			}
-			return uintptrEscapesTag
-		}
-		if f.IsDDD() && f.Type.Elem().IsUintptr() {
-			// final argument is ...uintptr.
-			if Debug.m != 0 {
-				Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
-			}
-			return uintptrEscapesTag
-		}
-	}
-
-	if !f.Type.HasPointers() { // don't bother tagging for scalars
-		return ""
-	}
-
-	// Unnamed parameters are unused and therefore do not escape.
-	if f.Sym == nil || f.Sym.IsBlank() {
-		var esc EscLeaks
-		return esc.Encode()
-	}
-
-	n := asNode(f.Nname)
-	loc := e.oldLoc(n)
-	esc := loc.paramEsc
-	esc.Optimize()
-
-	if Debug.m != 0 && !loc.escapes {
-		if esc.Empty() {
-			Warnl(f.Pos, "%v does not escape", name())
-		}
-		if x := esc.Heap(); x >= 0 {
-			if x == 0 {
-				Warnl(f.Pos, "leaking param: %v", name())
-			} else {
-				// TODO(mdempsky): Mention level=x like below?
-				Warnl(f.Pos, "leaking param content: %v", name())
-			}
-		}
-		for i := 0; i < numEscResults; i++ {
-			if x := esc.Result(i); x >= 0 {
-				res := fn.Type.Results().Field(i).Sym
-				Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
-			}
-		}
-	}
-
-	return esc.Encode()
-}
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
deleted file mode 100644
index 618bdf7..0000000
--- a/src/cmd/compile/internal/gc/escape.go
+++ /dev/null
@@ -1,1538 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/logopt"
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-	"math"
-	"strings"
-)
-
-// Escape analysis.
-//
-// Here we analyze functions to determine which Go variables
-// (including implicit allocations such as calls to "new" or "make",
-// composite literals, etc.) can be allocated on the stack. The two
-// key invariants we have to ensure are: (1) pointers to stack objects
-// cannot be stored in the heap, and (2) pointers to a stack object
-// cannot outlive that object (e.g., because the declaring function
-// returned and destroyed the object's stack frame, or its space is
-// reused across loop iterations for logically distinct variables).
-//
-// We implement this with a static data-flow analysis of the AST.
-// First, we construct a directed weighted graph where vertices
-// (termed "locations") represent variables allocated by statements
-// and expressions, and edges represent assignments between variables
-// (with weights representing addressing/dereference counts).
-//
-// Next we walk the graph looking for assignment paths that might
-// violate the invariants stated above. If a variable v's address is
-// stored in the heap or elsewhere that may outlive it, then v is
-// marked as requiring heap allocation.
-//
-// To support interprocedural analysis, we also record data-flow from
-// each function's parameters to the heap and to its result
-// parameters. This information is summarized as "parameter tags",
-// which are used at static call sites to improve escape analysis of
-// function arguments.
-
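-// As a minimal illustration (an expository addition, not original source),
-// invariant (2) above is what forces x to the heap here:
-//
-//	func f() *int {
-//		x := 42
-//		return &x // x's address outlives f's frame, so x escapes
-//	}
-//
-//	func g() int {
-//		y := 42
-//		p := &y
-//		return *p // &y never leaves g, so y can stay on the stack
-//	}
-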
-// Constructing the location graph.
-//
-// Every allocating statement (e.g., variable declaration) or
-// expression (e.g., "new" or "make") is first mapped to a unique
-// "location."
-//
-// We also model every Go assignment as a directed edge between
-// locations. The number of dereference operations minus the number of
-// addressing operations is recorded as the edge's weight (termed
-// "derefs"). For example:
-//
-//     p = &q    // -1
-//     p = q     //  0
-//     p = *q    //  1
-//     p = **q   //  2
-//
-//     p = **&**&q  // 2
-//
-// Note that the & operator can only be applied to addressable
-// expressions, and the expression &x itself is not addressable, so
-// derefs cannot go below -1.
-//
-// Every Go language construct is lowered into this representation,
-// generally without sensitivity to flow, path, or context; and
-// without distinguishing elements within a compound variable. For
-// example:
-//
-//     var x struct { f, g *int }
-//     var u []*int
-//
-//     x.f = u[0]
-//
-// is modeled simply as
-//
-//     x = *u
-//
-// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
-// u[2], etc. However, we do record the implicit dereference involved
-// in indexing a slice.
-
-type Escape struct {
-	allLocs []*EscLocation
-
-	curfn *Node
-
-	// loopDepth counts the current loop nesting depth within
-	// curfn. It increments within each "for" loop and at each
-	// label with a corresponding backwards "goto" (i.e.,
-	// unstructured loop).
-	loopDepth int
-
-	heapLoc  EscLocation
-	blankLoc EscLocation
-}
-
-// An EscLocation represents an abstract location that stores a Go
-// variable.
-type EscLocation struct {
-	n         *Node     // represented variable or expression, if any
-	curfn     *Node     // enclosing function
-	edges     []EscEdge // incoming edges
-	loopDepth int       // loopDepth at declaration
-
-	// derefs and walkgen are used during walkOne to track the
-	// minimal dereferences from the walk root.
-	derefs  int // >= -1
-	walkgen uint32
-
-	// dst and dstEdgeIdx track the next immediate assignment
-	// destination location during walkOne, along with the index
-	// of the edge pointing back to this location.
-	dst        *EscLocation
-	dstEdgeIdx int
-
-	// queued is used by walkAll to track whether this location is
-	// in the walk queue.
-	queued bool
-
-	// escapes reports whether the represented variable's address
-	// escapes; that is, whether the variable must be heap
-	// allocated.
-	escapes bool
-
-	// transient reports whether the represented expression's
-	// address does not outlive the statement; that is, whether
-	// its storage can be immediately reused.
-	transient bool
-
-	// paramEsc records the represented parameter's leak set.
-	paramEsc EscLeaks
-}
-
-// An EscEdge represents an assignment edge between two Go variables.
-type EscEdge struct {
-	src    *EscLocation
-	derefs int // >= -1
-	notes  *EscNote
-}
-
-// escapeFuncs performs escape analysis on a minimal batch of
-// functions.
-func escapeFuncs(fns []*Node, recursive bool) {
-	for _, fn := range fns {
-		if fn.Op != ODCLFUNC {
-			Fatalf("unexpected node: %v", fn)
-		}
-	}
-
-	var e Escape
-	e.heapLoc.escapes = true
-
-	// Construct data-flow graph from syntax trees.
-	for _, fn := range fns {
-		e.initFunc(fn)
-	}
-	for _, fn := range fns {
-		e.walkFunc(fn)
-	}
-	e.curfn = nil
-
-	e.walkAll()
-	e.finish(fns)
-}
-
-func (e *Escape) initFunc(fn *Node) {
-	if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
-		Fatalf("unexpected node: %v", fn)
-	}
-	fn.Esc = EscFuncPlanned
-	if Debug.m > 3 {
-		Dump("escAnalyze", fn)
-	}
-
-	e.curfn = fn
-	e.loopDepth = 1
-
-	// Allocate locations for local variables.
-	for _, dcl := range fn.Func.Dcl {
-		if dcl.Op == ONAME {
-			e.newLoc(dcl, false)
-		}
-	}
-}
-
-func (e *Escape) walkFunc(fn *Node) {
-	fn.Esc = EscFuncStarted
-
-	// Identify labels that mark the head of an unstructured loop.
-	inspectList(fn.Nbody, func(n *Node) bool {
-		switch n.Op {
-		case OLABEL:
-			n.Sym.Label = asTypesNode(&nonlooping)
-
-		case OGOTO:
-			// If we visited the label before the goto,
-			// then this is a looping label.
-			if n.Sym.Label == asTypesNode(&nonlooping) {
-				n.Sym.Label = asTypesNode(&looping)
-			}
-		}
-
-		return true
-	})
-
-	e.curfn = fn
-	e.loopDepth = 1
-	e.block(fn.Nbody)
-}
-
-// Below we implement the methods for walking the AST and recording
-// data flow edges. Note that because a sub-expression might have
-// side-effects, it's important to always visit the entire AST.
-//
-// For example, write either:
-//
-//     if x {
-//         e.discard(n.Left)
-//     } else {
-//         e.value(k, n.Left)
-//     }
-//
-// or
-//
-//     if x {
-//         k = e.discardHole()
-//     }
-//     e.value(k, n.Left)
-//
-// Do NOT write:
-//
-//    // BAD: possibly loses side-effects within n.Left
-//    if !x {
-//        e.value(k, n.Left)
-//    }
-
-// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n *Node) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-	defer func() {
-		lineno = lno
-	}()
-
-	if Debug.m > 2 {
-		fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
-	}
-
-	e.stmts(n.Ninit)
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected stmt: %v", n)
-
-	case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
-		// nop
-
-	case OBREAK, OCONTINUE, OGOTO:
-		// TODO(mdempsky): Handle dead code?
-
-	case OBLOCK:
-		e.stmts(n.List)
-
-	case ODCL:
-		// Record loop depth at declaration.
-		if !n.Left.isBlank() {
-			e.dcl(n.Left)
-		}
-
-	case OLABEL:
-		switch asNode(n.Sym.Label) {
-		case &nonlooping:
-			if Debug.m > 2 {
-				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
-			}
-		case &looping:
-			if Debug.m > 2 {
-				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
-			}
-			e.loopDepth++
-		default:
-			Fatalf("label missing tag")
-		}
-		n.Sym.Label = nil
-
-	case OIF:
-		e.discard(n.Left)
-		e.block(n.Nbody)
-		e.block(n.Rlist)
-
-	case OFOR, OFORUNTIL:
-		e.loopDepth++
-		e.discard(n.Left)
-		e.stmt(n.Right)
-		e.block(n.Nbody)
-		e.loopDepth--
-
-	case ORANGE:
-		// for List = range Right { Nbody }
-		e.loopDepth++
-		ks := e.addrs(n.List)
-		e.block(n.Nbody)
-		e.loopDepth--
-
-		// Right is evaluated outside the loop.
-		k := e.discardHole()
-		if len(ks) >= 2 {
-			if n.Right.Type.IsArray() {
-				k = ks[1].note(n, "range")
-			} else {
-				k = ks[1].deref(n, "range-deref")
-			}
-		}
-		e.expr(e.later(k), n.Right)
-
-	case OSWITCH:
-		typesw := n.Left != nil && n.Left.Op == OTYPESW
-
-		var ks []EscHole
-		for _, cas := range n.List.Slice() { // cases
-			if typesw && n.Left.Left != nil {
-				cv := cas.Rlist.First()
-				k := e.dcl(cv) // type switch variables have no ODCL.
-				if cv.Type.HasPointers() {
-					ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
-				}
-			}
-
-			e.discards(cas.List)
-			e.block(cas.Nbody)
-		}
-
-		if typesw {
-			e.expr(e.teeHole(ks...), n.Left.Right)
-		} else {
-			e.discard(n.Left)
-		}
-
-	case OSELECT:
-		for _, cas := range n.List.Slice() {
-			e.stmt(cas.Left)
-			e.block(cas.Nbody)
-		}
-	case OSELRECV:
-		e.assign(n.Left, n.Right, "selrecv", n)
-	case OSELRECV2:
-		e.assign(n.Left, n.Right, "selrecv", n)
-		e.assign(n.List.First(), nil, "selrecv", n)
-	case ORECV:
-		// TODO(mdempsky): Consider e.discard(n.Left).
-		e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
-	case OSEND:
-		e.discard(n.Left)
-		e.assignHeap(n.Right, "send", n)
-
-	case OAS, OASOP:
-		e.assign(n.Left, n.Right, "assign", n)
-
-	case OAS2:
-		for i, nl := range n.List.Slice() {
-			e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
-		}
-
-	case OAS2DOTTYPE: // v, ok = x.(type)
-		e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
-		e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
-	case OAS2MAPR: // v, ok = m[k]
-		e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
-		e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
-	case OAS2RECV: // v, ok = <-ch
-		e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
-		e.assign(n.List.Second(), nil, "assign-pair-receive", n)
-
-	case OAS2FUNC:
-		e.stmts(n.Right.Ninit)
-		e.call(e.addrs(n.List), n.Right, nil)
-	case ORETURN:
-		results := e.curfn.Type.Results().FieldSlice()
-		for i, v := range n.List.Slice() {
-			e.assign(asNode(results[i].Nname), v, "return", n)
-		}
-	case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
-		e.call(nil, n, nil)
-	case OGO, ODEFER:
-		e.stmts(n.Left.Ninit)
-		e.call(nil, n.Left, n)
-
-	case ORETJMP:
-		// TODO(mdempsky): What should we do here? esc.go just ignores it.
-	}
-}
-
-func (e *Escape) stmts(l Nodes) {
-	for _, n := range l.Slice() {
-		e.stmt(n)
-	}
-}
-
-// block is like stmts, but preserves loopDepth.
-func (e *Escape) block(l Nodes) {
-	old := e.loopDepth
-	e.stmts(l)
-	e.loopDepth = old
-}
-
-// expr models evaluating an expression n and flowing the result into
-// hole k.
-func (e *Escape) expr(k EscHole, n *Node) {
-	if n == nil {
-		return
-	}
-	e.stmts(n.Ninit)
-	e.exprSkipInit(k, n)
-}
-
-func (e *Escape) exprSkipInit(k EscHole, n *Node) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-	defer func() {
-		lineno = lno
-	}()
-
-	uintptrEscapesHack := k.uintptrEscapesHack
-	k.uintptrEscapesHack = false
-
-	if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
-		// nop
-	} else if k.derefs >= 0 && !n.Type.HasPointers() {
-		k = e.discardHole()
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected expr: %v", n)
-
-	case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
-		// nop
-
-	case ONAME:
-		if n.Class() == PFUNC || n.Class() == PEXTERN {
-			return
-		}
-		e.flow(k, e.oldLoc(n))
-
-	case OPLUS, ONEG, OBITNOT, ONOT:
-		e.discard(n.Left)
-	case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
-		e.discard(n.Left)
-		e.discard(n.Right)
-
-	case OADDR:
-		e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
-	case ODEREF:
-		e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
-	case ODOT, ODOTMETH, ODOTINTER:
-		e.expr(k.note(n, "dot"), n.Left)
-	case ODOTPTR:
-		e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
-	case ODOTTYPE, ODOTTYPE2:
-		e.expr(k.dotType(n.Type, n, "dot"), n.Left)
-	case OINDEX:
-		if n.Left.Type.IsArray() {
-			e.expr(k.note(n, "fixed-array-index-of"), n.Left)
-		} else {
-			// TODO(mdempsky): Fix the "why" reason text.
-			e.expr(k.deref(n, "dot of pointer"), n.Left)
-		}
-		e.discard(n.Right)
-	case OINDEXMAP:
-		e.discard(n.Left)
-		e.discard(n.Right)
-	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
-		e.expr(k.note(n, "slice"), n.Left)
-		low, high, max := n.SliceBounds()
-		e.discard(low)
-		e.discard(high)
-		e.discard(max)
-
-	case OCONV, OCONVNOP:
-		if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
-			// When -d=checkptr=2 is enabled, treat
-			// conversions to unsafe.Pointer as an
-			// escaping operation. This allows better
-			// runtime instrumentation, since we can more
-			// easily detect object boundaries on the heap
-			// than the stack.
-			e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
-		} else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
-			e.unsafeValue(k, n.Left)
-		} else {
-			e.expr(k, n.Left)
-		}
-	case OCONVIFACE:
-		if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
-			k = e.spill(k, n)
-		}
-		e.expr(k.note(n, "interface-converted"), n.Left)
-
-	case ORECV:
-		e.discard(n.Left)
-
-	case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
-		e.call([]EscHole{k}, n, nil)
-
-	case ONEW:
-		e.spill(k, n)
-
-	case OMAKESLICE:
-		e.spill(k, n)
-		e.discard(n.Left)
-		e.discard(n.Right)
-	case OMAKECHAN:
-		e.discard(n.Left)
-	case OMAKEMAP:
-		e.spill(k, n)
-		e.discard(n.Left)
-
-	case ORECOVER:
-		// nop
-
-	case OCALLPART:
-		// Flow the receiver argument to both the closure and
-		// to the receiver parameter.
-
-		closureK := e.spill(k, n)
-
-		m := callpartMethod(n)
-
-		// We don't know how the method value will be called
-		// later, so conservatively assume the result
-		// parameters all flow to the heap.
-		//
-		// TODO(mdempsky): Change ks into a callback, so that
-		// we don't have to create this dummy slice?
-		var ks []EscHole
-		for i := m.Type.NumResults(); i > 0; i-- {
-			ks = append(ks, e.heapHole())
-		}
-		paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv())
-
-		e.expr(e.teeHole(paramK, closureK), n.Left)
-
-	case OPTRLIT:
-		e.expr(e.spill(k, n), n.Left)
-
-	case OARRAYLIT:
-		for _, elt := range n.List.Slice() {
-			if elt.Op == OKEY {
-				elt = elt.Right
-			}
-			e.expr(k.note(n, "array literal element"), elt)
-		}
-
-	case OSLICELIT:
-		k = e.spill(k, n)
-		k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
-
-		for _, elt := range n.List.Slice() {
-			if elt.Op == OKEY {
-				elt = elt.Right
-			}
-			e.expr(k.note(n, "slice-literal-element"), elt)
-		}
-
-	case OSTRUCTLIT:
-		for _, elt := range n.List.Slice() {
-			e.expr(k.note(n, "struct literal element"), elt.Left)
-		}
-
-	case OMAPLIT:
-		e.spill(k, n)
-
-		// Map keys and values are always stored in the heap.
-		for _, elt := range n.List.Slice() {
-			e.assignHeap(elt.Left, "map literal key", n)
-			e.assignHeap(elt.Right, "map literal value", n)
-		}
-
-	case OCLOSURE:
-		k = e.spill(k, n)
-
-		// Link addresses of captured variables to closure.
-		for _, v := range n.Func.Closure.Func.Cvars.Slice() {
-			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
-				continue
-			}
-
-			k := k
-			if !v.Name.Byval() {
-				k = k.addr(v, "reference")
-			}
-
-			e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
-		}
-
-	case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
-		e.spill(k, n)
-		e.discard(n.Left)
-
-	case OADDSTR:
-		e.spill(k, n)
-
-		// Arguments of OADDSTR never escape;
-		// runtime.concatstrings makes sure of that.
-		e.discards(n.List)
-	}
-}
-
-// unsafeValue evaluates a uintptr-typed arithmetic expression looking
-// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n *Node) {
-	if n.Type.Etype != TUINTPTR {
-		Fatalf("unexpected type %v for %v", n.Type, n)
-	}
-
-	e.stmts(n.Ninit)
-
-	switch n.Op {
-	case OCONV, OCONVNOP:
-		if n.Left.Type.IsUnsafePtr() {
-			e.expr(k, n.Left)
-		} else {
-			e.discard(n.Left)
-		}
-	case ODOTPTR:
-		if isReflectHeaderDataField(n) {
-			e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
-		} else {
-			e.discard(n.Left)
-		}
-	case OPLUS, ONEG, OBITNOT:
-		e.unsafeValue(k, n.Left)
-	case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
-		e.unsafeValue(k, n.Left)
-		e.unsafeValue(k, n.Right)
-	case OLSH, ORSH:
-		e.unsafeValue(k, n.Left)
-		// RHS need not be uintptr-typed (#32959) and can't meaningfully
-		// flow pointers anyway.
-		e.discard(n.Right)
-	default:
-		e.exprSkipInit(e.discardHole(), n)
-	}
-}
-
-// discard evaluates an expression n for side-effects, but discards
-// its value.
-func (e *Escape) discard(n *Node) {
-	e.expr(e.discardHole(), n)
-}
-
-func (e *Escape) discards(l Nodes) {
-	for _, n := range l.Slice() {
-		e.discard(n)
-	}
-}
-
-// addr evaluates an addressable expression n and returns an EscHole
-// that represents storing into the represented location.
-func (e *Escape) addr(n *Node) EscHole {
-	if n == nil || n.isBlank() {
-		// Can happen at least in OSELRECV.
-		// TODO(mdempsky): Anywhere else?
-		return e.discardHole()
-	}
-
-	k := e.heapHole()
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected addr: %v", n)
-	case ONAME:
-		if n.Class() == PEXTERN {
-			break
-		}
-		k = e.oldLoc(n).asHole()
-	case ODOT:
-		k = e.addr(n.Left)
-	case OINDEX:
-		e.discard(n.Right)
-		if n.Left.Type.IsArray() {
-			k = e.addr(n.Left)
-		} else {
-			e.discard(n.Left)
-		}
-	case ODEREF, ODOTPTR:
-		e.discard(n)
-	case OINDEXMAP:
-		e.discard(n.Left)
-		e.assignHeap(n.Right, "key of map put", n)
-	}
-
-	if !n.Type.HasPointers() {
-		k = e.discardHole()
-	}
-
-	return k
-}
-
-func (e *Escape) addrs(l Nodes) []EscHole {
-	var ks []EscHole
-	for _, n := range l.Slice() {
-		ks = append(ks, e.addr(n))
-	}
-	return ks
-}
-
-// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *Node, why string, where *Node) {
-	// Filter out some no-op assignments for escape analysis.
-	ignore := dst != nil && src != nil && isSelfAssign(dst, src)
-	if ignore && Debug.m != 0 {
-		Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
-	}
-
-	k := e.addr(dst)
-	if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
-		e.unsafeValue(e.heapHole().note(where, why), src)
-	} else {
-		if ignore {
-			k = e.discardHole()
-		}
-		e.expr(k.note(where, why), src)
-	}
-}
-
-func (e *Escape) assignHeap(src *Node, why string, where *Node) {
-	e.expr(e.heapHole().note(where, why), src)
-}
-
-// call evaluates a call expression, including builtin calls. ks
-// should contain the holes representing where the function callee's
-// results flow; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where *Node) {
-	topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1
-	if topLevelDefer {
-		// force stack allocation of defer record, unless
-		// open-coded defers are used (see ssa.go)
-		where.Esc = EscNever
-	}
-
-	argument := func(k EscHole, arg *Node) {
-		if topLevelDefer {
-			// Top-level defer arguments don't escape to the
-			// heap, but they do need to last until the end of
-			// the function.
-			k = e.later(k)
-		} else if where != nil {
-			k = e.heapHole()
-		}
-
-		e.expr(k.note(call, "call parameter"), arg)
-	}
-
-	switch call.Op {
-	default:
-		Fatalf("unexpected call op: %v", call.Op)
-
-	case OCALLFUNC, OCALLMETH, OCALLINTER:
-		fixVariadicCall(call)
-
-		// Pick out the function callee, if statically known.
-		var fn *Node
-		switch call.Op {
-		case OCALLFUNC:
-			switch v := staticValue(call.Left); {
-			case v.Op == ONAME && v.Class() == PFUNC:
-				fn = v
-			case v.Op == OCLOSURE:
-				fn = v.Func.Closure.Func.Nname
-			}
-		case OCALLMETH:
-			fn = asNode(call.Left.Type.FuncType().Nname)
-		}
-
-		fntype := call.Left.Type
-		if fn != nil {
-			fntype = fn.Type
-		}
-
-		if ks != nil && fn != nil && e.inMutualBatch(fn) {
-			for i, result := range fn.Type.Results().FieldSlice() {
-				e.expr(ks[i], asNode(result.Nname))
-			}
-		}
-
-		if r := fntype.Recv(); r != nil {
-			argument(e.tagHole(ks, fn, r), call.Left.Left)
-		} else {
-			// Evaluate callee function expression.
-			argument(e.discardHole(), call.Left)
-		}
-
-		args := call.List.Slice()
-		for i, param := range fntype.Params().FieldSlice() {
-			argument(e.tagHole(ks, fn, param), args[i])
-		}
-
-	case OAPPEND:
-		args := call.List.Slice()
-
-		// Appendee slice may flow directly to the result, if
-		// it has enough capacity. Alternatively, a new heap
-		// slice might be allocated, and all slice elements
-		// might flow to heap.
-		appendeeK := ks[0]
-		if args[0].Type.Elem().HasPointers() {
-			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
-		}
-		argument(appendeeK, args[0])
-
-		if call.IsDDD() {
-			appendedK := e.discardHole()
-			if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
-				appendedK = e.heapHole().deref(call, "appended slice...")
-			}
-			argument(appendedK, args[1])
-		} else {
-			for _, arg := range args[1:] {
-				argument(e.heapHole(), arg)
-			}
-		}
-
-	case OCOPY:
-		argument(e.discardHole(), call.Left)
-
-		copiedK := e.discardHole()
-		if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
-			copiedK = e.heapHole().deref(call, "copied slice")
-		}
-		argument(copiedK, call.Right)
-
-	case OPANIC:
-		argument(e.heapHole(), call.Left)
-
-	case OCOMPLEX:
-		argument(e.discardHole(), call.Left)
-		argument(e.discardHole(), call.Right)
-	case ODELETE, OPRINT, OPRINTN, ORECOVER:
-		for _, arg := range call.List.Slice() {
-			argument(e.discardHole(), arg)
-		}
-	case OLEN, OCAP, OREAL, OIMAG, OCLOSE:
-		argument(e.discardHole(), call.Left)
-	}
-}
-
-// tagHole returns a hole for evaluating an argument passed to param.
-// ks should contain the holes representing where the function
-// callee's results flow. fn is the statically-known callee function,
-// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
-	// If this is a dynamic call, we can't rely on param.Note.
-	if fn == nil {
-		return e.heapHole()
-	}
-
-	if e.inMutualBatch(fn) {
-		return e.addr(asNode(param.Nname))
-	}
-
-	// Call to previously tagged function.
-
-	if param.Note == uintptrEscapesTag {
-		k := e.heapHole()
-		k.uintptrEscapesHack = true
-		return k
-	}
-
-	var tagKs []EscHole
-
-	esc := ParseLeaks(param.Note)
-	if x := esc.Heap(); x >= 0 {
-		tagKs = append(tagKs, e.heapHole().shift(x))
-	}
-
-	if ks != nil {
-		for i := 0; i < numEscResults; i++ {
-			if x := esc.Result(i); x >= 0 {
-				tagKs = append(tagKs, ks[i].shift(x))
-			}
-		}
-	}
-
-	return e.teeHole(tagKs...)
-}
-
-// inMutualBatch reports whether function fn is in the batch of
-// mutually recursive functions being analyzed. When this is true,
-// fn has not yet been analyzed, so its parameters and results
-// should be incorporated directly into the flow graph instead of
-// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *Node) bool {
-	if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
-		if fn.Name.Defn.Esc == EscFuncUnknown {
-			Fatalf("graph inconsistency")
-		}
-		return true
-	}
-	return false
-}
-
-// An EscHole represents a context for evaluating a Go
-// expression. E.g., when evaluating p in "x = **p", we'd have a hole
-// with dst==x and derefs==2.
-type EscHole struct {
-	dst    *EscLocation
-	derefs int // >= -1
-	notes  *EscNote
-
-	// uintptrEscapesHack indicates this context is evaluating an
-	// argument for a //go:uintptrescapes function.
-	uintptrEscapesHack bool
-}
-
-type EscNote struct {
-	next  *EscNote
-	where *Node
-	why   string
-}
-
-func (k EscHole) note(where *Node, why string) EscHole {
-	if where == nil || why == "" {
-		Fatalf("note: missing where/why")
-	}
-	if Debug.m >= 2 || logopt.Enabled() {
-		k.notes = &EscNote{
-			next:  k.notes,
-			where: where,
-			why:   why,
-		}
-	}
-	return k
-}
-
-func (k EscHole) shift(delta int) EscHole {
-	k.derefs += delta
-	if k.derefs < -1 {
-		Fatalf("derefs underflow: %v", k.derefs)
-	}
-	return k
-}
-
-func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where *Node, why string) EscHole  { return k.shift(-1).note(where, why) }
-
-func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
-	if !t.IsInterface() && !isdirectiface(t) {
-		k = k.shift(1)
-	}
-	return k.note(where, why)
-}
-
-// teeHole returns a new hole that flows into each hole of ks,
-// similar to the Unix tee(1) command.
-func (e *Escape) teeHole(ks ...EscHole) EscHole {
-	if len(ks) == 0 {
-		return e.discardHole()
-	}
-	if len(ks) == 1 {
-		return ks[0]
-	}
-	// TODO(mdempsky): Optimize if there's only one non-discard hole?
-
-	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
-	// new temporary location ltmp, wire it into place, and return
-	// a hole for "ltmp = _".
-	loc := e.newLoc(nil, true)
-	for _, k := range ks {
-		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
-		// semantically equivalent. To combine holes like "l1
-		// = _" and "l2 = &_", we'd need to wire them as "l1 =
-		// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
-		// instead.
-		if k.derefs < 0 {
-			Fatalf("teeHole: negative derefs")
-		}
-
-		e.flow(k, loc)
-	}
-	return loc.asHole()
-}
-
-func (e *Escape) dcl(n *Node) EscHole {
-	loc := e.oldLoc(n)
-	loc.loopDepth = e.loopDepth
-	return loc.asHole()
-}
-
-// spill allocates a new location associated with expression n, flows
-// its address to k, and returns a hole that flows values to it. It's
-// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n *Node) EscHole {
-	loc := e.newLoc(n, true)
-	e.flow(k.addr(n, "spill"), loc)
-	return loc.asHole()
-}
-
-// later returns a new hole that flows into k, but some time later.
-// Its main effect is to prevent immediate reuse of temporary
-// variables introduced during Order.
-func (e *Escape) later(k EscHole) EscHole {
-	loc := e.newLoc(nil, false)
-	e.flow(k, loc)
-	return loc.asHole()
-}
-
-// canonicalNode returns the canonical *Node that n logically
-// represents.
-func canonicalNode(n *Node) *Node {
-	if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
-		n = n.Name.Defn
-		if n.Name.IsClosureVar() {
-			Fatalf("still closure var")
-		}
-	}
-
-	return n
-}
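-
-// For illustration (an expository addition): when a closure captures x, the
-// closure body refers to x through a separate ONAME whose Name.Defn points
-// back at the outer x, so canonicalNode maps both to the same *Node and the
-// two uses share a single escape location.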
-
-func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
-	if e.curfn == nil {
-		Fatalf("e.curfn isn't set")
-	}
-	if n != nil && n.Type != nil && n.Type.NotInHeap() {
-		yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
-	}
-
-	n = canonicalNode(n)
-	loc := &EscLocation{
-		n:         n,
-		curfn:     e.curfn,
-		loopDepth: e.loopDepth,
-		transient: transient,
-	}
-	e.allLocs = append(e.allLocs, loc)
-	if n != nil {
-		if n.Op == ONAME && n.Name.Curfn != e.curfn {
-			Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
-		}
-
-		if n.HasOpt() {
-			Fatalf("%v already has a location", n)
-		}
-		n.SetOpt(loc)
-
-		if why := heapAllocReason(n); why != "" {
-			e.flow(e.heapHole().addr(n, why), loc)
-		}
-	}
-	return loc
-}
-
-func (e *Escape) oldLoc(n *Node) *EscLocation {
-	n = canonicalNode(n)
-	return n.Opt().(*EscLocation)
-}
-
-func (l *EscLocation) asHole() EscHole {
-	return EscHole{dst: l}
-}
-
-func (e *Escape) flow(k EscHole, src *EscLocation) {
-	dst := k.dst
-	if dst == &e.blankLoc {
-		return
-	}
-	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
-		return
-	}
-	if dst.escapes && k.derefs < 0 { // dst = &src
-		if Debug.m >= 2 || logopt.Enabled() {
-			pos := linestr(src.n.Pos)
-			if Debug.m >= 2 {
-				fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
-			}
-			explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
-			if logopt.Enabled() {
-				logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation)
-			}
-
-		}
-		src.escapes = true
-		return
-	}
-
-	// TODO(mdempsky): Deduplicate edges?
-	dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes})
-}
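-
-// For example (illustrative): for "sink = &x" where sink is already known
-// to escape, flow sees derefs == -1 into an escaping destination and marks
-// x as escaping immediately, without waiting for a graph walk.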
-
-func (e *Escape) heapHole() EscHole    { return e.heapLoc.asHole() }
-func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
-
-// walkAll computes the minimal dereferences between all pairs of
-// locations.
-func (e *Escape) walkAll() {
-	// We use a work queue to keep track of locations that we need
-	// to visit, and repeatedly walk until we reach a fixed point.
-	//
-	// We walk once from each location (including the heap), and
-	// then re-enqueue each location on its transition from
-	// transient->!transient and !escapes->escapes, which can each
-	// happen at most once. So we take Θ(len(e.allLocs)) walks.
-
-	// LIFO queue with enough room for e.allLocs and e.heapLoc.
-	todo := make([]*EscLocation, 0, len(e.allLocs)+1)
-	enqueue := func(loc *EscLocation) {
-		if !loc.queued {
-			todo = append(todo, loc)
-			loc.queued = true
-		}
-	}
-
-	for _, loc := range e.allLocs {
-		enqueue(loc)
-	}
-	enqueue(&e.heapLoc)
-
-	var walkgen uint32
-	for len(todo) > 0 {
-		root := todo[len(todo)-1]
-		todo = todo[:len(todo)-1]
-		root.queued = false
-
-		walkgen++
-		e.walkOne(root, walkgen, enqueue)
-	}
-}
-
-// walkOne computes the minimal number of dereferences from root to
-// all other locations.
-func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) {
-	// The data flow graph has negative edges (from addressing
-	// operations), so we use the Bellman-Ford algorithm. However,
-	// we don't have to worry about infinite negative cycles since
-	// we bound intermediate dereference counts to 0.
-
-	root.walkgen = walkgen
-	root.derefs = 0
-	root.dst = nil
-
-	todo := []*EscLocation{root} // LIFO queue
-	for len(todo) > 0 {
-		l := todo[len(todo)-1]
-		todo = todo[:len(todo)-1]
-
-		base := l.derefs
-
-		// If l.derefs < 0, then l's address flows to root.
-		addressOf := base < 0
-		if addressOf {
-			// For a flow path like "root = &l; l = x",
-			// l's address flows to root, but x's does
-			// not. We recognize this by lower bounding
-			// base at 0.
-			base = 0
-
-			// If l's address flows to a non-transient
-			// location, then l can't be transiently
-			// allocated.
-			if !root.transient && l.transient {
-				l.transient = false
-				enqueue(l)
-			}
-		}
-
-		if e.outlives(root, l) {
-			// l's value flows to root. If l is a function
-			// parameter and root is the heap or a
-			// corresponding result parameter, then record
-			// that value flow for tagging the function
-			// later.
-			if l.isName(PPARAM) {
-				if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
-					if Debug.m >= 2 {
-						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
-					}
-					explanation := e.explainPath(root, l)
-					if logopt.Enabled() {
-						logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(),
-							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation)
-					}
-				}
-				l.leakTo(root, base)
-			}
-
-			// If l's address flows somewhere that
-			// outlives it, then l needs to be heap
-			// allocated.
-			if addressOf && !l.escapes {
-				if logopt.Enabled() || Debug.m >= 2 {
-					if Debug.m >= 2 {
-						fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
-					}
-					explanation := e.explainPath(root, l)
-					if logopt.Enabled() {
-						logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation)
-					}
-				}
-				l.escapes = true
-				enqueue(l)
-				continue
-			}
-		}
-
-		for i, edge := range l.edges {
-			if edge.src.escapes {
-				continue
-			}
-			derefs := base + edge.derefs
-			if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
-				edge.src.walkgen = walkgen
-				edge.src.derefs = derefs
-				edge.src.dst = l
-				edge.src.dstEdgeIdx = i
-				todo = append(todo, edge.src)
-			}
-		}
-	}
-}
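
The clamping of base at 0 is what keeps the Bellman-Ford relaxation well-defined despite negative edges. A self-contained sketch of that relaxation, on the exact "root = &l; l = x" flow from the comment above; loc and edge are invented stand-ins:

	package main

	import "fmt"

	// Invented stand-ins for EscLocation/EscEdge: derefs is -1 for an
	// address-of edge (dst = &src), 0 for a direct assignment.
	type loc struct {
		name   string
		derefs int
		edges  []edge
	}

	type edge struct {
		src    *loc
		derefs int
	}

	func main() {
		root := &loc{name: "root"}
		l := &loc{name: "l"}
		x := &loc{name: "x"}
		root.edges = []edge{{src: l, derefs: -1}} // root = &l
		l.edges = []edge{{src: x, derefs: 0}}     // l = x

		const unvisited = 1 << 30
		l.derefs, x.derefs = unvisited, unvisited
		root.derefs = 0

		todo := []*loc{root}
		for len(todo) > 0 {
			n := todo[len(todo)-1]
			todo = todo[:len(todo)-1]

			base := n.derefs
			if base < 0 {
				base = 0 // l's address reaches root; what l holds does not
			}
			for _, e := range n.edges {
				if d := base + e.derefs; d < e.src.derefs {
					e.src.derefs = d
					todo = append(todo, e.src)
				}
			}
		}
		fmt.Println(l.derefs, x.derefs) // -1 0
	}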
-
-// explainPath prints an explanation of how src flows to the walk root.
-func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
-	visited := make(map[*EscLocation]bool)
-	pos := linestr(src.n.Pos)
-	var explanation []*logopt.LoggedOpt
-	for {
-		// Prevent infinite loop.
-		if visited[src] {
-			if Debug.m >= 2 {
-				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
-			}
-			break
-		}
-		visited[src] = true
-		dst := src.dst
-		edge := &dst.edges[src.dstEdgeIdx]
-		if edge.src != src {
-			Fatalf("path inconsistency: %v != %v", edge.src, src)
-		}
-
-		explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
-
-		if dst == root {
-			break
-		}
-		src = dst
-	}
-
-	return explanation
-}
-
-func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, notes *EscNote, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
-	ops := "&"
-	if derefs >= 0 {
-		ops = strings.Repeat("*", derefs)
-	}
-	print := Debug.m >= 2
-
-	flow := fmt.Sprintf("   flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
-	if print {
-		fmt.Printf("%s:%s\n", pos, flow)
-	}
-	if logopt.Enabled() {
-		var epos src.XPos
-		if notes != nil {
-			epos = notes.where.Pos
-		} else if srcloc != nil && srcloc.n != nil {
-			epos = srcloc.n.Pos
-		}
-		explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow))
-	}
-
-	for note := notes; note != nil; note = note.next {
-		if print {
-			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
-		}
-		if logopt.Enabled() {
-			explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(),
-				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
-		}
-	}
-	return explanation
-}
-
-func (e *Escape) explainLoc(l *EscLocation) string {
-	if l == &e.heapLoc {
-		return "{heap}"
-	}
-	if l.n == nil {
-		// TODO(mdempsky): Omit entirely.
-		return "{temp}"
-	}
-	if l.n.Op == ONAME {
-		return fmt.Sprintf("%v", l.n)
-	}
-	return fmt.Sprintf("{storage for %v}", l.n)
-}
-
-// outlives reports whether values stored in l may survive beyond
-// other's lifetime if stack allocated.
-func (e *Escape) outlives(l, other *EscLocation) bool {
-	// The heap outlives everything.
-	if l.escapes {
-		return true
-	}
-
-	// We don't know what callers do with returned values, so
-	// pessimistically we need to assume they flow to the heap and
-	// outlive everything too.
-	if l.isName(PPARAMOUT) {
-		// Exception: Directly called closures can return
-		// locations allocated outside of them without forcing
-		// them to the heap. For example:
-		//
-		//    var u int  // okay to stack allocate
-		//    *(func() *int { return &u }()) = 42
-		if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
-			return false
-		}
-
-		return true
-	}
-
-	// If l and other are within the same function, then l
-	// outlives other if it was declared outside other's loop
-	// scope. For example:
-	//
-	//    var l *int
-	//    for {
-	//        l = new(int)
-	//    }
-	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
-		return true
-	}
-
-	// If other is declared within a child closure of where l is
-	// declared, then l outlives it. For example:
-	//
-	//    var l *int
-	//    func() {
-	//        l = new(int)
-	//    }
-	if containsClosure(l.curfn, other.curfn) {
-		return true
-	}
-
-	return false
-}
-
-// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *Node) bool {
-	if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
-		Fatalf("bad containsClosure: %v, %v", f, c)
-	}
-
-	// Common case.
-	if f == c {
-		return false
-	}
-
-	// Closures within function Foo are named like "Foo.funcN..."
-	// TODO(mdempsky): Better way to recognize this.
-	fn := f.Func.Nname.Sym.Name
-	cn := c.Func.Nname.Sym.Name
-	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
-}
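
The name-prefix test is easy to exercise on its own. A sketch with made-up linker-style names; isClosureOf is a hypothetical helper, not part of this CL:

	package main

	import (
		"fmt"
		"strings"
	)

	// Same test containsClosure applies: closures inside Foo are named
	// "Foo.funcN...", so require fn as a prefix of cn followed by a
	// '.' boundary.
	func isClosureOf(fn, cn string) bool {
		return len(cn) > len(fn) && strings.HasPrefix(cn, fn) && cn[len(fn)] == '.'
	}

	func main() {
		fmt.Println(isClosureOf("Foo", "Foo.func1")) // true
		fmt.Println(isClosureOf("Foo", "Foobar"))    // false: no '.' boundary
		fmt.Println(isClosureOf("Foo", "Foo"))       // false: same function
	}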
-
-// leak records that parameter l leaks to sink.
-func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
-	// If sink is a result parameter and we can fit return bits
-	// into the escape analysis tag, then record a return leak.
-	if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
-		// TODO(mdempsky): Eliminate dependency on Vargen here.
-		ri := int(sink.n.Name.Vargen) - 1
-		if ri < numEscResults {
-			// Leak to result parameter.
-			l.paramEsc.AddResult(ri, derefs)
-			return
-		}
-	}
-
-	// Otherwise, record as heap leak.
-	l.paramEsc.AddHeap(derefs)
-}
-
-func (e *Escape) finish(fns []*Node) {
-	// Record parameter tags for package export data.
-	for _, fn := range fns {
-		fn.Esc = EscFuncTagged
-
-		narg := 0
-		for _, fs := range &types.RecvsParams {
-			for _, f := range fs(fn.Type).Fields().Slice() {
-				narg++
-				f.Note = e.paramTag(fn, narg, f)
-			}
-		}
-	}
-
-	for _, loc := range e.allLocs {
-		n := loc.n
-		if n == nil {
-			continue
-		}
-		n.SetOpt(nil)
-
-		// Update n.Esc based on escape analysis results.
-
-		if loc.escapes {
-			if n.Op != ONAME {
-				if Debug.m != 0 {
-					Warnl(n.Pos, "%S escapes to heap", n)
-				}
-				if logopt.Enabled() {
-					logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
-				}
-			}
-			n.Esc = EscHeap
-			addrescapes(n)
-		} else {
-			if Debug.m != 0 && n.Op != ONAME {
-				Warnl(n.Pos, "%S does not escape", n)
-			}
-			n.Esc = EscNone
-			if loc.transient {
-				n.SetTransient(true)
-			}
-		}
-	}
-}
-
-func (l *EscLocation) isName(c Class) bool {
-	return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
-}
-
-const numEscResults = 7
-
-// An EscLeaks represents a set of assignment flows from a parameter
-// to the heap or to any of its function's (first numEscResults)
-// result parameters.
-type EscLeaks [1 + numEscResults]uint8
-
-// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l EscLeaks) Empty() bool { return l == EscLeaks{} }
-
-// Heap returns the minimum deref count of any assignment flow from l
-// to the heap. If no such flows exist, Heap returns -1.
-func (l EscLeaks) Heap() int { return l.get(0) }
-
-// Result returns the minimum deref count of any assignment flow from
-// l to its function's i'th result parameter. If no such flows exist,
-// Result returns -1.
-func (l EscLeaks) Result(i int) int { return l.get(1 + i) }
-
-// AddHeap adds an assignment flow from l to the heap.
-func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }
-
-// AddResult adds an assignment flow from l to its function's i'th
-// result parameter.
-func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-
-func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }
-
-func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
-
-func (l *EscLeaks) add(i, derefs int) {
-	if old := l.get(i); old < 0 || derefs < old {
-		l.set(i, derefs)
-	}
-}
-
-func (l *EscLeaks) set(i, derefs int) {
-	v := derefs + 1
-	if v < 0 {
-		Fatalf("invalid derefs count: %v", derefs)
-	}
-	if v > math.MaxUint8 {
-		v = math.MaxUint8
-	}
-
-	l[i] = uint8(v)
-}
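
Each cell stores derefs+1 so that the array's zero value means "no recorded flow" and get can report -1, saturating at math.MaxUint8 for very deep paths. The encoding in isolation; cell is an invented type:

	package main

	import "fmt"

	// cell mirrors one EscLeaks slot: 0 means "no flow", otherwise the
	// stored byte is derefs+1, saturated at 255.
	type cell uint8

	func (c cell) get() int { return int(c) - 1 } // -1: no recorded flow

	func (c *cell) add(derefs int) {
		if old := c.get(); old < 0 || derefs < old {
			v := derefs + 1
			if v > 255 {
				v = 255
			}
			*c = cell(v)
		}
	}

	func main() {
		var c cell
		fmt.Println(c.get()) // -1: the zero value encodes "no flow"
		c.add(3)
		c.add(1) // a shorter path replaces a longer one
		c.add(5) // a longer path is ignored
		fmt.Println(c.get()) // 1
	}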
-
-// Optimize removes result flow paths that are equal in length or
-// longer than the shortest heap flow path.
-func (l *EscLeaks) Optimize() {
-	// If we have a path to the heap, then there's no use in
-	// keeping equal or longer paths elsewhere.
-	if x := l.Heap(); x >= 0 {
-		for i := 0; i < numEscResults; i++ {
-			if l.Result(i) >= x {
-				l.setResult(i, -1)
-			}
-		}
-	}
-}
-
-var leakTagCache = map[EscLeaks]string{}
-
-// Encode converts l into a binary string for export data.
-func (l EscLeaks) Encode() string {
-	if l.Heap() == 0 {
-		// Space optimization: empty string encodes more
-		// efficiently in export data.
-		return ""
-	}
-	if s, ok := leakTagCache[l]; ok {
-		return s
-	}
-
-	n := len(l)
-	for n > 0 && l[n-1] == 0 {
-		n--
-	}
-	s := "esc:" + string(l[:n])
-	leakTagCache[l] = s
-	return s
-}
-
-// ParseLeaks parses a binary string representing an EscLeaks.
-func ParseLeaks(s string) EscLeaks {
-	var l EscLeaks
-	if !strings.HasPrefix(s, "esc:") {
-		l.AddHeap(0)
-		return l
-	}
-	copy(l[:], s[4:])
-	return l
-}
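
Encode and ParseLeaks round-trip through a compact tag: trailing zero cells are trimmed, the remaining bytes are prefixed with "esc:", and any tag without that prefix conservatively decodes as a heap leak with zero derefs. A standalone sketch of the same format, omitting the empty-string space optimization and the tag cache:

	package main

	import (
		"fmt"
		"strings"
	)

	func encode(l [8]uint8) string {
		n := len(l)
		for n > 0 && l[n-1] == 0 {
			n--
		}
		return "esc:" + string(l[:n])
	}

	func parse(s string) (l [8]uint8) {
		if !strings.HasPrefix(s, "esc:") {
			l[0] = 1 // AddHeap(0): heap cell stores derefs+1
			return l
		}
		copy(l[:], s[4:])
		return l
	}

	func main() {
		var l [8]uint8
		l[1] = 2 // result 0 leaks with derefs=1
		fmt.Println(parse(encode(l)) == l) // true
		fmt.Println(parse("unknown tag"))  // [1 0 0 0 0 0 0 0]
	}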
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index c6917e0..356fcfa 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -5,225 +5,68 @@
 package gc
 
 import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/inline"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/bio"
-	"cmd/internal/src"
 	"fmt"
-)
-
-var (
-	Debug_export int // if set, print debugging information about export data
+	"go/constant"
 )
 
 func exportf(bout *bio.Writer, format string, args ...interface{}) {
 	fmt.Fprintf(bout, format, args...)
-	if Debug_export != 0 {
+	if base.Debug.Export != 0 {
 		fmt.Printf(format, args...)
 	}
 }
 
-var asmlist []*Node
-
-// exportsym marks n for export (or reexport).
-func exportsym(n *Node) {
-	if n.Sym.OnExportList() {
-		return
-	}
-	n.Sym.SetOnExportList(true)
-
-	if Debug.E != 0 {
-		fmt.Printf("export symbol %v\n", n.Sym)
-	}
-
-	exportlist = append(exportlist, n)
-}
-
-func initname(s string) bool {
-	return s == "init"
-}
-
-func autoexport(n *Node, ctxt Class) {
-	if n.Sym.Pkg != localpkg {
-		return
-	}
-	if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
-		return
-	}
-	if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
-		return
-	}
-
-	if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
-		exportsym(n)
-	}
-	if asmhdr != "" && !n.Sym.Asm() {
-		n.Sym.SetAsm(true)
-		asmlist = append(asmlist, n)
-	}
-}
-
 func dumpexport(bout *bio.Writer) {
+	p := &exporter{marked: make(map[*types.Type]bool)}
+	for _, n := range typecheck.Target.Exports {
+		p.markObject(n)
+	}
+
 	// The linker also looks for the $$ marker - use char after $$ to distinguish format.
 	exportf(bout, "\n$$B\n") // indicate binary export format
 	off := bout.Offset()
-	iexport(bout.Writer)
+	typecheck.WriteExports(bout.Writer)
 	size := bout.Offset() - off
 	exportf(bout, "\n$$\n")
 
-	if Debug_export != 0 {
-		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
-	}
-}
-
-func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
-	n := asNode(s.PkgDef())
-	if n == nil {
-		// iimport should have created a stub ONONAME
-		// declaration for all imported symbols. The exception
-		// is declarations for Runtimepkg, which are populated
-		// by loadsys instead.
-		if s.Pkg != Runtimepkg {
-			Fatalf("missing ONONAME for %v\n", s)
-		}
-
-		n = dclname(s)
-		s.SetPkgDef(asTypesNode(n))
-		s.Importdef = ipkg
-	}
-	if n.Op != ONONAME && n.Op != op {
-		redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
-	}
-	return n
-}
-
-// importtype returns the named type declared by symbol s.
-// If no such type has been declared yet, a forward declaration is returned.
-// ipkg is the package being imported
-func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
-	n := importsym(ipkg, s, OTYPE)
-	if n.Op != OTYPE {
-		t := types.New(TFORW)
-		t.Sym = s
-		t.Nod = asTypesNode(n)
-
-		n.Op = OTYPE
-		n.Pos = pos
-		n.Type = t
-		n.SetClass(PEXTERN)
-	}
-
-	t := n.Type
-	if t == nil {
-		Fatalf("importtype %v", s)
-	}
-	return t
-}
-
-// importobj declares symbol s as an imported object representable by op.
-// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
-	n := importsym(ipkg, s, op)
-	if n.Op != ONONAME {
-		if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
-			redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
-		}
-		return nil
-	}
-
-	n.Op = op
-	n.Pos = pos
-	n.SetClass(ctxt)
-	if ctxt == PFUNC {
-		n.Sym.SetFunc(true)
-	}
-	n.Type = t
-	return n
-}
-
-// importconst declares symbol s as an imported constant with type t and value val.
-// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
-	n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
-	if n == nil { // TODO: Check that value matches.
-		return
-	}
-
-	n.SetVal(val)
-
-	if Debug.E != 0 {
-		fmt.Printf("import const %v %L = %v\n", s, t, val)
-	}
-}
-
-// importfunc declares symbol s as an imported function with type t.
-// ipkg is the package being imported
-func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
-	n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
-	if n == nil {
-		return
-	}
-
-	n.Func = new(Func)
-	t.SetNname(asTypesNode(n))
-
-	if Debug.E != 0 {
-		fmt.Printf("import func %v%S\n", s, t)
-	}
-}
-
-// importvar declares symbol s as an imported variable with type t.
-// ipkg is the package being imported
-func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
-	n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
-	if n == nil {
-		return
-	}
-
-	if Debug.E != 0 {
-		fmt.Printf("import var %v %L\n", s, t)
-	}
-}
-
-// importalias declares symbol s as an imported type alias with type t.
-// ipkg is the package being imported
-func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
-	n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
-	if n == nil {
-		return
-	}
-
-	if Debug.E != 0 {
-		fmt.Printf("import type %v = %L\n", s, t)
+	if base.Debug.Export != 0 {
+		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
 	}
 }
 
 func dumpasmhdr() {
-	b, err := bio.Create(asmhdr)
+	b, err := bio.Create(base.Flag.AsmHdr)
 	if err != nil {
-		Fatalf("%v", err)
+		base.Fatalf("%v", err)
 	}
-	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
-	for _, n := range asmlist {
-		if n.Sym.IsBlank() {
+	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
+	for _, n := range typecheck.Target.Asms {
+		if n.Sym().IsBlank() {
 			continue
 		}
-		switch n.Op {
-		case OLITERAL:
-			t := n.Val().Ctype()
-			if t == CTFLT || t == CTCPLX {
+		switch n.Op() {
+		case ir.OLITERAL:
+			t := n.Val().Kind()
+			if t == constant.Float || t == constant.Complex {
 				break
 			}
-			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
+			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
 
-		case OTYPE:
-			t := n.Type
+		case ir.OTYPE:
+			t := n.Type()
 			if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
 				break
 			}
-			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
 			for _, f := range t.Fields().Slice() {
 				if !f.Sym.IsBlank() {
-					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
+					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
 				}
 			}
 		}
@@ -231,3 +74,83 @@
 
 	b.Close()
 }
+
+type exporter struct {
+	marked map[*types.Type]bool // types already seen by markType
+}
+
+// markObject visits a reachable object.
+func (p *exporter) markObject(n ir.Node) {
+	if n.Op() == ir.ONAME {
+		n := n.(*ir.Name)
+		if n.Class == ir.PFUNC {
+			inline.Inline_Flood(n, typecheck.Export)
+		}
+	}
+
+	p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *exporter) markType(t *types.Type) {
+	if p.marked[t] {
+		return
+	}
+	p.marked[t] = true
+
+	// If this is a named type, mark all of its associated
+	// methods. Skip interface types because t.Methods contains
+	// only their unexpanded method set (i.e., exclusive of
+	// interface embeddings), and the switch statement below
+	// handles their full method set.
+	if t.Sym() != nil && t.Kind() != types.TINTER {
+		for _, m := range t.Methods().Slice() {
+			if types.IsExported(m.Sym.Name) {
+				p.markObject(ir.AsNode(m.Nname))
+			}
+		}
+	}
+
+	// Recursively mark any types that can be produced given a
+	// value of type t: dereferencing a pointer; indexing or
+	// iterating over an array, slice, or map; receiving from a
+	// channel; accessing a struct field or interface method; or
+	// calling a function.
+	//
+	// Notably, we don't mark function parameter types, because
+	// the user already needs some way to construct values of
+	// those types.
+	switch t.Kind() {
+	case types.TPTR, types.TARRAY, types.TSLICE:
+		p.markType(t.Elem())
+
+	case types.TCHAN:
+		if t.ChanDir().CanRecv() {
+			p.markType(t.Elem())
+		}
+
+	case types.TMAP:
+		p.markType(t.Key())
+		p.markType(t.Elem())
+
+	case types.TSTRUCT:
+		for _, f := range t.FieldSlice() {
+			if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+				p.markType(f.Type)
+			}
+		}
+
+	case types.TFUNC:
+		for _, f := range t.Results().FieldSlice() {
+			p.markType(f.Type)
+		}
+
+	case types.TINTER:
+		for _, f := range t.FieldSlice() {
+			if types.IsExported(f.Sym.Name) {
+				p.markType(f.Type)
+			}
+		}
+	}
+}
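
As an illustration of what the mark phase reaches, consider a hypothetical input package (not from this CL): exporting F makes its result type reachable, which pulls in exported struct fields and the exported methods of named types found there, while parameter types are deliberately left unmarked:

	// Hypothetical input package for the mark phase.
	package p

	type T struct{ x int }

	func (T) Exported() int   { return 1 } // marked via T's method set
	func (T) unexported() int { return 2 } // skipped: unexported name

	type hidden struct{ Y T }

	// F is exported, so markType visits its result type (hidden),
	// then hidden's exported field Y, then T and T.Exported.
	func F() hidden { return hidden{} }

	type Arg struct{ z int }

	// G's parameter type is not marked: callers must already be able
	// to construct an Arg to call G at all.
	func G(a Arg) {}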
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
deleted file mode 100644
index f92f5d0..0000000
--- a/src/cmd/compile/internal/gc/fmt.go
+++ /dev/null
@@ -1,1986 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode/utf8"
-)
-
-// A FmtFlag value is a set of flags (or 0).
-// They control how the Xconv functions format their values.
-// See the respective function's documentation for details.
-type FmtFlag int
-
-const ( //                                 fmt.Format flag/prec or verb
-	FmtLeft     FmtFlag = 1 << iota // '-'
-	FmtSharp                        // '#'
-	FmtSign                         // '+'
-	FmtUnsigned                     // internal use only (historic: u flag)
-	FmtShort                        // verb == 'S'       (historic: h flag)
-	FmtLong                         // verb == 'L'       (historic: l flag)
-	FmtComma                        // '.' (== hasPrec)  (historic: , flag)
-	FmtByte                         // '0'               (historic: hh flag)
-)
-
-// fmtFlag computes the (internal) FmtFlag
-// value given the fmt.State and format verb.
-func fmtFlag(s fmt.State, verb rune) FmtFlag {
-	var flag FmtFlag
-	if s.Flag('-') {
-		flag |= FmtLeft
-	}
-	if s.Flag('#') {
-		flag |= FmtSharp
-	}
-	if s.Flag('+') {
-		flag |= FmtSign
-	}
-	if s.Flag(' ') {
-		Fatalf("FmtUnsigned in format string")
-	}
-	if _, ok := s.Precision(); ok {
-		flag |= FmtComma
-	}
-	if s.Flag('0') {
-		flag |= FmtByte
-	}
-	switch verb {
-	case 'S':
-		flag |= FmtShort
-	case 'L':
-		flag |= FmtLong
-	}
-	return flag
-}
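
fmtFlag is just probing the fmt.State that the fmt package hands every fmt.Formatter. A runnable toy Formatter showing the same probing; flagged is an invented type:

	package main

	import "fmt"

	type flagged int

	// Format reports which flags and verb fmt delivered, the same
	// information fmtFlag folds into a FmtFlag.
	func (f flagged) Format(s fmt.State, verb rune) {
		var set []string
		for _, c := range "-#+0" {
			if s.Flag(int(c)) {
				set = append(set, string(c))
			}
		}
		if _, ok := s.Precision(); ok {
			set = append(set, "prec")
		}
		fmt.Fprintf(s, "verb=%c flags=%v", verb, set)
	}

	func main() {
		fmt.Printf("%-#v\n", flagged(0)) // verb=v flags=[- #]
		fmt.Printf("%.3S\n", flagged(0)) // verb=S flags=[prec]
	}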
-
-// Format conversions:
-// TODO(gri) verify these; eliminate those not used anymore
-//
-//	%v Op		Node opcodes
-//		Flags:  #: print Go syntax (automatic unless mode == FDbg)
-//
-//	%j *Node	Node details
-//		Flags:  0: suppresses things not relevant until walk
-//
-//	%v *Val		Constant values
-//
-//	%v *types.Sym		Symbols
-//	%S              unqualified identifier in any mode
-//		Flags:  +,- #: mode (see below)
-//			0: in export mode: unqualified identifier if exported, qualified if not
-//
-//	%v *types.Type	Types
-//	%S              omit "func" and receiver in function types
-//	%L              definition instead of name.
-//		Flags:  +,- #: mode (see below)
-//			' ' (only in -/Sym mode) print type identifiers with package name instead of prefix.
-//
-//	%v *Node	Nodes
-//	%S              (only in +/debug mode) suppress recursion
-//	%L              (only in Error mode) print "foo (type Bar)"
-//		Flags:  +,- #: mode (see below)
-//
-//	%v Nodes	Node lists
-//		Flags:  those of *Node
-//			.: separate items with ',' instead of ';'
-
-// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode
-const (
-	FErr fmtMode = iota
-	FDbg
-	FTypeId
-	FTypeIdName // same as FTypeId, but use package name instead of prefix
-)
-
-// The mode flags '+', '-', and '#' are sticky; they persist through
-// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is
-// sticky only on *types.Type recursions and only used in %-/*types.Sym mode.
-//
-// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
-
-// Useful format combinations:
-// TODO(gri): verify these
-//
-// *Node, Nodes:
-//   %+v    multiline recursive debug dump of *Node/Nodes
-//   %+S    non-recursive debug dump
-//
-// *Node:
-//   %#v    Go format
-//   %L     "foo (type Bar)" for error messages
-//
-// *types.Type:
-//   %#v    Go format
-//   %#L    type definition instead of name
-//   %#S    omit "func" and receiver in function signature
-//
-//   %-v    type identifiers
-//   %-S    type identifiers without "func" and arg names in type signatures (methodsym)
-//   %- v   type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
-
-// update returns the results of applying f to mode.
-func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) {
-	switch {
-	case f&FmtSign != 0:
-		mode = FDbg
-	case f&FmtSharp != 0:
-		// ignore (textual export format no longer supported)
-	case f&FmtUnsigned != 0:
-		mode = FTypeIdName
-	case f&FmtLeft != 0:
-		mode = FTypeId
-	}
-
-	f &^= FmtSharp | FmtLeft | FmtSign
-	return f, mode
-}
-
-var goopnames = []string{
-	OADDR:     "&",
-	OADD:      "+",
-	OADDSTR:   "+",
-	OALIGNOF:  "unsafe.Alignof",
-	OANDAND:   "&&",
-	OANDNOT:   "&^",
-	OAND:      "&",
-	OAPPEND:   "append",
-	OAS:       "=",
-	OAS2:      "=",
-	OBREAK:    "break",
-	OCALL:     "function call", // not actual syntax
-	OCAP:      "cap",
-	OCASE:     "case",
-	OCLOSE:    "close",
-	OCOMPLEX:  "complex",
-	OBITNOT:   "^",
-	OCONTINUE: "continue",
-	OCOPY:     "copy",
-	ODELETE:   "delete",
-	ODEFER:    "defer",
-	ODIV:      "/",
-	OEQ:       "==",
-	OFALL:     "fallthrough",
-	OFOR:      "for",
-	OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.
-	OGE:       ">=",
-	OGOTO:     "goto",
-	OGT:       ">",
-	OIF:       "if",
-	OIMAG:     "imag",
-	OINLMARK:  "inlmark",
-	ODEREF:    "*",
-	OLEN:      "len",
-	OLE:       "<=",
-	OLSH:      "<<",
-	OLT:       "<",
-	OMAKE:     "make",
-	ONEG:      "-",
-	OMOD:      "%",
-	OMUL:      "*",
-	ONEW:      "new",
-	ONE:       "!=",
-	ONOT:      "!",
-	OOFFSETOF: "unsafe.Offsetof",
-	OOROR:     "||",
-	OOR:       "|",
-	OPANIC:    "panic",
-	OPLUS:     "+",
-	OPRINTN:   "println",
-	OPRINT:    "print",
-	ORANGE:    "range",
-	OREAL:     "real",
-	ORECV:     "<-",
-	ORECOVER:  "recover",
-	ORETURN:   "return",
-	ORSH:      ">>",
-	OSELECT:   "select",
-	OSEND:     "<-",
-	OSIZEOF:   "unsafe.Sizeof",
-	OSUB:      "-",
-	OSWITCH:   "switch",
-	OXOR:      "^",
-}
-
-func (o Op) GoString() string {
-	return fmt.Sprintf("%#v", o)
-}
-
-func (o Op) format(s fmt.State, verb rune, mode fmtMode) {
-	switch verb {
-	case 'v':
-		o.oconv(s, fmtFlag(s, verb), mode)
-
-	default:
-		fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
-	}
-}
-
-func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) {
-	if flag&FmtSharp != 0 || mode != FDbg {
-		if int(o) < len(goopnames) && goopnames[o] != "" {
-			fmt.Fprint(s, goopnames[o])
-			return
-		}
-	}
-
-	// 'o.String()' instead of just 'o' to avoid infinite recursion
-	fmt.Fprint(s, o.String())
-}
-
-type (
-	fmtMode int
-
-	fmtNodeErr        Node
-	fmtNodeDbg        Node
-	fmtNodeTypeId     Node
-	fmtNodeTypeIdName Node
-
-	fmtOpErr        Op
-	fmtOpDbg        Op
-	fmtOpTypeId     Op
-	fmtOpTypeIdName Op
-
-	fmtTypeErr        types.Type
-	fmtTypeDbg        types.Type
-	fmtTypeTypeId     types.Type
-	fmtTypeTypeIdName types.Type
-
-	fmtSymErr        types.Sym
-	fmtSymDbg        types.Sym
-	fmtSymTypeId     types.Sym
-	fmtSymTypeIdName types.Sym
-
-	fmtNodesErr        Nodes
-	fmtNodesDbg        Nodes
-	fmtNodesTypeId     Nodes
-	fmtNodesTypeIdName Nodes
-)
-
-func (n *fmtNodeErr) Format(s fmt.State, verb rune)        { (*Node)(n).format(s, verb, FErr) }
-func (n *fmtNodeDbg) Format(s fmt.State, verb rune)        { (*Node)(n).format(s, verb, FDbg) }
-func (n *fmtNodeTypeId) Format(s fmt.State, verb rune)     { (*Node)(n).format(s, verb, FTypeId) }
-func (n *fmtNodeTypeIdName) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeIdName) }
-func (n *Node) Format(s fmt.State, verb rune)              { n.format(s, verb, FErr) }
-
-func (o fmtOpErr) Format(s fmt.State, verb rune)        { Op(o).format(s, verb, FErr) }
-func (o fmtOpDbg) Format(s fmt.State, verb rune)        { Op(o).format(s, verb, FDbg) }
-func (o fmtOpTypeId) Format(s fmt.State, verb rune)     { Op(o).format(s, verb, FTypeId) }
-func (o fmtOpTypeIdName) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeIdName) }
-func (o Op) Format(s fmt.State, verb rune)              { o.format(s, verb, FErr) }
-
-func (t *fmtTypeErr) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FErr) }
-func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FDbg) }
-func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) {
-	typeFormat((*types.Type)(t), s, verb, FTypeId)
-}
-func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) {
-	typeFormat((*types.Type)(t), s, verb, FTypeIdName)
-}
-
-// func (t *types.Type) Format(s fmt.State, verb rune)     // in package types
-
-func (y *fmtSymErr) Format(s fmt.State, verb rune)    { symFormat((*types.Sym)(y), s, verb, FErr) }
-func (y *fmtSymDbg) Format(s fmt.State, verb rune)    { symFormat((*types.Sym)(y), s, verb, FDbg) }
-func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FTypeId) }
-func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) {
-	symFormat((*types.Sym)(y), s, verb, FTypeIdName)
-}
-
-// func (y *types.Sym) Format(s fmt.State, verb rune)            // in package types  { y.format(s, verb, FErr) }
-
-func (n fmtNodesErr) Format(s fmt.State, verb rune)        { (Nodes)(n).format(s, verb, FErr) }
-func (n fmtNodesDbg) Format(s fmt.State, verb rune)        { (Nodes)(n).format(s, verb, FDbg) }
-func (n fmtNodesTypeId) Format(s fmt.State, verb rune)     { (Nodes)(n).format(s, verb, FTypeId) }
-func (n fmtNodesTypeIdName) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeIdName) }
-func (n Nodes) Format(s fmt.State, verb rune)              { n.format(s, verb, FErr) }
-
-func (m fmtMode) Fprintf(s fmt.State, format string, args ...interface{}) {
-	m.prepareArgs(args)
-	fmt.Fprintf(s, format, args...)
-}
-
-func (m fmtMode) Sprintf(format string, args ...interface{}) string {
-	m.prepareArgs(args)
-	return fmt.Sprintf(format, args...)
-}
-
-func (m fmtMode) Sprint(args ...interface{}) string {
-	m.prepareArgs(args)
-	return fmt.Sprint(args...)
-}
-
-func (m fmtMode) prepareArgs(args []interface{}) {
-	switch m {
-	case FErr:
-		for i, arg := range args {
-			switch arg := arg.(type) {
-			case Op:
-				args[i] = fmtOpErr(arg)
-			case *Node:
-				args[i] = (*fmtNodeErr)(arg)
-			case *types.Type:
-				args[i] = (*fmtTypeErr)(arg)
-			case *types.Sym:
-				args[i] = (*fmtSymErr)(arg)
-			case Nodes:
-				args[i] = fmtNodesErr(arg)
-			case Val, int32, int64, string, types.EType:
-				// OK: printing these types doesn't depend on mode
-			default:
-				Fatalf("mode.prepareArgs type %T", arg)
-			}
-		}
-	case FDbg:
-		for i, arg := range args {
-			switch arg := arg.(type) {
-			case Op:
-				args[i] = fmtOpDbg(arg)
-			case *Node:
-				args[i] = (*fmtNodeDbg)(arg)
-			case *types.Type:
-				args[i] = (*fmtTypeDbg)(arg)
-			case *types.Sym:
-				args[i] = (*fmtSymDbg)(arg)
-			case Nodes:
-				args[i] = fmtNodesDbg(arg)
-			case Val, int32, int64, string, types.EType:
-				// OK: printing these types doesn't depend on mode
-			default:
-				Fatalf("mode.prepareArgs type %T", arg)
-			}
-		}
-	case FTypeId:
-		for i, arg := range args {
-			switch arg := arg.(type) {
-			case Op:
-				args[i] = fmtOpTypeId(arg)
-			case *Node:
-				args[i] = (*fmtNodeTypeId)(arg)
-			case *types.Type:
-				args[i] = (*fmtTypeTypeId)(arg)
-			case *types.Sym:
-				args[i] = (*fmtSymTypeId)(arg)
-			case Nodes:
-				args[i] = fmtNodesTypeId(arg)
-			case Val, int32, int64, string, types.EType:
-				// OK: printing these types doesn't depend on mode
-			default:
-				Fatalf("mode.prepareArgs type %T", arg)
-			}
-		}
-	case FTypeIdName:
-		for i, arg := range args {
-			switch arg := arg.(type) {
-			case Op:
-				args[i] = fmtOpTypeIdName(arg)
-			case *Node:
-				args[i] = (*fmtNodeTypeIdName)(arg)
-			case *types.Type:
-				args[i] = (*fmtTypeTypeIdName)(arg)
-			case *types.Sym:
-				args[i] = (*fmtSymTypeIdName)(arg)
-			case Nodes:
-				args[i] = fmtNodesTypeIdName(arg)
-			case Val, int32, int64, string, types.EType:
-				// OK: printing these types doesn't depend on mode
-			default:
-				Fatalf("mode.prepareArgs type %T", arg)
-			}
-		}
-	default:
-		Fatalf("mode.prepareArgs mode %d", m)
-	}
-}
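
The mode-specific wrapper types above exist because fmt.Formatter offers no side channel for extra context: rewriting each argument to a distinct named type lets its Format method carry the mode implicitly. The trick in miniature; item and itemDebug are invented:

	package main

	import "fmt"

	type item struct{ name string }

	type itemDebug item // same data, different Format method

	func (i item) Format(s fmt.State, verb rune)      { fmt.Fprintf(s, "%s", i.name) }
	func (i itemDebug) Format(s fmt.State, verb rune) { fmt.Fprintf(s, "debug(%s)", i.name) }

	// prepare rewrites args in place, as prepareArgs does per mode.
	func prepare(args []interface{}) {
		for i, a := range args {
			if it, ok := a.(item); ok {
				args[i] = itemDebug(it)
			}
		}
	}

	func main() {
		args := []interface{}{item{"x"}}
		fmt.Printf("%v ", args...) // x
		prepare(args)
		fmt.Printf("%v\n", args...) // debug(x)
	}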
-
-func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
-	switch verb {
-	case 'v', 'S', 'L':
-		n.nconv(s, fmtFlag(s, verb), mode)
-
-	case 'j':
-		n.jconv(s, fmtFlag(s, verb))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
-	}
-}
-
-// *Node details
-func (n *Node) jconv(s fmt.State, flag FmtFlag) {
-	c := flag & FmtShort
-
-	// Useful to see which nodes in a Node Dump/dumplist are actually identical
-	if Debug_dumpptrs != 0 {
-		fmt.Fprintf(s, " p(%p)", n)
-	}
-	if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
-		fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
-	}
-
-	if Debug_dumpptrs != 0 && c == 0 && n.Name != nil && n.Name.Defn != nil {
-		// Useful to see where Defn is set and what node it points to
-		fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
-	}
-
-	if n.Pos.IsKnown() {
-		pfx := ""
-		switch n.Pos.IsStmt() {
-		case src.PosNotStmt:
-			pfx = "_" // "-" would be confusing
-		case src.PosIsStmt:
-			pfx = "+"
-		}
-		fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line())
-	}
-
-	if c == 0 && n.Xoffset != BADWIDTH {
-		fmt.Fprintf(s, " x(%d)", n.Xoffset)
-	}
-
-	if n.Class() != 0 {
-		fmt.Fprintf(s, " class(%v)", n.Class())
-	}
-
-	if n.Colas() {
-		fmt.Fprintf(s, " colas(%v)", n.Colas())
-	}
-
-	switch n.Esc {
-	case EscUnknown:
-		break
-
-	case EscHeap:
-		fmt.Fprint(s, " esc(h)")
-
-	case EscNone:
-		fmt.Fprint(s, " esc(no)")
-
-	case EscNever:
-		if c == 0 {
-			fmt.Fprint(s, " esc(N)")
-		}
-
-	default:
-		fmt.Fprintf(s, " esc(%d)", n.Esc)
-	}
-
-	if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
-		fmt.Fprintf(s, " ld(%d)", e.loopDepth)
-	}
-
-	if c == 0 && n.Typecheck() != 0 {
-		fmt.Fprintf(s, " tc(%d)", n.Typecheck())
-	}
-
-	if n.IsDDD() {
-		fmt.Fprintf(s, " isddd(%v)", n.IsDDD())
-	}
-
-	if n.Implicit() {
-		fmt.Fprintf(s, " implicit(%v)", n.Implicit())
-	}
-
-	if n.Embedded() {
-		fmt.Fprintf(s, " embedded")
-	}
-
-	if n.Op == ONAME {
-		if n.Name.Addrtaken() {
-			fmt.Fprint(s, " addrtaken")
-		}
-		if n.Name.Assigned() {
-			fmt.Fprint(s, " assigned")
-		}
-		if n.Name.IsClosureVar() {
-			fmt.Fprint(s, " closurevar")
-		}
-		if n.Name.Captured() {
-			fmt.Fprint(s, " captured")
-		}
-		if n.Name.IsOutputParamHeapAddr() {
-			fmt.Fprint(s, " outputparamheapaddr")
-		}
-	}
-	if n.Bounded() {
-		fmt.Fprint(s, " bounded")
-	}
-	if n.NonNil() {
-		fmt.Fprint(s, " nonnil")
-	}
-
-	if c == 0 && n.HasCall() {
-		fmt.Fprint(s, " hascall")
-	}
-
-	if c == 0 && n.Name != nil && n.Name.Used() {
-		fmt.Fprint(s, " used")
-	}
-}
-
-func (v Val) Format(s fmt.State, verb rune) {
-	switch verb {
-	case 'v':
-		v.vconv(s, fmtFlag(s, verb))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
-	}
-}
-
-func (v Val) vconv(s fmt.State, flag FmtFlag) {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if !u.Rune {
-			if flag&FmtSharp != 0 {
-				fmt.Fprint(s, u.String())
-				return
-			}
-			fmt.Fprint(s, u.GoString())
-			return
-		}
-
-		switch x := u.Int64(); {
-		case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
-			fmt.Fprintf(s, "'%c'", int(x))
-
-		case 0 <= x && x < 1<<16:
-			fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
-
-		case 0 <= x && x <= utf8.MaxRune:
-			fmt.Fprintf(s, "'\\U%08x'", uint64(x))
-
-		default:
-			fmt.Fprintf(s, "('\\x00' + %v)", u)
-		}
-
-	case *Mpflt:
-		if flag&FmtSharp != 0 {
-			fmt.Fprint(s, u.String())
-			return
-		}
-		fmt.Fprint(s, u.GoString())
-		return
-
-	case *Mpcplx:
-		if flag&FmtSharp != 0 {
-			fmt.Fprint(s, u.String())
-			return
-		}
-		fmt.Fprint(s, u.GoString())
-		return
-
-	case string:
-		fmt.Fprint(s, strconv.Quote(u))
-
-	case bool:
-		fmt.Fprint(s, u)
-
-	case *NilVal:
-		fmt.Fprint(s, "nil")
-
-	default:
-		fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
-	}
-}
-
-/*
-s%,%,\n%g
-s%\n+%\n%g
-s%^[	]*T%%g
-s%,.*%%g
-s%.+%	[T&]		= "&",%g
-s%^	........*\]%&~%g
-s%~	%%g
-*/
-
-func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
-	if flag&FmtShort == 0 {
-		switch mode {
-		case FErr: // This is for the user
-			if s.Pkg == builtinpkg || s.Pkg == localpkg {
-				b.WriteString(s.Name)
-				return
-			}
-
-			// If the name was used by multiple packages, display the full path,
-			if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
-				fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name)
-				return
-			}
-			b.WriteString(s.Pkg.Name)
-			b.WriteByte('.')
-			b.WriteString(s.Name)
-			return
-
-		case FDbg:
-			b.WriteString(s.Pkg.Name)
-			b.WriteByte('.')
-			b.WriteString(s.Name)
-			return
-
-		case FTypeIdName:
-			// dcommontype, typehash
-			b.WriteString(s.Pkg.Name)
-			b.WriteByte('.')
-			b.WriteString(s.Name)
-			return
-
-		case FTypeId:
-			// (methodsym), typesym, weaksym
-			b.WriteString(s.Pkg.Prefix)
-			b.WriteByte('.')
-			b.WriteString(s.Name)
-			return
-		}
-	}
-
-	if flag&FmtByte != 0 {
-		// FmtByte (hh) implies FmtShort (h)
-		// skip leading "type." in method name
-		name := s.Name
-		if i := strings.LastIndex(name, "."); i >= 0 {
-			name = name[i+1:]
-		}
-
-		if mode == FDbg {
-			fmt.Fprintf(b, "@%q.%s", s.Pkg.Path, name)
-			return
-		}
-
-		b.WriteString(name)
-		return
-	}
-
-	b.WriteString(s.Name)
-}
-
-var basicnames = []string{
-	TINT:        "int",
-	TUINT:       "uint",
-	TINT8:       "int8",
-	TUINT8:      "uint8",
-	TINT16:      "int16",
-	TUINT16:     "uint16",
-	TINT32:      "int32",
-	TUINT32:     "uint32",
-	TINT64:      "int64",
-	TUINT64:     "uint64",
-	TUINTPTR:    "uintptr",
-	TFLOAT32:    "float32",
-	TFLOAT64:    "float64",
-	TCOMPLEX64:  "complex64",
-	TCOMPLEX128: "complex128",
-	TBOOL:       "bool",
-	TANY:        "any",
-	TSTRING:     "string",
-	TNIL:        "nil",
-	TIDEAL:      "untyped number",
-	TBLANK:      "blank",
-}
-
-var fmtBufferPool = sync.Pool{
-	New: func() interface{} {
-		return new(bytes.Buffer)
-	},
-}
-
-func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string {
-	buf := fmtBufferPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	defer fmtBufferPool.Put(buf)
-
-	tconv2(buf, t, flag, mode, nil)
-	return types.InternString(buf.Bytes())
-}
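
tconv's buffer handling is the usual sync.Pool idiom: get, Reset, defer Put, and copy the bytes out before the buffer is recycled (here via String; the compiler additionally interns the result). A standalone sketch:

	package main

	import (
		"bytes"
		"fmt"
		"sync"
	)

	var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

	func render(s string) string {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset()
		defer bufPool.Put(buf)

		fmt.Fprintf(buf, "<%s>", s)
		return buf.String() // String copies, so reuse after Put is safe
	}

	func main() {
		fmt.Println(render("int"), render("[]byte")) // <int> <[]byte>
	}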
-
-// tconv2 writes a string representation of t to b.
-// flag and mode control exactly what is printed.
-// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
-// See #16897 before changing the implementation of tconv.
-func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited map[*types.Type]int) {
-	if off, ok := visited[t]; ok {
-		// We've seen this type before, so we're trying to print it recursively.
-		// Print a reference to it instead.
-		fmt.Fprintf(b, "@%d", off)
-		return
-	}
-	if t == nil {
-		b.WriteString("<T>")
-		return
-	}
-	if t.Etype == types.TSSA {
-		b.WriteString(t.Extra.(string))
-		return
-	}
-	if t.Etype == types.TTUPLE {
-		b.WriteString(t.FieldType(0).String())
-		b.WriteByte(',')
-		b.WriteString(t.FieldType(1).String())
-		return
-	}
-
-	if t.Etype == types.TRESULTS {
-		tys := t.Extra.(*types.Results).Types
-		for i, et := range tys {
-			if i > 0 {
-				b.WriteByte(',')
-			}
-			b.WriteString(et.String())
-		}
-		return
-	}
-
-	flag, mode = flag.update(mode)
-	if mode == FTypeIdName {
-		flag |= FmtUnsigned
-	}
-	if t == types.Bytetype || t == types.Runetype {
-		// in %-T mode collapse rune and byte with their originals.
-		switch mode {
-		case FTypeIdName, FTypeId:
-			t = types.Types[t.Etype]
-		default:
-			sconv2(b, t.Sym, FmtShort, mode)
-			return
-		}
-	}
-	if t == types.Errortype {
-		b.WriteString("error")
-		return
-	}
-
-	// Unless the 'L' flag was specified, if the type has a name, just print that name.
-	if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
-		switch mode {
-		case FTypeId, FTypeIdName:
-			if flag&FmtShort != 0 {
-				if t.Vargen != 0 {
-					sconv2(b, t.Sym, FmtShort, mode)
-					fmt.Fprintf(b, "·%d", t.Vargen)
-					return
-				}
-				sconv2(b, t.Sym, FmtShort, mode)
-				return
-			}
-
-			if mode == FTypeIdName {
-				sconv2(b, t.Sym, FmtUnsigned, mode)
-				return
-			}
-
-			if t.Sym.Pkg == localpkg && t.Vargen != 0 {
-				b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen))
-				return
-			}
-		}
-
-		sconv2(b, t.Sym, 0, mode)
-		return
-	}
-
-	if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
-		var name string
-		switch t {
-		case types.UntypedBool:
-			name = "untyped bool"
-		case types.UntypedString:
-			name = "untyped string"
-		case types.UntypedInt:
-			name = "untyped int"
-		case types.UntypedRune:
-			name = "untyped rune"
-		case types.UntypedFloat:
-			name = "untyped float"
-		case types.UntypedComplex:
-			name = "untyped complex"
-		default:
-			name = basicnames[t.Etype]
-		}
-		b.WriteString(name)
-		return
-	}
-
-	if mode == FDbg {
-		b.WriteString(t.Etype.String())
-		b.WriteByte('-')
-		tconv2(b, t, flag, FErr, visited)
-		return
-	}
-
-	// At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
-	// try to print it recursively.
-	// We record the offset in the result buffer where the type's text starts. This offset serves as a reference
-	// point for any later references to the same type.
-	// Note that we remove the type from the visited map as soon as the recursive call is done.
-	// This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
-	// but I'd like to use the @ notation only when strictly necessary.)
-	if visited == nil {
-		visited = map[*types.Type]int{}
-	}
-	visited[t] = b.Len()
-	defer delete(visited, t)
-
-	switch t.Etype {
-	case TPTR:
-		b.WriteByte('*')
-		switch mode {
-		case FTypeId, FTypeIdName:
-			if flag&FmtShort != 0 {
-				tconv2(b, t.Elem(), FmtShort, mode, visited)
-				return
-			}
-		}
-		tconv2(b, t.Elem(), 0, mode, visited)
-
-	case TARRAY:
-		b.WriteByte('[')
-		b.WriteString(strconv.FormatInt(t.NumElem(), 10))
-		b.WriteByte(']')
-		tconv2(b, t.Elem(), 0, mode, visited)
-
-	case TSLICE:
-		b.WriteString("[]")
-		tconv2(b, t.Elem(), 0, mode, visited)
-
-	case TCHAN:
-		switch t.ChanDir() {
-		case types.Crecv:
-			b.WriteString("<-chan ")
-			tconv2(b, t.Elem(), 0, mode, visited)
-		case types.Csend:
-			b.WriteString("chan<- ")
-			tconv2(b, t.Elem(), 0, mode, visited)
-		default:
-			b.WriteString("chan ")
-			if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
-				b.WriteByte('(')
-				tconv2(b, t.Elem(), 0, mode, visited)
-				b.WriteByte(')')
-			} else {
-				tconv2(b, t.Elem(), 0, mode, visited)
-			}
-		}
-
-	case TMAP:
-		b.WriteString("map[")
-		tconv2(b, t.Key(), 0, mode, visited)
-		b.WriteByte(']')
-		tconv2(b, t.Elem(), 0, mode, visited)
-
-	case TINTER:
-		if t.IsEmptyInterface() {
-			b.WriteString("interface {}")
-			break
-		}
-		b.WriteString("interface {")
-		for i, f := range t.Fields().Slice() {
-			if i != 0 {
-				b.WriteByte(';')
-			}
-			b.WriteByte(' ')
-			switch {
-			case f.Sym == nil:
-				// Check first that a symbol is defined for this type.
-				// Wrong interface definitions may have types lacking a symbol.
-				break
-			case types.IsExported(f.Sym.Name):
-				sconv2(b, f.Sym, FmtShort, mode)
-			default:
-				flag1 := FmtLeft
-				if flag&FmtUnsigned != 0 {
-					flag1 = FmtUnsigned
-				}
-				sconv2(b, f.Sym, flag1, mode)
-			}
-			tconv2(b, f.Type, FmtShort, mode, visited)
-		}
-		if t.NumFields() != 0 {
-			b.WriteByte(' ')
-		}
-		b.WriteByte('}')
-
-	case TFUNC:
-		if flag&FmtShort != 0 {
-			// no leading func
-		} else {
-			if t.Recv() != nil {
-				b.WriteString("method")
-				tconv2(b, t.Recvs(), 0, mode, visited)
-				b.WriteByte(' ')
-			}
-			b.WriteString("func")
-		}
-		tconv2(b, t.Params(), 0, mode, visited)
-
-		switch t.NumResults() {
-		case 0:
-			// nothing to do
-
-		case 1:
-			b.WriteByte(' ')
-			tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
-
-		default:
-			b.WriteByte(' ')
-			tconv2(b, t.Results(), 0, mode, visited)
-		}
-
-	case TSTRUCT:
-		if m := t.StructType().Map; m != nil {
-			mt := m.MapType()
-			// Format the bucket struct for map[x]y as map.bucket[x]y.
-			// This avoids a recursive print that generates very long names.
-			switch t {
-			case mt.Bucket:
-				b.WriteString("map.bucket[")
-			case mt.Hmap:
-				b.WriteString("map.hdr[")
-			case mt.Hiter:
-				b.WriteString("map.iter[")
-			default:
-				Fatalf("unknown internal map type")
-			}
-			tconv2(b, m.Key(), 0, mode, visited)
-			b.WriteByte(']')
-			tconv2(b, m.Elem(), 0, mode, visited)
-			break
-		}
-
-		if funarg := t.StructType().Funarg; funarg != types.FunargNone {
-			b.WriteByte('(')
-			var flag1 FmtFlag
-			switch mode {
-			case FTypeId, FTypeIdName, FErr:
-				// no argument names on function signature, and no "noescape"/"nosplit" tags
-				flag1 = FmtShort
-			}
-			for i, f := range t.Fields().Slice() {
-				if i != 0 {
-					b.WriteString(", ")
-				}
-				fldconv(b, f, flag1, mode, visited, funarg)
-			}
-			b.WriteByte(')')
-		} else {
-			b.WriteString("struct {")
-			for i, f := range t.Fields().Slice() {
-				if i != 0 {
-					b.WriteByte(';')
-				}
-				b.WriteByte(' ')
-				fldconv(b, f, FmtLong, mode, visited, funarg)
-			}
-			if t.NumFields() != 0 {
-				b.WriteByte(' ')
-			}
-			b.WriteByte('}')
-		}
-
-	case TFORW:
-		b.WriteString("undefined")
-		if t.Sym != nil {
-			b.WriteByte(' ')
-			sconv2(b, t.Sym, 0, mode)
-		}
-
-	case TUNSAFEPTR:
-		b.WriteString("unsafe.Pointer")
-
-	case Txxx:
-		b.WriteString("Txxx")
-	default:
-		// Don't know how to handle - fall back to detailed prints.
-		b.WriteString(mode.Sprintf("%v <%v>", t.Etype, t.Sym))
-	}
-}
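
The visited map gives tconv2 a recursion guard for cyclic types: the first visit records the output offset, a revisit prints @offset, and the deferred delete keeps mere sharing (as opposed to true cycles) from triggering the @ notation. Reduced to a toy recursive structure; ty is invented:

	package main

	import "fmt"

	type ty struct {
		name string
		elem *ty
	}

	func write(b *[]byte, t *ty, visited map[*ty]int) {
		if off, ok := visited[t]; ok {
			*b = append(*b, fmt.Sprintf("@%d", off)...)
			return
		}
		visited[t] = len(*b) // offset where t's text begins
		defer delete(visited, t)

		*b = append(*b, t.name...)
		if t.elem != nil {
			*b = append(*b, '[')
			write(b, t.elem, visited)
			*b = append(*b, ']')
		}
	}

	func main() {
		self := &ty{name: "list"}
		self.elem = self // a cyclic type
		var b []byte
		write(&b, self, map[*ty]int{})
		fmt.Println(string(b)) // list[@0]
	}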
-
-// Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op Op) bool {
-	switch op {
-	case OIF, OFOR, OFORUNTIL, OSWITCH:
-		return true
-	}
-
-	return false
-}
-
-func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
-	// some statements allow for an init, but at most one,
-	// but we may have an arbitrary number added, eg by typecheck
-	// and inlining. If it doesn't fit the syntax, emit an enclosing
-	// block starting with the init statements.
-
-	// if we can just say "for" n->ninit; ... then do so
-	simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)
-
-	// otherwise, print the inits as separate statements
-	complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr)
-
-	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
-	extrablock := complexinit && stmtwithinit(n.Op)
-
-	if extrablock {
-		fmt.Fprint(s, "{")
-	}
-
-	if complexinit {
-		mode.Fprintf(s, " %v; ", n.Ninit)
-	}
-
-	switch n.Op {
-	case ODCL:
-		mode.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)
-
-	case ODCLFIELD:
-		if n.Sym != nil {
-			mode.Fprintf(s, "%v %v", n.Sym, n.Left)
-		} else {
-			mode.Fprintf(s, "%v", n.Left)
-		}
-
-	// Don't export "v = <N>" initializing statements, hope they're always
-	// preceded by the DCL which will be re-parsed and typechecked to reproduce
-	// the "v = <N>" again.
-	case OAS:
-		if n.Colas() && !complexinit {
-			mode.Fprintf(s, "%v := %v", n.Left, n.Right)
-		} else {
-			mode.Fprintf(s, "%v = %v", n.Left, n.Right)
-		}
-
-	case OASOP:
-		if n.Implicit() {
-			if n.SubOp() == OADD {
-				mode.Fprintf(s, "%v++", n.Left)
-			} else {
-				mode.Fprintf(s, "%v--", n.Left)
-			}
-			break
-		}
-
-		mode.Fprintf(s, "%v %#v= %v", n.Left, n.SubOp(), n.Right)
-
-	case OAS2:
-		if n.Colas() && !complexinit {
-			mode.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
-			break
-		}
-		fallthrough
-
-	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-		mode.Fprintf(s, "%.v = %v", n.List, n.Right)
-
-	case ORETURN:
-		mode.Fprintf(s, "return %.v", n.List)
-
-	case ORETJMP:
-		mode.Fprintf(s, "retjmp %v", n.Sym)
-
-	case OINLMARK:
-		mode.Fprintf(s, "inlmark %d", n.Xoffset)
-
-	case OGO:
-		mode.Fprintf(s, "go %v", n.Left)
-
-	case ODEFER:
-		mode.Fprintf(s, "defer %v", n.Left)
-
-	case OIF:
-		if simpleinit {
-			mode.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
-		} else {
-			mode.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
-		}
-		if n.Rlist.Len() != 0 {
-			mode.Fprintf(s, " else { %v }", n.Rlist)
-		}
-
-	case OFOR, OFORUNTIL:
-		opname := "for"
-		if n.Op == OFORUNTIL {
-			opname = "foruntil"
-		}
-		if mode == FErr { // TODO maybe only if FmtShort, same below
-			fmt.Fprintf(s, "%s loop", opname)
-			break
-		}
-
-		fmt.Fprint(s, opname)
-		if simpleinit {
-			mode.Fprintf(s, " %v;", n.Ninit.First())
-		} else if n.Right != nil {
-			fmt.Fprint(s, " ;")
-		}
-
-		if n.Left != nil {
-			mode.Fprintf(s, " %v", n.Left)
-		}
-
-		if n.Right != nil {
-			mode.Fprintf(s, "; %v", n.Right)
-		} else if simpleinit {
-			fmt.Fprint(s, ";")
-		}
-
-		if n.Op == OFORUNTIL && n.List.Len() != 0 {
-			mode.Fprintf(s, "; %v", n.List)
-		}
-
-		mode.Fprintf(s, " { %v }", n.Nbody)
-
-	case ORANGE:
-		if mode == FErr {
-			fmt.Fprint(s, "for loop")
-			break
-		}
-
-		if n.List.Len() == 0 {
-			mode.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
-			break
-		}
-
-		mode.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)
-
-	case OSELECT, OSWITCH:
-		if mode == FErr {
-			mode.Fprintf(s, "%v statement", n.Op)
-			break
-		}
-
-		mode.Fprintf(s, "%#v", n.Op)
-		if simpleinit {
-			mode.Fprintf(s, " %v;", n.Ninit.First())
-		}
-		if n.Left != nil {
-			mode.Fprintf(s, " %v ", n.Left)
-		}
-
-		mode.Fprintf(s, " { %v }", n.List)
-
-	case OCASE:
-		if n.List.Len() != 0 {
-			mode.Fprintf(s, "case %.v", n.List)
-		} else {
-			fmt.Fprint(s, "default")
-		}
-		mode.Fprintf(s, ": %v", n.Nbody)
-
-	case OBREAK, OCONTINUE, OGOTO, OFALL:
-		if n.Sym != nil {
-			mode.Fprintf(s, "%#v %v", n.Op, n.Sym)
-		} else {
-			mode.Fprintf(s, "%#v", n.Op)
-		}
-
-	case OEMPTY:
-		break
-
-	case OLABEL:
-		mode.Fprintf(s, "%v: ", n.Sym)
-	}
-
-	if extrablock {
-		fmt.Fprint(s, "}")
-	}
-}
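
The simpleinit/complexinit/extrablock dance reduces to: one init fits the statement's own syntax, several force an enclosing block so scoping still reads correctly. A toy renderer making that concrete; stmt is an invented representation, not the compiler's Node:

	package main

	import (
		"fmt"
		"strings"
	)

	type stmt struct {
		inits []string
		cond  string
		body  string
	}

	func render(s stmt) string {
		if len(s.inits) == 1 { // simpleinit: fits "if init; cond"
			return fmt.Sprintf("if %s; %s { %s }", s.inits[0], s.cond, s.body)
		}
		// complexinit + extrablock: hoist inits, wrap to limit scope.
		return fmt.Sprintf("{ %s; if %s { %s } }",
			strings.Join(s.inits, "; "), s.cond, s.body)
	}

	func main() {
		fmt.Println(render(stmt{[]string{"x := f()"}, "x", "g()"}))
		fmt.Println(render(stmt{[]string{"x := f()", "y := h()"}, "x", "g()"}))
	}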
-
-var opprec = []int{
-	OALIGNOF:       8,
-	OAPPEND:        8,
-	OBYTES2STR:     8,
-	OARRAYLIT:      8,
-	OSLICELIT:      8,
-	ORUNES2STR:     8,
-	OCALLFUNC:      8,
-	OCALLINTER:     8,
-	OCALLMETH:      8,
-	OCALL:          8,
-	OCAP:           8,
-	OCLOSE:         8,
-	OCONVIFACE:     8,
-	OCONVNOP:       8,
-	OCONV:          8,
-	OCOPY:          8,
-	ODELETE:        8,
-	OGETG:          8,
-	OLEN:           8,
-	OLITERAL:       8,
-	OMAKESLICE:     8,
-	OMAKESLICECOPY: 8,
-	OMAKE:          8,
-	OMAPLIT:        8,
-	ONAME:          8,
-	ONEW:           8,
-	ONONAME:        8,
-	OOFFSETOF:      8,
-	OPACK:          8,
-	OPANIC:         8,
-	OPAREN:         8,
-	OPRINTN:        8,
-	OPRINT:         8,
-	ORUNESTR:       8,
-	OSIZEOF:        8,
-	OSTR2BYTES:     8,
-	OSTR2RUNES:     8,
-	OSTRUCTLIT:     8,
-	OTARRAY:        8,
-	OTCHAN:         8,
-	OTFUNC:         8,
-	OTINTER:        8,
-	OTMAP:          8,
-	OTSTRUCT:       8,
-	OINDEXMAP:      8,
-	OINDEX:         8,
-	OSLICE:         8,
-	OSLICESTR:      8,
-	OSLICEARR:      8,
-	OSLICE3:        8,
-	OSLICE3ARR:     8,
-	OSLICEHEADER:   8,
-	ODOTINTER:      8,
-	ODOTMETH:       8,
-	ODOTPTR:        8,
-	ODOTTYPE2:      8,
-	ODOTTYPE:       8,
-	ODOT:           8,
-	OXDOT:          8,
-	OCALLPART:      8,
-	OPLUS:          7,
-	ONOT:           7,
-	OBITNOT:        7,
-	ONEG:           7,
-	OADDR:          7,
-	ODEREF:         7,
-	ORECV:          7,
-	OMUL:           6,
-	ODIV:           6,
-	OMOD:           6,
-	OLSH:           6,
-	ORSH:           6,
-	OAND:           6,
-	OANDNOT:        6,
-	OADD:           5,
-	OSUB:           5,
-	OOR:            5,
-	OXOR:           5,
-	OEQ:            4,
-	OLT:            4,
-	OLE:            4,
-	OGE:            4,
-	OGT:            4,
-	ONE:            4,
-	OSEND:          3,
-	OANDAND:        2,
-	OOROR:          1,
-
-	// Statements handled by stmtfmt
-	OAS:         -1,
-	OAS2:        -1,
-	OAS2DOTTYPE: -1,
-	OAS2FUNC:    -1,
-	OAS2MAPR:    -1,
-	OAS2RECV:    -1,
-	OASOP:       -1,
-	OBREAK:      -1,
-	OCASE:       -1,
-	OCONTINUE:   -1,
-	ODCL:        -1,
-	ODCLFIELD:   -1,
-	ODEFER:      -1,
-	OEMPTY:      -1,
-	OFALL:       -1,
-	OFOR:        -1,
-	OFORUNTIL:   -1,
-	OGOTO:       -1,
-	OIF:         -1,
-	OLABEL:      -1,
-	OGO:         -1,
-	ORANGE:      -1,
-	ORETURN:     -1,
-	OSELECT:     -1,
-	OSWITCH:     -1,
-
-	OEND: 0,
-}
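
opprec feeds exprfmt's parenthesization rule below: a child is wrapped only when the context binds tighter than the child's own operator, and the right operand of a binary op is rendered at nprec+1 to keep left associativity. A toy version; expr and its precedences are invented:

	package main

	import "fmt"

	type expr struct {
		op          string
		prec        int
		left, right *expr
	}

	func render(e *expr, prec int) string {
		if e.left == nil {
			return e.op // leaf
		}
		s := render(e.left, e.prec) + " " + e.op + " " + render(e.right, e.prec+1)
		if prec > e.prec {
			return "(" + s + ")"
		}
		return s
	}

	func main() {
		// (a + b) * c: the '+' child of '*' needs parens; c does not.
		sum := &expr{op: "+", prec: 5, left: &expr{op: "a"}, right: &expr{op: "b"}}
		prod := &expr{op: "*", prec: 6, left: sum, right: &expr{op: "c"}}
		fmt.Println(render(prod, 0)) // (a + b) * c
	}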
-
-func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
-	for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) {
-		n = n.Left
-	}
-
-	if n == nil {
-		fmt.Fprint(s, "<N>")
-		return
-	}
-
-	nprec := opprec[n.Op]
-	if n.Op == OTYPE && n.Sym != nil {
-		nprec = 8
-	}
-
-	if prec > nprec {
-		mode.Fprintf(s, "(%v)", n)
-		return
-	}
-
-	switch n.Op {
-	case OPAREN:
-		mode.Fprintf(s, "(%v)", n.Left)
-
-	case OLITERAL: // this is a bit of a mess
-		if mode == FErr {
-			if n.Orig != nil && n.Orig != n {
-				n.Orig.exprfmt(s, prec, mode)
-				return
-			}
-			if n.Sym != nil {
-				fmt.Fprint(s, smodeString(n.Sym, mode))
-				return
-			}
-		}
-		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
-			n.Orig.exprfmt(s, prec, mode)
-			return
-		}
-		if n.Type != nil && !n.Type.IsUntyped() {
-			// Need parens when type begins with what might
-			// be misinterpreted as a unary operator: * or <-.
-			if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
-				mode.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
-				return
-			} else {
-				mode.Fprintf(s, "%v(%v)", n.Type, n.Val())
-				return
-			}
-		}
-
-		mode.Fprintf(s, "%v", n.Val())
-
-	// Special case: name used as local variable in export.
-	// _ becomes ~b%d internally; print as _ for export
-	case ONAME:
-		if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
-			fmt.Fprint(s, "_")
-			return
-		}
-		fallthrough
-	case OPACK, ONONAME:
-		fmt.Fprint(s, smodeString(n.Sym, mode))
-
-	case OTYPE:
-		if n.Type == nil && n.Sym != nil {
-			fmt.Fprint(s, smodeString(n.Sym, mode))
-			return
-		}
-		mode.Fprintf(s, "%v", n.Type)
-
-	case OTARRAY:
-		if n.Left != nil {
-			mode.Fprintf(s, "[%v]%v", n.Left, n.Right)
-			return
-		}
-		mode.Fprintf(s, "[]%v", n.Right) // happens before typecheck
-
-	case OTMAP:
-		mode.Fprintf(s, "map[%v]%v", n.Left, n.Right)
-
-	case OTCHAN:
-		switch n.TChanDir() {
-		case types.Crecv:
-			mode.Fprintf(s, "<-chan %v", n.Left)
-
-		case types.Csend:
-			mode.Fprintf(s, "chan<- %v", n.Left)
-
-		default:
-			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.TChanDir() == types.Crecv {
-				mode.Fprintf(s, "chan (%v)", n.Left)
-			} else {
-				mode.Fprintf(s, "chan %v", n.Left)
-			}
-		}
-
-	case OTSTRUCT:
-		fmt.Fprint(s, "<struct>")
-
-	case OTINTER:
-		fmt.Fprint(s, "<inter>")
-
-	case OTFUNC:
-		fmt.Fprint(s, "<func>")
-
-	case OCLOSURE:
-		if mode == FErr {
-			fmt.Fprint(s, "func literal")
-			return
-		}
-		if n.Nbody.Len() != 0 {
-			mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
-			return
-		}
-		mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
-
-	case OCOMPLIT:
-		if mode == FErr {
-			if n.Implicit() {
-				mode.Fprintf(s, "... argument")
-				return
-			}
-			if n.Right != nil {
-				mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0))
-				return
-			}
-
-			fmt.Fprint(s, "composite literal")
-			return
-		}
-		mode.Fprintf(s, "(%v{ %.v })", n.Right, n.List)
-
-	case OPTRLIT:
-		mode.Fprintf(s, "&%v", n.Left)
-
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
-		if mode == FErr {
-			mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0))
-			return
-		}
-		mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
-
-	case OKEY:
-		if n.Left != nil && n.Right != nil {
-			mode.Fprintf(s, "%v:%v", n.Left, n.Right)
-			return
-		}
-
-		if n.Left == nil && n.Right != nil {
-			mode.Fprintf(s, ":%v", n.Right)
-			return
-		}
-		if n.Left != nil && n.Right == nil {
-			mode.Fprintf(s, "%v:", n.Left)
-			return
-		}
-		fmt.Fprint(s, ":")
-
-	case OSTRUCTKEY:
-		mode.Fprintf(s, "%v:%v", n.Sym, n.Left)
-
-	case OCALLPART:
-		n.Left.exprfmt(s, nprec, mode)
-		if n.Right == nil || n.Right.Sym == nil {
-			fmt.Fprint(s, ".<nil>")
-			return
-		}
-		mode.Fprintf(s, ".%0S", n.Right.Sym)
-
-	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
-		n.Left.exprfmt(s, nprec, mode)
-		if n.Sym == nil {
-			fmt.Fprint(s, ".<nil>")
-			return
-		}
-		mode.Fprintf(s, ".%0S", n.Sym)
-
-	case ODOTTYPE, ODOTTYPE2:
-		n.Left.exprfmt(s, nprec, mode)
-		if n.Right != nil {
-			mode.Fprintf(s, ".(%v)", n.Right)
-			return
-		}
-		mode.Fprintf(s, ".(%v)", n.Type)
-
-	case OINDEX, OINDEXMAP:
-		n.Left.exprfmt(s, nprec, mode)
-		mode.Fprintf(s, "[%v]", n.Right)
-
-	case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
-		n.Left.exprfmt(s, nprec, mode)
-		fmt.Fprint(s, "[")
-		low, high, max := n.SliceBounds()
-		if low != nil {
-			fmt.Fprint(s, low.modeString(mode))
-		}
-		fmt.Fprint(s, ":")
-		if high != nil {
-			fmt.Fprint(s, high.modeString(mode))
-		}
-		if n.Op.IsSlice3() {
-			fmt.Fprint(s, ":")
-			if max != nil {
-				fmt.Fprint(s, max.modeString(mode))
-			}
-		}
-		fmt.Fprint(s, "]")
-
-	case OSLICEHEADER:
-		if n.List.Len() != 2 {
-			Fatalf("bad OSLICEHEADER list length %d", n.List.Len())
-		}
-		mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second())
-
-	case OCOMPLEX, OCOPY:
-		if n.Left != nil {
-			mode.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)
-		} else {
-			mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
-		}
-
-	case OCONV,
-		OCONVIFACE,
-		OCONVNOP,
-		OBYTES2STR,
-		ORUNES2STR,
-		OSTR2BYTES,
-		OSTR2RUNES,
-		ORUNESTR:
-		if n.Type == nil || n.Type.Sym == nil {
-			mode.Fprintf(s, "(%v)", n.Type)
-		} else {
-			mode.Fprintf(s, "%v", n.Type)
-		}
-		if n.Left != nil {
-			mode.Fprintf(s, "(%v)", n.Left)
-		} else {
-			mode.Fprintf(s, "(%.v)", n.List)
-		}
-
-	case OREAL,
-		OIMAG,
-		OAPPEND,
-		OCAP,
-		OCLOSE,
-		ODELETE,
-		OLEN,
-		OMAKE,
-		ONEW,
-		OPANIC,
-		ORECOVER,
-		OALIGNOF,
-		OOFFSETOF,
-		OSIZEOF,
-		OPRINT,
-		OPRINTN:
-		if n.Left != nil {
-			mode.Fprintf(s, "%#v(%v)", n.Op, n.Left)
-			return
-		}
-		if n.IsDDD() {
-			mode.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
-			return
-		}
-		mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
-
-	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
-		n.Left.exprfmt(s, nprec, mode)
-		if n.IsDDD() {
-			mode.Fprintf(s, "(%.v...)", n.List)
-			return
-		}
-		mode.Fprintf(s, "(%.v)", n.List)
-
-	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
-		if n.List.Len() != 0 { // pre-typecheck
-			mode.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
-			return
-		}
-		if n.Right != nil {
-			mode.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
-			return
-		}
-		if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
-			mode.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
-			return
-		}
-		mode.Fprintf(s, "make(%v)", n.Type)
-
-	case OMAKESLICECOPY:
-		mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type, n.Left, n.Right)
-
-	case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
-		// Unary
-		mode.Fprintf(s, "%#v", n.Op)
-		if n.Left != nil && n.Left.Op == n.Op {
-			fmt.Fprint(s, " ")
-		}
-		n.Left.exprfmt(s, nprec+1, mode)
-
-		// Binary
-	case OADD,
-		OAND,
-		OANDAND,
-		OANDNOT,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLT,
-		OLSH,
-		OMOD,
-		OMUL,
-		ONE,
-		OOR,
-		OOROR,
-		ORSH,
-		OSEND,
-		OSUB,
-		OXOR:
-		n.Left.exprfmt(s, nprec, mode)
-		mode.Fprintf(s, " %#v ", n.Op)
-		n.Right.exprfmt(s, nprec+1, mode)
-
-	case OADDSTR:
-		for i, n1 := range n.List.Slice() {
-			if i != 0 {
-				fmt.Fprint(s, " + ")
-			}
-			n1.exprfmt(s, nprec, mode)
-		}
-	case ODDD:
-		mode.Fprintf(s, "...")
-	default:
-		mode.Fprintf(s, "<node %v>", n.Op)
-	}
-}
-
-func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
-	t := n.Type
-
-	// We almost always want the original.
-	// TODO(gri) Why the special case for OLITERAL?
-	if n.Op != OLITERAL && n.Orig != nil {
-		n = n.Orig
-	}
-
-	if flag&FmtLong != 0 && t != nil {
-		if t.Etype == TNIL {
-			fmt.Fprint(s, "nil")
-		} else if n.Op == ONAME && n.Name.AutoTemp() {
-			mode.Fprintf(s, "%v value", t)
-		} else {
-			mode.Fprintf(s, "%v (type %v)", n, t)
-		}
-		return
-	}
-
-	// TODO: inlining produces expressions with ninits; we can't print these yet.
-
-	if opprec[n.Op] < 0 {
-		n.stmtfmt(s, mode)
-		return
-	}
-
-	n.exprfmt(s, 0, mode)
-}
-
-func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
-	recur := flag&FmtShort == 0
-
-	if recur {
-		indent(s)
-		if dumpdepth > 40 {
-			fmt.Fprint(s, "...")
-			return
-		}
-
-		if n.Ninit.Len() != 0 {
-			mode.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
-			indent(s)
-		}
-	}
-
-	switch n.Op {
-	default:
-		mode.Fprintf(s, "%v%j", n.Op, n)
-
-	case OLITERAL:
-		mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)
-
-	case ONAME, ONONAME:
-		if n.Sym != nil {
-			mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
-		} else {
-			mode.Fprintf(s, "%v%j", n.Op, n)
-		}
-		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
-			indent(s)
-			mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
-		}
-
-	case OASOP:
-		mode.Fprintf(s, "%v-%v%j", n.Op, n.SubOp(), n)
-
-	case OTYPE:
-		mode.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
-		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
-			indent(s)
-			mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
-		}
-	}
-
-	if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil {
-		mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym)
-	}
-	if n.Sym != nil && n.Op != ONAME {
-		mode.Fprintf(s, " %v", n.Sym)
-	}
-
-	if n.Type != nil {
-		mode.Fprintf(s, " %v", n.Type)
-	}
-
-	if recur {
-		if n.Left != nil {
-			mode.Fprintf(s, "%v", n.Left)
-		}
-		if n.Right != nil {
-			mode.Fprintf(s, "%v", n.Right)
-		}
-		if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 {
-			indent(s)
-			// The function associated with a closure
-			mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure)
-		}
-		if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
-			indent(s)
-			// The dcls for a func or closure
-			mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl))
-		}
-		if n.List.Len() != 0 {
-			indent(s)
-			mode.Fprintf(s, "%v-list%v", n.Op, n.List)
-		}
-
-		if n.Rlist.Len() != 0 {
-			indent(s)
-			mode.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
-		}
-
-		if n.Nbody.Len() != 0 {
-			indent(s)
-			mode.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
-		}
-	}
-}
-
-// "%S" suppresses qualifying with package
-func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) {
-	switch verb {
-	case 'v', 'S':
-		fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode))
-
-	default:
-		fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
-	}
-}
-
-func smodeString(s *types.Sym, mode fmtMode) string { return sconv(s, 0, mode) }
-
-// See #16897 before changing the implementation of sconv.
-func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
-	if flag&FmtLong != 0 {
-		panic("linksymfmt")
-	}
-
-	if s == nil {
-		return "<S>"
-	}
-
-	if s.Name == "_" {
-		return "_"
-	}
-	buf := fmtBufferPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	defer fmtBufferPool.Put(buf)
-
-	flag, mode = flag.update(mode)
-	symfmt(buf, s, flag, mode)
-	return types.InternString(buf.Bytes())
-}
-
-func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
-	if flag&FmtLong != 0 {
-		panic("linksymfmt")
-	}
-	if s == nil {
-		b.WriteString("<S>")
-		return
-	}
-	if s.Name == "_" {
-		b.WriteString("_")
-		return
-	}
-
-	flag, mode = flag.update(mode)
-	symfmt(b, s, flag, mode)
-}
-
-func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visited map[*types.Type]int, funarg types.Funarg) {
-	if f == nil {
-		b.WriteString("<T>")
-		return
-	}
-	flag, mode = flag.update(mode)
-	if mode == FTypeIdName {
-		flag |= FmtUnsigned
-	}
-
-	var name string
-	if flag&FmtShort == 0 {
-		s := f.Sym
-
-		// Take the name from the original.
-		if mode == FErr {
-			s = origSym(s)
-		}
-
-		if s != nil && f.Embedded == 0 {
-			if funarg != types.FunargNone {
-				name = asNode(f.Nname).modeString(mode)
-			} else if flag&FmtLong != 0 {
-				name = mode.Sprintf("%0S", s)
-				if !types.IsExported(name) && flag&FmtUnsigned == 0 {
-					name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg)
-				}
-			} else {
-				name = smodeString(s, mode)
-			}
-		}
-	}
-
-	if name != "" {
-		b.WriteString(name)
-		b.WriteString(" ")
-	}
-
-	if f.IsDDD() {
-		var et *types.Type
-		if f.Type != nil {
-			et = f.Type.Elem()
-		}
-		b.WriteString("...")
-		tconv2(b, et, 0, mode, visited)
-	} else {
-		tconv2(b, f.Type, 0, mode, visited)
-	}
-
-	if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" {
-		b.WriteString(" ")
-		b.WriteString(strconv.Quote(f.Note))
-	}
-}
-
-// "%L"  print definition, not name
-// "%S"  omit 'func' and receiver from function types, short type names
-func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
-	switch verb {
-	case 'v', 'S', 'L':
-		fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode))
-	default:
-		fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
-	}
-}
-
-func (n *Node) String() string                 { return fmt.Sprint(n) }
-func (n *Node) modeString(mode fmtMode) string { return mode.Sprint(n) }
-
-// "%L"  suffix with "(type %T)" where possible
-// "%+S" in debug mode, don't recurse, no multiline output
-func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) {
-	if n == nil {
-		fmt.Fprint(s, "<N>")
-		return
-	}
-
-	flag, mode = flag.update(mode)
-
-	switch mode {
-	case FErr:
-		n.nodefmt(s, flag, mode)
-
-	case FDbg:
-		dumpdepth++
-		n.nodedump(s, flag, mode)
-		dumpdepth--
-
-	default:
-		Fatalf("unhandled %%N mode: %d", mode)
-	}
-}
-
-func (l Nodes) format(s fmt.State, verb rune, mode fmtMode) {
-	switch verb {
-	case 'v':
-		l.hconv(s, fmtFlag(s, verb), mode)
-
-	default:
-		fmt.Fprintf(s, "%%!%c(Nodes)", verb)
-	}
-}
-
-func (n Nodes) String() string {
-	return fmt.Sprint(n)
-}
-
-// Flags: all those of %N plus '.': separate with commas instead of semicolons.
-func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode fmtMode) {
-	if l.Len() == 0 && mode == FDbg {
-		fmt.Fprint(s, "<nil>")
-		return
-	}
-
-	flag, mode = flag.update(mode)
-	sep := "; "
-	if mode == FDbg {
-		sep = "\n"
-	} else if flag&FmtComma != 0 {
-		sep = ", "
-	}
-
-	for i, n := range l.Slice() {
-		fmt.Fprint(s, n.modeString(mode))
-		if i+1 < l.Len() {
-			fmt.Fprint(s, sep)
-		}
-	}
-}
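-
-// For example, a three-element list prints as "a; b; c" by default, as
-// "a, b, c" when the '.' (FmtComma) flag is set, and with one element
-// per line in debug mode.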
-
-func dumplist(s string, l Nodes) {
-	fmt.Printf("%s%+v\n", s, l)
-}
-
-func fdumplist(w io.Writer, s string, l Nodes) {
-	fmt.Fprintf(w, "%s%+v\n", s, l)
-}
-
-func Dump(s string, n *Node) {
-	fmt.Printf("%s [%p]%+v\n", s, n, n)
-}
-
-// TODO(gri) make variable local somehow
-var dumpdepth int
-
-// indent prints indentation to s.
-func indent(s fmt.State) {
-	fmt.Fprint(s, "\n")
-	for i := 0; i < dumpdepth; i++ {
-		fmt.Fprint(s, ".   ")
-	}
-}
-
-func ellipsisIf(b bool) string {
-	if b {
-		return "..."
-	}
-	return ""
-}
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
deleted file mode 100644
index 929653e..0000000
--- a/src/cmd/compile/internal/gc/gen.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/src"
-	"strconv"
-)
-
-// sysfunc looks up the Go function name in package runtime. The function
-// must follow the internal calling convention.
-func sysfunc(name string) *obj.LSym {
-	s := Runtimepkg.Lookup(name)
-	s.SetFunc(true)
-	return s.Linksym()
-}
-
-// sysvar looks up a variable (or assembly function) name in package
-// runtime. If this is a function, it may have a special calling
-// convention.
-func sysvar(name string) *obj.LSym {
-	return Runtimepkg.Lookup(name).Linksym()
-}
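-
-// For example, backend setup resolves runtime entry points roughly like
-// this (an illustrative sketch; the actual call sites live elsewhere):
-//
-//	newobject = sysfunc("newobject")      // function, internal calling convention
-//	writeBarrier = sysvar("writeBarrier") // variable, referenced by address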
-
-// isParamStackCopy reports whether this is the on-stack copy of a
-// function parameter that moved to the heap.
-func (n *Node) isParamStackCopy() bool {
-	return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
-}
-
-// isParamHeapCopy reports whether this is the on-heap copy of
-// a function parameter that moved to the heap.
-func (n *Node) isParamHeapCopy() bool {
-	return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
-}
-
-// autotmpname returns the name for an autotmp variable numbered n.
-func autotmpname(n int) string {
-	// Give each tmp a different name so that they can be registerized.
-	// Add a preceding . to avoid clashing with legal names.
-	const prefix = ".autotmp_"
-	// Start with a buffer big enough to hold a large n.
-	b := []byte(prefix + "      ")[:len(prefix)]
-	b = strconv.AppendInt(b, int64(n), 10)
-	return types.InternString(b)
-}
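-
-// For example, autotmpname(0) returns ".autotmp_0" and autotmpname(11)
-// returns ".autotmp_11"; the leading dot guarantees the name can never
-// collide with an identifier written in Go source.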
-
-// tempAt makes a new temporary variable ("off the books") of type t for curfn.
-func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
-	if curfn == nil {
-		Fatalf("no curfn for tempAt")
-	}
-	if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
-		Dump("tempAt", curfn)
-		Fatalf("adding tempAt to wrong closure function")
-	}
-	if t == nil {
-		Fatalf("tempAt called with nil type")
-	}
-
-	s := &types.Sym{
-		Name: autotmpname(len(curfn.Func.Dcl)),
-		Pkg:  localpkg,
-	}
-	n := newnamel(pos, s)
-	s.Def = asTypesNode(n)
-	n.Type = t
-	n.SetClass(PAUTO)
-	n.Esc = EscNever
-	n.Name.Curfn = curfn
-	n.Name.SetUsed(true)
-	n.Name.SetAutoTemp(true)
-	curfn.Func.Dcl = append(curfn.Func.Dcl, n)
-
-	dowidth(t)
-
-	return n.Orig
-}
-
-func temp(t *types.Type) *Node {
-	return tempAt(lineno, Curfn, t)
-}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
deleted file mode 100644
index 274930b..0000000
--- a/src/cmd/compile/internal/gc/go.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/ssa"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/src"
-	"sync"
-)
-
-const (
-	BADWIDTH = types.BADWIDTH
-)
-
-var (
-	// maximum size of a variable that we will allocate on the stack.
-	// This limit is for explicit variable declarations like "var x T" or "x := ...".
-	// Note: the flag smallframes can update this value.
-	maxStackVarSize = int64(10 * 1024 * 1024)
-
-	// maximum size of implicit variables that we will allocate on the stack.
-	//   p := new(T)          allocating T on the stack
-	//   p := &T{}            allocating T on the stack
-	//   s := make([]T, n)    allocating [n]T on the stack
-	//   s := []byte("...")   allocating [n]byte on the stack
-	// Note: the flag smallframes can update this value.
-	maxImplicitStackVarSize = int64(64 * 1024)
-
-	// smallArrayBytes is the maximum size of an array which is considered small.
-	// Small arrays will be initialized directly with a sequence of constant stores.
-	// Large arrays will be initialized by copying from a static temp.
-	// 256 bytes was chosen to minimize generated code + statictmp size.
-	smallArrayBytes = int64(256)
-)
-
-// isRuntimePkg reports whether p is package runtime.
-func isRuntimePkg(p *types.Pkg) bool {
-	if compiling_runtime && p == localpkg {
-		return true
-	}
-	return p.Path == "runtime"
-}
-
-// isReflectPkg reports whether p is package reflect.
-func isReflectPkg(p *types.Pkg) bool {
-	if p == localpkg {
-		return myimportpath == "reflect"
-	}
-	return p.Path == "reflect"
-}
-
-// Class describes the "storage class" of a variable or function.
-// During parsing, storage classes are called declaration contexts.
-type Class uint8
-
-//go:generate stringer -type=Class
-const (
-	Pxxx      Class = iota // no class; used during ssa conversion to indicate pseudo-variables
-	PEXTERN                // global variables
-	PAUTO                  // local variables
-	PAUTOHEAP              // local variables or parameters moved to heap
-	PPARAM                 // input arguments
-	PPARAMOUT              // output results
-	PFUNC                  // global functions
-
-	// Careful: Class is stored in three bits in Node.flags.
-	_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
-)
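-
-// The blank assignment above is a compile-time assertion: converting a
-// negative untyped constant to uint does not compile, so the build
-// breaks as soon as more classes are declared than fit in three bits.
-// For instance, with nine classes the assert would become:
-//
-//	_ = uint((1 << 3) - 9) // uint(-1): "constant -1 overflows uint"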
-
-// Slices in the runtime are represented by three components:
-//
-// type slice struct {
-// 	ptr unsafe.Pointer
-// 	len int
-// 	cap int
-// }
-//
-// Strings in the runtime are represented by two components:
-//
-// type string struct {
-// 	ptr unsafe.Pointer
-// 	len int
-// }
-//
-// These variables are the offsets of fields and sizes of these structs.
-var (
-	slicePtrOffset int64
-	sliceLenOffset int64
-	sliceCapOffset int64
-
-	sizeofSlice  int64
-	sizeofString int64
-)
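-
-// On a 64-bit target the fields sit one pointer apart, so these offsets
-// are initialized roughly as below (an illustrative sketch; the real
-// initialization, which also rounds for alignment, happens during
-// backend setup):
-//
-//	slicePtrOffset = 0
-//	sliceLenOffset = int64(Widthptr)     // 8 on 64-bit
-//	sliceCapOffset = int64(2 * Widthptr) // 16
-//	sizeofSlice = int64(3 * Widthptr)    // 24
-//	sizeofString = int64(2 * Widthptr)   // 16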
-
-var pragcgobuf [][]string
-
-var outfile string
-var linkobj string
-
-// nerrors is the number of compiler errors reported
-// since the last call to saveerrors.
-var nerrors int
-
-// nsavederrors is the total number of compiler errors
-// reported before the last call to saveerrors.
-var nsavederrors int
-
-var nsyntaxerrors int
-
-var decldepth int32
-
-var nolocalimports bool
-
-// gc debug flags
-type DebugFlags struct {
-	P, B, C, E,
-	K, L, N, S,
-	W, e, h, j,
-	l, m, r, w int
-}
-
-var Debug DebugFlags
-
-var debugstr string
-
-var Debug_checknil int
-var Debug_typeassert int
-
-var localpkg *types.Pkg // package being compiled
-
-var inimport bool // set during import
-
-var itabpkg *types.Pkg // fake pkg for itab entries
-
-var itablinkpkg *types.Pkg // fake package for runtime itab entries
-
-var Runtimepkg *types.Pkg // fake package runtime
-
-var racepkg *types.Pkg // package runtime/race
-
-var msanpkg *types.Pkg // package runtime/msan
-
-var unsafepkg *types.Pkg // package unsafe
-
-var trackpkg *types.Pkg // fake package for field tracking
-
-var mappkg *types.Pkg // fake package for map zero value
-
-var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types
-
-var zerosize int64
-
-var myimportpath string
-
-var localimport string
-
-var asmhdr string
-
-var simtype [NTYPE]types.EType
-
-var (
-	isInt     [NTYPE]bool
-	isFloat   [NTYPE]bool
-	isComplex [NTYPE]bool
-	issimple  [NTYPE]bool
-)
-
-var (
-	okforeq    [NTYPE]bool
-	okforadd   [NTYPE]bool
-	okforand   [NTYPE]bool
-	okfornone  [NTYPE]bool
-	okforcmp   [NTYPE]bool
-	okforbool  [NTYPE]bool
-	okforcap   [NTYPE]bool
-	okforlen   [NTYPE]bool
-	okforarith [NTYPE]bool
-	okforconst [NTYPE]bool
-)
-
-var (
-	okfor [OEND][]bool
-	iscmp [OEND]bool
-)
-
-var minintval [NTYPE]*Mpint
-
-var maxintval [NTYPE]*Mpint
-
-var minfltval [NTYPE]*Mpflt
-
-var maxfltval [NTYPE]*Mpflt
-
-var xtop []*Node
-
-var exportlist []*Node
-
-var importlist []*Node // imported functions and methods with inlinable bodies
-
-var (
-	funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
-	funcsyms   []*types.Sym
-)
-
-var dclcontext Class // PEXTERN/PAUTO
-
-var Curfn *Node
-
-var Widthptr int
-
-var Widthreg int
-
-var nblank *Node
-
-var typecheckok bool
-
-var compiling_runtime bool
-
-// Compiling the standard library
-var compiling_std bool
-
-var use_writebarrier bool
-
-var pure_go bool
-
-var flag_installsuffix string
-
-var flag_race bool
-
-var flag_msan bool
-
-var flagDWARF bool
-
-// Whether we are adding any sort of code instrumentation, such as
-// when the race detector is enabled.
-var instrumenting bool
-
-// Whether we are tracking lexical scopes for DWARF.
-var trackScopes bool
-
-// Controls generation of DWARF inlined instance records. Zero
-// disables, 1 emits inlined routines but suppresses var info,
-// and 2 emits inlined routines with tracking of formals/locals.
-var genDwarfInline int
-
-var debuglive int
-
-var Ctxt *obj.Link
-
-var writearchive bool
-
-var nodfp *Node
-
-var disable_checknil int
-
-var autogeneratedPos src.XPos
-
-// interface to back end
-
-type Arch struct {
-	LinkArch *obj.LinkArch
-
-	REGSP     int
-	MAXWIDTH  int64
-	SoftFloat bool
-
-	PadFrame func(int64) int64
-
-	// ZeroRange zeroes a range of memory on the stack. It is only inserted
-	// at function entry, and it is ok to clobber registers.
-	ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
-
-	Ginsnop      func(*Progs) *obj.Prog
-	Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
-
-	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
-	SSAMarkMoves func(*SSAGenState, *ssa.Block)
-
-	// SSAGenValue emits Prog(s) for the Value.
-	SSAGenValue func(*SSAGenState, *ssa.Value)
-
-	// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
-	// for all values in the block before SSAGenBlock.
-	SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
-}
-
-var thearch Arch
-
-var (
-	staticuint64s,
-	zerobase *Node
-
-	assertE2I,
-	assertE2I2,
-	assertI2I,
-	assertI2I2,
-	deferproc,
-	deferprocStack,
-	Deferreturn,
-	Duffcopy,
-	Duffzero,
-	gcWriteBarrier,
-	goschedguarded,
-	growslice,
-	msanread,
-	msanwrite,
-	msanmove,
-	newobject,
-	newproc,
-	panicdivide,
-	panicshift,
-	panicdottypeE,
-	panicdottypeI,
-	panicnildottype,
-	panicoverflow,
-	raceread,
-	racereadrange,
-	racewrite,
-	racewriterange,
-	x86HasPOPCNT,
-	x86HasSSE41,
-	x86HasFMA,
-	armHasVFPv4,
-	arm64HasATOMICS,
-	typedmemclr,
-	typedmemmove,
-	Udiv,
-	writeBarrier,
-	zerobaseSym *obj.LSym
-
-	BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
-	ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
-
-	// Wasm
-	WasmMove,
-	WasmZero,
-	WasmDiv,
-	WasmTruncS,
-	WasmTruncU,
-	SigPanic *obj.LSym
-)
-
-// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
-var GCWriteBarrierReg map[int16]*obj.LSym
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
deleted file mode 100644
index d599a38..0000000
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ /dev/null
@@ -1,333 +0,0 @@
-// Derived from Inferno utils/6c/txt.c
-// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package gc
-
-import (
-	"cmd/compile/internal/ssa"
-	"cmd/internal/obj"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-)
-
-var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
-
-// Progs accumulates Progs for a function and converts them into machine code.
-type Progs struct {
-	Text      *obj.Prog  // ATEXT Prog for this function
-	next      *obj.Prog  // next Prog
-	pc        int64      // virtual PC; count of Progs
-	pos       src.XPos   // position to use for new Progs
-	curfn     *Node      // fn these Progs are for
-	progcache []obj.Prog // local progcache
-	cacheidx  int        // first free element of progcache
-
-	nextLive LivenessIndex // liveness index for the next Prog
-	prevLive LivenessIndex // last emitted liveness index
-}
-
-// newProgs returns a new Progs for fn.
-// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *Node, worker int) *Progs {
-	pp := new(Progs)
-	if Ctxt.CanReuseProgs() {
-		sz := len(sharedProgArray) / nBackendWorkers
-		pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
-	}
-	pp.curfn = fn
-
-	// prime the pump
-	pp.next = pp.NewProg()
-	pp.clearp(pp.next)
-
-	pp.pos = fn.Pos
-	pp.settext(fn)
-	// PCDATA tables implicitly start with index -1.
-	pp.prevLive = LivenessIndex{-1, false}
-	pp.nextLive = pp.prevLive
-	return pp
-}
-
-func (pp *Progs) NewProg() *obj.Prog {
-	var p *obj.Prog
-	if pp.cacheidx < len(pp.progcache) {
-		p = &pp.progcache[pp.cacheidx]
-		pp.cacheidx++
-	} else {
-		p = new(obj.Prog)
-	}
-	p.Ctxt = Ctxt
-	return p
-}
-
-// Flush converts from pp to machine code.
-func (pp *Progs) Flush() {
-	plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
-	obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
-}
-
-// Free clears pp and any associated resources.
-func (pp *Progs) Free() {
-	if Ctxt.CanReuseProgs() {
-		// Clear progs to enable GC and avoid abuse.
-		s := pp.progcache[:pp.cacheidx]
-		for i := range s {
-			s[i] = obj.Prog{}
-		}
-	}
-	// Clear pp to avoid abuse.
-	*pp = Progs{}
-}
-
-// Prog adds a Prog with instruction As to pp.
-func (pp *Progs) Prog(as obj.As) *obj.Prog {
-	if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
-		// Emit stack map index change.
-		idx := pp.nextLive.stackMapIndex
-		pp.prevLive.stackMapIndex = idx
-		p := pp.Prog(obj.APCDATA)
-		Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
-		Addrconst(&p.To, int64(idx))
-	}
-	if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
-		// Emit unsafe-point marker.
-		pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
-		p := pp.Prog(obj.APCDATA)
-		Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
-		if pp.nextLive.isUnsafePoint {
-			Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
-		} else {
-			Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
-		}
-	}
-
-	p := pp.next
-	pp.next = pp.NewProg()
-	pp.clearp(pp.next)
-	p.Link = pp.next
-
-	if !pp.pos.IsKnown() && Debug.K != 0 {
-		Warn("prog: unknown position (line 0)")
-	}
-
-	p.As = as
-	p.Pos = pp.pos
-	if pp.pos.IsStmt() == src.PosIsStmt {
-		// Clear IsStmt for later Progs at this pos, provided that as can be marked as a stmt.
-		if ssa.LosesStmtMark(as) {
-			return p
-		}
-		pp.pos = pp.pos.WithNotStmt()
-	}
-	return p
-}
-
-func (pp *Progs) clearp(p *obj.Prog) {
-	obj.Nopout(p)
-	p.As = obj.AEND
-	p.Pc = pp.pc
-	pp.pc++
-}
-
-func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
-	q := pp.NewProg()
-	pp.clearp(q)
-	q.As = as
-	q.Pos = p.Pos
-	q.From.Type = ftype
-	q.From.Reg = freg
-	q.From.Offset = foffset
-	q.To.Type = ttype
-	q.To.Reg = treg
-	q.To.Offset = toffset
-	q.Link = p.Link
-	p.Link = q
-	return q
-}
-
-func (pp *Progs) settext(fn *Node) {
-	if pp.Text != nil {
-		Fatalf("Progs.settext called twice")
-	}
-	ptxt := pp.Prog(obj.ATEXT)
-	pp.Text = ptxt
-
-	fn.Func.lsym.Func().Text = ptxt
-	ptxt.From.Type = obj.TYPE_MEM
-	ptxt.From.Name = obj.NAME_EXTERN
-	ptxt.From.Sym = fn.Func.lsym
-}
-
-// initLSym defines f's obj.LSym and initializes it based on the
-// properties of f. This includes setting the symbol flags and ABI and
-// creating and initializing related DWARF symbols.
-//
-// initLSym must be called exactly once per function and must be
-// called for both functions with bodies and functions without bodies.
-func (f *Func) initLSym(hasBody bool) {
-	if f.lsym != nil {
-		Fatalf("Func.initLSym called twice")
-	}
-
-	if nam := f.Nname; !nam.isBlank() {
-		f.lsym = nam.Sym.Linksym()
-		if f.Pragma&Systemstack != 0 {
-			f.lsym.Set(obj.AttrCFunc, true)
-		}
-
-		var aliasABI obj.ABI
-		needABIAlias := false
-		defABI, hasDefABI := symabiDefs[f.lsym.Name]
-		if hasDefABI && defABI == obj.ABI0 {
-			// Symbol is defined as ABI0. Create an
-			// Internal -> ABI0 wrapper.
-			f.lsym.SetABI(obj.ABI0)
-			needABIAlias, aliasABI = true, obj.ABIInternal
-		} else {
-			// No ABI override. Check that the symbol is
-			// using the expected ABI.
-			want := obj.ABIInternal
-			if f.lsym.ABI() != want {
-				Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
-			}
-		}
-
-		isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
-		if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
-			// Either 1) this symbol is definitely
-			// referenced as ABI0 from this package; or 2)
-			// this symbol is defined in this package but
-			// given a linkname, indicating that it may be
-			// referenced from another package. Create an
-			// ABI0 -> Internal wrapper so it can be
-			// called as ABI0. In case 2, it's important
-			// that we know it's defined in this package
-			// since other packages may "pull" symbols
-			// using linkname and we don't want to create
-			// duplicate ABI wrappers.
-			if f.lsym.ABI() != obj.ABI0 {
-				needABIAlias, aliasABI = true, obj.ABI0
-			}
-		}
-
-		if needABIAlias {
-			// These LSyms have the same name as the
-			// native function, so we create them directly
-			// rather than looking them up. The uniqueness
-			// of f.lsym ensures uniqueness of asym.
-			asym := &obj.LSym{
-				Name: f.lsym.Name,
-				Type: objabi.SABIALIAS,
-				R:    []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
-			}
-			asym.SetABI(aliasABI)
-			asym.Set(obj.AttrDuplicateOK, true)
-			Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
-		}
-	}
-
-	if !hasBody {
-		// For body-less functions, we only create the LSym.
-		return
-	}
-
-	var flag int
-	if f.Dupok() {
-		flag |= obj.DUPOK
-	}
-	if f.Wrapper() {
-		flag |= obj.WRAPPER
-	}
-	if f.Needctxt() {
-		flag |= obj.NEEDCTXT
-	}
-	if f.Pragma&Nosplit != 0 {
-		flag |= obj.NOSPLIT
-	}
-	if f.ReflectMethod() {
-		flag |= obj.REFLECTMETHOD
-	}
-
-	// Clumsy but important.
-	// See test/recover.go for test cases and src/reflect/value.go
-	// for the actual functions being considered.
-	if myimportpath == "reflect" {
-		switch f.Nname.Sym.Name {
-		case "callReflect", "callMethod":
-			flag |= obj.WRAPPER
-		}
-	}
-
-	Ctxt.InitTextSym(f.lsym, flag)
-}
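-
-// Concretely: a function implemented in assembly (recorded as ABI0 in
-// the symabis file) gets an ABIInternal alias so that Go callers can
-// link against it, while a Go function referenced from assembly or
-// exposed via //go:linkname gets an ABI0 alias for the reverse
-// direction.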
-
-func ggloblnod(nam *Node) {
-	s := nam.Sym.Linksym()
-	s.Gotype = ngotype(nam).Linksym()
-	flags := 0
-	if nam.Name.Readonly() {
-		flags = obj.RODATA
-	}
-	if nam.Type != nil && !nam.Type.HasPointers() {
-		flags |= obj.NOPTR
-	}
-	Ctxt.Globl(s, nam.Type.Width, flags)
-	if nam.Name.LibfuzzerExtraCounter() {
-		s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
-	}
-	if nam.Sym.Linkname != "" {
-		// Make sure the linkname'd symbol is non-package. When a symbol is
-		// both imported and linkname'd, s.Pkg may not be set to "_" in
-		// types.Sym.Linksym because the LSym already exists. Set it here.
-		s.Pkg = "_"
-	}
-}
-
-func ggloblsym(s *obj.LSym, width int32, flags int16) {
-	if flags&obj.LOCAL != 0 {
-		s.Set(obj.AttrLocal, true)
-		flags &^= obj.LOCAL
-	}
-	Ctxt.Globl(s, int64(width), int(flags))
-}
-
-func Addrconst(a *obj.Addr, v int64) {
-	a.Sym = nil
-	a.Type = obj.TYPE_CONST
-	a.Offset = v
-}
-
-func Patch(p *obj.Prog, to *obj.Prog) {
-	if p.To.Type != obj.TYPE_BRANCH {
-		Fatalf("patch: not a branch")
-	}
-	p.To.SetTarget(to)
-	p.To.Offset = to.Pc
-}
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
deleted file mode 100644
index 1f53d8c..0000000
--- a/src/cmd/compile/internal/gc/iexport.go
+++ /dev/null
@@ -1,1515 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Indexed package export.
-//
-// The indexed export data format is an evolution of the previous
-// binary export data format. Its chief contribution is introducing an
-// index table, which allows efficient random access of individual
-// declarations and inline function bodies. In turn, this allows
-// avoiding unnecessary work for compilation units that import large
-// packages.
-//
-//
-// The top-level data format is structured as:
-//
-//     Header struct {
-//         Tag        byte   // 'i'
-//         Version    uvarint
-//         StringSize uvarint
-//         DataSize   uvarint
-//     }
-//
-//     Strings [StringSize]byte
-//     Data    [DataSize]byte
-//
-//     MainIndex []struct{
-//         PkgPath   stringOff
-//         PkgName   stringOff
-//         PkgHeight uvarint
-//
-//         Decls []struct{
-//             Name   stringOff
-//             Offset declOff
-//         }
-//     }
-//
-//     Fingerprint [8]byte
-//
-// uvarint means a uint64 written out using uvarint encoding.
-//
-// []T means a uvarint followed by that many T objects. In other
-// words:
-//
-//     Len   uvarint
-//     Elems [Len]T
-//
-// stringOff means a uvarint that indicates an offset within the
-// Strings section. At that offset is another uvarint, followed by
-// that many bytes, which form the string value.
-//
-// declOff means a uvarint that indicates an offset within the Data
-// section where the associated declaration can be found.
-//
-//
-// There are five kinds of declarations, distinguished by their first
-// byte:
-//
-//     type Var struct {
-//         Tag  byte // 'V'
-//         Pos  Pos
-//         Type typeOff
-//     }
-//
-//     type Func struct {
-//         Tag       byte // 'F'
-//         Pos       Pos
-//         Signature Signature
-//     }
-//
-//     type Const struct {
-//         Tag   byte // 'C'
-//         Pos   Pos
-//         Value Value
-//     }
-//
-//     type Type struct {
-//         Tag        byte // 'T'
-//         Pos        Pos
-//         Underlying typeOff
-//
-//         Methods []struct{  // omitted if Underlying is an interface type
-//             Pos       Pos
-//             Name      stringOff
-//             Recv      Param
-//             Signature Signature
-//         }
-//     }
-//
-//     type Alias struct {
-//         Tag  byte // 'A'
-//         Pos  Pos
-//         Type typeOff
-//     }
-//
-//
-// typeOff means a uvarint that either indicates a predeclared type,
-// or an offset into the Data section. If the uvarint is less than
-// predeclReserved, then it indicates the index into the predeclared
-// types list (see predeclared in bexport.go for order). Otherwise,
-// subtracting predeclReserved yields the offset of a type descriptor.
-//
-// Value means a type and type-specific value. See
-// (*exportWriter).value for details.
-//
-//
-// There are nine kinds of type descriptors, distinguished by an itag:
-//
-//     type DefinedType struct {
-//         Tag     itag // definedType
-//         Name    stringOff
-//         PkgPath stringOff
-//     }
-//
-//     type PointerType struct {
-//         Tag  itag // pointerType
-//         Elem typeOff
-//     }
-//
-//     type SliceType struct {
-//         Tag  itag // sliceType
-//         Elem typeOff
-//     }
-//
-//     type ArrayType struct {
-//         Tag  itag // arrayType
-//         Len  uint64
-//         Elem typeOff
-//     }
-//
-//     type ChanType struct {
-//         Tag  itag   // chanType
-//         Dir  uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
-//         Elem typeOff
-//     }
-//
-//     type MapType struct {
-//         Tag  itag // mapType
-//         Key  typeOff
-//         Elem typeOff
-//     }
-//
-//     type FuncType struct {
-//         Tag       itag // signatureType
-//         PkgPath   stringOff
-//         Signature Signature
-//     }
-//
-//     type StructType struct {
-//         Tag     itag // structType
-//         PkgPath stringOff
-//         Fields []struct {
-//             Pos      Pos
-//             Name     stringOff
-//             Type     typeOff
-//             Embedded bool
-//             Note     stringOff
-//         }
-//     }
-//
-//     type InterfaceType struct {
-//         Tag     itag // interfaceType
-//         PkgPath stringOff
-//         Embeddeds []struct {
-//             Pos  Pos
-//             Type typeOff
-//         }
-//         Methods []struct {
-//             Pos       Pos
-//             Name      stringOff
-//             Signature Signature
-//         }
-//     }
-//
-//
-//     type Signature struct {
-//         Params   []Param
-//         Results  []Param
-//         Variadic bool  // omitted if Results is empty
-//     }
-//
-//     type Param struct {
-//         Pos  Pos
-//         Name stringOff
-//         Type typeOff
-//     }
-//
-//
-// Pos encodes a file:line:column triple, incorporating a simple delta
-// encoding scheme within a data object. See exportWriter.pos for
-// details.
-//
-//
-// Compiler-specific details.
-//
-// cmd/compile writes out a second index for inline bodies and also
-// appends additional compiler-specific details after declarations.
-// Third-party tools are not expected to depend on these details and
-// they're expected to change much more rapidly, so they're omitted
-// here. See exportWriter's varExt/funcExt/etc methods for details.
-
-package gc
-
-import (
-	"bufio"
-	"bytes"
-	"cmd/compile/internal/types"
-	"cmd/internal/goobj"
-	"cmd/internal/src"
-	"crypto/md5"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"math/big"
-	"sort"
-	"strings"
-)
-
-// Current indexed export format version. Increase with each format change.
-// 1: added column details to Pos
-// 0: Go1.11 encoding
-const iexportVersion = 1
-
-// predeclReserved is the number of type offsets reserved for types
-// implicitly declared in the universe block.
-const predeclReserved = 32
-
-// An itag distinguishes the kind of type that was written into the
-// indexed export format.
-type itag uint64
-
-const (
-	// Types
-	definedType itag = iota
-	pointerType
-	sliceType
-	arrayType
-	chanType
-	mapType
-	signatureType
-	structType
-	interfaceType
-)
-
-func iexport(out *bufio.Writer) {
-	// Mark inline bodies that are reachable through exported types.
-	// (Phase 0 of bexport.go.)
-	{
-		// TODO(mdempsky): Separate from bexport logic.
-		p := &exporter{marked: make(map[*types.Type]bool)}
-		for _, n := range exportlist {
-			sym := n.Sym
-			p.markType(asNode(sym.Def).Type)
-		}
-	}
-
-	p := iexporter{
-		allPkgs:     map[*types.Pkg]bool{},
-		stringIndex: map[string]uint64{},
-		declIndex:   map[*Node]uint64{},
-		inlineIndex: map[*Node]uint64{},
-		typIndex:    map[*types.Type]uint64{},
-	}
-
-	for i, pt := range predeclared() {
-		p.typIndex[pt] = uint64(i)
-	}
-	if len(p.typIndex) > predeclReserved {
-		Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
-	}
-
-	// Initialize work queue with exported declarations.
-	for _, n := range exportlist {
-		p.pushDecl(n)
-	}
-
-	// Loop until no more work. We use a queue because while
-	// writing out inline bodies, we may discover additional
-	// declarations that are needed.
-	for !p.declTodo.empty() {
-		p.doDecl(p.declTodo.popLeft())
-	}
-
-	// Append indices to data0 section.
-	dataLen := uint64(p.data0.Len())
-	w := p.newWriter()
-	w.writeIndex(p.declIndex, true)
-	w.writeIndex(p.inlineIndex, false)
-	w.flush()
-
-	// Assemble header.
-	var hdr intWriter
-	hdr.WriteByte('i')
-	hdr.uint64(iexportVersion)
-	hdr.uint64(uint64(p.strings.Len()))
-	hdr.uint64(dataLen)
-
-	// Flush output.
-	h := md5.New()
-	wr := io.MultiWriter(out, h)
-	io.Copy(wr, &hdr)
-	io.Copy(wr, &p.strings)
-	io.Copy(wr, &p.data0)
-
-	// Add fingerprint (used by linker object file).
-	// Attach it at the end, so tools that only read the export data (e.g. gcimporter) can ignore it.
-	copy(Ctxt.Fingerprint[:], h.Sum(nil)[:])
-	out.Write(Ctxt.Fingerprint[:])
-}
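-
-// A consumer can decode the layout assembled above by reversing these
-// steps. An illustrative sketch, using only the standard library and
-// assuming r is a *bufio.Reader over the raw export data:
-//
-//	tag, _ := r.ReadByte() // must be 'i'
-//	version, _ := binary.ReadUvarint(r)
-//	stringSize, _ := binary.ReadUvarint(r)
-//	dataSize, _ := binary.ReadUvarint(r)
-//	strs := make([]byte, stringSize)
-//	io.ReadFull(r, strs)
-//	data := make([]byte, dataSize)
-//	io.ReadFull(r, data)
-//	// MainIndex (and the compiler-only inline index) follows here.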
-
-// writeIndex writes out an object index. mainIndex indicates whether
-// we're writing out the main index, which is also read by
-// non-compiler tools and includes a complete package description
-// (i.e., name and height).
-func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) {
-	// Build a map from packages to objects from that package.
-	pkgObjs := map[*types.Pkg][]*Node{}
-
-	// For the main index, make sure to include every package that
-	// we reference, even if we're not exporting (or reexporting)
-	// any symbols from it.
-	if mainIndex {
-		pkgObjs[localpkg] = nil
-		for pkg := range w.p.allPkgs {
-			pkgObjs[pkg] = nil
-		}
-	}
-
-	for n := range index {
-		pkgObjs[n.Sym.Pkg] = append(pkgObjs[n.Sym.Pkg], n)
-	}
-
-	var pkgs []*types.Pkg
-	for pkg, objs := range pkgObjs {
-		pkgs = append(pkgs, pkg)
-
-		sort.Slice(objs, func(i, j int) bool {
-			return objs[i].Sym.Name < objs[j].Sym.Name
-		})
-	}
-
-	sort.Slice(pkgs, func(i, j int) bool {
-		return pkgs[i].Path < pkgs[j].Path
-	})
-
-	w.uint64(uint64(len(pkgs)))
-	for _, pkg := range pkgs {
-		w.string(pkg.Path)
-		if mainIndex {
-			w.string(pkg.Name)
-			w.uint64(uint64(pkg.Height))
-		}
-
-		objs := pkgObjs[pkg]
-		w.uint64(uint64(len(objs)))
-		for _, n := range objs {
-			w.string(n.Sym.Name)
-			w.uint64(index[n])
-		}
-	}
-}
-
-type iexporter struct {
-	// allPkgs tracks all packages that have been referenced by
-	// the export data, so we can be sure to include them in the
-	// main index.
-	allPkgs map[*types.Pkg]bool
-
-	declTodo nodeQueue
-
-	strings     intWriter
-	stringIndex map[string]uint64
-
-	data0       intWriter
-	declIndex   map[*Node]uint64
-	inlineIndex map[*Node]uint64
-	typIndex    map[*types.Type]uint64
-}
-
-// stringOff returns the offset of s within the string section.
-// If not already present, it's added to the end.
-func (p *iexporter) stringOff(s string) uint64 {
-	off, ok := p.stringIndex[s]
-	if !ok {
-		off = uint64(p.strings.Len())
-		p.stringIndex[s] = off
-
-		p.strings.uint64(uint64(len(s)))
-		p.strings.WriteString(s)
-	}
-	return off
-}
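-
-// For example, interning "abc" into an empty string section writes the
-// bytes {3, 'a', 'b', 'c'} and returns offset 0; a second call with
-// "abc" returns 0 again without writing anything.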
-
-// pushDecl adds n to the declaration work queue, if not already present.
-func (p *iexporter) pushDecl(n *Node) {
-	if n.Sym == nil || asNode(n.Sym.Def) != n && n.Op != OTYPE {
-		Fatalf("weird Sym: %v, %v", n, n.Sym)
-	}
-
-	// Don't export predeclared declarations.
-	if n.Sym.Pkg == builtinpkg || n.Sym.Pkg == unsafepkg {
-		return
-	}
-
-	if _, ok := p.declIndex[n]; ok {
-		return
-	}
-
-	p.declIndex[n] = ^uint64(0) // mark n present in work queue
-	p.declTodo.pushRight(n)
-}
-
-// exportWriter handles writing out individual data section chunks.
-type exportWriter struct {
-	p *iexporter
-
-	data       intWriter
-	currPkg    *types.Pkg
-	prevFile   string
-	prevLine   int64
-	prevColumn int64
-}
-
-func (p *iexporter) doDecl(n *Node) {
-	w := p.newWriter()
-	w.setPkg(n.Sym.Pkg, false)
-
-	switch n.Op {
-	case ONAME:
-		switch n.Class() {
-		case PEXTERN:
-			// Variable.
-			w.tag('V')
-			w.pos(n.Pos)
-			w.typ(n.Type)
-			w.varExt(n)
-
-		case PFUNC:
-			if n.IsMethod() {
-				Fatalf("unexpected method: %v", n)
-			}
-
-			// Function.
-			w.tag('F')
-			w.pos(n.Pos)
-			w.signature(n.Type)
-			w.funcExt(n)
-
-		default:
-			Fatalf("unexpected class: %v, %v", n, n.Class())
-		}
-
-	case OLITERAL:
-		// Constant.
-		n = typecheck(n, ctxExpr)
-		w.tag('C')
-		w.pos(n.Pos)
-		w.value(n.Type, n.Val())
-
-	case OTYPE:
-		if IsAlias(n.Sym) {
-			// Alias.
-			w.tag('A')
-			w.pos(n.Pos)
-			w.typ(n.Type)
-			break
-		}
-
-		// Defined type.
-		w.tag('T')
-		w.pos(n.Pos)
-
-		underlying := n.Type.Orig
-		if underlying == types.Errortype.Orig {
-			// For "type T error", use error as the
-			// underlying type instead of error's own
-			// underlying anonymous interface. This
-			// ensures consistency with how importers may
-			// declare error (e.g., go/types uses nil Pkg
-			// for predeclared objects).
-			underlying = types.Errortype
-		}
-		w.typ(underlying)
-
-		t := n.Type
-		if t.IsInterface() {
-			w.typeExt(t)
-			break
-		}
-
-		ms := t.Methods()
-		w.uint64(uint64(ms.Len()))
-		for _, m := range ms.Slice() {
-			w.pos(m.Pos)
-			w.selector(m.Sym)
-			w.param(m.Type.Recv())
-			w.signature(m.Type)
-		}
-
-		w.typeExt(t)
-		for _, m := range ms.Slice() {
-			w.methExt(m)
-		}
-
-	default:
-		Fatalf("unexpected node: %v", n)
-	}
-
-	p.declIndex[n] = w.flush()
-}
-
-func (w *exportWriter) tag(tag byte) {
-	w.data.WriteByte(tag)
-}
-
-func (p *iexporter) doInline(f *Node) {
-	w := p.newWriter()
-	w.setPkg(fnpkg(f), false)
-
-	w.stmtList(asNodes(f.Func.Inl.Body))
-
-	p.inlineIndex[f] = w.flush()
-}
-
-func (w *exportWriter) pos(pos src.XPos) {
-	p := Ctxt.PosTable.Pos(pos)
-	file := p.Base().AbsFilename()
-	line := int64(p.RelLine())
-	column := int64(p.RelCol())
-
-	// Encode position relative to the last position: column
-	// delta, then line delta, then file name. We reserve the
-	// bottom bit of the column and line deltas to encode whether
-	// the remaining fields are present.
-	//
-	// Note: Because data objects may be read out of order (or not
-	// at all), we can only apply delta encoding within a single
-	// object. This is handled implicitly by tracking prevFile,
-	// prevLine, and prevColumn as fields of exportWriter.
-
-	deltaColumn := (column - w.prevColumn) << 1
-	deltaLine := (line - w.prevLine) << 1
-
-	if file != w.prevFile {
-		deltaLine |= 1
-	}
-	if deltaLine != 0 {
-		deltaColumn |= 1
-	}
-
-	w.int64(deltaColumn)
-	if deltaColumn&1 != 0 {
-		w.int64(deltaLine)
-		if deltaLine&1 != 0 {
-			w.string(file)
-		}
-	}
-
-	w.prevFile = file
-	w.prevLine = line
-	w.prevColumn = column
-}
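-
-// For example, staying in the same file but moving two lines down at
-// the same column encodes as deltaColumn = (0<<1)|1 = 1 followed by
-// deltaLine = 2<<1 = 4; since deltaLine's bottom bit is clear, no file
-// name follows. Moving only within a line emits a single even
-// deltaColumn and nothing else.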
-
-func (w *exportWriter) pkg(pkg *types.Pkg) {
-	// Ensure any referenced packages are declared in the main index.
-	w.p.allPkgs[pkg] = true
-
-	w.string(pkg.Path)
-}
-
-func (w *exportWriter) qualifiedIdent(n *Node) {
-	// Ensure any referenced declarations are written out too.
-	w.p.pushDecl(n)
-
-	s := n.Sym
-	w.string(s.Name)
-	w.pkg(s.Pkg)
-}
-
-func (w *exportWriter) selector(s *types.Sym) {
-	if w.currPkg == nil {
-		Fatalf("missing currPkg")
-	}
-
-	// Method selectors are rewritten into method symbols (of the
-	// form T.M) during typechecking, but we want to write out
-	// just the bare method name.
-	name := s.Name
-	if i := strings.LastIndex(name, "."); i >= 0 {
-		name = name[i+1:]
-	} else {
-		pkg := w.currPkg
-		if types.IsExported(name) {
-			pkg = localpkg
-		}
-		if s.Pkg != pkg {
-			Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
-		}
-	}
-
-	w.string(name)
-}
-
-func (w *exportWriter) typ(t *types.Type) {
-	w.data.uint64(w.p.typOff(t))
-}
-
-func (p *iexporter) newWriter() *exportWriter {
-	return &exportWriter{p: p}
-}
-
-func (w *exportWriter) flush() uint64 {
-	off := uint64(w.p.data0.Len())
-	io.Copy(&w.p.data0, &w.data)
-	return off
-}
-
-func (p *iexporter) typOff(t *types.Type) uint64 {
-	off, ok := p.typIndex[t]
-	if !ok {
-		w := p.newWriter()
-		w.doTyp(t)
-		off = predeclReserved + w.flush()
-		p.typIndex[t] = off
-	}
-	return off
-}
-
-func (w *exportWriter) startType(k itag) {
-	w.data.uint64(uint64(k))
-}
-
-func (w *exportWriter) doTyp(t *types.Type) {
-	if t.Sym != nil {
-		if t.Sym.Pkg == builtinpkg || t.Sym.Pkg == unsafepkg {
-			Fatalf("builtin type missing from typIndex: %v", t)
-		}
-
-		w.startType(definedType)
-		w.qualifiedIdent(typenod(t))
-		return
-	}
-
-	switch t.Etype {
-	case TPTR:
-		w.startType(pointerType)
-		w.typ(t.Elem())
-
-	case TSLICE:
-		w.startType(sliceType)
-		w.typ(t.Elem())
-
-	case TARRAY:
-		w.startType(arrayType)
-		w.uint64(uint64(t.NumElem()))
-		w.typ(t.Elem())
-
-	case TCHAN:
-		w.startType(chanType)
-		w.uint64(uint64(t.ChanDir()))
-		w.typ(t.Elem())
-
-	case TMAP:
-		w.startType(mapType)
-		w.typ(t.Key())
-		w.typ(t.Elem())
-
-	case TFUNC:
-		w.startType(signatureType)
-		w.setPkg(t.Pkg(), true)
-		w.signature(t)
-
-	case TSTRUCT:
-		w.startType(structType)
-		w.setPkg(t.Pkg(), true)
-
-		w.uint64(uint64(t.NumFields()))
-		for _, f := range t.FieldSlice() {
-			w.pos(f.Pos)
-			w.selector(f.Sym)
-			w.typ(f.Type)
-			w.bool(f.Embedded != 0)
-			w.string(f.Note)
-		}
-
-	case TINTER:
-		var embeddeds, methods []*types.Field
-		for _, m := range t.Methods().Slice() {
-			if m.Sym != nil {
-				methods = append(methods, m)
-			} else {
-				embeddeds = append(embeddeds, m)
-			}
-		}
-
-		w.startType(interfaceType)
-		w.setPkg(t.Pkg(), true)
-
-		w.uint64(uint64(len(embeddeds)))
-		for _, f := range embeddeds {
-			w.pos(f.Pos)
-			w.typ(f.Type)
-		}
-
-		w.uint64(uint64(len(methods)))
-		for _, f := range methods {
-			w.pos(f.Pos)
-			w.selector(f.Sym)
-			w.signature(f.Type)
-		}
-
-	default:
-		Fatalf("unexpected type: %v", t)
-	}
-}
-
-func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) {
-	if pkg == nil {
-		// TODO(mdempsky): Proactively set Pkg for types and
-		// remove this fallback logic.
-		pkg = localpkg
-	}
-
-	if write {
-		w.pkg(pkg)
-	}
-
-	w.currPkg = pkg
-}
-
-func (w *exportWriter) signature(t *types.Type) {
-	w.paramList(t.Params().FieldSlice())
-	w.paramList(t.Results().FieldSlice())
-	if n := t.Params().NumFields(); n > 0 {
-		w.bool(t.Params().Field(n - 1).IsDDD())
-	}
-}
-
-func (w *exportWriter) paramList(fs []*types.Field) {
-	w.uint64(uint64(len(fs)))
-	for _, f := range fs {
-		w.param(f)
-	}
-}
-
-func (w *exportWriter) param(f *types.Field) {
-	w.pos(f.Pos)
-	w.localIdent(origSym(f.Sym), 0)
-	w.typ(f.Type)
-}
-
-func constTypeOf(typ *types.Type) Ctype {
-	switch typ {
-	case types.UntypedInt, types.UntypedRune:
-		return CTINT
-	case types.UntypedFloat:
-		return CTFLT
-	case types.UntypedComplex:
-		return CTCPLX
-	}
-
-	switch typ.Etype {
-	case TCHAN, TFUNC, TMAP, TNIL, TINTER, TPTR, TSLICE, TUNSAFEPTR:
-		return CTNIL
-	case TBOOL:
-		return CTBOOL
-	case TSTRING:
-		return CTSTR
-	case TINT, TINT8, TINT16, TINT32, TINT64,
-		TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
-		return CTINT
-	case TFLOAT32, TFLOAT64:
-		return CTFLT
-	case TCOMPLEX64, TCOMPLEX128:
-		return CTCPLX
-	}
-
-	Fatalf("unexpected constant type: %v", typ)
-	return 0
-}
-
-func (w *exportWriter) value(typ *types.Type, v Val) {
-	if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt {
-		Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt)
-	}
-	w.typ(typ)
-
-	// Each type has only one admissible constant representation,
-	// so we could type switch directly on v.U here. However,
-	// switching on the type increases symmetry with import logic
-	// and provides a useful consistency check.
-
-	switch constTypeOf(typ) {
-	case CTNIL:
-		// Only one value; nothing to encode.
-		_ = v.U.(*NilVal)
-	case CTBOOL:
-		w.bool(v.U.(bool))
-	case CTSTR:
-		w.string(v.U.(string))
-	case CTINT:
-		w.mpint(&v.U.(*Mpint).Val, typ)
-	case CTFLT:
-		w.mpfloat(&v.U.(*Mpflt).Val, typ)
-	case CTCPLX:
-		x := v.U.(*Mpcplx)
-		w.mpfloat(&x.Real.Val, typ)
-		w.mpfloat(&x.Imag.Val, typ)
-	}
-}
-
-func intSize(typ *types.Type) (signed bool, maxBytes uint) {
-	if typ.IsUntyped() {
-		return true, Mpprec / 8
-	}
-
-	switch typ.Etype {
-	case TFLOAT32, TCOMPLEX64:
-		return true, 3
-	case TFLOAT64, TCOMPLEX128:
-		return true, 7
-	}
-
-	signed = typ.IsSigned()
-	maxBytes = uint(typ.Size())
-
-	// The go/types API doesn't expose sizes to importers, so they
-	// don't know how big these types are.
-	switch typ.Etype {
-	case TINT, TUINT, TUINTPTR:
-		maxBytes = 8
-	}
-
-	return
-}
-
-// mpint exports a multi-precision integer.
-//
-// For unsigned types, small values are written out as a single
-// byte. Larger values are written out as a length-prefixed big-endian
-// byte string, where the length prefix is encoded as its complement.
-// For example, bytes 0, 1, and 2 directly represent the integer
-// values 0, 1, and 2, while bytes 255, 254, and 253 indicate that a
-// 1-, 2-, or 3-byte big-endian byte string follows.
-//
-// The encoding for signed types uses the same general approach as for
-// unsigned types, except that small values use zig-zag encoding and
-// the bottom bit of the length prefix byte for large values is
-// reserved as a sign bit.
-//
-// The exact boundary between small and large encodings varies
-// according to the maximum number of bytes needed to encode a value
-// of type typ. As a special case, 8-bit types are always encoded as a
-// single byte.
-//
-// TODO(mdempsky): Is this level of complexity really worthwhile?
-func (w *exportWriter) mpint(x *big.Int, typ *types.Type) {
-	signed, maxBytes := intSize(typ)
-
-	negative := x.Sign() < 0
-	if !signed && negative {
-		Fatalf("negative unsigned integer; type %v, value %v", typ, x)
-	}
-
-	b := x.Bytes()
-	if len(b) > 0 && b[0] == 0 {
-		Fatalf("leading zeros")
-	}
-	if uint(len(b)) > maxBytes {
-		Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
-	}
-
-	maxSmall := 256 - maxBytes
-	if signed {
-		maxSmall = 256 - 2*maxBytes
-	}
-	if maxBytes == 1 {
-		maxSmall = 256
-	}
-
-	// Check if x can use small value encoding.
-	if len(b) <= 1 {
-		var ux uint
-		if len(b) == 1 {
-			ux = uint(b[0])
-		}
-		if signed {
-			ux <<= 1
-			if negative {
-				ux--
-			}
-		}
-		if ux < maxSmall {
-			w.data.WriteByte(byte(ux))
-			return
-		}
-	}
-
-	n := 256 - uint(len(b))
-	if signed {
-		n = 256 - 2*uint(len(b))
-		if negative {
-			n |= 1
-		}
-	}
-	if n < maxSmall || n >= 256 {
-		Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
-	}
-
-	w.data.WriteByte(byte(n))
-	w.data.Write(b)
-}
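-
-// For example, with typ = uint16 (maxBytes = 2, so maxSmall = 254),
-// the value 200 is written as the single byte 200, while 1000
-// (big-endian bytes 0x03 0xe8) is written as the length prefix
-// 256-2 = 254 followed by 0x03 0xe8. For typ = int16, zig-zag
-// encoding maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... in the
-// small-value range.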
-
-// mpfloat exports a multi-precision floating point number.
-//
-// The number's value is decomposed into mantissa × 2**exponent, where
-// mantissa is an integer. The value is written out as mantissa (as a
-// multi-precision integer) and then the exponent, except exponent is
-// omitted if mantissa is zero.
-func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) {
-	if f.IsInf() {
-		Fatalf("infinite constant")
-	}
-
-	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
-	var mant big.Float
-	exp := int64(f.MantExp(&mant))
-
-	// Scale so that mant is an integer.
-	prec := mant.MinPrec()
-	mant.SetMantExp(&mant, int(prec))
-	exp -= int64(prec)
-
-	manti, acc := mant.Int(nil)
-	if acc != big.Exact {
-		Fatalf("mantissa scaling failed for %f (%s)", f, acc)
-	}
-	w.mpint(manti, typ)
-	if manti.Sign() != 0 {
-		w.int64(exp)
-	}
-}
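-
-// For example, 6.25 decomposes as mant = 0.78125, exp = 3; mant needs
-// MinPrec = 5 bits, so scaling gives mantissa 25 and exponent 3-5 = -2,
-// and indeed 6.25 = 25 * 2**-2. The writer emits mpint(25) followed by
-// int64(-2).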
-
-func (w *exportWriter) bool(b bool) bool {
-	var x uint64
-	if b {
-		x = 1
-	}
-	w.uint64(x)
-	return b
-}
-
-func (w *exportWriter) int64(x int64)   { w.data.int64(x) }
-func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
-func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
-
-// Compiler-specific extensions.
-
-func (w *exportWriter) varExt(n *Node) {
-	w.linkname(n.Sym)
-	w.symIdx(n.Sym)
-}
-
-func (w *exportWriter) funcExt(n *Node) {
-	w.linkname(n.Sym)
-	w.symIdx(n.Sym)
-
-	// Escape analysis.
-	for _, fs := range &types.RecvsParams {
-		for _, f := range fs(n.Type).FieldSlice() {
-			w.string(f.Note)
-		}
-	}
-
-	// Inline body.
-	if n.Func.Inl != nil {
-		w.uint64(1 + uint64(n.Func.Inl.Cost))
-		if n.Func.ExportInline() {
-			w.p.doInline(n)
-		}
-
-		// Endlineno for inlined function.
-		if n.Name.Defn != nil {
-			w.pos(n.Name.Defn.Func.Endlineno)
-		} else {
-			// When the exported node was defined externally,
-			// e.g. io exports atomic.(*Value).Load or bytes exports errors.New.
-			// Keep it, as we don't distinguish this case in iimport.go.
-			w.pos(n.Func.Endlineno)
-		}
-	} else {
-		w.uint64(0)
-	}
-}
-
-func (w *exportWriter) methExt(m *types.Field) {
-	w.bool(m.Nointerface())
-	w.funcExt(asNode(m.Type.Nname()))
-}
-
-func (w *exportWriter) linkname(s *types.Sym) {
-	w.string(s.Linkname)
-}
-
-func (w *exportWriter) symIdx(s *types.Sym) {
-	lsym := s.Linksym()
-	if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || s.Linkname != "" {
-		// Don't export index for non-package symbols, linkname'd symbols,
-		// and symbols without an index. They can only be referenced by
-		// name.
-		w.int64(-1)
-	} else {
-		// For a defined symbol, export its index.
-		// For re-exporting an imported symbol, pass its index through.
-		w.int64(int64(lsym.SymIdx))
-	}
-}
-
-func (w *exportWriter) typeExt(t *types.Type) {
-	// Export whether this type is marked notinheap.
-	w.bool(t.NotInHeap())
-	// For type T, export the index of type descriptor symbols of T and *T.
-	if i, ok := typeSymIdx[t]; ok {
-		w.int64(i[0])
-		w.int64(i[1])
-		return
-	}
-	w.symIdx(typesym(t))
-	w.symIdx(typesym(t.PtrTo()))
-}
-
-// Inline bodies.
-
-func (w *exportWriter) stmtList(list Nodes) {
-	for _, n := range list.Slice() {
-		w.node(n)
-	}
-	w.op(OEND)
-}
-
-func (w *exportWriter) node(n *Node) {
-	if opprec[n.Op] < 0 {
-		w.stmt(n)
-	} else {
-		w.expr(n)
-	}
-}
-
-// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
-// n.Ninit but whose Op cannot carry a natural init section (unlike "if", "for", etc.);
-// the inits are emitted as separate statements preceding n.
-func (w *exportWriter) stmt(n *Node) {
-	if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
-		// can't use stmtList here since we don't want the final OEND
-		for _, n := range n.Ninit.Slice() {
-			w.stmt(n)
-		}
-	}
-
-	switch op := n.Op; op {
-	case ODCL:
-		w.op(ODCL)
-		w.pos(n.Left.Pos)
-		w.localName(n.Left)
-		w.typ(n.Left.Type)
-
-	// case ODCLFIELD:
-	//	unimplemented - handled by default case
-
-	case OAS:
-		// Don't export "v = <N>" initializing statements; we hope they're
-		// always preceded by the DCL, which will be re-parsed and typechecked
-		// to reproduce the "v = <N>" again.
-		if n.Right != nil {
-			w.op(OAS)
-			w.pos(n.Pos)
-			w.expr(n.Left)
-			w.expr(n.Right)
-		}
-
-	case OASOP:
-		w.op(OASOP)
-		w.pos(n.Pos)
-		w.op(n.SubOp())
-		w.expr(n.Left)
-		if w.bool(!n.Implicit()) {
-			w.expr(n.Right)
-		}
-
-	case OAS2:
-		w.op(OAS2)
-		w.pos(n.Pos)
-		w.exprList(n.List)
-		w.exprList(n.Rlist)
-
-	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-		w.op(OAS2)
-		w.pos(n.Pos)
-		w.exprList(n.List)
-		w.exprList(asNodes([]*Node{n.Right}))
-
-	case ORETURN:
-		w.op(ORETURN)
-		w.pos(n.Pos)
-		w.exprList(n.List)
-
-	// case ORETJMP:
-	// 	unreachable - generated by compiler for trampoline routines
-
-	case OGO, ODEFER:
-		w.op(op)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-
-	case OIF:
-		w.op(OIF)
-		w.pos(n.Pos)
-		w.stmtList(n.Ninit)
-		w.expr(n.Left)
-		w.stmtList(n.Nbody)
-		w.stmtList(n.Rlist)
-
-	case OFOR:
-		w.op(OFOR)
-		w.pos(n.Pos)
-		w.stmtList(n.Ninit)
-		w.exprsOrNil(n.Left, n.Right)
-		w.stmtList(n.Nbody)
-
-	case ORANGE:
-		w.op(ORANGE)
-		w.pos(n.Pos)
-		w.stmtList(n.List)
-		w.expr(n.Right)
-		w.stmtList(n.Nbody)
-
-	case OSELECT, OSWITCH:
-		w.op(op)
-		w.pos(n.Pos)
-		w.stmtList(n.Ninit)
-		w.exprsOrNil(n.Left, nil)
-		w.caseList(n)
-
-	// case OCASE:
-	//	handled by caseList
-
-	case OFALL:
-		w.op(OFALL)
-		w.pos(n.Pos)
-
-	case OBREAK, OCONTINUE:
-		w.op(op)
-		w.pos(n.Pos)
-		w.exprsOrNil(n.Left, nil)
-
-	case OEMPTY:
-		// nothing to emit
-
-	case OGOTO, OLABEL:
-		w.op(op)
-		w.pos(n.Pos)
-		w.string(n.Sym.Name)
-
-	default:
-		Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
-	}
-}
-
-func (w *exportWriter) caseList(sw *Node) {
-	namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
-
-	cases := sw.List.Slice()
-	w.uint64(uint64(len(cases)))
-	for _, cas := range cases {
-		if cas.Op != OCASE {
-			Fatalf("expected OCASE, got %v", cas)
-		}
-		w.pos(cas.Pos)
-		w.stmtList(cas.List)
-		if namedTypeSwitch {
-			w.localName(cas.Rlist.First())
-		}
-		w.stmtList(cas.Nbody)
-	}
-}
-
-func (w *exportWriter) exprList(list Nodes) {
-	for _, n := range list.Slice() {
-		w.expr(n)
-	}
-	w.op(OEND)
-}
-
-func (w *exportWriter) expr(n *Node) {
-	// from nodefmt (fmt.go)
-	//
-	// nodefmt reverts nodes back to their original - we don't need to do
-	// it because we are not bound to produce valid Go syntax when exporting
-	//
-	// if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
-	// 	n = n.Orig
-	// }
-
-	// from exprfmt (fmt.go)
-	for n.Op == OPAREN || n.Implicit() && (n.Op == ODEREF || n.Op == OADDR || n.Op == ODOT || n.Op == ODOTPTR) {
-		n = n.Left
-	}
-
-	switch op := n.Op; op {
-	// expressions
-	// (somewhat closely following the structure of exprfmt in fmt.go)
-	case OLITERAL:
-		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
-			w.expr(n.Orig)
-			break
-		}
-		w.op(OLITERAL)
-		w.pos(n.Pos)
-		w.value(n.Type, n.Val())
-
-	case ONAME:
-		// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
-		// but for export, this should be rendered as (*pkg.T).meth.
-		// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
-		if n.isMethodExpression() {
-			w.op(OXDOT)
-			w.pos(n.Pos)
-			w.expr(n.Left) // n.Left.Op == OTYPE
-			w.selector(n.Right.Sym)
-			break
-		}
-
-		// Package scope name.
-		if (n.Class() == PEXTERN || n.Class() == PFUNC) && !n.isBlank() {
-			w.op(ONONAME)
-			w.qualifiedIdent(n)
-			break
-		}
-
-		// Function scope name.
-		w.op(ONAME)
-		w.localName(n)
-
-	// case OPACK, ONONAME:
-	// 	should have been resolved by typechecking - handled by default case
-
-	case OTYPE:
-		w.op(OTYPE)
-		w.typ(n.Type)
-
-	case OTYPESW:
-		w.op(OTYPESW)
-		w.pos(n.Pos)
-		var s *types.Sym
-		if n.Left != nil {
-			if n.Left.Op != ONONAME {
-				Fatalf("expected ONONAME, got %v", n.Left)
-			}
-			s = n.Left.Sym
-		}
-		w.localIdent(s, 0) // declared pseudo-variable, if any
-		w.exprsOrNil(n.Right, nil)
-
-	// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
-	// 	should have been resolved by typechecking - handled by default case
-
-	// case OCLOSURE:
-	//	unimplemented - handled by default case
-
-	// case OCOMPLIT:
-	// 	should have been resolved by typechecking - handled by default case
-
-	case OPTRLIT:
-		w.op(OADDR)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-
-	case OSTRUCTLIT:
-		w.op(OSTRUCTLIT)
-		w.pos(n.Pos)
-		w.typ(n.Type)
-		w.elemList(n.List) // special handling of field names
-
-	case OARRAYLIT, OSLICELIT, OMAPLIT:
-		w.op(OCOMPLIT)
-		w.pos(n.Pos)
-		w.typ(n.Type)
-		w.exprList(n.List)
-
-	case OKEY:
-		w.op(OKEY)
-		w.pos(n.Pos)
-		w.exprsOrNil(n.Left, n.Right)
-
-	// case OSTRUCTKEY:
-	//	unreachable - handled in case OSTRUCTLIT by elemList
-
-	case OCALLPART:
-		// An OCALLPART is an OXDOT before type checking.
-		w.op(OXDOT)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		// Right node should be ONAME
-		w.selector(n.Right.Sym)
-
-	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
-		w.op(OXDOT)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		w.selector(n.Sym)
-
-	case ODOTTYPE, ODOTTYPE2:
-		w.op(ODOTTYPE)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		w.typ(n.Type)
-
-	case OINDEX, OINDEXMAP:
-		w.op(OINDEX)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		w.expr(n.Right)
-
-	case OSLICE, OSLICESTR, OSLICEARR:
-		w.op(OSLICE)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		low, high, _ := n.SliceBounds()
-		w.exprsOrNil(low, high)
-
-	case OSLICE3, OSLICE3ARR:
-		w.op(OSLICE3)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		low, high, max := n.SliceBounds()
-		w.exprsOrNil(low, high)
-		w.expr(max)
-
-	case OCOPY, OCOMPLEX:
-		// treated like other builtin calls (see e.g., OREAL)
-		w.op(op)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		w.expr(n.Right)
-		w.op(OEND)
-
-	case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
-		w.op(OCONV)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		w.typ(n.Type)
-
-	case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
-		w.op(op)
-		w.pos(n.Pos)
-		if n.Left != nil {
-			w.expr(n.Left)
-			w.op(OEND)
-		} else {
-			w.exprList(n.List) // emits terminating OEND
-		}
-		// only append() calls may contain '...' arguments
-		if op == OAPPEND {
-			w.bool(n.IsDDD())
-		} else if n.IsDDD() {
-			Fatalf("exporter: unexpected '...' with %v call", op)
-		}
-
-	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
-		w.op(OCALL)
-		w.pos(n.Pos)
-		w.stmtList(n.Ninit)
-		w.expr(n.Left)
-		w.exprList(n.List)
-		w.bool(n.IsDDD())
-
-	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
-		w.op(op) // must keep separate from OMAKE for importer
-		w.pos(n.Pos)
-		w.typ(n.Type)
-		switch {
-		default:
-			// empty list
-			w.op(OEND)
-		case n.List.Len() != 0: // pre-typecheck
-			w.exprList(n.List) // emits terminating OEND
-		case n.Right != nil:
-			w.expr(n.Left)
-			w.expr(n.Right)
-			w.op(OEND)
-		case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()):
-			w.expr(n.Left)
-			w.op(OEND)
-		}
-
-	// unary expressions
-	case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
-		w.op(op)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-
-	// binary expressions
-	case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
-		OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
-		w.op(op)
-		w.pos(n.Pos)
-		w.expr(n.Left)
-		w.expr(n.Right)
-
-	case OADDSTR:
-		w.op(OADDSTR)
-		w.pos(n.Pos)
-		w.exprList(n.List)
-
-	case ODCLCONST:
-		// if exporting, DCLCONST should just be removed as its usage
-		// has already been replaced with literals
-
-	default:
-		Fatalf("cannot export %v (%d) node\n"+
-			"\t==> please file an issue and assign to gri@", n.Op, int(n.Op))
-	}
-}
-
-func (w *exportWriter) op(op Op) {
-	w.uint64(uint64(op))
-}
-
-func (w *exportWriter) exprsOrNil(a, b *Node) {
-	ab := 0
-	if a != nil {
-		ab |= 1
-	}
-	if b != nil {
-		ab |= 2
-	}
-	w.uint64(uint64(ab))
-	if ab&1 != 0 {
-		w.expr(a)
-	}
-	if ab&2 != 0 {
-		w.node(b)
-	}
-}
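-
-// An illustrative sketch (not part of the original encoding spec) of the
-// two-bit presence mask written above:
-//
-//	w.exprsOrNil(nil, nil) // writes uvarint 0: neither operand present
-//	w.exprsOrNil(a, nil)   // writes uvarint 1, then a
-//	w.exprsOrNil(a, b)     // writes uvarint 3, then a, then b
-//
-// importReader.exprsOrNil (in iimport.go below) reads the same mask back.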
-
-func (w *exportWriter) elemList(list Nodes) {
-	w.uint64(uint64(list.Len()))
-	for _, n := range list.Slice() {
-		w.selector(n.Sym)
-		w.expr(n.Left)
-	}
-}
-
-func (w *exportWriter) localName(n *Node) {
-	// Escape analysis happens after inline bodies are saved, but
-	// we're using the same ONAME nodes, so we might still see
-	// PAUTOHEAP here.
-	//
-	// Check for Stackcopy to identify PAUTOHEAP that came from
-	// PPARAM/PPARAMOUT, because we only want to include vargen in
-	// non-param names.
-	var v int32
-	if n.Class() == PAUTO || (n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy == nil) {
-		v = n.Name.Vargen
-	}
-
-	w.localIdent(n.Sym, v)
-}
-
-func (w *exportWriter) localIdent(s *types.Sym, v int32) {
-	// Anonymous parameters.
-	if s == nil {
-		w.string("")
-		return
-	}
-
-	name := s.Name
-	if name == "_" {
-		w.string("_")
-		return
-	}
-
-	// TODO(mdempsky): Fix autotmp hack.
-	if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
-		Fatalf("unexpected dot in identifier: %v", name)
-	}
-
-	if v > 0 {
-		if strings.Contains(name, "·") {
-			Fatalf("exporter: unexpected · in symbol name")
-		}
-		name = fmt.Sprintf("%s·%d", name, v)
-	}
-
-	if !types.IsExported(name) && s.Pkg != w.currPkg {
-		Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path)
-	}
-
-	w.string(name)
-}
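-
-// For example (a sketch; the variable names are invented): two locals both
-// named x declared in an inlinable function carry distinct Vargen values
-// and export as "x·1" and "x·2", so they remain distinct when the inline
-// body is re-read by the importer.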
-
-type intWriter struct {
-	bytes.Buffer
-}
-
-func (w *intWriter) int64(x int64) {
-	var buf [binary.MaxVarintLen64]byte
-	n := binary.PutVarint(buf[:], x)
-	w.Write(buf[:n])
-}
-
-func (w *intWriter) uint64(x uint64) {
-	var buf [binary.MaxVarintLen64]byte
-	n := binary.PutUvarint(buf[:], x)
-	w.Write(buf[:n])
-}
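-
-// A minimal round-trip sketch (illustrative only): intWriter's varint
-// framing pairs with the binary.ReadVarint/ReadUvarint calls on the
-// import side, since the embedded bytes.Buffer implements io.ByteReader:
-//
-//	var w intWriter
-//	w.int64(-3)                   // writes the zigzag varint for -3
-//	v, _ := binary.ReadVarint(&w) // v == -3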
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
deleted file mode 100644
index c0114d0..0000000
--- a/src/cmd/compile/internal/gc/iimport.go
+++ /dev/null
@@ -1,1117 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Indexed package import.
-// See iexport.go for the export data format.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/bio"
-	"cmd/internal/goobj"
-	"cmd/internal/obj"
-	"cmd/internal/src"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"math/big"
-	"os"
-	"strings"
-)
-
-// An iimporterAndOffset identifies an importer and an offset within
-// its data section.
-type iimporterAndOffset struct {
-	p   *iimporter
-	off uint64
-}
-
-var (
-	// declImporter maps from imported identifiers to an importer
-	// and offset where that identifier's declaration can be read.
-	declImporter = map[*types.Sym]iimporterAndOffset{}
-
-	// inlineImporter is like declImporter, but for inline bodies
-	// for function and method symbols.
-	inlineImporter = map[*types.Sym]iimporterAndOffset{}
-)
-
-func expandDecl(n *Node) {
-	if n.Op != ONONAME {
-		return
-	}
-
-	r := importReaderFor(n, declImporter)
-	if r == nil {
-		// Can happen if user tries to reference an undeclared name.
-		return
-	}
-
-	r.doDecl(n)
-}
-
-func expandInline(fn *Node) {
-	if fn.Func.Inl.Body != nil {
-		return
-	}
-
-	r := importReaderFor(fn, inlineImporter)
-	if r == nil {
-		Fatalf("missing import reader for %v", fn)
-	}
-
-	r.doInline(fn)
-}
-
-func importReaderFor(n *Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
-	x, ok := importers[n.Sym]
-	if !ok {
-		return nil
-	}
-
-	return x.p.newReader(x.off, n.Sym.Pkg)
-}
-
-type intReader struct {
-	*bio.Reader
-	pkg *types.Pkg
-}
-
-func (r *intReader) int64() int64 {
-	i, err := binary.ReadVarint(r.Reader)
-	if err != nil {
-		yyerror("import %q: read error: %v", r.pkg.Path, err)
-		errorexit()
-	}
-	return i
-}
-
-func (r *intReader) uint64() uint64 {
-	i, err := binary.ReadUvarint(r.Reader)
-	if err != nil {
-		yyerror("import %q: read error: %v", r.pkg.Path, err)
-		errorexit()
-	}
-	return i
-}
-
-func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
-	ir := &intReader{in, pkg}
-
-	version := ir.uint64()
-	if version != iexportVersion {
-		yyerror("import %q: unknown export format version %d", pkg.Path, version)
-		errorexit()
-	}
-
-	sLen := ir.uint64()
-	dLen := ir.uint64()
-
-	// Map string (and data) section into memory as a single large
-	// string. This reduces heap fragmentation and allows
-	// returning individual substrings very efficiently.
-	data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
-	if err != nil {
-		yyerror("import %q: mapping input: %v", pkg.Path, err)
-		errorexit()
-	}
-	stringData := data[:sLen]
-	declData := data[sLen:]
-
-	in.MustSeek(int64(sLen+dLen), os.SEEK_CUR)
-
-	p := &iimporter{
-		ipkg: pkg,
-
-		pkgCache:     map[uint64]*types.Pkg{},
-		posBaseCache: map[uint64]*src.PosBase{},
-		typCache:     map[uint64]*types.Type{},
-
-		stringData: stringData,
-		declData:   declData,
-	}
-
-	for i, pt := range predeclared() {
-		p.typCache[uint64(i)] = pt
-	}
-
-	// Declaration index.
-	for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
-		pkg := p.pkgAt(ir.uint64())
-		pkgName := p.stringAt(ir.uint64())
-		pkgHeight := int(ir.uint64())
-		if pkg.Name == "" {
-			pkg.Name = pkgName
-			pkg.Height = pkgHeight
-			numImport[pkgName]++
-
-			// TODO(mdempsky): This belongs somewhere else.
-			pkg.Lookup("_").Def = asTypesNode(nblank)
-		} else {
-			if pkg.Name != pkgName {
-				Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
-			}
-			if pkg.Height != pkgHeight {
-				Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
-			}
-		}
-
-		for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
-			s := pkg.Lookup(p.stringAt(ir.uint64()))
-			off := ir.uint64()
-
-			if _, ok := declImporter[s]; ok {
-				continue
-			}
-			declImporter[s] = iimporterAndOffset{p, off}
-
-			// Create stub declaration. If used, this will
-			// be overwritten by expandDecl.
-			if s.Def != nil {
-				Fatalf("unexpected definition for %v: %v", s, asNode(s.Def))
-			}
-			s.Def = asTypesNode(npos(src.NoXPos, dclname(s)))
-		}
-	}
-
-	// Inline body index.
-	for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
-		pkg := p.pkgAt(ir.uint64())
-
-		for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
-			s := pkg.Lookup(p.stringAt(ir.uint64()))
-			off := ir.uint64()
-
-			if _, ok := inlineImporter[s]; ok {
-				continue
-			}
-			inlineImporter[s] = iimporterAndOffset{p, off}
-		}
-	}
-
-	// Fingerprint.
-	_, err = io.ReadFull(in, fingerprint[:])
-	if err != nil {
-		yyerror("import %s: error reading fingerprint", pkg.Path)
-		errorexit()
-	}
-	return fingerprint
-}
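-
-// The overall layout consumed above, informally (a sketch mirroring the
-// reads in this function, not a separate spec):
-//
-//	uvarint version
-//	uvarint sLen, uvarint dLen
-//	[sLen]byte string section, [dLen]byte declaration section
-//	declaration index: per package, (name, height, symbol/offset pairs)
-//	inline body index: per package, (symbol/offset pairs)
-//	fingerprint (goobj.FingerprintType)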
-
-type iimporter struct {
-	ipkg *types.Pkg
-
-	pkgCache     map[uint64]*types.Pkg
-	posBaseCache map[uint64]*src.PosBase
-	typCache     map[uint64]*types.Type
-
-	stringData string
-	declData   string
-}
-
-func (p *iimporter) stringAt(off uint64) string {
-	var x [binary.MaxVarintLen64]byte
-	n := copy(x[:], p.stringData[off:])
-
-	slen, n := binary.Uvarint(x[:n])
-	if n <= 0 {
-		Fatalf("varint failed")
-	}
-	spos := off + uint64(n)
-	return p.stringData[spos : spos+slen]
-}
-
-func (p *iimporter) posBaseAt(off uint64) *src.PosBase {
-	if posBase, ok := p.posBaseCache[off]; ok {
-		return posBase
-	}
-
-	file := p.stringAt(off)
-	posBase := src.NewFileBase(file, file)
-	p.posBaseCache[off] = posBase
-	return posBase
-}
-
-func (p *iimporter) pkgAt(off uint64) *types.Pkg {
-	if pkg, ok := p.pkgCache[off]; ok {
-		return pkg
-	}
-
-	pkg := p.ipkg
-	if pkgPath := p.stringAt(off); pkgPath != "" {
-		pkg = types.NewPkg(pkgPath, "")
-	}
-	p.pkgCache[off] = pkg
-	return pkg
-}
-
-// An importReader keeps state for reading an individual imported
-// object (declaration or inline body).
-type importReader struct {
-	strings.Reader
-	p *iimporter
-
-	currPkg    *types.Pkg
-	prevBase   *src.PosBase
-	prevLine   int64
-	prevColumn int64
-}
-
-func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
-	r := &importReader{
-		p:       p,
-		currPkg: pkg,
-	}
-	// (*strings.Reader).Reset wasn't added until Go 1.7, and we
-	// need to build with Go 1.4.
-	r.Reader = *strings.NewReader(p.declData[off:])
-	return r
-}
-
-func (r *importReader) string() string        { return r.p.stringAt(r.uint64()) }
-func (r *importReader) posBase() *src.PosBase { return r.p.posBaseAt(r.uint64()) }
-func (r *importReader) pkg() *types.Pkg       { return r.p.pkgAt(r.uint64()) }
-
-func (r *importReader) setPkg() {
-	r.currPkg = r.pkg()
-}
-
-func (r *importReader) doDecl(n *Node) {
-	if n.Op != ONONAME {
-		Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op)
-	}
-
-	tag := r.byte()
-	pos := r.pos()
-
-	switch tag {
-	case 'A':
-		typ := r.typ()
-
-		importalias(r.p.ipkg, pos, n.Sym, typ)
-
-	case 'C':
-		typ, val := r.value()
-
-		importconst(r.p.ipkg, pos, n.Sym, typ, val)
-
-	case 'F':
-		typ := r.signature(nil)
-
-		importfunc(r.p.ipkg, pos, n.Sym, typ)
-		r.funcExt(n)
-
-	case 'T':
-		// Types can be recursive. We need to setup a stub
-		// declaration before recursing.
-		t := importtype(r.p.ipkg, pos, n.Sym)
-
-		// We also need to defer width calculations until
-		// after the underlying type has been assigned.
-		defercheckwidth()
-		underlying := r.typ()
-		setUnderlying(t, underlying)
-		resumecheckwidth()
-
-		if underlying.IsInterface() {
-			r.typeExt(t)
-			break
-		}
-
-		ms := make([]*types.Field, r.uint64())
-		for i := range ms {
-			mpos := r.pos()
-			msym := r.ident()
-			recv := r.param()
-			mtyp := r.signature(recv)
-
-			f := types.NewField()
-			f.Pos = mpos
-			f.Sym = msym
-			f.Type = mtyp
-			ms[i] = f
-
-			m := newfuncnamel(mpos, methodSym(recv.Type, msym))
-			m.Type = mtyp
-			m.SetClass(PFUNC)
-			// methodSym already marked m.Sym as a function.
-
-			// (comment from parser.go)
-			// inl.C's inlnode on a dotmeth node expects to find the inlineable body as

-			// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
-			// out by typecheck's lookdot as this $$.ttype. So by providing
-			// this back link here we avoid special casing there.
-			mtyp.SetNname(asTypesNode(m))
-		}
-		t.Methods().Set(ms)
-
-		r.typeExt(t)
-		for _, m := range ms {
-			r.methExt(m)
-		}
-
-	case 'V':
-		typ := r.typ()
-
-		importvar(r.p.ipkg, pos, n.Sym, typ)
-		r.varExt(n)
-
-	default:
-		Fatalf("unexpected tag: %v", tag)
-	}
-}
-
-func (p *importReader) value() (typ *types.Type, v Val) {
-	typ = p.typ()
-
-	switch constTypeOf(typ) {
-	case CTNIL:
-		v.U = &NilVal{}
-	case CTBOOL:
-		v.U = p.bool()
-	case CTSTR:
-		v.U = p.string()
-	case CTINT:
-		x := new(Mpint)
-		x.Rune = typ == types.UntypedRune
-		p.mpint(&x.Val, typ)
-		v.U = x
-	case CTFLT:
-		x := newMpflt()
-		p.float(x, typ)
-		v.U = x
-	case CTCPLX:
-		x := newMpcmplx()
-		p.float(&x.Real, typ)
-		p.float(&x.Imag, typ)
-		v.U = x
-	}
-	return
-}
-
-func (p *importReader) mpint(x *big.Int, typ *types.Type) {
-	signed, maxBytes := intSize(typ)
-
-	maxSmall := 256 - maxBytes
-	if signed {
-		maxSmall = 256 - 2*maxBytes
-	}
-	if maxBytes == 1 {
-		maxSmall = 256
-	}
-
-	n, _ := p.ReadByte()
-	if uint(n) < maxSmall {
-		v := int64(n)
-		if signed {
-			v >>= 1
-			if n&1 != 0 {
-				v = ^v
-			}
-		}
-		x.SetInt64(v)
-		return
-	}
-
-	v := -n
-	if signed {
-		v = -(n &^ 1) >> 1
-	}
-	if v < 1 || uint(v) > maxBytes {
-		Fatalf("weird decoding: %v, %v => %v", n, signed, v)
-	}
-	b := make([]byte, v)
-	p.Read(b)
-	x.SetBytes(b)
-	if signed && n&1 != 0 {
-		x.Neg(x)
-	}
-}
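-
-// Worked example (illustrative arithmetic, derived from the rules above):
-// for a signed 8-byte type, maxBytes = 8 and maxSmall = 256-16 = 240, so
-// values in [-120, 119] fit in a single byte: n = 2*v for v >= 0 and
-// n = 2*^v + 1 for v < 0, e.g. v = -3 encodes as n = 5. Anything larger
-// writes a length byte n >= maxSmall followed by the magnitude bytes,
-// with the low bit of n carrying the sign.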
-
-func (p *importReader) float(x *Mpflt, typ *types.Type) {
-	var mant big.Int
-	p.mpint(&mant, typ)
-	m := x.Val.SetInt(&mant)
-	if m.Sign() == 0 {
-		return
-	}
-	m.SetMantExp(m, int(p.int64()))
-}
-
-func (r *importReader) ident() *types.Sym {
-	name := r.string()
-	if name == "" {
-		return nil
-	}
-	pkg := r.currPkg
-	if types.IsExported(name) {
-		pkg = localpkg
-	}
-	return pkg.Lookup(name)
-}
-
-func (r *importReader) qualifiedIdent() *types.Sym {
-	name := r.string()
-	pkg := r.pkg()
-	return pkg.Lookup(name)
-}
-
-func (r *importReader) pos() src.XPos {
-	delta := r.int64()
-	r.prevColumn += delta >> 1
-	if delta&1 != 0 {
-		delta = r.int64()
-		r.prevLine += delta >> 1
-		if delta&1 != 0 {
-			r.prevBase = r.posBase()
-		}
-	}
-
-	if (r.prevBase == nil || r.prevBase.AbsFilename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
-		// TODO(mdempsky): Remove once we reliably write
-		// position information for all nodes.
-		return src.NoXPos
-	}
-
-	if r.prevBase == nil {
-		Fatalf("missing posbase")
-	}
-	pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn))
-	return Ctxt.PosTable.XPos(pos)
-}
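-
-// Decoding trace (assumed input, following the delta scheme above): a
-// varint stream [7, 4] advances prevColumn by 7>>1 = 3; 7's low bit is
-// set, so a second varint is read, advancing prevLine by 4>>1 = 2; 4's
-// low bit is clear, so the position base is unchanged.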
-
-func (r *importReader) typ() *types.Type {
-	return r.p.typAt(r.uint64())
-}
-
-func (p *iimporter) typAt(off uint64) *types.Type {
-	t, ok := p.typCache[off]
-	if !ok {
-		if off < predeclReserved {
-			Fatalf("predeclared type missing from cache: %d", off)
-		}
-		t = p.newReader(off-predeclReserved, nil).typ1()
-		p.typCache[off] = t
-	}
-	return t
-}
-
-func (r *importReader) typ1() *types.Type {
-	switch k := r.kind(); k {
-	default:
-		Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
-		return nil
-
-	case definedType:
-		// We might be called from within doInline, in which
-		// case Sym.Def can point to declared parameters
-		// instead of the top-level types. Also, we don't
-		// support inlining functions with local defined
-		// types. Therefore, this must be a package-scope
-		// type.
-		n := asNode(r.qualifiedIdent().PkgDef())
-		if n.Op == ONONAME {
-			expandDecl(n)
-		}
-		if n.Op != OTYPE {
-			Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n)
-		}
-		return n.Type
-	case pointerType:
-		return types.NewPtr(r.typ())
-	case sliceType:
-		return types.NewSlice(r.typ())
-	case arrayType:
-		n := r.uint64()
-		return types.NewArray(r.typ(), int64(n))
-	case chanType:
-		dir := types.ChanDir(r.uint64())
-		return types.NewChan(r.typ(), dir)
-	case mapType:
-		return types.NewMap(r.typ(), r.typ())
-
-	case signatureType:
-		r.setPkg()
-		return r.signature(nil)
-
-	case structType:
-		r.setPkg()
-
-		fs := make([]*types.Field, r.uint64())
-		for i := range fs {
-			pos := r.pos()
-			sym := r.ident()
-			typ := r.typ()
-			emb := r.bool()
-			note := r.string()
-
-			f := types.NewField()
-			f.Pos = pos
-			f.Sym = sym
-			f.Type = typ
-			if emb {
-				f.Embedded = 1
-			}
-			f.Note = note
-			fs[i] = f
-		}
-
-		t := types.New(TSTRUCT)
-		t.SetPkg(r.currPkg)
-		t.SetFields(fs)
-		return t
-
-	case interfaceType:
-		r.setPkg()
-
-		embeddeds := make([]*types.Field, r.uint64())
-		for i := range embeddeds {
-			pos := r.pos()
-			typ := r.typ()
-
-			f := types.NewField()
-			f.Pos = pos
-			f.Type = typ
-			embeddeds[i] = f
-		}
-
-		methods := make([]*types.Field, r.uint64())
-		for i := range methods {
-			pos := r.pos()
-			sym := r.ident()
-			typ := r.signature(fakeRecvField())
-
-			f := types.NewField()
-			f.Pos = pos
-			f.Sym = sym
-			f.Type = typ
-			methods[i] = f
-		}
-
-		t := types.New(TINTER)
-		t.SetPkg(r.currPkg)
-		t.SetInterface(append(embeddeds, methods...))
-
-		// Ensure we expand the interface in the frontend (#25055).
-		checkwidth(t)
-		return t
-	}
-}
-
-func (r *importReader) kind() itag {
-	return itag(r.uint64())
-}
-
-func (r *importReader) signature(recv *types.Field) *types.Type {
-	params := r.paramList()
-	results := r.paramList()
-	if n := len(params); n > 0 {
-		params[n-1].SetIsDDD(r.bool())
-	}
-	t := functypefield(recv, params, results)
-	t.SetPkg(r.currPkg)
-	return t
-}
-
-func (r *importReader) paramList() []*types.Field {
-	fs := make([]*types.Field, r.uint64())
-	for i := range fs {
-		fs[i] = r.param()
-	}
-	return fs
-}
-
-func (r *importReader) param() *types.Field {
-	f := types.NewField()
-	f.Pos = r.pos()
-	f.Sym = r.ident()
-	f.Type = r.typ()
-	return f
-}
-
-func (r *importReader) bool() bool {
-	return r.uint64() != 0
-}
-
-func (r *importReader) int64() int64 {
-	n, err := binary.ReadVarint(r)
-	if err != nil {
-		Fatalf("readVarint: %v", err)
-	}
-	return n
-}
-
-func (r *importReader) uint64() uint64 {
-	n, err := binary.ReadUvarint(r)
-	if err != nil {
-		Fatalf("readUvarint: %v", err)
-	}
-	return n
-}
-
-func (r *importReader) byte() byte {
-	x, err := r.ReadByte()
-	if err != nil {
-		Fatalf("declReader.ReadByte: %v", err)
-	}
-	return x
-}
-
-// Compiler-specific extensions.
-
-func (r *importReader) varExt(n *Node) {
-	r.linkname(n.Sym)
-	r.symIdx(n.Sym)
-}
-
-func (r *importReader) funcExt(n *Node) {
-	r.linkname(n.Sym)
-	r.symIdx(n.Sym)
-
-	// Escape analysis.
-	for _, fs := range &types.RecvsParams {
-		for _, f := range fs(n.Type).FieldSlice() {
-			f.Note = r.string()
-		}
-	}
-
-	// Inline body.
-	if u := r.uint64(); u > 0 {
-		n.Func.Inl = &Inline{
-			Cost: int32(u - 1),
-		}
-		n.Func.Endlineno = r.pos()
-	}
-}
-
-func (r *importReader) methExt(m *types.Field) {
-	if r.bool() {
-		m.SetNointerface(true)
-	}
-	r.funcExt(asNode(m.Type.Nname()))
-}
-
-func (r *importReader) linkname(s *types.Sym) {
-	s.Linkname = r.string()
-}
-
-func (r *importReader) symIdx(s *types.Sym) {
-	lsym := s.Linksym()
-	idx := int32(r.int64())
-	if idx != -1 {
-		if s.Linkname != "" {
-			Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
-		}
-		lsym.SymIdx = idx
-		lsym.Set(obj.AttrIndexed, true)
-	}
-}
-
-func (r *importReader) typeExt(t *types.Type) {
-	t.SetNotInHeap(r.bool())
-	i, pi := r.int64(), r.int64()
-	if i != -1 && pi != -1 {
-		typeSymIdx[t] = [2]int64{i, pi}
-	}
-}
-
-// Map imported type T to the index of type descriptor symbols of T and *T,
-// so we can use the index to reference the symbol.
-var typeSymIdx = make(map[*types.Type][2]int64)
-
-func (r *importReader) doInline(n *Node) {
-	if len(n.Func.Inl.Body) != 0 {
-		Fatalf("%v already has inline body", n)
-	}
-
-	funchdr(n)
-	body := r.stmtList()
-	funcbody()
-	if body == nil {
-		// Make sure empty body is not interpreted as
-		// no inlineable body (see also parser.fnbody)
-		// (not doing so can cause significant performance
-		// degradation due to unnecessary calls to empty
-		// functions).
-		body = []*Node{}
-	}
-	n.Func.Inl.Body = body
-
-	importlist = append(importlist, n)
-
-	if Debug.E > 0 && Debug.m > 2 {
-		if Debug.m > 3 {
-			fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
-		} else {
-			fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
-		}
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Inlined function bodies
-
-// Approach: Read nodes and use them to create/declare the same data structures
-// as done originally by the (hidden) parser by closely following the parser's
-// original code. In other words, "parsing" the import data (which happens to
-// be encoded in binary rather than textual form) is the best way at the moment to
-// re-establish the syntax tree's invariants. At some future point we might be
-// able to avoid this round-about way and create the rewritten nodes directly,
-// possibly avoiding a lot of duplicate work (name resolution, type checking).
-//
-// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their
-// unrefined nodes (since this is what the importer uses). The respective case
-// entries are unreachable in the importer.
-
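-// For example (a sketch of the convention, using ops from the switches
-// below): the exporter writes ODOT, ODOTPTR, and ODOTMETH all as OXDOT,
-// and the importer's later typechecking re-derives the refined op, which
-// is why the ODOTPTR case is unreachable in importReader.node.
-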
-func (r *importReader) stmtList() []*Node {
-	var list []*Node
-	for {
-		n := r.node()
-		if n == nil {
-			break
-		}
-		// OBLOCK nodes may be created when importing ODCL nodes - unpack them
-		if n.Op == OBLOCK {
-			list = append(list, n.List.Slice()...)
-		} else {
-			list = append(list, n)
-		}
-	}
-	return list
-}
-
-func (r *importReader) caseList(sw *Node) []*Node {
-	namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
-
-	cases := make([]*Node, r.uint64())
-	for i := range cases {
-		cas := nodl(r.pos(), OCASE, nil, nil)
-		cas.List.Set(r.stmtList())
-		if namedTypeSwitch {
-			// Note: per-case variables will have distinct, dotted
-			// names after import. That's okay: swt.go only needs
-			// Sym for diagnostics anyway.
-			caseVar := newnamel(cas.Pos, r.ident())
-			declare(caseVar, dclcontext)
-			cas.Rlist.Set1(caseVar)
-			caseVar.Name.Defn = sw.Left
-		}
-		cas.Nbody.Set(r.stmtList())
-		cases[i] = cas
-	}
-	return cases
-}
-
-func (r *importReader) exprList() []*Node {
-	var list []*Node
-	for {
-		n := r.expr()
-		if n == nil {
-			break
-		}
-		list = append(list, n)
-	}
-	return list
-}
-
-func (r *importReader) expr() *Node {
-	n := r.node()
-	if n != nil && n.Op == OBLOCK {
-		Fatalf("unexpected block node: %v", n)
-	}
-	return n
-}
-
-// TODO(gri) split into expr and stmt
-func (r *importReader) node() *Node {
-	switch op := r.op(); op {
-	// expressions
-	// case OPAREN:
-	// 	unreachable - unpacked by exporter
-
-	case OLITERAL:
-		pos := r.pos()
-		typ, val := r.value()
-
-		n := npos(pos, nodlit(val))
-		n.Type = typ
-		return n
-
-	case ONONAME:
-		return mkname(r.qualifiedIdent())
-
-	case ONAME:
-		return mkname(r.ident())
-
-	// case OPACK, ONONAME:
-	// 	unreachable - should have been resolved by typechecking
-
-	case OTYPE:
-		return typenod(r.typ())
-
-	case OTYPESW:
-		n := nodl(r.pos(), OTYPESW, nil, nil)
-		if s := r.ident(); s != nil {
-			n.Left = npos(n.Pos, newnoname(s))
-		}
-		n.Right, _ = r.exprsOrNil()
-		return n
-
-	// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
-	//      unreachable - should have been resolved by typechecking
-
-	// case OCLOSURE:
-	//	unimplemented
-
-	// case OPTRLIT:
-	//	unreachable - mapped to case OADDR below by exporter
-
-	case OSTRUCTLIT:
-		// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
-		savedlineno := lineno
-		lineno = r.pos()
-		n := nodl(lineno, OCOMPLIT, nil, typenod(r.typ()))
-		n.List.Set(r.elemList()) // special handling of field names
-		lineno = savedlineno
-		return n
-
-	// case OARRAYLIT, OSLICELIT, OMAPLIT:
-	// 	unreachable - mapped to case OCOMPLIT below by exporter
-
-	case OCOMPLIT:
-		n := nodl(r.pos(), OCOMPLIT, nil, typenod(r.typ()))
-		n.List.Set(r.exprList())
-		return n
-
-	case OKEY:
-		pos := r.pos()
-		left, right := r.exprsOrNil()
-		return nodl(pos, OKEY, left, right)
-
-	// case OSTRUCTKEY:
-	//	unreachable - handled in case OSTRUCTLIT by elemList
-
-	// case OCALLPART:
-	//	unreachable - mapped to case OXDOT below by exporter
-
-	// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
-	// 	unreachable - mapped to case OXDOT below by exporter
-
-	case OXDOT:
-		// see parser.new_dotname
-		return npos(r.pos(), nodSym(OXDOT, r.expr(), r.ident()))
-
-	// case ODOTTYPE, ODOTTYPE2:
-	// 	unreachable - mapped to case ODOTTYPE below by exporter
-
-	case ODOTTYPE:
-		n := nodl(r.pos(), ODOTTYPE, r.expr(), nil)
-		n.Type = r.typ()
-		return n
-
-	// case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
-	// 	unreachable - mapped to cases below by exporter
-
-	case OINDEX:
-		return nodl(r.pos(), op, r.expr(), r.expr())
-
-	case OSLICE, OSLICE3:
-		n := nodl(r.pos(), op, r.expr(), nil)
-		low, high := r.exprsOrNil()
-		var max *Node
-		if n.Op.IsSlice3() {
-			max = r.expr()
-		}
-		n.SetSliceBounds(low, high, max)
-		return n
-
-	// case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
-	// 	unreachable - mapped to OCONV case below by exporter
-
-	case OCONV:
-		n := nodl(r.pos(), OCONV, r.expr(), nil)
-		n.Type = r.typ()
-		return n
-
-	case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
-		n := npos(r.pos(), builtinCall(op))
-		n.List.Set(r.exprList())
-		if op == OAPPEND {
-			n.SetIsDDD(r.bool())
-		}
-		return n
-
-	// case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
-	// 	unreachable - mapped to OCALL case below by exporter
-
-	case OCALL:
-		n := nodl(r.pos(), OCALL, nil, nil)
-		n.Ninit.Set(r.stmtList())
-		n.Left = r.expr()
-		n.List.Set(r.exprList())
-		n.SetIsDDD(r.bool())
-		return n
-
-	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
-		n := npos(r.pos(), builtinCall(OMAKE))
-		n.List.Append(typenod(r.typ()))
-		n.List.Append(r.exprList()...)
-		return n
-
-	// unary expressions
-	case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
-		return nodl(r.pos(), op, r.expr(), nil)
-
-	// binary expressions
-	case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
-		OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
-		return nodl(r.pos(), op, r.expr(), r.expr())
-
-	case OADDSTR:
-		pos := r.pos()
-		list := r.exprList()
-		x := npos(pos, list[0])
-		for _, y := range list[1:] {
-			x = nodl(pos, OADD, x, y)
-		}
-		return x
-
-	// --------------------------------------------------------------------
-	// statements
-	case ODCL:
-		pos := r.pos()
-		lhs := npos(pos, dclname(r.ident()))
-		typ := typenod(r.typ())
-		return npos(pos, liststmt(variter([]*Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
-
-	// case ODCLFIELD:
-	//	unimplemented
-
-	// case OAS, OASWB:
-	// 	unreachable - mapped to OAS case below by exporter
-
-	case OAS:
-		return nodl(r.pos(), OAS, r.expr(), r.expr())
-
-	case OASOP:
-		n := nodl(r.pos(), OASOP, nil, nil)
-		n.SetSubOp(r.op())
-		n.Left = r.expr()
-		if !r.bool() {
-			n.Right = nodintconst(1)
-			n.SetImplicit(true)
-		} else {
-			n.Right = r.expr()
-		}
-		return n
-
-	// case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-	// 	unreachable - mapped to OAS2 case below by exporter
-
-	case OAS2:
-		n := nodl(r.pos(), OAS2, nil, nil)
-		n.List.Set(r.exprList())
-		n.Rlist.Set(r.exprList())
-		return n
-
-	case ORETURN:
-		n := nodl(r.pos(), ORETURN, nil, nil)
-		n.List.Set(r.exprList())
-		return n
-
-	// case ORETJMP:
-	// 	unreachable - generated by compiler for trampoline routines (not exported)
-
-	case OGO, ODEFER:
-		return nodl(r.pos(), op, r.expr(), nil)
-
-	case OIF:
-		n := nodl(r.pos(), OIF, nil, nil)
-		n.Ninit.Set(r.stmtList())
-		n.Left = r.expr()
-		n.Nbody.Set(r.stmtList())
-		n.Rlist.Set(r.stmtList())
-		return n
-
-	case OFOR:
-		n := nodl(r.pos(), OFOR, nil, nil)
-		n.Ninit.Set(r.stmtList())
-		n.Left, n.Right = r.exprsOrNil()
-		n.Nbody.Set(r.stmtList())
-		return n
-
-	case ORANGE:
-		n := nodl(r.pos(), ORANGE, nil, nil)
-		n.List.Set(r.stmtList())
-		n.Right = r.expr()
-		n.Nbody.Set(r.stmtList())
-		return n
-
-	case OSELECT, OSWITCH:
-		n := nodl(r.pos(), op, nil, nil)
-		n.Ninit.Set(r.stmtList())
-		n.Left, _ = r.exprsOrNil()
-		n.List.Set(r.caseList(n))
-		return n
-
-	// case OCASE:
-	//	handled by caseList
-
-	case OFALL:
-		n := nodl(r.pos(), OFALL, nil, nil)
-		return n
-
-	case OBREAK, OCONTINUE:
-		pos := r.pos()
-		left, _ := r.exprsOrNil()
-		if left != nil {
-			left = newname(left.Sym)
-		}
-		return nodl(pos, op, left, nil)
-
-	// case OEMPTY:
-	// 	unreachable - not emitted by exporter
-
-	case OGOTO, OLABEL:
-		n := nodl(r.pos(), op, nil, nil)
-		n.Sym = lookup(r.string())
-		return n
-
-	case OEND:
-		return nil
-
-	default:
-		Fatalf("cannot import %v (%d) node\n"+
-			"\t==> please file an issue and assign to gri@", op, int(op))
-		panic("unreachable") // satisfy compiler
-	}
-}
-
-func (r *importReader) op() Op {
-	return Op(r.uint64())
-}
-
-func (r *importReader) elemList() []*Node {
-	c := r.uint64()
-	list := make([]*Node, c)
-	for i := range list {
-		s := r.ident()
-		list[i] = nodSym(OSTRUCTKEY, r.expr(), s)
-	}
-	return list
-}
-
-func (r *importReader) exprsOrNil() (a, b *Node) {
-	ab := r.uint64()
-	if ab&1 != 0 {
-		a = r.expr()
-	}
-	if ab&2 != 0 {
-		b = r.node()
-	}
-	return
-}
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
deleted file mode 100644
index ec9cc4b..0000000
--- a/src/cmd/compile/internal/gc/init.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-)
-
-// A function named init is a special case.
-// It is called during package initialization, before main is run.
-// To make it unique within a package and also uncallable,
-// the name, normally "pkg.init", is altered to "pkg.init.0".
-var renameinitgen int
-
-// Dummy function for autotmps generated during typechecking.
-var dummyInitFn = nod(ODCLFUNC, nil, nil)
-
-func renameinit() *types.Sym {
-	s := lookupN("init.", renameinitgen)
-	renameinitgen++
-	return s
-}
-
-// fninit makes an initialization record for the package.
-// See runtime/proc.go:initTask for its layout.
-// The 3 tasks for initialization are:
-//   1) Initialize all of the packages the current package depends on.
-//   2) Initialize all the variables that have initializers.
-//   3) Run any init functions.
-func fninit(n []*Node) {
-	nf := initOrder(n)
-
-	var deps []*obj.LSym // initTask records for packages the current package depends on
-	var fns []*obj.LSym  // functions to call for package initialization
-
-	// Find imported packages with init tasks.
-	for _, s := range types.InitSyms {
-		deps = append(deps, s.Linksym())
-	}
-
-	// Make a function that contains all the initialization statements.
-	if len(nf) > 0 {
-		lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
-		initializers := lookup("init")
-		fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
-		for _, dcl := range dummyInitFn.Func.Dcl {
-			dcl.Name.Curfn = fn
-		}
-		fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
-		dummyInitFn.Func.Dcl = nil
-
-		fn.Nbody.Set(nf)
-		funcbody()
-
-		fn = typecheck(fn, ctxStmt)
-		Curfn = fn
-		typecheckslice(nf, ctxStmt)
-		Curfn = nil
-		xtop = append(xtop, fn)
-		fns = append(fns, initializers.Linksym())
-	}
-	if dummyInitFn.Func.Dcl != nil {
-		// We only generate temps using dummyInitFn if there
-		// are package-scope initialization statements, so
-		// something's weird if we get here.
-		Fatalf("dummyInitFn still has declarations")
-	}
-	dummyInitFn = nil
-
-	// Record user init functions.
-	for i := 0; i < renameinitgen; i++ {
-		s := lookupN("init.", i)
-		fn := asNode(s.Def).Name.Defn
-		// Skip init functions with empty bodies.
-		if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
-			continue
-		}
-		fns = append(fns, s.Linksym())
-	}
-
-	if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
-		return // nothing to initialize
-	}
-
-	// Make an .inittask structure.
-	sym := lookup(".inittask")
-	nn := newname(sym)
-	nn.Type = types.Types[TUINT8] // dummy type
-	nn.SetClass(PEXTERN)
-	sym.Def = asTypesNode(nn)
-	exportsym(nn)
-	lsym := sym.Linksym()
-	ot := 0
-	ot = duintptr(lsym, ot, 0) // state: not initialized yet
-	ot = duintptr(lsym, ot, uint64(len(deps)))
-	ot = duintptr(lsym, ot, uint64(len(fns)))
-	for _, d := range deps {
-		ot = dsymptr(lsym, ot, d, 0)
-	}
-	for _, f := range fns {
-		ot = dsymptr(lsym, ot, f, 0)
-	}
-	// An initTask has pointers, but none into the Go heap.
-	// It's not quite read only, the state field must be modifiable.
-	ggloblsym(lsym, int32(ot), obj.NOPTR)
-}
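-
-// Schematic of the .inittask data emitted above (a sketch; the field
-// names follow runtime/proc.go's initTask, which is the authoritative
-// layout):
-//
-//	state uintptr // 0 = not initialized yet
-//	ndeps uintptr // len(deps)
-//	nfns  uintptr // len(fns)
-//	...followed by ndeps initTask pointers and nfns function pointers.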
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
deleted file mode 100644
index 600d12b..0000000
--- a/src/cmd/compile/internal/gc/inl.go
+++ /dev/null
@@ -1,1499 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-// The inlining facility makes 2 passes: first caninl determines which
-// functions are suitable for inlining, and for those that are it
-// saves a copy of the body. Then inlcalls walks each function body to
-// expand calls to inlinable functions.
-//
-// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
-// making 1 the default and -l disables it. Additional levels (beyond -l) may be buggy and
-// are not supported.
-//      0: disabled
-//      1: 80-node leaf functions, oneliners, panic, lazy typechecking (default)
-//      2: (unassigned)
-//      3: (unassigned)
-//      4: allow non-leaf functions
-//
-// At some point this may get another default and become switch-offable with -N.
-//
-// The -d typcheckinl flag enables early typechecking of all imported bodies,
-// which is useful to flush out bugs.
-//
-// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
-// which calls get inlined or not; more is for debugging, and may go away at any point.
-
-package gc
-
-import (
-	"cmd/compile/internal/logopt"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/src"
-	"fmt"
-	"strings"
-)
-
-// Inlining budget parameters, gathered in one place
-const (
-	inlineMaxBudget       = 80
-	inlineExtraAppendCost = 0
-	// default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
-	inlineExtraCallCost  = 57              // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
-	inlineExtraPanicCost = 1               // do not penalize inlining panics.
-	inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
-
-	inlineBigFunctionNodes   = 5000 // Functions with this many nodes are considered "big".
-	inlineBigFunctionMaxCost = 20   // Max cost of inlinee when inlining into a "big" function.
-)
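-
-// Rough budget arithmetic (an illustrative reading of the constants
-// above): hairyVisitor charges 1 per visited node plus extraCallCost per
-// call it cannot resolve to an inlinable body, so with the default
-// inlineExtraCallCost of 57 a single opaque call consumes most of the
-// 80-node budget by itself.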
-
-// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
-// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *Node) *types.Pkg {
-	if fn.IsMethod() {
-		// method
-		rcvr := fn.Type.Recv().Type
-
-		if rcvr.IsPtr() {
-			rcvr = rcvr.Elem()
-		}
-		if rcvr.Sym == nil {
-			Fatalf("receiver with no sym: [%v] %L  (%v)", fn.Sym, fn, rcvr)
-		}
-		return rcvr.Sym.Pkg
-	}
-
-	// non-method
-	return fn.Sym.Pkg
-}
-
-// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
-// because they're a copy of an already checked body.
-func typecheckinl(fn *Node) {
-	lno := setlineno(fn)
-
-	expandInline(fn)
-
-	// typecheckinl is only for imported functions;
-	// their bodies may refer to unsafe as long as the package
-	// was marked safe during import (which was checked then).
-	// the ->inl of a local function has been typechecked before caninl copied it.
-	pkg := fnpkg(fn)
-
-	if pkg == localpkg || pkg == nil {
-		return // typecheckinl on local function
-	}
-
-	if Debug.m > 2 || Debug_export != 0 {
-		fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
-	}
-
-	savefn := Curfn
-	Curfn = fn
-	typecheckslice(fn.Func.Inl.Body, ctxStmt)
-	Curfn = savefn
-
-	// During expandInline (which imports fn.Func.Inl.Body),
-	// declarations are added to fn.Func.Dcl by funchdr(). Move them
-	// to fn.Func.Inl.Dcl for consistency with how local functions
-	// behave. (Append because typecheckinl may be called multiple
-	// times.)
-	fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
-	fn.Func.Dcl = nil
-
-	lineno = lno
-}
-
-// Caninl determines whether fn is inlineable.
-// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
-// fn and ->nbody will already have been typechecked.
-func caninl(fn *Node) {
-	if fn.Op != ODCLFUNC {
-		Fatalf("caninl %v", fn)
-	}
-	if fn.Func.Nname == nil {
-		Fatalf("caninl no nname %+v", fn)
-	}
-
-	var reason string // reason, if any, that the function was not inlined
-	if Debug.m > 1 || logopt.Enabled() {
-		defer func() {
-			if reason != "" {
-				if Debug.m > 1 {
-					fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
-				}
-				if logopt.Enabled() {
-					logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", fn.funcname(), reason)
-				}
-			}
-		}()
-	}
-
-	// If marked "go:noinline", don't inline
-	if fn.Func.Pragma&Noinline != 0 {
-		reason = "marked go:noinline"
-		return
-	}
-
-	// If marked "go:norace" and -race compilation, don't inline.
-	if flag_race && fn.Func.Pragma&Norace != 0 {
-		reason = "marked go:norace with -race compilation"
-		return
-	}
-
-	// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
-	if Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 {
-		reason = "marked go:nocheckptr"
-		return
-	}
-
-	// If marked "go:cgo_unsafe_args", don't inline, since the
-	// function makes assumptions about its argument frame layout.
-	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
-		reason = "marked go:cgo_unsafe_args"
-		return
-	}
-
-	// If marked as "go:uintptrescapes", don't inline, since the
-	// escape information is lost during inlining.
-	if fn.Func.Pragma&UintptrEscapes != 0 {
-		reason = "marked as having an escaping uintptr argument"
-		return
-	}
-
-	// The nowritebarrierrec checker currently works at function
-	// granularity, so inlining yeswritebarrierrec functions can
-	// confuse it (#22342). As a workaround, disallow inlining
-	// them for now.
-	if fn.Func.Pragma&Yeswritebarrierrec != 0 {
-		reason = "marked go:yeswritebarrierrec"
-		return
-	}
-
-	// If fn has no body (is defined outside of Go), cannot inline it.
-	if fn.Nbody.Len() == 0 {
-		reason = "no function body"
-		return
-	}
-
-	if fn.Typecheck() == 0 {
-		Fatalf("caninl on non-typechecked function %v", fn)
-	}
-
-	n := fn.Func.Nname
-	if n.Func.InlinabilityChecked() {
-		return
-	}
-	defer n.Func.SetInlinabilityChecked(true)
-
-	cc := int32(inlineExtraCallCost)
-	if Debug.l == 4 {
-		cc = 1 // this appears to yield better performance than 0.
-	}
-
-	// At this point in the game the function we're looking at may
-	// have "stale" autos, vars that still appear in the Dcl list, but
-	// which no longer have any uses in the function body (due to
-	// elimination by deadcode). We'd like to exclude these dead vars
-	// when creating the "Inline.Dcl" field below; to accomplish this,
-	// the hairyVisitor below builds up a map of used/referenced
-	// locals, and we use this map to produce a pruned Inline.Dcl
-	// list. See issue 25249 for more context.
-
-	visitor := hairyVisitor{
-		budget:        inlineMaxBudget,
-		extraCallCost: cc,
-		usedLocals:    make(map[*Node]bool),
-	}
-	if visitor.visitList(fn.Nbody) {
-		reason = visitor.reason
-		return
-	}
-	if visitor.budget < 0 {
-		reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-visitor.budget, inlineMaxBudget)
-		return
-	}
-
-	n.Func.Inl = &Inline{
-		Cost: inlineMaxBudget - visitor.budget,
-		Dcl:  inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)),
-		Body: inlcopylist(fn.Nbody.Slice()),
-	}
-
-	// hack, TODO, check for better way to link method nodes back to the thing with the ->inl
-	// this is so export can find the body of a method
-	fn.Type.FuncType().Nname = asTypesNode(n)
-
-	if Debug.m > 1 {
-		fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
-	} else if Debug.m != 0 {
-		fmt.Printf("%v: can inline %v\n", fn.Line(), n)
-	}
-	if logopt.Enabled() {
-		logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", fn.funcname(), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
-	}
-}
-
-// inlFlood marks n's inline body for export and recursively ensures
-// all called functions are marked too.
-func inlFlood(n *Node) {
-	if n == nil {
-		return
-	}
-	if n.Op != ONAME || n.Class() != PFUNC {
-		Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
-	}
-	if n.Func == nil {
-		Fatalf("inlFlood: missing Func on %v", n)
-	}
-	if n.Func.Inl == nil {
-		return
-	}
-
-	if n.Func.ExportInline() {
-		return
-	}
-	n.Func.SetExportInline(true)
-
-	typecheckinl(n)
-
-	// Recursively identify all referenced functions for
-	// reexport. We want to include even non-called functions,
-	// because after inlining they might be callable.
-	inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
-		switch n.Op {
-		case ONAME:
-			switch n.Class() {
-			case PFUNC:
-				if n.isMethodExpression() {
-					inlFlood(asNode(n.Type.Nname()))
-				} else {
-					inlFlood(n)
-					exportsym(n)
-				}
-			case PEXTERN:
-				exportsym(n)
-			}
-
-		case ODOTMETH:
-			fn := asNode(n.Type.Nname())
-			inlFlood(fn)
-
-		case OCALLPART:
-			// Okay, because we don't yet inline indirect
-			// calls to method values.
-		case OCLOSURE:
-			// If the closure is inlinable, we'll need to
-			// flood it too. But today we don't support
-			// inlining functions that contain closures.
-			//
-			// When we do, we'll probably want:
-			//     inlFlood(n.Func.Closure.Func.Nname)
-			Fatalf("unexpected closure in inlinable function")
-		}
-		return true
-	})
-}
-
-// hairyVisitor visits a function body to determine its inlining
-// hairiness and whether or not it can be inlined.
-type hairyVisitor struct {
-	budget        int32
-	reason        string
-	extraCallCost int32
-	usedLocals    map[*Node]bool
-}
-
-// Look for anything we want to punt on.
-func (v *hairyVisitor) visitList(ll Nodes) bool {
-	for _, n := range ll.Slice() {
-		if v.visit(n) {
-			return true
-		}
-	}
-	return false
-}
-
-func (v *hairyVisitor) visit(n *Node) bool {
-	if n == nil {
-		return false
-	}
-
-	switch n.Op {
-	// Call is okay if inlinable and we have the budget for the body.
-	case OCALLFUNC:
-		// Functions that call runtime.getcaller{pc,sp} cannot be inlined
-		// because getcaller{pc,sp} expect a pointer to the caller's first argument.
-		//
-		// runtime.throw is a "cheap call" like panic in normal code.
-		if n.Left.Op == ONAME && n.Left.Class() == PFUNC && isRuntimePkg(n.Left.Sym.Pkg) {
-			fn := n.Left.Sym.Name
-			if fn == "getcallerpc" || fn == "getcallersp" {
-				v.reason = "call to " + fn
-				return true
-			}
-			if fn == "throw" {
-				v.budget -= inlineExtraThrowCost
-				break
-			}
-		}
-
-		if isIntrinsicCall(n) {
-			// Treat like any other node.
-			break
-		}
-
-		if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
-			v.budget -= fn.Func.Inl.Cost
-			break
-		}
-
-		// Call cost for non-leaf inlining.
-		v.budget -= v.extraCallCost
-
-	// Call is okay if inlinable and we have the budget for the body.
-	case OCALLMETH:
-		t := n.Left.Type
-		if t == nil {
-			Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
-		}
-		if t.Nname() == nil {
-			Fatalf("no function definition for [%p] %+v\n", t, t)
-		}
-		if isRuntimePkg(n.Left.Sym.Pkg) {
-			fn := n.Left.Sym.Name
-			if fn == "heapBits.nextArena" {
-				// Special case: explicitly allow
-				// mid-stack inlining of
-				// runtime.heapBits.next even though
-				// it calls slow-path
-				// runtime.heapBits.nextArena.
-				break
-			}
-		}
-		if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil {
-			v.budget -= inlfn.Inl.Cost
-			break
-		}
-		// Call cost for non-leaf inlining.
-		v.budget -= v.extraCallCost
-
-	// Things that are too hairy, irrespective of the budget
-	case OCALL, OCALLINTER:
-		// Call cost for non-leaf inlining.
-		v.budget -= v.extraCallCost
-
-	case OPANIC:
-		v.budget -= inlineExtraPanicCost
-
-	case ORECOVER:
-		// recover matches the argument frame pointer to find
-		// the right panic value, so it needs an argument frame.
-		v.reason = "call to recover"
-		return true
-
-	case OCLOSURE,
-		ORANGE,
-		OSELECT,
-		OGO,
-		ODEFER,
-		ODCLTYPE, // can't print yet
-		ORETJMP:
-		v.reason = "unhandled op " + n.Op.String()
-		return true
-
-	case OAPPEND:
-		v.budget -= inlineExtraAppendCost
-
-	case ODCLCONST, OEMPTY, OFALL:
-		// These nodes don't produce code; omit from inlining budget.
-		return false
-
-	case OLABEL:
-		// TODO(mdempsky): Add support for inlining labeled control statements.
-		if n.labeledControl() != nil {
-			v.reason = "labeled control"
-			return true
-		}
-
-	case OBREAK, OCONTINUE:
-		if n.Sym != nil {
-			// Should have short-circuited due to labeledControl above.
-			Fatalf("unexpected labeled break/continue: %v", n)
-		}
-
-	case OIF:
-		if Isconst(n.Left, CTBOOL) {
-			// This if and the condition cost nothing.
-			return v.visitList(n.Ninit) || v.visitList(n.Nbody) ||
-				v.visitList(n.Rlist)
-		}
-
-	case ONAME:
-		if n.Class() == PAUTO {
-			v.usedLocals[n] = true
-		}
-
-	}
-
-	v.budget--
-
-	// When debugging, don't stop early, to get full cost of inlining this function
-	if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
-		return true
-	}
-
-	return v.visit(n.Left) || v.visit(n.Right) ||
-		v.visitList(n.List) || v.visitList(n.Rlist) ||
-		v.visitList(n.Ninit) || v.visitList(n.Nbody)
-}
-
-// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
-// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
-// the body and dcls of an inlineable function.
-func inlcopylist(ll []*Node) []*Node {
-	s := make([]*Node, 0, len(ll))
-	for _, n := range ll {
-		s = append(s, inlcopy(n))
-	}
-	return s
-}
-
-func inlcopy(n *Node) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	case ONAME, OTYPE, OLITERAL:
-		return n
-	}
-
-	m := n.copy()
-	if n.Op != OCALLPART && m.Func != nil {
-		Fatalf("unexpected Func: %v", m)
-	}
-	m.Left = inlcopy(n.Left)
-	m.Right = inlcopy(n.Right)
-	m.List.Set(inlcopylist(n.List.Slice()))
-	m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
-	m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
-	m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
-
-	return m
-}
-
-func countNodes(n *Node) int {
-	if n == nil {
-		return 0
-	}
-	cnt := 1
-	cnt += countNodes(n.Left)
-	cnt += countNodes(n.Right)
-	for _, n1 := range n.Ninit.Slice() {
-		cnt += countNodes(n1)
-	}
-	for _, n1 := range n.Nbody.Slice() {
-		cnt += countNodes(n1)
-	}
-	for _, n1 := range n.List.Slice() {
-		cnt += countNodes(n1)
-	}
-	for _, n1 := range n.Rlist.Slice() {
-		cnt += countNodes(n1)
-	}
-	return cnt
-}
-
-// inlcalls/inlnodelist/inlnode walk fn's statements and expressions and substitute any
-// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *Node) {
-	savefn := Curfn
-	Curfn = fn
-	maxCost := int32(inlineMaxBudget)
-	if countNodes(fn) >= inlineBigFunctionNodes {
-		maxCost = inlineBigFunctionMaxCost
-	}
-	// Map to keep track of functions that have been inlined at a particular
-	// call site, in order to stop inlining when we reach the beginning of a
-	// recursion cycle again. We don't inline immediately recursive functions,
-	// but allow inlining if there is a recursion cycle of many functions.
-	// Most likely, the inlining will stop before we even hit the beginning of
-	// the cycle again, but the map catches the unusual case.
-	inlMap := make(map[*Node]bool)
-	fn = inlnode(fn, maxCost, inlMap)
-	if fn != Curfn {
-		Fatalf("inlnode replaced curfn")
-	}
-	Curfn = savefn
-}
-
-// Turn an OINLCALL into a statement.
-func inlconv2stmt(n *Node) {
-	n.Op = OBLOCK
-
-	// n->ninit stays
-	n.List.Set(n.Nbody.Slice())
-
-	n.Nbody.Set(nil)
-	n.Rlist.Set(nil)
-}
-
-// Turn an OINLCALL into a single valued expression.
-// The result of inlconv2expr MUST be assigned back to n, e.g.
-// 	n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *Node) *Node {
-	r := n.Rlist.First()
-	return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
-}
-
-// Turn the rlist (with the return values) of the OINLCALL in
-// n into an expression list, lumping the ninit and body
-// containing the inlined statements onto the first list element so
-// that order is preserved. Used in return, oas2func and call
-// statements.
-func inlconv2list(n *Node) []*Node {
-	if n.Op != OINLCALL || n.Rlist.Len() == 0 {
-		Fatalf("inlconv2list %+v\n", n)
-	}
-
-	s := n.Rlist.Slice()
-	s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
-	return s
-}
-
-func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
-	s := l.Slice()
-	for i := range s {
-		s[i] = inlnode(s[i], maxCost, inlMap)
-	}
-}
-
-// inlnode recurses over the tree to find inlineable calls, which will
-// be turned into OINLCALLs by mkinlcall. When the recursion comes
-// back up, it examines left, right, list, rlist, ninit, ntest, nincr,
-// nbody and nelse, and uses one of the 4 inlconv/glue functions above
-// to turn the OINLCALL into an expression, a statement, or to patch it
-// into this node's list or rlist as appropriate.
-// NOTE: it makes no sense to pass the glue functions down the
-// recursion to the level where the OINLCALL gets created, because they
-// have to edit /this/ n, so you'd have to push that one down as well,
-// but then you may as well do it here. So this is cleaner and
-// shorter and less complicated.
-// The result of inlnode MUST be assigned back to n, e.g.
-// 	n.Left = inlnode(n.Left)
-func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
-	if n == nil {
-		return n
-	}
-
-	switch n.Op {
-	case ODEFER, OGO:
-		switch n.Left.Op {
-		case OCALLFUNC, OCALLMETH:
-			n.Left.SetNoInline(true)
-		}
-
-	// TODO do them here (or earlier),
-	// so escape analysis can avoid more heapmoves.
-	case OCLOSURE:
-		return n
-	case OCALLMETH:
-		// Prevent inlining some reflect.Value methods when using checkptr,
-		// even when package reflect was compiled without it (#35073).
-		if s := n.Left.Sym; Debug_checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
-			return n
-		}
-	}
-
-	lno := setlineno(n)
-
-	inlnodelist(n.Ninit, maxCost, inlMap)
-	for _, n1 := range n.Ninit.Slice() {
-		if n1.Op == OINLCALL {
-			inlconv2stmt(n1)
-		}
-	}
-
-	n.Left = inlnode(n.Left, maxCost, inlMap)
-	if n.Left != nil && n.Left.Op == OINLCALL {
-		n.Left = inlconv2expr(n.Left)
-	}
-
-	n.Right = inlnode(n.Right, maxCost, inlMap)
-	if n.Right != nil && n.Right.Op == OINLCALL {
-		if n.Op == OFOR || n.Op == OFORUNTIL {
-			inlconv2stmt(n.Right)
-		} else if n.Op == OAS2FUNC {
-			n.Rlist.Set(inlconv2list(n.Right))
-			n.Right = nil
-			n.Op = OAS2
-			n.SetTypecheck(0)
-			n = typecheck(n, ctxStmt)
-		} else {
-			n.Right = inlconv2expr(n.Right)
-		}
-	}
-
-	inlnodelist(n.List, maxCost, inlMap)
-	if n.Op == OBLOCK {
-		for _, n2 := range n.List.Slice() {
-			if n2.Op == OINLCALL {
-				inlconv2stmt(n2)
-			}
-		}
-	} else {
-		s := n.List.Slice()
-		for i1, n1 := range s {
-			if n1 != nil && n1.Op == OINLCALL {
-				s[i1] = inlconv2expr(s[i1])
-			}
-		}
-	}
-
-	inlnodelist(n.Rlist, maxCost, inlMap)
-	s := n.Rlist.Slice()
-	for i1, n1 := range s {
-		if n1.Op == OINLCALL {
-			if n.Op == OIF {
-				inlconv2stmt(n1)
-			} else {
-				s[i1] = inlconv2expr(s[i1])
-			}
-		}
-	}
-
-	inlnodelist(n.Nbody, maxCost, inlMap)
-	for _, n := range n.Nbody.Slice() {
-		if n.Op == OINLCALL {
-			inlconv2stmt(n)
-		}
-	}
-
-	// with all the branches out of the way, it is now time to
-	// transmogrify this node itself unless inhibited by the
-	// switch at the top of this function.
-	switch n.Op {
-	case OCALLFUNC, OCALLMETH:
-		if n.NoInline() {
-			return n
-		}
-	}
-
-	switch n.Op {
-	case OCALLFUNC:
-		if Debug.m > 3 {
-			fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
-		}
-		if isIntrinsicCall(n) {
-			break
-		}
-		if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
-			n = mkinlcall(n, fn, maxCost, inlMap)
-		}
-
-	case OCALLMETH:
-		if Debug.m > 3 {
-			fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
-		}
-
-		// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
-		if n.Left.Type == nil {
-			Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
-		}
-
-		if n.Left.Type.Nname() == nil {
-			Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
-		}
-
-		n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), maxCost, inlMap)
-	}
-
-	lineno = lno
-	return n
-}
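A small illustration of the ODEFER/OGO case at the top of inlnode (a hedged user-level sketch with hypothetical names): calls issued via defer or go are marked NoInline and remain real calls.

	package main

	func f() int { return 1 }

	func main() {
		x := f()  // plain call: an inlining candidate
		defer f() // ODEFER: callee marked NoInline, kept as a real call
		go f()    // OGO: likewise never inlined
		_ = x
	}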
-
-// inlCallee takes a function-typed expression and returns the underlying function ONAME
-// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn *Node) *Node {
-	fn = staticValue(fn)
-	switch {
-	case fn.Op == ONAME && fn.Class() == PFUNC:
-		if fn.isMethodExpression() {
-			n := asNode(fn.Type.Nname())
-			// Check that receiver type matches fn.Left.
-			// TODO(mdempsky): Handle implicit dereference
-			// of pointer receiver argument?
-			if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
-				return nil
-			}
-			return n
-		}
-		return fn
-	case fn.Op == OCLOSURE:
-		c := fn.Func.Closure
-		caninl(c)
-		return c.Func.Nname
-	}
-	return nil
-}
-
-func staticValue(n *Node) *Node {
-	for {
-		if n.Op == OCONVNOP {
-			n = n.Left
-			continue
-		}
-
-		n1 := staticValue1(n)
-		if n1 == nil {
-			return n
-		}
-		n = n1
-	}
-}
-
-// staticValue1 implements a simple SSA-like optimization. If n is a local variable
-// that is initialized and never reassigned, staticValue1 returns the initializer
-// expression. Otherwise, it returns nil.
-func staticValue1(n *Node) *Node {
-	if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() {
-		return nil
-	}
-
-	defn := n.Name.Defn
-	if defn == nil {
-		return nil
-	}
-
-	var rhs *Node
-FindRHS:
-	switch defn.Op {
-	case OAS:
-		rhs = defn.Right
-	case OAS2:
-		for i, lhs := range defn.List.Slice() {
-			if lhs == n {
-				rhs = defn.Rlist.Index(i)
-				break FindRHS
-			}
-		}
-		Fatalf("%v missing from LHS of %v", n, defn)
-	default:
-		return nil
-	}
-	if rhs == nil {
-		Fatalf("RHS is nil: %v", defn)
-	}
-
-	unsafe, _ := reassigned(n)
-	if unsafe {
-		return nil
-	}
-
-	return rhs
-}
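To see what staticValue and reassigned buy at the source level, a sketch (user code, hypothetical names): a function value initialized once and never reassigned resolves to its underlying function, so the call through it can be inlined; a reassigned one cannot.

	package main

	import "fmt"

	func square(x int) int { return x * x }

	func main() {
		f := square       // initialized once, never reassigned:
		fmt.Println(f(3)) // staticValue resolves f to square, so this call can inline

		g := square
		g = func(x int) int { return x } // reassigned: reassigned() reports it,
		fmt.Println(g(3))                // so staticValue1 gives up; the call stays indirect
	}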
-
-// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
-// indicating whether the name has any assignments other than its declaration.
-// The second return value is the first such assignment encountered in the walk, if any. It is mostly
-// useful for -m output documenting the reason for inhibited optimizations.
-// NB: global variables are always considered to be re-assigned.
-// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(n *Node) (bool, *Node) {
-	if n.Op != ONAME {
-		Fatalf("reassigned %v", n)
-	}
-	// No way to reliably check for no-reassignment of globals; assume they can be reassigned.
-	if n.Name.Curfn == nil {
-		return true, nil
-	}
-	f := n.Name.Curfn
-	// This can be pretty surprising, though there may be a good reason for it:
-	// local variables inside a closure have Curfn pointing to the OCLOSURE node
-	// instead of the corresponding ODCLFUNC.
-	// We need to walk the function body to check for reassignments, so we follow
-	// the linkage to the ODCLFUNC node, as that is where the body is held.
-	if f.Op == OCLOSURE {
-		f = f.Func.Closure
-	}
-	v := reassignVisitor{name: n}
-	a := v.visitList(f.Nbody)
-	return a != nil, a
-}
-
-type reassignVisitor struct {
-	name *Node
-}
-
-func (v *reassignVisitor) visit(n *Node) *Node {
-	if n == nil {
-		return nil
-	}
-	switch n.Op {
-	case OAS, OSELRECV:
-		if n.Left == v.name && n != v.name.Name.Defn {
-			return n
-		}
-	case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV:
-		for _, p := range n.List.Slice() {
-			if p == v.name && n != v.name.Name.Defn {
-				return n
-			}
-		}
-	case OSELRECV2:
-		if (n.Left == v.name || n.List.First() == v.name) && n != v.name.Name.Defn {
-			return n
-		}
-	}
-	if a := v.visit(n.Left); a != nil {
-		return a
-	}
-	if a := v.visit(n.Right); a != nil {
-		return a
-	}
-	if a := v.visitList(n.List); a != nil {
-		return a
-	}
-	if a := v.visitList(n.Rlist); a != nil {
-		return a
-	}
-	if a := v.visitList(n.Ninit); a != nil {
-		return a
-	}
-	if a := v.visitList(n.Nbody); a != nil {
-		return a
-	}
-	return nil
-}
-
-func (v *reassignVisitor) visitList(l Nodes) *Node {
-	for _, n := range l.Slice() {
-		if a := v.visit(n); a != nil {
-			return a
-		}
-	}
-	return nil
-}
-
-func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node {
-	n := asNode(t.Nname)
-	if n == nil || n.isBlank() {
-		return nblank
-	}
-
-	inlvar := inlvars[n]
-	if inlvar == nil {
-		Fatalf("missing inlvar for %v", n)
-	}
-	as.Ninit.Append(nod(ODCL, inlvar, nil))
-	inlvar.Name.Defn = as
-	return inlvar
-}
-
-var inlgen int
-
-// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
-// function with an inlinable body, return an OINLCALL node that can replace n.
-// The returned node's Ninit has the parameter assignments, the Nbody is the
-// inlined function body, and (List, Rlist) contain the (input, output)
-// parameters.
-// The result of mkinlcall MUST be assigned back to n, e.g.
-// 	n.Left = mkinlcall(n.Left, fn, maxCost, inlMap)
-func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
-	if fn.Func.Inl == nil {
-		if logopt.Enabled() {
-			logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
-				fmt.Sprintf("%s cannot be inlined", fn.pkgFuncName()))
-		}
-		return n
-	}
-	if fn.Func.Inl.Cost > maxCost {
-		// The inlined function body is too big. Typically we use this check to restrict
-		// inlining into very big functions. See issues 26546 and 17566.
-		if logopt.Enabled() {
-			logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
-				fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, fn.pkgFuncName(), maxCost))
-		}
-		return n
-	}
-
-	if fn == Curfn || fn.Name.Defn == Curfn {
-		// Can't recursively inline a function into itself.
-		if logopt.Enabled() {
-			logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", Curfn.funcname()))
-		}
-		return n
-	}
-
-	if instrumenting && isRuntimePkg(fn.Sym.Pkg) {
-		// Runtime package must not be instrumented.
-		// The instrumentation pass skips the runtime package. However, some
-		// runtime code can be inlined into other packages and instrumented
-		// there. To avoid this, we disable inlining of runtime functions
-		// when instrumenting. The example that we observed is inlining of
-		// LockOSThread, which led to false race reports on m contents.
-		return n
-	}
-
-	if inlMap[fn] {
-		if Debug.m > 1 {
-			fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
-		}
-		return n
-	}
-	inlMap[fn] = true
-	defer func() {
-		inlMap[fn] = false
-	}()
-	if Debug_typecheckinl == 0 {
-		typecheckinl(fn)
-	}
-
-	// We have a function node, and it has an inlineable body.
-	if Debug.m > 1 {
-		fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
-	} else if Debug.m != 0 {
-		fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
-	}
-	if Debug.m > 2 {
-		fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
-	}
-
-	if ssaDump != "" && ssaDump == Curfn.funcname() {
-		ssaDumpInlined = append(ssaDumpInlined, fn)
-	}
-
-	ninit := n.Ninit
-
-	// For normal function calls, the function callee expression
-	// may contain side effects (e.g., added by addinit during
-	// inlconv2expr or inlconv2list). Make sure to preserve these,
-	// if necessary (#42703).
-	if n.Op == OCALLFUNC {
-		callee := n.Left
-		for callee.Op == OCONVNOP {
-			ninit.AppendNodes(&callee.Ninit)
-			callee = callee.Left
-		}
-		if callee.Op != ONAME && callee.Op != OCLOSURE {
-			Fatalf("unexpected callee expression: %v", callee)
-		}
-	}
-
-	// Make temp names to use instead of the originals.
-	inlvars := make(map[*Node]*Node)
-
-	// record formals/locals for later post-processing
-	var inlfvars []*Node
-
-	// Handle captured variables when inlining closures.
-	if fn.Name.Defn != nil {
-		if c := fn.Name.Defn.Func.Closure; c != nil {
-			for _, v := range c.Func.Closure.Func.Cvars.Slice() {
-				if v.Op == OXXX {
-					continue
-				}
-
-				o := v.Name.Param.Outer
-				// Make sure the outer param matches the inlining location.
-				// NB: if we enabled inlining of functions containing OCLOSURE, or refined
-				// the reassigned check via some sort of copy propagation, this would most
-				// likely need to be changed to a loop to walk up to the correct Param.
-				if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) {
-					Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v)
-				}
-
-				if v.Name.Byval() {
-					iv := typecheck(inlvar(v), ctxExpr)
-					ninit.Append(nod(ODCL, iv, nil))
-					ninit.Append(typecheck(nod(OAS, iv, o), ctxStmt))
-					inlvars[v] = iv
-				} else {
-					addr := newname(lookup("&" + v.Sym.Name))
-					addr.Type = types.NewPtr(v.Type)
-					ia := typecheck(inlvar(addr), ctxExpr)
-					ninit.Append(nod(ODCL, ia, nil))
-					ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), ctxStmt))
-					inlvars[addr] = ia
-
-					// When capturing by reference, all occurrences of the captured var
-					// must be substituted with a dereference of the temporary address.
-					inlvars[v] = typecheck(nod(ODEREF, ia, nil), ctxExpr)
-				}
-			}
-		}
-	}
-
-	for _, ln := range fn.Func.Inl.Dcl {
-		if ln.Op != ONAME {
-			continue
-		}
-		if ln.Class() == PPARAMOUT { // return values handled below.
-			continue
-		}
-		if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
-			// TODO(mdempsky): Remove once I'm confident
-			// this never actually happens. We currently
-			// perform inlining before escape analysis, so
-			// nothing should have moved to the heap yet.
-			Fatalf("impossible: %v", ln)
-		}
-		inlf := typecheck(inlvar(ln), ctxExpr)
-		inlvars[ln] = inlf
-		if genDwarfInline > 0 {
-			if ln.Class() == PPARAM {
-				inlf.Name.SetInlFormal(true)
-			} else {
-				inlf.Name.SetInlLocal(true)
-			}
-			inlf.Pos = ln.Pos
-			inlfvars = append(inlfvars, inlf)
-		}
-	}
-
-	nreturns := 0
-	inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool {
-		if n != nil && n.Op == ORETURN {
-			nreturns++
-		}
-		return true
-	})
-
-	// We can delay declaring+initializing result parameters if:
-	// (1) there's only one "return" statement in the inlined
-	// function, and (2) the result parameters aren't named.
-	delayretvars := nreturns == 1
-
-	// temporaries for return values.
-	var retvars []*Node
-	for i, t := range fn.Type.Results().Fields().Slice() {
-		var m *Node
-		if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") {
-			m = inlvar(n)
-			m = typecheck(m, ctxExpr)
-			inlvars[n] = m
-			delayretvars = false // found a named result parameter
-		} else {
-			// Anonymous return values: synthesize names for use in the assignment that replaces the return.
-			m = retvar(t, i)
-		}
-
-		if genDwarfInline > 0 {
-			// Don't update the src.Pos on a return variable if it
-			// was manufactured by the inliner (e.g. "~R2"); such vars
-			// were not part of the original callee.
-			if !strings.HasPrefix(m.Sym.Name, "~R") {
-				m.Name.SetInlFormal(true)
-				m.Pos = t.Pos
-				inlfvars = append(inlfvars, m)
-			}
-		}
-
-		retvars = append(retvars, m)
-	}
-
-	// Assign arguments to the parameters' temp names.
-	as := nod(OAS2, nil, nil)
-	as.SetColas(true)
-	if n.Op == OCALLMETH {
-		if n.Left.Left == nil {
-			Fatalf("method call without receiver: %+v", n)
-		}
-		as.Rlist.Append(n.Left.Left)
-	}
-	as.Rlist.Append(n.List.Slice()...)
-
-	// For non-dotted calls to variadic functions, we assign the
-	// variadic parameter's temp name separately.
-	var vas *Node
-
-	if recv := fn.Type.Recv(); recv != nil {
-		as.List.Append(inlParam(recv, as, inlvars))
-	}
-	for _, param := range fn.Type.Params().Fields().Slice() {
-		// For ordinary parameters or variadic parameters in
-		// dotted calls, just add the variable to the
-		// assignment list, and we're done.
-		if !param.IsDDD() || n.IsDDD() {
-			as.List.Append(inlParam(param, as, inlvars))
-			continue
-		}
-
-		// Otherwise, we need to collect the remaining values
-		// to pass as a slice.
-
-		x := as.List.Len()
-		for as.List.Len() < as.Rlist.Len() {
-			as.List.Append(argvar(param.Type, as.List.Len()))
-		}
-		varargs := as.List.Slice()[x:]
-
-		vas = nod(OAS, nil, nil)
-		vas.Left = inlParam(param, vas, inlvars)
-		if len(varargs) == 0 {
-			vas.Right = nodnil()
-			vas.Right.Type = param.Type
-		} else {
-			vas.Right = nod(OCOMPLIT, nil, typenod(param.Type))
-			vas.Right.List.Set(varargs)
-		}
-	}
-
-	if as.Rlist.Len() != 0 {
-		as = typecheck(as, ctxStmt)
-		ninit.Append(as)
-	}
-
-	if vas != nil {
-		vas = typecheck(vas, ctxStmt)
-		ninit.Append(vas)
-	}
-
-	if !delayretvars {
-		// Zero the return parameters.
-		for _, n := range retvars {
-			ninit.Append(nod(ODCL, n, nil))
-			ras := nod(OAS, n, nil)
-			ras = typecheck(ras, ctxStmt)
-			ninit.Append(ras)
-		}
-	}
-
-	retlabel := autolabel(".i")
-
-	inlgen++
-
-	parent := -1
-	if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
-		parent = b.InliningIndex()
-	}
-	newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
-
-	// Add an inline mark just before the inlined body.
-	// This mark is inline in the code so that it's a reasonable spot
-	// to put a breakpoint. Not sure if that's really necessary or not
-	// (in which case it could go at the end of the function instead).
-	// Note issue 28603.
-	inlMark := nod(OINLMARK, nil, nil)
-	inlMark.Pos = n.Pos.WithIsStmt()
-	inlMark.Xoffset = int64(newIndex)
-	ninit.Append(inlMark)
-
-	if genDwarfInline > 0 {
-		if !fn.Sym.Linksym().WasInlined() {
-			Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
-			fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
-		}
-	}
-
-	subst := inlsubst{
-		retlabel:     retlabel,
-		retvars:      retvars,
-		delayretvars: delayretvars,
-		inlvars:      inlvars,
-		bases:        make(map[*src.PosBase]*src.PosBase),
-		newInlIndex:  newIndex,
-	}
-
-	body := subst.list(asNodes(fn.Func.Inl.Body))
-
-	lab := nodSym(OLABEL, nil, retlabel)
-	body = append(body, lab)
-
-	typecheckslice(body, ctxStmt)
-
-	if genDwarfInline > 0 {
-		for _, v := range inlfvars {
-			v.Pos = subst.updatedPos(v.Pos)
-		}
-	}
-
-	//dumplist("ninit post", ninit);
-
-	call := nod(OINLCALL, nil, nil)
-	call.Ninit.Set(ninit.Slice())
-	call.Nbody.Set(body)
-	call.Rlist.Set(retvars)
-	call.Type = n.Type
-	call.SetTypecheck(1)
-
-	// Transitive inlining.
-	// It might be nice to do this before exporting the body,
-	// but we can't emit the body with inlining expanded;
-	// instead we emit the things that the body needs,
-	// and each use must redo the inlining.
-	// Luckily these are small.
-	inlnodelist(call.Nbody, maxCost, inlMap)
-	for _, n := range call.Nbody.Slice() {
-		if n.Op == OINLCALL {
-			inlconv2stmt(n)
-		}
-	}
-
-	if Debug.m > 2 {
-		fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
-	}
-
-	return call
-}
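In source terms, the rewrite mkinlcall performs is roughly the following hand expansion (illustrative only; the compiler's temporaries are named ~R0 and the label .i0, which are not legal source identifiers, so R0 and ret stand in for them):

	package main

	import "fmt"

	func main() {
		// Original: s := add(1, 2), where add(a, b int) int { return a + b }.
		var R0 int // return-value temporary (zeroed up front unless delayretvars)
		{
			a, b := 1, 2 // Ninit: the OAS2 assigning arguments to parameter temps
			R0 = a + b   // inlined body, with "return" rewritten to an assignment...
			goto ret     // ...plus a goto to the shared return label
		}
	ret:
		s := R0 // the OINLCALL's Rlist supplies the result
		fmt.Println(s)
	}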
-
-// Every time we expand a function we generate a new set of tmpnames
-// (PAUTOs in the calling function) and link them off of the
-// PPARAMs, PAUTOs, and PPARAMOUTs of the called function.
-func inlvar(var_ *Node) *Node {
-	if Debug.m > 3 {
-		fmt.Printf("inlvar %+v\n", var_)
-	}
-
-	n := newname(var_.Sym)
-	n.Type = var_.Type
-	n.SetClass(PAUTO)
-	n.Name.SetUsed(true)
-	n.Name.Curfn = Curfn // the calling function, not the called one
-	n.Name.SetAddrtaken(var_.Name.Addrtaken())
-
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	return n
-}
-
-// Synthesize a variable to store the inlined function's results in.
-func retvar(t *types.Field, i int) *Node {
-	n := newname(lookupN("~R", i))
-	n.Type = t.Type
-	n.SetClass(PAUTO)
-	n.Name.SetUsed(true)
-	n.Name.Curfn = Curfn // the calling function, not the called one
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	return n
-}
-
-// Synthesize a variable to store the inlined function's arguments
-// when they come from a multiple return call.
-func argvar(t *types.Type, i int) *Node {
-	n := newname(lookupN("~arg", i))
-	n.Type = t.Elem()
-	n.SetClass(PAUTO)
-	n.Name.SetUsed(true)
-	n.Name.Curfn = Curfn // the calling function, not the called one
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	return n
-}
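A sketch of the variadic handling these temporaries support (user code, hypothetical names; ~arg is the compiler's spelling, shown only in comments):

	package main

	import "fmt"

	// Small and loop-free, so a plausible inlining candidate.
	func first(def int, xs ...int) int {
		if len(xs) == 0 {
			return def
		}
		return xs[0]
	}

	func main() {
		fmt.Println(first(0, 1, 2)) // non-dotted call: the inliner collects 1 and 2
		                            // into ~arg temporaries and builds the []int for xs
		ys := []int{3, 4}
		fmt.Println(first(0, ys...)) // dotted call (IsDDD): ys is assigned to xs directly
		fmt.Println(first(7))        // no variadic args: xs becomes a typed nil slice
	}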
-
-// The inlsubst type implements the actual inlining of a single
-// function call.
-type inlsubst struct {
-	// Target of the goto substituted in place of a return.
-	retlabel *types.Sym
-
-	// Temporary result variables.
-	retvars []*Node
-
-	// Whether result variables should be initialized at the
-	// "return" statement.
-	delayretvars bool
-
-	inlvars map[*Node]*Node
-
-	// bases maps from original PosBase to PosBase with an extra
-	// inlined call frame.
-	bases map[*src.PosBase]*src.PosBase
-
-	// newInlIndex is the index of the inlined call frame to
-	// insert for inlined nodes.
-	newInlIndex int
-}
-
-// list inlines a list of nodes.
-func (subst *inlsubst) list(ll Nodes) []*Node {
-	s := make([]*Node, 0, ll.Len())
-	for _, n := range ll.Slice() {
-		s = append(s, subst.node(n))
-	}
-	return s
-}
-
-// node recursively copies a node from the saved pristine body of the
-// inlined function, substituting references to input/output
-// parameters with ones to the tmpnames, and substituting returns with
-// assignments to the output.
-func (subst *inlsubst) node(n *Node) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	case ONAME:
-		if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
-			if Debug.m > 2 {
-				fmt.Printf("substituting name %+v  ->  %+v\n", n, inlvar)
-			}
-			return inlvar
-		}
-
-		if Debug.m > 2 {
-			fmt.Printf("not substituting name %+v\n", n)
-		}
-		return n
-
-	case OLITERAL, OTYPE:
-		// If n is a named constant or type, we can continue
-		// using it in the inline copy. Otherwise, make a copy
-		// so we can update the line number.
-		if n.Sym != nil {
-			return n
-		}
-
-	//		dump("Return before substitution", n);
-	case ORETURN:
-		// Since we don't handle bodies with closures, this return is
-		// guaranteed to belong to the current inlined function.
-		m := nodSym(OGOTO, nil, subst.retlabel)
-		m.Ninit.Set(subst.list(n.Ninit))
-
-		if len(subst.retvars) != 0 && n.List.Len() != 0 {
-			as := nod(OAS2, nil, nil)
-
-			// Make a shallow copy of retvars.
-			// Otherwise OINLCALL.Rlist will be the same list,
-			// and later walk and typecheck may clobber it.
-			for _, n := range subst.retvars {
-				as.List.Append(n)
-			}
-			as.Rlist.Set(subst.list(n.List))
-
-			if subst.delayretvars {
-				for _, n := range as.List.Slice() {
-					as.Ninit.Append(nod(ODCL, n, nil))
-					n.Name.Defn = as
-				}
-			}
-
-			as = typecheck(as, ctxStmt)
-			m.Ninit.Append(as)
-		}
-
-		typecheckslice(m.Ninit.Slice(), ctxStmt)
-		m = typecheck(m, ctxStmt)
-
-		//		dump("Return after substitution", m);
-		return m
-
-	case OGOTO, OLABEL:
-		m := n.copy()
-		m.Pos = subst.updatedPos(m.Pos)
-		m.Ninit.Set(nil)
-		p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen)
-		m.Sym = lookup(p)
-
-		return m
-	}
-
-	m := n.copy()
-	m.Pos = subst.updatedPos(m.Pos)
-	m.Ninit.Set(nil)
-
-	if n.Op == OCLOSURE {
-		Fatalf("cannot inline function containing closure: %+v", n)
-	}
-
-	m.Left = subst.node(n.Left)
-	m.Right = subst.node(n.Right)
-	m.List.Set(subst.list(n.List))
-	m.Rlist.Set(subst.list(n.Rlist))
-	m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
-	m.Nbody.Set(subst.list(n.Nbody))
-
-	return m
-}
-
-func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
-	pos := Ctxt.PosTable.Pos(xpos)
-	oldbase := pos.Base() // can be nil
-	newbase := subst.bases[oldbase]
-	if newbase == nil {
-		newbase = src.NewInliningBase(oldbase, subst.newInlIndex)
-		subst.bases[oldbase] = newbase
-	}
-	pos.SetBase(newbase)
-	return Ctxt.PosTable.XPos(pos)
-}
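The user-visible effect of the PosBase rewriting above, as a hedged sketch: positions inside an inlined body keep pointing at the callee's source, so a panic still reports the callee's line even after inlining.

	package main

	// divide is trivially inlinable; the division panics at run time.
	func divide(a, b int) int {
		return a / b // the traceback reports this line even when divide is
	}                // inlined, because inlined nodes carry the inlining PosBase

	func main() {
		_ = divide(1, 0)
	}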
-
-func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
-	s := make([]*Node, 0, len(ll))
-	for _, n := range ll {
-		if n.Class() == PAUTO {
-			if _, found := vis.usedLocals[n]; !found {
-				continue
-			}
-		}
-		s = append(s, n)
-	}
-	return s
-}
-
-// devirtualize replaces interface method calls within fn with direct
-// concrete-type method calls where applicable.
-func devirtualize(fn *Node) {
-	Curfn = fn
-	inspectList(fn.Nbody, func(n *Node) bool {
-		if n.Op == OCALLINTER {
-			devirtualizeCall(n)
-		}
-		return true
-	})
-}
-
-func devirtualizeCall(call *Node) {
-	recv := staticValue(call.Left.Left)
-	if recv.Op != OCONVIFACE {
-		return
-	}
-
-	typ := recv.Left.Type
-	if typ.IsInterface() {
-		return
-	}
-
-	x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil)
-	x.Type = typ
-	x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym)
-	x = typecheck(x, ctxExpr|ctxCallee)
-	switch x.Op {
-	case ODOTMETH:
-		if Debug.m != 0 {
-			Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ)
-		}
-		call.Op = OCALLMETH
-		call.Left = x
-	case ODOTINTER:
-		// Promoted method from embedded interface-typed field (#42279).
-		if Debug.m != 0 {
-			Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
-		}
-		call.Op = OCALLINTER
-		call.Left = x
-	default:
-		// TODO(mdempsky): Turn back into Fatalf after more testing.
-		if Debug.m != 0 {
-			Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
-		}
-		return
-	}
-
-	// Duplicated logic from typecheck for function call return
-	// value types.
-	//
-	// Receiver parameter size may have changed; need to update
-	// call.Type to get correct stack offsets for result
-	// parameters.
-	checkwidth(x.Type)
-	switch ft := x.Type; ft.NumResults() {
-	case 0:
-	case 1:
-		call.Type = ft.Results().Field(0).Type
-	default:
-		call.Type = ft.Results()
-	}
-}
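A minimal sketch of the devirtualization above (user code, hypothetical names): the concrete type is visible through the OCONVIFACE, so the interface call becomes a direct method call, and -m reports the rewrite.

	package main

	import "fmt"

	type Greeter interface{ Greet() string }

	type impl struct{ name string }

	func (i impl) Greet() string { return "hi " + i.name }

	func main() {
		var g Greeter = impl{name: "go"} // OCONVIFACE from a statically known type
		fmt.Println(g.Greet())           // rewritten to a direct impl.Greet call;
		                                 // build with -gcflags=-m to see the report
	}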
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index a6963a3..726a068 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -2,716 +2,238 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:generate go run mkbuiltin.go
-
 package gc
 
 import (
 	"bufio"
 	"bytes"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/deadcode"
+	"cmd/compile/internal/devirtualize"
+	"cmd/compile/internal/dwarfgen"
+	"cmd/compile/internal/escape"
+	"cmd/compile/internal/inline"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/noder"
+	"cmd/compile/internal/pkginit"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
-	"cmd/internal/bio"
 	"cmd/internal/dwarf"
-	"cmd/internal/goobj"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
-	"cmd/internal/sys"
 	"flag"
 	"fmt"
-	"internal/goversion"
-	"io"
-	"io/ioutil"
 	"log"
 	"os"
-	"path"
-	"regexp"
 	"runtime"
-	"sort"
-	"strconv"
-	"strings"
 )
 
-var (
-	buildid      string
-	spectre      string
-	spectreIndex bool
-)
-
-var (
-	Debug_append       int
-	Debug_checkptr     int
-	Debug_closure      int
-	Debug_compilelater int
-	debug_dclstack     int
-	Debug_dumpptrs     int
-	Debug_libfuzzer    int
-	Debug_panic        int
-	Debug_slice        int
-	Debug_vlog         bool
-	Debug_wb           int
-	Debug_pctab        string
-	Debug_locationlist int
-	Debug_typecheckinl int
-	Debug_gendwarfinl  int
-	Debug_softfloat    int
-	Debug_defer        int
-)
-
-// Debug arguments.
-// These can be specified with the -d flag, as in "-d nil"
-// to set the debug_checknil variable.
-// Multiple options can be comma-separated.
-// Each option accepts an optional argument, as in "gcprog=2"
-var debugtab = []struct {
-	name string
-	help string
-	val  interface{} // must be *int or *string
-}{
-	{"append", "print information about append compilation", &Debug_append},
-	{"checkptr", "instrument unsafe pointer conversions", &Debug_checkptr},
-	{"closure", "print information about closure compilation", &Debug_closure},
-	{"compilelater", "compile functions as late as possible", &Debug_compilelater},
-	{"disablenil", "disable nil checks", &disable_checknil},
-	{"dclstack", "run internal dclstack check", &debug_dclstack},
-	{"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug_dumpptrs},
-	{"gcprog", "print dump of GC programs", &Debug_gcprog},
-	{"libfuzzer", "coverage instrumentation for libfuzzer", &Debug_libfuzzer},
-	{"nil", "print information about nil checks", &Debug_checknil},
-	{"panic", "do not hide any compiler panic", &Debug_panic},
-	{"slice", "print information about slice compilation", &Debug_slice},
-	{"typeassert", "print information about type assertion inlining", &Debug_typeassert},
-	{"wb", "print information about write barriers", &Debug_wb},
-	{"export", "print export data", &Debug_export},
-	{"pctab", "print named pc-value table", &Debug_pctab},
-	{"locationlists", "print information about DWARF location list creation", &Debug_locationlist},
-	{"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
-	{"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
-	{"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
-	{"defer", "print information about defer compilation", &Debug_defer},
-	{"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled},
-}
-
-const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
-
-<key> is one of:
-
-`
-
-const debugHelpFooter = `
-<value> is key-specific.
-
-Key "checkptr" supports values:
-	"0": instrumentation disabled
-	"1": conversions involving unsafe.Pointer are instrumented
-	"2": conversions to unsafe.Pointer force heap allocation
-
-Key "pctab" supports values:
-	"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
-`
-
-func usage() {
-	fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
-	objabi.Flagprint(os.Stderr)
-	Exit(2)
-}
-
 func hidePanic() {
-	if Debug_panic == 0 && nsavederrors+nerrors > 0 {
+	if base.Debug.Panic == 0 && base.Errors() > 0 {
 		// If we've already complained about things
 		// in the program, don't bother complaining
 		// about a panic too; let the user clean up
 		// the code and try again.
 		if err := recover(); err != nil {
-			errorexit()
+			if err == "-h" {
+				panic(err)
+			}
+			base.ErrorExit()
 		}
 	}
 }
 
-// supportsDynlink reports whether or not the code generator for the given
-// architecture supports the -shared and -dynlink flags.
-func supportsDynlink(arch *sys.Arch) bool {
-	return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
-}
-
-// timing data for compiler phases
-var timings Timings
-var benchfile string
-
-var nowritebarrierrecCheck *nowritebarrierrecChecker
-
 // Main parses flags and Go source files specified in the command-line
 // arguments, type-checks the parsed Go package, compiles functions to machine
 // code, and finally writes the compiled package definition to disk.
-func Main(archInit func(*Arch)) {
-	timings.Start("fe", "init")
+func Main(archInit func(*ssagen.ArchInfo)) {
+	base.Timer.Start("fe", "init")
 
 	defer hidePanic()
 
-	archInit(&thearch)
+	archInit(&ssagen.Arch)
 
-	Ctxt = obj.Linknew(thearch.LinkArch)
-	Ctxt.DiagFunc = yyerror
-	Ctxt.DiagFlush = flusherrors
-	Ctxt.Bso = bufio.NewWriter(os.Stdout)
+	base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
+	base.Ctxt.DiagFunc = base.Errorf
+	base.Ctxt.DiagFlush = base.FlushErrors
+	base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
 
 	// UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump
 	// on Darwin don't support it properly, especially since macOS 10.14 (Mojave).  This is exposed as a flag
 	// to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project.
 	// See bugs 31188 and 21945 (CLs 170638, 98075, 72371).
-	Ctxt.UseBASEntries = Ctxt.Headtype != objabi.Hdarwin
+	base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin
 
-	localpkg = types.NewPkg("", "")
-	localpkg.Prefix = "\"\""
+	types.LocalPkg = types.NewPkg("", "")
+	types.LocalPkg.Prefix = "\"\""
 
 	// We won't know localpkg's height until after import
 	// processing. In the meantime, set to MaxPkgHeight to ensure
 	// height comparisons at least work until then.
-	localpkg.Height = types.MaxPkgHeight
+	types.LocalPkg.Height = types.MaxPkgHeight
 
 	// pseudo-package, for scoping
-	builtinpkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
-	builtinpkg.Prefix = "go.builtin"            // not go%2ebuiltin
+	types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
+	types.BuiltinPkg.Prefix = "go.builtin"            // not go%2ebuiltin
 
 	// pseudo-package, accessed by import "unsafe"
-	unsafepkg = types.NewPkg("unsafe", "unsafe")
+	ir.Pkgs.Unsafe = types.NewPkg("unsafe", "unsafe")
 
 	// Pseudo-package that contains the compiler's builtin
 	// declarations for package runtime. These are declared in a
 	// separate package to avoid conflicts with package runtime's
 	// actual declarations, which may differ intentionally but
 	// insignificantly.
-	Runtimepkg = types.NewPkg("go.runtime", "runtime")
-	Runtimepkg.Prefix = "runtime"
+	ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
+	ir.Pkgs.Runtime.Prefix = "runtime"
 
 	// pseudo-packages used in symbol tables
-	itabpkg = types.NewPkg("go.itab", "go.itab")
-	itabpkg.Prefix = "go.itab" // not go%2eitab
-
-	itablinkpkg = types.NewPkg("go.itablink", "go.itablink")
-	itablinkpkg.Prefix = "go.itablink" // not go%2eitablink
-
-	trackpkg = types.NewPkg("go.track", "go.track")
-	trackpkg.Prefix = "go.track" // not go%2etrack
-
-	// pseudo-package used for map zero values
-	mappkg = types.NewPkg("go.map", "go.map")
-	mappkg.Prefix = "go.map"
+	ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
+	ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab
 
 	// pseudo-package used for methods with anonymous receivers
-	gopkg = types.NewPkg("go", "")
+	ir.Pkgs.Go = types.NewPkg("go", "")
 
-	Wasm := objabi.GOARCH == "wasm"
-
-	// Whether the limit for stack-allocated objects is much smaller than normal.
-	// This can be helpful for diagnosing certain causes of GC latency. See #27732.
-	smallFrames := false
-	jsonLogOpt := ""
-
-	flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
-	flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
-	flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
-
-	objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
-	objabi.Flagcount("B", "disable bounds checking", &Debug.B)
-	objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
-	objabi.Flagcount("E", "debug symbol export", &Debug.E)
-	objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
-	objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
-	objabi.Flagcount("N", "disable optimizations", &Debug.N)
-	objabi.Flagcount("S", "print assembly listing", &Debug.S)
-	objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
-	objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
-	objabi.Flagcount("h", "halt on error", &Debug.h)
-	objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
-	objabi.Flagcount("l", "disable inlining", &Debug.l)
-	objabi.Flagcount("m", "print optimization decisions", &Debug.m)
-	objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
-	objabi.Flagcount("w", "debug type checking", &Debug.w)
-
-	objabi.Flagfn1("I", "add `directory` to import search path", addidir)
-	objabi.AddVersionFlag() // -V
-	flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
-	flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
-	flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
-	flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
-	flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help")
-	flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
-	flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
-	flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
-	objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
-	objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
-	objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
-	flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
-	flag.StringVar(&flag_lang, "lang", "", "release to compile for")
-	flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
-	objabi.Flagcount("live", "debug liveness analysis", &debuglive)
-	if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
-		flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
-	}
-	flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports")
-	flag.StringVar(&outfile, "o", "", "write output to `file`")
-	flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
-	flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
-	if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
-		flag.BoolVar(&flag_race, "race", false, "enable race detector")
-	}
-	flag.StringVar(&spectre, "spectre", spectre, "enable spectre mitigations in `list` (all, index, ret)")
-	if enableTrace {
-		flag.BoolVar(&trace, "t", false, "trace type-checking")
-	}
-	flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
-	flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
-	flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
-	var flag_shared bool
-	var flag_dynlink bool
-	if supportsDynlink(thearch.LinkArch.Arch) {
-		flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library")
-		flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
-		flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries")
-	}
-	flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
-	flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`")
-	flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-	var goversion string
-	flag.StringVar(&goversion, "goversion", "", "required version of the runtime")
-	var symabisPath string
-	flag.StringVar(&symabisPath, "symabis", "", "read symbol ABIs from `file`")
-	flag.StringVar(&traceprofile, "traceprofile", "", "write an execution trace to `file`")
-	flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
-	flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
-	flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
-	flag.BoolVar(&smallFrames, "smallframes", false, "reduce the size limit for stack allocated objects")
-	flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF")
-	flag.StringVar(&jsonLogOpt, "json", "", "version,destination for JSON compiler/optimizer logging")
-
-	objabi.Flagparse(usage)
-
-	Ctxt.Pkgpath = myimportpath
-
-	for _, f := range strings.Split(spectre, ",") {
-		f = strings.TrimSpace(f)
-		switch f {
-		default:
-			log.Fatalf("unknown setting -spectre=%s", f)
-		case "":
-			// nothing
-		case "all":
-			spectreIndex = true
-			Ctxt.Retpoline = true
-		case "index":
-			spectreIndex = true
-		case "ret":
-			Ctxt.Retpoline = true
-		}
-	}
-
-	if spectreIndex {
-		switch objabi.GOARCH {
-		case "amd64":
-			// ok
-		default:
-			log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
-		}
-	}
+	base.DebugSSA = ssa.PhaseOption
+	base.ParseFlags()
 
 	// Record flags that affect the build result. (And don't
 	// record flags that don't, since that would cause spurious
 	// changes in the binary.)
-	recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
+	dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
 
-	if smallFrames {
-		maxStackVarSize = 128 * 1024
-		maxImplicitStackVarSize = 16 * 1024
+	if !base.EnableTrace && base.Flag.LowerT {
+		log.Fatalf("compiler not built with support for -t")
 	}
 
-	Ctxt.Flag_shared = flag_dynlink || flag_shared
-	Ctxt.Flag_dynlink = flag_dynlink
-	Ctxt.Flag_optimize = Debug.N == 0
+	// Enable inlining (after RecordFlags, to avoid recording the rewritten -l).  For now:
+	//	default: inlining on.  (Flag.LowerL == 1)
+	//	-l: inlining off  (Flag.LowerL == 0)
+	//	-l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
+	if base.Flag.LowerL <= 1 {
+		base.Flag.LowerL = 1 - base.Flag.LowerL
+	}
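Worked through, the normalization maps the raw count of -l occurrences to the effective level (an illustrative table, consistent with the comment above):

	flag given    Flag.LowerL before    after 1-x    meaning
	(none)        0                     1            inlining on
	-l            1                     0            inlining off
	-l=2, -l=3    2, 3                  unchanged    on, with extra debugging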
 
-	Ctxt.Debugasm = Debug.S
-	Ctxt.Debugvlog = Debug_vlog
-	if flagDWARF {
-		Ctxt.DebugInfo = debuginfo
-		Ctxt.GenAbstractFunc = genAbstractFunc
-		Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt)
+	if base.Flag.SmallFrames {
+		ir.MaxStackVarSize = 128 * 1024
+		ir.MaxImplicitStackVarSize = 16 * 1024
+	}
+
+	if base.Flag.Dwarf {
+		base.Ctxt.DebugInfo = dwarfgen.Info
+		base.Ctxt.GenAbstractFunc = dwarfgen.AbstractFunc
+		base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
 	} else {
 		// turn off inline generation if no dwarf at all
-		genDwarfInline = 0
-		Ctxt.Flag_locationlists = false
+		base.Flag.GenDwarfInl = 0
+		base.Ctxt.Flag_locationlists = false
+	}
+	if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 {
+		log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name)
 	}
 
-	if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" {
-		usage()
+	types.ParseLangFlag()
+
+	if base.Flag.SymABIs != "" {
+		ssagen.ReadSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
 	}
 
-	if goversion != "" && goversion != runtime.Version() {
-		fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), goversion)
-		Exit(2)
+	if base.Compiling(base.NoInstrumentPkgs) {
+		base.Flag.Race = false
+		base.Flag.MSan = false
 	}
 
-	checkLang()
-
-	if symabisPath != "" {
-		readSymABIs(symabisPath, myimportpath)
-	}
-
-	thearch.LinkArch.Init(Ctxt)
-
-	if outfile == "" {
-		p := flag.Arg(0)
-		if i := strings.LastIndex(p, "/"); i >= 0 {
-			p = p[i+1:]
-		}
-		if runtime.GOOS == "windows" {
-			if i := strings.LastIndex(p, `\`); i >= 0 {
-				p = p[i+1:]
-			}
-		}
-		if i := strings.LastIndex(p, "."); i >= 0 {
-			p = p[:i]
-		}
-		suffix := ".o"
-		if writearchive {
-			suffix = ".a"
-		}
-		outfile = p + suffix
-	}
-
+	ssagen.Arch.LinkArch.Init(base.Ctxt)
 	startProfile()
-
-	if flag_race && flag_msan {
-		log.Fatal("cannot use both -race and -msan")
+	if base.Flag.Race || base.Flag.MSan {
+		base.Flag.Cfg.Instrumenting = true
 	}
-	if flag_race || flag_msan {
-		// -race and -msan imply -d=checkptr for now.
-		Debug_checkptr = 1
+	if base.Flag.Dwarf {
+		dwarf.EnableLogging(base.Debug.DwarfInl != 0)
 	}
-	if ispkgin(omit_pkgs) {
-		flag_race = false
-		flag_msan = false
-	}
-	if flag_race {
-		racepkg = types.NewPkg("runtime/race", "")
-	}
-	if flag_msan {
-		msanpkg = types.NewPkg("runtime/msan", "")
-	}
-	if flag_race || flag_msan {
-		instrumenting = true
+	if base.Debug.SoftFloat != 0 {
+		ssagen.Arch.SoftFloat = true
 	}
 
-	if compiling_runtime && Debug.N != 0 {
-		log.Fatal("cannot disable optimizations while compiling runtime")
-	}
-	if nBackendWorkers < 1 {
-		log.Fatalf("-c must be at least 1, got %d", nBackendWorkers)
-	}
-	if nBackendWorkers > 1 && !concurrentBackendAllowed() {
-		log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
-	}
-	if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 {
-		log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name)
+	if base.Flag.JSON != "" { // parse version,destination from json logging optimization.
+		logopt.LogJsonOption(base.Flag.JSON)
 	}
 
-	// parse -d argument
-	if debugstr != "" {
-	Split:
-		for _, name := range strings.Split(debugstr, ",") {
-			if name == "" {
-				continue
-			}
-			// display help about the -d option itself and quit
-			if name == "help" {
-				fmt.Print(debugHelpHeader)
-				maxLen := len("ssa/help")
-				for _, t := range debugtab {
-					if len(t.name) > maxLen {
-						maxLen = len(t.name)
-					}
-				}
-				for _, t := range debugtab {
-					fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
-				}
-				// ssa options have their own help
-				fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
-				fmt.Print(debugHelpFooter)
-				os.Exit(0)
-			}
-			val, valstring, haveInt := 1, "", true
-			if i := strings.IndexAny(name, "=:"); i >= 0 {
-				var err error
-				name, valstring = name[:i], name[i+1:]
-				val, err = strconv.Atoi(valstring)
-				if err != nil {
-					val, haveInt = 1, false
-				}
-			}
-			for _, t := range debugtab {
-				if t.name != name {
-					continue
-				}
-				switch vp := t.val.(type) {
-				case nil:
-					// Ignore
-				case *string:
-					*vp = valstring
-				case *int:
-					if !haveInt {
-						log.Fatalf("invalid debug value %v", name)
-					}
-					*vp = val
-				default:
-					panic("bad debugtab type")
-				}
-				continue Split
-			}
-			// special case for ssa for now
-			if strings.HasPrefix(name, "ssa/") {
-				// expect form ssa/phase/flag
-				// e.g. -d=ssa/generic_cse/time
-				// _ in phase name also matches space
-				phase := name[4:]
-				flag := "debug" // default flag is debug
-				if i := strings.Index(phase, "/"); i >= 0 {
-					flag = phase[i+1:]
-					phase = phase[:i]
-				}
-				err := ssa.PhaseOption(phase, flag, val, valstring)
-				if err != "" {
-					log.Fatalf(err)
-				}
-				continue Split
-			}
-			log.Fatalf("unknown debug key -d %s\n", name)
+	ir.EscFmt = escape.Fmt
+	ir.IsIntrinsicCall = ssagen.IsIntrinsicCall
+	inline.SSADumpInline = ssagen.DumpInline
+	ssagen.InitEnv()
+	ssagen.InitTables()
+
+	types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+	types.RegSize = ssagen.Arch.LinkArch.RegSize
+	types.MaxWidth = ssagen.Arch.MAXWIDTH
+
+	typecheck.Target = new(ir.Package)
+
+	typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
+	typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
+
+	base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
+
+	typecheck.InitUniverse()
+
+	// Parse and typecheck input.
+	noder.LoadPackage(flag.Args())
+
+	dwarfgen.RecordPackageName()
+	ssagen.CgoSymABIs()
+
+	// Build init task.
+	if initTask := pkginit.Task(); initTask != nil {
+		typecheck.Export(initTask)
+	}
+
+	// Eliminate some obviously dead code.
+	// Must happen after typechecking.
+	for _, n := range typecheck.Target.Decls {
+		if n.Op() == ir.ODCLFUNC {
+			deadcode.Func(n.(*ir.Func))
 		}
 	}
 
-	if compiling_runtime {
-		// Runtime can't use -d=checkptr, at least not yet.
-		Debug_checkptr = 0
-
-		// Fuzzing the runtime isn't interesting either.
-		Debug_libfuzzer = 0
+	// Compute Addrtaken for names.
+	// We need to wait until typechecking is done so that when we see &x[i]
+	// we know that x has its address taken if x is an array, but not if x is a slice.
+	// We compute Addrtaken in bulk here.
+	// After this phase, we maintain Addrtaken incrementally.
+	if typecheck.DirtyAddrtaken {
+		typecheck.ComputeAddrtaken(typecheck.Target.Decls)
+		typecheck.DirtyAddrtaken = false
 	}
+	typecheck.IncrementalAddrtaken = true
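The array/slice distinction called out in the comment above, as a tiny sketch (user code):

	package main

	func main() {
		var a [4]int
		s := make([]int, 4)
		pa := &a[1] // takes the address of a itself: a becomes Addrtaken
		ps := &s[1] // addresses the backing array, not s: s is not Addrtaken
		_, _ = pa, ps
	}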
 
-	// set via a -d flag
-	Ctxt.Debugpcln = Debug_pctab
-	if flagDWARF {
-		dwarf.EnableLogging(Debug_gendwarfinl != 0)
-	}
-
-	if Debug_softfloat != 0 {
-		thearch.SoftFloat = true
-	}
-
-	// enable inlining.  for now:
-	//	default: inlining on.  (Debug.l == 1)
-	//	-l: inlining off  (Debug.l == 0)
-	//	-l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
-	if Debug.l <= 1 {
-		Debug.l = 1 - Debug.l
-	}
-
-	if jsonLogOpt != "" { // parse version,destination from json logging optimization.
-		logopt.LogJsonOption(jsonLogOpt)
-	}
-
-	ssaDump = os.Getenv("GOSSAFUNC")
-	ssaDir = os.Getenv("GOSSADIR")
-	if ssaDump != "" {
-		if strings.HasSuffix(ssaDump, "+") {
-			ssaDump = ssaDump[:len(ssaDump)-1]
-			ssaDumpStdout = true
-		}
-		spl := strings.Split(ssaDump, ":")
-		if len(spl) > 1 {
-			ssaDump = spl[0]
-			ssaDumpCFG = spl[1]
-		}
-	}
-
-	trackScopes = flagDWARF
-
-	Widthptr = thearch.LinkArch.PtrSize
-	Widthreg = thearch.LinkArch.RegSize
-
-	// initialize types package
-	// (we need to do this to break dependencies that otherwise
-	// would lead to import cycles)
-	types.Widthptr = Widthptr
-	types.Dowidth = dowidth
-	types.Fatalf = Fatalf
-	types.Sconv = func(s *types.Sym, flag, mode int) string {
-		return sconv(s, FmtFlag(flag), fmtMode(mode))
-	}
-	types.Tconv = func(t *types.Type, flag, mode int) string {
-		return tconv(t, FmtFlag(flag), fmtMode(mode))
-	}
-	types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
-		symFormat(sym, s, verb, fmtMode(mode))
-	}
-	types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
-		typeFormat(t, s, verb, fmtMode(mode))
-	}
-	types.TypeLinkSym = func(t *types.Type) *obj.LSym {
-		return typenamesym(t).Linksym()
-	}
-	types.FmtLeft = int(FmtLeft)
-	types.FmtUnsigned = int(FmtUnsigned)
-	types.FErr = int(FErr)
-	types.Ctxt = Ctxt
-
-	initUniverse()
-
-	dclcontext = PEXTERN
-	nerrors = 0
-
-	autogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
-
-	timings.Start("fe", "loadsys")
-	loadsys()
-
-	timings.Start("fe", "parse")
-	lines := parseFiles(flag.Args())
-	timings.Stop()
-	timings.AddEvent(int64(lines), "lines")
-
-	finishUniverse()
-
-	recordPackageName()
-
-	typecheckok = true
-
-	// Process top-level declarations in phases.
-
-	// Phase 1: const, type, and names and types of funcs.
-	//   This will gather all the information about types
-	//   and methods but doesn't depend on any of it.
-	//
-	//   We also defer type alias declarations until phase 2
-	//   to avoid cycles like #18640.
-	//   TODO(gri) Remove this again once we have a fix for #25838.
-
-	// Don't use range--typecheck can add closures to xtop.
-	timings.Start("fe", "typecheck", "top1")
-	for i := 0; i < len(xtop); i++ {
-		n := xtop[i]
-		if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
-			xtop[i] = typecheck(n, ctxStmt)
-		}
-	}
-
-	// Phase 2: Variable assignments.
-	//   To check interface assignments, depends on phase 1.
-
-	// Don't use range--typecheck can add closures to xtop.
-	timings.Start("fe", "typecheck", "top2")
-	for i := 0; i < len(xtop); i++ {
-		n := xtop[i]
-		if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
-			xtop[i] = typecheck(n, ctxStmt)
-		}
-	}
-
-	// Phase 3: Type check function bodies.
-	// Don't use range--typecheck can add closures to xtop.
-	timings.Start("fe", "typecheck", "func")
-	var fcount int64
-	for i := 0; i < len(xtop); i++ {
-		n := xtop[i]
-		if n.Op == ODCLFUNC {
-			Curfn = n
-			decldepth = 1
-			saveerrors()
-			typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
-			checkreturn(Curfn)
-			if nerrors != 0 {
-				Curfn.Nbody.Set(nil) // type errors; do not compile
-			}
-			// Now that we've checked whether n terminates,
-			// we can eliminate some obviously dead code.
-			deadcode(Curfn)
-			fcount++
-		}
-	}
-	// With all types checked, it's now safe to verify map keys. One single
-	// check past phase 9 isn't sufficient, as we may exit with other errors
-	// before then, thus skipping map key errors.
-	checkMapKeys()
-	timings.AddEvent(fcount, "funcs")
-
-	if nsavederrors+nerrors != 0 {
-		errorexit()
-	}
-
-	fninit(xtop)
-
-	// Phase 4: Decide how to capture closed variables.
-	// This needs to run before escape analysis,
-	// because variables captured by value do not escape.
-	timings.Start("fe", "capturevars")
-	for _, n := range xtop {
-		if n.Op == ODCLFUNC && n.Func.Closure != nil {
-			Curfn = n
-			capturevars(n)
-		}
-	}
-	capturevarscomplete = true
-
-	Curfn = nil
-
-	if nsavederrors+nerrors != 0 {
-		errorexit()
-	}
-
-	// Phase 5: Inlining
-	timings.Start("fe", "inlining")
-	if Debug_typecheckinl != 0 {
+	if base.Debug.TypecheckInl != 0 {
 		// Typecheck imported function bodies if Debug.l > 1,
 		// otherwise lazily when used or re-exported.
-		for _, n := range importlist {
-			if n.Func.Inl != nil {
-				saveerrors()
-				typecheckinl(n)
-			}
-		}
-
-		if nsavederrors+nerrors != 0 {
-			errorexit()
-		}
+		typecheck.AllImportedBodies()
 	}
 
-	if Debug.l != 0 {
-		// Find functions that can be inlined and clone them before walk expands them.
-		visitBottomUp(xtop, func(list []*Node, recursive bool) {
-			numfns := numNonClosures(list)
-			for _, n := range list {
-				if !recursive || numfns > 1 {
-					// We allow inlining if there is no
-					// recursion, or the recursion cycle is
-					// across more than one function.
-					caninl(n)
-				} else {
-					if Debug.m > 1 {
-						fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
-					}
-				}
-				inlcalls(n)
-			}
-		})
+	// Inlining
+	base.Timer.Start("fe", "inlining")
+	if base.Flag.LowerL != 0 {
+		inline.InlinePackage()
 	}
 
-	for _, n := range xtop {
-		if n.Op == ODCLFUNC {
-			devirtualize(n)
+	// Devirtualize.
+	for _, n := range typecheck.Target.Decls {
+		if n.Op() == ir.ODCLFUNC {
+			devirtualize.Func(n.(*ir.Func))
 		}
 	}
-	Curfn = nil
+	ir.CurFunc = nil
 
-	// Phase 6: Escape analysis.
+	// Escape analysis.
 	// Required for moving heap allocations onto stack,
 	// which in turn is required by the closure implementation,
 	// which stores the addresses of stack variables into the closure.
@@ -719,140 +241,86 @@
 	// or else the stack copier will not update it.
 	// Large values are also moved off stack in escape analysis;
 	// because large values may contain pointers, it must happen early.
-	timings.Start("fe", "escapes")
-	escapes(xtop)
+	base.Timer.Start("fe", "escapes")
+	escape.Funcs(typecheck.Target.Decls)
 
 	// Collect information for go:nowritebarrierrec
-	// checking. This must happen before transformclosure.
+	// checking. This must happen before transforming closures during Walk.
 	// We'll do the final check after write barriers are
 	// inserted.
-	if compiling_runtime {
-		nowritebarrierrecCheck = newNowritebarrierrecChecker()
-	}
-
-	// Phase 7: Transform closure bodies to properly reference captured variables.
-	// This needs to happen before walk, because closures must be transformed
-	// before walk reaches a call of a closure.
-	timings.Start("fe", "xclosures")
-	for _, n := range xtop {
-		if n.Op == ODCLFUNC && n.Func.Closure != nil {
-			Curfn = n
-			transformclosure(n)
-		}
+	if base.Flag.CompilingRuntime {
+		ssagen.EnableNoWriteBarrierRecCheck()
 	}
 
 	// Prepare for SSA compilation.
-	// This must be before peekitabs, because peekitabs
+	// This must be before CompileITabs, because CompileITabs
 	// can trigger function compilation.
-	initssaconfig()
+	typecheck.InitRuntime()
+	ssagen.InitConfig()
 
 	// Just before compilation, compile itabs found on
 	// the right side of OCONVIFACE so that methods
 	// can be de-virtualized during compilation.
-	Curfn = nil
-	peekitabs()
+	ir.CurFunc = nil
+	reflectdata.CompileITabs()
 
-	// Phase 8: Compile top level functions.
-	// Don't use range--walk can add functions to xtop.
-	timings.Start("be", "compilefuncs")
-	fcount = 0
-	for i := 0; i < len(xtop); i++ {
-		n := xtop[i]
-		if n.Op == ODCLFUNC {
-			funccompile(n)
+	// Compile top level functions.
+	// Don't use range--walk can add functions to Target.Decls.
+	base.Timer.Start("be", "compilefuncs")
+	fcount := int64(0)
+	for i := 0; i < len(typecheck.Target.Decls); i++ {
+		if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
+			enqueueFunc(fn)
 			fcount++
 		}
 	}
-	timings.AddEvent(fcount, "funcs")
+	base.Timer.AddEvent(fcount, "funcs")
 
 	compileFunctions()
 
-	if nowritebarrierrecCheck != nil {
-		// Write barriers are now known. Check the
-		// call graph.
-		nowritebarrierrecCheck.check()
-		nowritebarrierrecCheck = nil
+	if base.Flag.CompilingRuntime {
+		// Write barriers are now known. Check the call graph.
+		ssagen.NoWriteBarrierRecCheck()
 	}
 
 	// Finalize DWARF inline routine DIEs, then explicitly turn off
 	// DWARF inlining gen so as to avoid problems with generated
 	// method wrappers.
-	if Ctxt.DwFixups != nil {
-		Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0)
-		Ctxt.DwFixups = nil
-		genDwarfInline = 0
-	}
-
-	// Phase 9: Check external declarations.
-	timings.Start("be", "externaldcls")
-	for i, n := range externdcl {
-		if n.Op == ONAME {
-			externdcl[i] = typecheck(externdcl[i], ctxExpr)
-		}
-	}
-	// Check the map keys again, since we typechecked the external
-	// declarations.
-	checkMapKeys()
-
-	if nerrors+nsavederrors != 0 {
-		errorexit()
+	if base.Ctxt.DwFixups != nil {
+		base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+		base.Ctxt.DwFixups = nil
+		base.Flag.GenDwarfInl = 0
 	}
 
 	// Write object data to disk.
-	timings.Start("be", "dumpobj")
+	base.Timer.Start("be", "dumpobj")
 	dumpdata()
-	Ctxt.NumberSyms()
+	base.Ctxt.NumberSyms()
 	dumpobj()
-	if asmhdr != "" {
+	if base.Flag.AsmHdr != "" {
 		dumpasmhdr()
 	}
 
-	// Check whether any of the functions we have compiled have gigantic stack frames.
-	sort.Slice(largeStackFrames, func(i, j int) bool {
-		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
-	})
-	for _, large := range largeStackFrames {
-		if large.callee != 0 {
-			yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
-		} else {
-			yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
-		}
-	}
+	ssagen.CheckLargeStacks()
+	typecheck.CheckFuncStack()
 
-	if len(funcStack) != 0 {
-		Fatalf("funcStack is non-empty: %v", len(funcStack))
-	}
 	if len(compilequeue) != 0 {
-		Fatalf("%d uncompiled functions", len(compilequeue))
+		base.Fatalf("%d uncompiled functions", len(compilequeue))
 	}
 
-	logopt.FlushLoggedOpts(Ctxt, myimportpath)
+	logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath)
+	base.ExitIfErrors()
 
-	if nerrors+nsavederrors != 0 {
-		errorexit()
-	}
+	base.FlushErrors()
+	base.Timer.Stop()
 
-	flusherrors()
-	timings.Stop()
-
-	if benchfile != "" {
-		if err := writebench(benchfile); err != nil {
+	if base.Flag.Bench != "" {
+		if err := writebench(base.Flag.Bench); err != nil {
 			log.Fatalf("cannot write benchmark data: %v", err)
 		}
 	}
 }
 
-// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*Node) int {
-	count := 0
-	for _, n := range list {
-		if n.Func.Closure == nil {
-			count++
-		}
-	}
-	return count
-}
-
 func writebench(filename string) error {
 	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
 	if err != nil {
@@ -863,7 +331,7 @@
 	fmt.Fprintln(&buf, "commit:", objabi.Version)
 	fmt.Fprintln(&buf, "goos:", runtime.GOOS)
 	fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
-	timings.Write(&buf, "BenchmarkCompile:"+myimportpath+":")
+	base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
 
 	n, err := f.Write(buf.Bytes())
 	if err != nil {
@@ -876,735 +344,6 @@
 	return f.Close()
 }
 
-var (
-	importMap   = map[string]string{}
-	packageFile map[string]string // nil means not in use
-)
-
-func addImportMap(s string) {
-	if strings.Count(s, "=") != 1 {
-		log.Fatal("-importmap argument must be of the form source=actual")
-	}
-	i := strings.Index(s, "=")
-	source, actual := s[:i], s[i+1:]
-	if source == "" || actual == "" {
-		log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
-	}
-	importMap[source] = actual
-}
-
-func readImportCfg(file string) {
-	packageFile = map[string]string{}
-	data, err := ioutil.ReadFile(file)
-	if err != nil {
-		log.Fatalf("-importcfg: %v", err)
-	}
-
-	for lineNum, line := range strings.Split(string(data), "\n") {
-		lineNum++ // 1-based
-		line = strings.TrimSpace(line)
-		if line == "" || strings.HasPrefix(line, "#") {
-			continue
-		}
-
-		var verb, args string
-		if i := strings.Index(line, " "); i < 0 {
-			verb = line
-		} else {
-			verb, args = line[:i], strings.TrimSpace(line[i+1:])
-		}
-		var before, after string
-		if i := strings.Index(args, "="); i >= 0 {
-			before, after = args[:i], args[i+1:]
-		}
-		switch verb {
-		default:
-			log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
-		case "importmap":
-			if before == "" || after == "" {
-				log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
-			}
-			importMap[before] = after
-		case "packagefile":
-			if before == "" || after == "" {
-				log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
-			}
-			packageFile[before] = after
-		}
-	}
-}
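
For illustration, a minimal standalone sketch of the line format readImportCfg accepts; the directives and paths below are hypothetical, not from a real build:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Hypothetical importcfg lines: blank lines and # comments are
		// skipped; each directive is "verb args", with args "before=after".
		lines := []string{
			"# comment",
			"importmap old/path=new/path",
			"packagefile fmt=/tmp/fmt.a",
		}
		for _, line := range lines {
			line = strings.TrimSpace(line)
			if line == "" || strings.HasPrefix(line, "#") {
				continue
			}
			verb, args := line, ""
			if i := strings.Index(line, " "); i >= 0 {
				verb, args = line[:i], strings.TrimSpace(line[i+1:])
			}
			var before, after string
			if i := strings.Index(args, "="); i >= 0 {
				before, after = args[:i], args[i+1:]
			}
			fmt.Printf("verb=%q before=%q after=%q\n", verb, before, after)
		}
	}
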
-
-// symabiDefs and symabiRefs record the defined and referenced ABIs of
-// symbols required by non-Go code. These are keyed by link symbol
-// name, where the local package prefix is always `"".`
-var symabiDefs, symabiRefs map[string]obj.ABI
-
-// readSymABIs reads a symabis file that specifies definitions and
-// references of text symbols by ABI.
-//
-// The symabis format is a set of lines, where each line is a sequence
-// of whitespace-separated fields. The first field is a verb and is
-// either "def" for defining a symbol ABI or "ref" for referencing a
-// symbol using an ABI. For both "def" and "ref", the second field is
-// the symbol name and the third field is the ABI name, as one of the
-// named cmd/internal/obj.ABI constants.
-func readSymABIs(file, myimportpath string) {
-	data, err := ioutil.ReadFile(file)
-	if err != nil {
-		log.Fatalf("-symabis: %v", err)
-	}
-
-	symabiDefs = make(map[string]obj.ABI)
-	symabiRefs = make(map[string]obj.ABI)
-
-	localPrefix := ""
-	if myimportpath != "" {
-		// Symbols in this package may be written either as
-		// "".X or with the package's import path already in
-		// the symbol.
-		localPrefix = objabi.PathToPrefix(myimportpath) + "."
-	}
-
-	for lineNum, line := range strings.Split(string(data), "\n") {
-		lineNum++ // 1-based
-		line = strings.TrimSpace(line)
-		if line == "" || strings.HasPrefix(line, "#") {
-			continue
-		}
-
-		parts := strings.Fields(line)
-		switch parts[0] {
-		case "def", "ref":
-			// Parse line.
-			if len(parts) != 3 {
-				log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
-			}
-			sym, abistr := parts[1], parts[2]
-			abi, valid := obj.ParseABI(abistr)
-			if !valid {
-				log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
-			}
-
-			// If the symbol is already prefixed with
-			// myimportpath, rewrite it to start with ""
-			// so it matches the compiler's internal
-			// symbol names.
-			if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
-				sym = `"".` + sym[len(localPrefix):]
-			}
-
-			// Record for later.
-			if parts[0] == "def" {
-				symabiDefs[sym] = abi
-			} else {
-				symabiRefs[sym] = abi
-			}
-		default:
-			log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
-		}
-	}
-}
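
A minimal sketch of the def/ref format described above; the symbol names are made up, and plain strings stand in for the obj.ABI constants, since cmd-internal packages can't be imported elsewhere:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Hypothetical symabis lines: "def sym abi" or "ref sym abi".
		lines := []string{
			`def "".mysym ABI0`,
			"ref runtime.morestack ABIInternal",
		}
		defs := make(map[string]string)
		refs := make(map[string]string)
		for _, line := range lines {
			parts := strings.Fields(line)
			if len(parts) != 3 {
				continue // the real reader reports a fatal error here
			}
			switch parts[0] {
			case "def":
				defs[parts[1]] = parts[2]
			case "ref":
				refs[parts[1]] = parts[2]
			}
		}
		fmt.Println("defs:", defs)
		fmt.Println("refs:", refs)
	}
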
-
-func saveerrors() {
-	nsavederrors += nerrors
-	nerrors = 0
-}
-
-func arsize(b *bufio.Reader, name string) int {
-	var buf [ArhdrSize]byte
-	if _, err := io.ReadFull(b, buf[:]); err != nil {
-		return -1
-	}
-	aname := strings.Trim(string(buf[0:16]), " ")
-	if !strings.HasPrefix(aname, name) {
-		return -1
-	}
-	asize := strings.Trim(string(buf[48:58]), " ")
-	i, _ := strconv.Atoi(asize)
-	return i
-}
-
-var idirs []string
-
-func addidir(dir string) {
-	if dir != "" {
-		idirs = append(idirs, dir)
-	}
-}
-
-func isDriveLetter(b byte) bool {
-	return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
-}
-
-// islocalname reports whether name is a local path: it begins with / (or a Windows drive letter), ./, or ../, or is "." or "..".
-func islocalname(name string) bool {
-	return strings.HasPrefix(name, "/") ||
-		runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
-		strings.HasPrefix(name, "./") || name == "." ||
-		strings.HasPrefix(name, "../") || name == ".."
-}
-
-func findpkg(name string) (file string, ok bool) {
-	if islocalname(name) {
-		if nolocalimports {
-			return "", false
-		}
-
-		if packageFile != nil {
-			file, ok = packageFile[name]
-			return file, ok
-		}
-
-		// try .a before .o. important for building libraries:
-		// if there is an array.o in the array.a library,
-		// want to find all of array.a, not just array.o.
-		file = fmt.Sprintf("%s.a", name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		file = fmt.Sprintf("%s.o", name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		return "", false
-	}
-
-	// local imports should be canonicalized already.
-	// don't want to see "encoding/../encoding/base64"
-	// as different from "encoding/base64".
-	if q := path.Clean(name); q != name {
-		yyerror("non-canonical import path %q (should be %q)", name, q)
-		return "", false
-	}
-
-	if packageFile != nil {
-		file, ok = packageFile[name]
-		return file, ok
-	}
-
-	for _, dir := range idirs {
-		file = fmt.Sprintf("%s/%s.a", dir, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		file = fmt.Sprintf("%s/%s.o", dir, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-	}
-
-	if objabi.GOROOT != "" {
-		suffix := ""
-		suffixsep := ""
-		if flag_installsuffix != "" {
-			suffixsep = "_"
-			suffix = flag_installsuffix
-		} else if flag_race {
-			suffixsep = "_"
-			suffix = "race"
-		} else if flag_msan {
-			suffixsep = "_"
-			suffix = "msan"
-		}
-
-		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-	}
-
-	return "", false
-}
-
-// loadsys loads the definitions for the low-level runtime functions,
-// so that the compiler can generate calls to them,
-// but does not make them visible to user code.
-func loadsys() {
-	types.Block = 1
-
-	inimport = true
-	typecheckok = true
-
-	typs := runtimeTypes()
-	for _, d := range &runtimeDecls {
-		sym := Runtimepkg.Lookup(d.name)
-		typ := typs[d.typ]
-		switch d.tag {
-		case funcTag:
-			importfunc(Runtimepkg, src.NoXPos, sym, typ)
-		case varTag:
-			importvar(Runtimepkg, src.NoXPos, sym, typ)
-		default:
-			Fatalf("unhandled declaration tag %v", d.tag)
-		}
-	}
-
-	typecheckok = false
-	inimport = false
-}
-
-// myheight tracks the local package's height based on packages
-// imported so far.
-var myheight int
-
-func importfile(f *Val) *types.Pkg {
-	path_, ok := f.U.(string)
-	if !ok {
-		yyerror("import path must be a string")
-		return nil
-	}
-
-	if len(path_) == 0 {
-		yyerror("import path is empty")
-		return nil
-	}
-
-	if isbadimport(path_, false) {
-		return nil
-	}
-
-	// The package name main is no longer reserved,
-	// but we reserve the import path "main" to identify
-	// the main package, just as we reserve the import
-	// path "math" to identify the standard math package.
-	if path_ == "main" {
-		yyerror("cannot import \"main\"")
-		errorexit()
-	}
-
-	if myimportpath != "" && path_ == myimportpath {
-		yyerror("import %q while compiling that package (import cycle)", path_)
-		errorexit()
-	}
-
-	if mapped, ok := importMap[path_]; ok {
-		path_ = mapped
-	}
-
-	if path_ == "unsafe" {
-		return unsafepkg
-	}
-
-	if islocalname(path_) {
-		if path_[0] == '/' {
-			yyerror("import path cannot be absolute path")
-			return nil
-		}
-
-		prefix := Ctxt.Pathname
-		if localimport != "" {
-			prefix = localimport
-		}
-		path_ = path.Join(prefix, path_)
-
-		if isbadimport(path_, true) {
-			return nil
-		}
-	}
-
-	file, found := findpkg(path_)
-	if !found {
-		yyerror("can't find import: %q", path_)
-		errorexit()
-	}
-
-	importpkg := types.NewPkg(path_, "")
-	if importpkg.Imported {
-		return importpkg
-	}
-
-	importpkg.Imported = true
-
-	imp, err := bio.Open(file)
-	if err != nil {
-		yyerror("can't open import: %q: %v", path_, err)
-		errorexit()
-	}
-	defer imp.Close()
-
-	// check object header
-	p, err := imp.ReadString('\n')
-	if err != nil {
-		yyerror("import %s: reading input: %v", file, err)
-		errorexit()
-	}
-
-	if p == "!<arch>\n" { // package archive
-		// package export block should be first
-		sz := arsize(imp.Reader, "__.PKGDEF")
-		if sz <= 0 {
-			yyerror("import %s: not a package file", file)
-			errorexit()
-		}
-		p, err = imp.ReadString('\n')
-		if err != nil {
-			yyerror("import %s: reading input: %v", file, err)
-			errorexit()
-		}
-	}
-
-	if !strings.HasPrefix(p, "go object ") {
-		yyerror("import %s: not a go object file: %s", file, p)
-		errorexit()
-	}
-	q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
-	if p[10:] != q {
-		yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
-		errorexit()
-	}
-
-	// process header lines
-	for {
-		p, err = imp.ReadString('\n')
-		if err != nil {
-			yyerror("import %s: reading input: %v", file, err)
-			errorexit()
-		}
-		if p == "\n" {
-			break // header ends with blank line
-		}
-	}
-
-	// In the import data, if we find:
-	// $$\n  (textual format): not supported anymore
-	// $$B\n (binary format): import directly, then feed the lexer a dummy statement
-
-	// look for $$
-	var c byte
-	for {
-		c, err = imp.ReadByte()
-		if err != nil {
-			break
-		}
-		if c == '$' {
-			c, err = imp.ReadByte()
-			if c == '$' || err != nil {
-				break
-			}
-		}
-	}
-
-	// get character after $$
-	if err == nil {
-		c, _ = imp.ReadByte()
-	}
-
-	var fingerprint goobj.FingerprintType
-	switch c {
-	case '\n':
-		yyerror("cannot import %s: old export format no longer supported (recompile library)", path_)
-		return nil
-
-	case 'B':
-		if Debug_export != 0 {
-			fmt.Printf("importing %s (%s)\n", path_, file)
-		}
-		imp.ReadByte() // skip \n after $$B
-
-		c, err = imp.ReadByte()
-		if err != nil {
-			yyerror("import %s: reading input: %v", file, err)
-			errorexit()
-		}
-
-		// Indexed format is distinguished by an 'i' byte,
-		// whereas previous export formats started with 'c', 'd', or 'v'.
-		if c != 'i' {
-			yyerror("import %s: unexpected package format byte: %v", file, c)
-			errorexit()
-		}
-		fingerprint = iimport(importpkg, imp)
-
-	default:
-		yyerror("no import in %q", path_)
-		errorexit()
-	}
-
-	// assume files move (get installed) so don't record the full path
-	if packageFile != nil {
-		// If using a packageFile map, assume path_ can be recorded directly.
-		Ctxt.AddImport(path_, fingerprint)
-	} else {
-		// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
-		Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
-	}
-
-	if importpkg.Height >= myheight {
-		myheight = importpkg.Height + 1
-	}
-
-	return importpkg
-}
-
-func pkgnotused(lineno src.XPos, path string, name string) {
-	// If the package was imported with a name other than the final
-	// import path element, show it explicitly in the error message.
-	// Note that this handles both renamed imports and imports of
-	// packages containing unconventional package declarations.
-	// Note that this uses / always, even on Windows, because Go import
-	// paths always use forward slashes.
-	elem := path
-	if i := strings.LastIndex(elem, "/"); i >= 0 {
-		elem = elem[i+1:]
-	}
-	if name == "" || elem == name {
-		yyerrorl(lineno, "imported and not used: %q", path)
-	} else {
-		yyerrorl(lineno, "imported and not used: %q as %s", path, name)
-	}
-}
-
-func mkpackage(pkgname string) {
-	if localpkg.Name == "" {
-		if pkgname == "_" {
-			yyerror("invalid package name _")
-		}
-		localpkg.Name = pkgname
-	} else {
-		if pkgname != localpkg.Name {
-			yyerror("package %s; expected %s", pkgname, localpkg.Name)
-		}
-	}
-}
-
-func clearImports() {
-	type importedPkg struct {
-		pos  src.XPos
-		path string
-		name string
-	}
-	var unused []importedPkg
-
-	for _, s := range localpkg.Syms {
-		n := asNode(s.Def)
-		if n == nil {
-			continue
-		}
-		if n.Op == OPACK {
-			// throw away top-level package name left over
-			// from previous file.
-			// leave s->block set to cause redeclaration
-			// errors if a conflicting top-level name is
-			// introduced by a different file.
-			if !n.Name.Used() && nsyntaxerrors == 0 {
-				unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name})
-			}
-			s.Def = nil
-			continue
-		}
-		if IsAlias(s) {
-			// throw away top-level name left over
-			// from previous import . "x"
-			if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && nsyntaxerrors == 0 {
-				unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""})
-				n.Name.Pack.Name.SetUsed(true)
-			}
-			s.Def = nil
-			continue
-		}
-	}
-
-	sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
-	for _, pkg := range unused {
-		pkgnotused(pkg.pos, pkg.path, pkg.name)
-	}
-}
-
-func IsAlias(sym *types.Sym) bool {
-	return sym.Def != nil && asNode(sym.Def).Sym != sym
-}
-
-// By default, assume any debug flags are incompatible with concurrent
-// compilation. A few are safe and potentially in common use for
-// normal compiles, though; return true for those.
-func concurrentFlagOk() bool {
-	// Report whether any debug flag that would prevent concurrent
-	// compilation is set, by zeroing out the allowed ones and then
-	// checking if the resulting struct is zero.
-	d := Debug
-	d.B = 0 // disable bounds checking
-	d.C = 0 // disable printing of columns in error messages
-	d.e = 0 // no limit on errors; errors all come from non-concurrent code
-	d.N = 0 // disable optimizations
-	d.l = 0 // disable inlining
-	d.w = 0 // all printing happens before compilation
-	d.W = 0 // all printing happens before compilation
-	d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
-
-	return d == DebugFlags{}
-}
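
The zero-out-and-compare trick used here works for any comparable struct; a minimal sketch with a hypothetical flags struct:

	package main

	import "fmt"

	type debugFlags struct {
		B, C, N int // hypothetical flag fields
	}

	// concurrencySafe reports whether only the allowed flags are set,
	// by zeroing them in a copy and comparing against the zero struct.
	func concurrencySafe(d debugFlags) bool {
		d.B = 0 // allowed
		d.N = 0 // allowed
		return d == debugFlags{}
	}

	func main() {
		fmt.Println(concurrencySafe(debugFlags{B: 1}))       // true
		fmt.Println(concurrencySafe(debugFlags{C: 1, N: 2})) // false
	}
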
-
-func concurrentBackendAllowed() bool {
-	if !concurrentFlagOk() {
-		return false
-	}
-
-	// Debug.S by itself is ok, because all printing occurs
-	// while writing the object file, and that is non-concurrent.
-	// Adding Debug_vlog, however, causes Debug.S to also print
-	// while flushing the plist, which happens concurrently.
-	if Debug_vlog || debugstr != "" || debuglive > 0 {
-		return false
-	}
-	// TODO: Test and delete this condition.
-	if objabi.Fieldtrack_enabled != 0 {
-		return false
-	}
-	// TODO: fix races and enable the following flags
-	if Ctxt.Flag_shared || Ctxt.Flag_dynlink || flag_race {
-		return false
-	}
-	return true
-}
-
-// recordFlags records the specified command-line flags to be placed
-// in the DWARF info.
-func recordFlags(flags ...string) {
-	if myimportpath == "" {
-		// We can't record the flags if we don't know what the
-		// package name is.
-		return
-	}
-
-	type BoolFlag interface {
-		IsBoolFlag() bool
-	}
-	type CountFlag interface {
-		IsCountFlag() bool
-	}
-	var cmd bytes.Buffer
-	for _, name := range flags {
-		f := flag.Lookup(name)
-		if f == nil {
-			continue
-		}
-		getter := f.Value.(flag.Getter)
-		if getter.String() == f.DefValue {
-			// Flag has default value, so omit it.
-			continue
-		}
-		if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
-			val, ok := getter.Get().(bool)
-			if ok && val {
-				fmt.Fprintf(&cmd, " -%s", f.Name)
-				continue
-			}
-		}
-		if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
-			val, ok := getter.Get().(int)
-			if ok && val == 1 {
-				fmt.Fprintf(&cmd, " -%s", f.Name)
-				continue
-			}
-		}
-		fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
-	}
-
-	if cmd.Len() == 0 {
-		return
-	}
-	s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath)
-	s.Type = objabi.SDWARFCUINFO
-	// Sometimes (for example when building tests) we can link
-	// together two package main archives. So allow dups.
-	s.Set(obj.AttrDuplicateOK, true)
-	Ctxt.Data = append(Ctxt.Data, s)
-	s.P = cmd.Bytes()[1:]
-}
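
recordFlags leans on the standard flag.Getter interface to detect non-default flags; a simplified standalone sketch (the flag names are hypothetical, and bool flags are rendered as -name=true rather than bare -name):

	package main

	import (
		"flag"
		"fmt"
	)

	func main() {
		flag.Bool("race", false, "hypothetical bool flag")
		flag.String("lang", "", "hypothetical string flag")
		flag.Parse()

		var cmd string
		for _, name := range []string{"race", "lang"} {
			f := flag.Lookup(name)
			// Every flag.Value created by the standard flag package
			// also implements flag.Getter.
			getter := f.Value.(flag.Getter)
			if getter.String() == f.DefValue {
				continue // default value, so omit it, as recordFlags does
			}
			cmd += fmt.Sprintf(" -%s=%v", f.Name, getter.Get())
		}
		fmt.Printf("recorded: %q\n", cmd)
	}
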
-
-// recordPackageName records the name of the package being
-// compiled, so that the linker can save it in the compile unit's DIE.
-func recordPackageName() {
-	s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + myimportpath)
-	s.Type = objabi.SDWARFCUINFO
-	// Sometimes (for example when building tests) we can link
-	// together two package main archives. So allow dups.
-	s.Set(obj.AttrDuplicateOK, true)
-	Ctxt.Data = append(Ctxt.Data, s)
-	s.P = []byte(localpkg.Name)
-}
-
-// flag_lang is the language version we are compiling for, set by the -lang flag.
-var flag_lang string
-
-// currentLang returns the current language version.
-func currentLang() string {
-	return fmt.Sprintf("go1.%d", goversion.Version)
-}
-
-// goVersionRE is a regular expression that matches the valid
-// arguments to the -lang flag.
-var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
-
-// A lang is a language version broken into major and minor numbers.
-type lang struct {
-	major, minor int
-}
-
-// langWant is the desired language version set by the -lang flag.
-// If the -lang flag is not set, this is the zero value, meaning that
-// any language version is supported.
-var langWant lang
-
-// langSupported reports whether language version major.minor is
-// supported in a particular package.
-func langSupported(major, minor int, pkg *types.Pkg) bool {
-	if pkg == nil {
-		// TODO(mdempsky): Set Pkg for local types earlier.
-		pkg = localpkg
-	}
-	if pkg != localpkg {
-		// Assume imported packages passed type-checking.
-		return true
-	}
-
-	if langWant.major == 0 && langWant.minor == 0 {
-		return true
-	}
-	return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
-}
-
-// checkLang verifies that the -lang flag holds a valid value, and
-// exits if not. It initializes data used by langSupported.
-func checkLang() {
-	if flag_lang == "" {
-		return
-	}
-
-	var err error
-	langWant, err = parseLang(flag_lang)
-	if err != nil {
-		log.Fatalf("invalid value %q for -lang: %v", flag_lang, err)
-	}
-
-	if def := currentLang(); flag_lang != def {
-		defVers, err := parseLang(def)
-		if err != nil {
-			log.Fatalf("internal error parsing default lang %q: %v", def, err)
-		}
-		if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
-			log.Fatalf("invalid value %q for -lang: max known version is %q", flag_lang, def)
-		}
-	}
-}
-
-// parseLang parses a -lang option into a lang value.
-func parseLang(s string) (lang, error) {
-	matches := goVersionRE.FindStringSubmatch(s)
-	if matches == nil {
-		return lang{}, fmt.Errorf(`should be something like "go1.12"`)
-	}
-	major, err := strconv.Atoi(matches[1])
-	if err != nil {
-		return lang{}, err
-	}
-	minor, err := strconv.Atoi(matches[2])
-	if err != nil {
-		return lang{}, err
-	}
-	return lang{major: major, minor: minor}, nil
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+	return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
 }
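
A runnable sketch of the -lang version logic above, reusing the same regular expression and comparison rule:

	package main

	import (
		"fmt"
		"regexp"
		"strconv"
	)

	var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)

	type lang struct{ major, minor int }

	func parseLang(s string) (lang, error) {
		m := goVersionRE.FindStringSubmatch(s)
		if m == nil {
			return lang{}, fmt.Errorf(`should be something like "go1.12"`)
		}
		major, _ := strconv.Atoi(m[1]) // regexp guarantees digits
		minor, _ := strconv.Atoi(m[2])
		return lang{major, minor}, nil
	}

	// supported mirrors langSupported: the configured version must be
	// at least major.minor for the feature to be allowed.
	func supported(want lang, major, minor int) bool {
		return want.major > major || (want.major == major && want.minor >= minor)
	}

	func main() {
		want, err := parseLang("go1.9")
		if err != nil {
			panic(err)
		}
		fmt.Println(supported(want, 1, 9))  // true: go1.9 allows 1.9 features
		fmt.Println(supported(want, 1, 13)) // false: 1.13 features rejected
	}
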
diff --git a/src/cmd/compile/internal/gc/mpfloat.go b/src/cmd/compile/internal/gc/mpfloat.go
deleted file mode 100644
index 401aef3..0000000
--- a/src/cmd/compile/internal/gc/mpfloat.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"math"
-	"math/big"
-)
-
-// implements float arithmetic
-
-const (
-	// Maximum size in bits for Mpints before signalling
-	// overflow and also mantissa precision for Mpflts.
-	Mpprec = 512
-	// Turn on for constant arithmetic debugging output.
-	Mpdebug = false
-)
-
-// Mpflt represents a floating-point constant.
-type Mpflt struct {
-	Val big.Float
-}
-
-// Mpcplx represents a complex constant.
-type Mpcplx struct {
-	Real Mpflt
-	Imag Mpflt
-}
-
-// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
-func newMpflt() *Mpflt {
-	var a Mpflt
-	a.Val.SetPrec(Mpprec)
-	return &a
-}
-
-// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
-func newMpcmplx() *Mpcplx {
-	var a Mpcplx
-	a.Real = *newMpflt()
-	a.Imag = *newMpflt()
-	return &a
-}
-
-func (a *Mpflt) SetInt(b *Mpint) {
-	if b.checkOverflow(0) {
-		// sign doesn't really matter but copy anyway
-		a.Val.SetInf(b.Val.Sign() < 0)
-		return
-	}
-	a.Val.SetInt(&b.Val)
-}
-
-func (a *Mpflt) Set(b *Mpflt) {
-	a.Val.Set(&b.Val)
-}
-
-func (a *Mpflt) Add(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("\n%v + %v", a, b)
-	}
-
-	a.Val.Add(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) AddFloat64(c float64) {
-	var b Mpflt
-
-	b.SetFloat64(c)
-	a.Add(&b)
-}
-
-func (a *Mpflt) Sub(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("\n%v - %v", a, b)
-	}
-
-	a.Val.Sub(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) Mul(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("%v\n * %v\n", a, b)
-	}
-
-	a.Val.Mul(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) MulFloat64(c float64) {
-	var b Mpflt
-
-	b.SetFloat64(c)
-	a.Mul(&b)
-}
-
-func (a *Mpflt) Quo(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("%v\n / %v\n", a, b)
-	}
-
-	a.Val.Quo(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) Cmp(b *Mpflt) int {
-	return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpflt) CmpFloat64(c float64) int {
-	if c == 0 {
-		return a.Val.Sign() // common case shortcut
-	}
-	return a.Val.Cmp(big.NewFloat(c))
-}
-
-func (a *Mpflt) Float64() float64 {
-	x, _ := a.Val.Float64()
-
-	// check for overflow
-	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
-		Fatalf("ovf in Mpflt Float64")
-	}
-
-	return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) Float32() float64 {
-	x32, _ := a.Val.Float32()
-	x := float64(x32)
-
-	// check for overflow
-	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
-		Fatalf("ovf in Mpflt Float32")
-	}
-
-	return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) SetFloat64(c float64) {
-	if Mpdebug {
-		fmt.Printf("\nconst %g", c)
-	}
-
-	// convert -0 to 0
-	if c == 0 {
-		c = 0
-	}
-	a.Val.SetFloat64(c)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n", a)
-	}
-}
-
-func (a *Mpflt) Neg() {
-	// avoid -0
-	if a.Val.Sign() != 0 {
-		a.Val.Neg(&a.Val)
-	}
-}
-
-func (a *Mpflt) SetString(as string) {
-	f, _, err := a.Val.Parse(as, 0)
-	if err != nil {
-		yyerror("malformed constant: %s (%v)", as, err)
-		a.Val.SetFloat64(0)
-		return
-	}
-
-	if f.IsInf() {
-		yyerror("constant too large: %s", as)
-		a.Val.SetFloat64(0)
-		return
-	}
-
-	// -0 becomes 0
-	if f.Sign() == 0 && f.Signbit() {
-		a.Val.SetFloat64(0)
-	}
-}
-
-func (f *Mpflt) String() string {
-	return f.Val.Text('b', 0)
-}
-
-func (fvp *Mpflt) GoString() string {
-	// determine sign
-	sign := ""
-	f := &fvp.Val
-	if f.Sign() < 0 {
-		sign = "-"
-		f = new(big.Float).Abs(f)
-	}
-
-	// Don't try to convert infinities (will not terminate).
-	if f.IsInf() {
-		return sign + "Inf"
-	}
-
-	// Use exact fmt formatting if in float64 range (common case):
-	// proceed if f doesn't underflow to 0 or overflow to inf.
-	if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
-		return fmt.Sprintf("%s%.6g", sign, x)
-	}
-
-	// Out of float64 range. Do approximate manual to decimal
-	// conversion to avoid precise but possibly slow Float
-	// formatting.
-	// f = mant * 2**exp
-	var mant big.Float
-	exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
-
-	// approximate float64 mantissa m and decimal exponent d
-	// f ~ m * 10**d
-	m, _ := mant.Float64()                     // 0.5 <= m < 1.0
-	d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
-
-	// adjust m for truncated (integer) decimal exponent e
-	e := int64(d)
-	m *= math.Pow(10, d-float64(e))
-
-	// ensure 1 <= m < 10
-	switch {
-	case m < 1-0.5e-6:
-		// The %.6g format below rounds m to 5 digits after the
-		// decimal point. Make sure that m*10 < 10 even after
-		// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
-		m *= 10
-		e--
-	case m >= 10:
-		m /= 10
-		e++
-	}
-
-	return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
-}
-
-// complex multiply v *= rv
-//	(a, b) * (c, d) = (a*c - b*d, b*c + a*d)
-func (v *Mpcplx) Mul(rv *Mpcplx) {
-	var ac, ad, bc, bd Mpflt
-
-	ac.Set(&v.Real)
-	ac.Mul(&rv.Real) // ac
-
-	bd.Set(&v.Imag)
-	bd.Mul(&rv.Imag) // bd
-
-	bc.Set(&v.Imag)
-	bc.Mul(&rv.Real) // bc
-
-	ad.Set(&v.Real)
-	ad.Mul(&rv.Imag) // ad
-
-	v.Real.Set(&ac)
-	v.Real.Sub(&bd) // ac-bd
-
-	v.Imag.Set(&bc)
-	v.Imag.Add(&ad) // bc+ad
-}
-
-// complex divide v /= rv
-//	(a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
-func (v *Mpcplx) Div(rv *Mpcplx) bool {
-	if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
-		return false
-	}
-
-	var ac, ad, bc, bd, cc_plus_dd Mpflt
-
-	cc_plus_dd.Set(&rv.Real)
-	cc_plus_dd.Mul(&rv.Real) // cc
-
-	ac.Set(&rv.Imag)
-	ac.Mul(&rv.Imag)    // dd
-	cc_plus_dd.Add(&ac) // cc+dd
-
-	// We already checked that c and d are not both zero, but we can't
-	// assume that c²+d² != 0 follows, because for tiny values of c
-	// and/or d, c²+d² can underflow to zero. Check that c²+d² is
-	// nonzero and return false if it's not.
-	if cc_plus_dd.CmpFloat64(0) == 0 {
-		return false
-	}
-
-	ac.Set(&v.Real)
-	ac.Mul(&rv.Real) // ac
-
-	bd.Set(&v.Imag)
-	bd.Mul(&rv.Imag) // bd
-
-	bc.Set(&v.Imag)
-	bc.Mul(&rv.Real) // bc
-
-	ad.Set(&v.Real)
-	ad.Mul(&rv.Imag) // ad
-
-	v.Real.Set(&ac)
-	v.Real.Add(&bd)         // ac+bd
-	v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
-
-	v.Imag.Set(&bc)
-	v.Imag.Sub(&ad)         // bc-ad
-	v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)
-
-	return true
-}
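
Both identities can be sanity-checked against Go's built-in complex arithmetic; a minimal sketch:

	package main

	import "fmt"

	func main() {
		a, b := 1.0, 2.0 // v = 1+2i
		c, d := 3.0, 4.0 // rv = 3+4i

		// multiply: (a,b)*(c,d) = (ac-bd, bc+ad)
		fmt.Println(complex(a*c-b*d, b*c+a*d)) // (-5+10i)
		fmt.Println(complex(a, b) * complex(c, d))

		// divide: (a,b)/(c,d) = ((ac+bd)/(cc+dd), (bc-ad)/(cc+dd))
		den := c*c + d*d
		fmt.Println(complex((a*c+b*d)/den, (b*c-a*d)/den)) // (0.44+0.08i)
		fmt.Println(complex(a, b) / complex(c, d))
	}
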
-
-func (v *Mpcplx) String() string {
-	return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
-}
-
-func (v *Mpcplx) GoString() string {
-	var re string
-	sre := v.Real.CmpFloat64(0)
-	if sre != 0 {
-		re = v.Real.GoString()
-	}
-
-	var im string
-	sim := v.Imag.CmpFloat64(0)
-	if sim != 0 {
-		im = v.Imag.GoString()
-	}
-
-	switch {
-	case sre == 0 && sim == 0:
-		return "0"
-	case sre == 0:
-		return im + "i"
-	case sim == 0:
-		return re
-	case sim < 0:
-		return fmt.Sprintf("(%s%si)", re, im)
-	default:
-		return fmt.Sprintf("(%s+%si)", re, im)
-	}
-}
diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go
deleted file mode 100644
index 340350b..0000000
--- a/src/cmd/compile/internal/gc/mpint.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"math/big"
-)
-
-// implements integer arithmetic
-
-// Mpint represents an integer constant.
-type Mpint struct {
-	Val  big.Int
-	Ovf  bool // set if Val overflowed compiler limit (sticky)
-	Rune bool // set if syntax indicates default type rune
-}
-
-func (a *Mpint) SetOverflow() {
-	a.Val.SetUint64(1) // avoid spurious div-zero errors
-	a.Ovf = true
-}
-
-func (a *Mpint) checkOverflow(extra int) bool {
-	// We don't need to be precise here; any reasonable upper limit would do.
-	// For now, use existing limit so we pass all the tests unchanged.
-	if a.Val.BitLen()+extra > Mpprec {
-		a.SetOverflow()
-	}
-	return a.Ovf
-}
-
-func (a *Mpint) Set(b *Mpint) {
-	a.Val.Set(&b.Val)
-}
-
-func (a *Mpint) SetFloat(b *Mpflt) bool {
-	// avoid converting huge floating-point numbers to integers
-	// (2*Mpprec is large enough to permit all tests to pass)
-	if b.Val.MantExp(nil) > 2*Mpprec {
-		a.SetOverflow()
-		return false
-	}
-
-	if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
-		return true
-	}
-
-	const delta = 16 // a reasonably small number of bits > 0
-	var t big.Float
-	t.SetPrec(Mpprec - delta)
-
-	// try rounding down a little
-	t.SetMode(big.ToZero)
-	t.Set(&b.Val)
-	if _, acc := t.Int(&a.Val); acc == big.Exact {
-		return true
-	}
-
-	// try rounding up a little
-	t.SetMode(big.AwayFromZero)
-	t.Set(&b.Val)
-	if _, acc := t.Int(&a.Val); acc == big.Exact {
-		return true
-	}
-
-	a.Ovf = false
-	return false
-}
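
The rounding retry above can be reproduced with math/big alone; a sketch that plants a speck of dust below an integer's low-order bits and watches reduced-precision rounding remove it (the constants are illustrative):

	package main

	import (
		"fmt"
		"math/big"
	)

	const prec = 512 // mirrors Mpprec

	func main() {
		// Build 10^100 plus dust at 2^-170: an "almost integer" whose
		// noise sits in the lowest bits of a 512-bit mantissa.
		ten100 := new(big.Int).Exp(big.NewInt(10), big.NewInt(100), nil)
		f := new(big.Float).SetPrec(prec).SetInt(ten100)
		f.Add(f, new(big.Float).SetMantExp(big.NewFloat(1), -170))

		var i big.Int
		if _, acc := f.Int(&i); acc == big.Exact {
			fmt.Println("exact on first try")
			return
		}

		// Retry at slightly reduced precision, as SetFloat does: rounding
		// toward zero shaves off the dust and leaves an exact integer.
		const delta = 16
		var t big.Float
		t.SetPrec(prec - delta)
		t.SetMode(big.ToZero)
		t.Set(f)
		if _, acc := t.Int(&i); acc == big.Exact {
			fmt.Println("exact after rounding down:", i.String() == ten100.String())
		}
	}
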
-
-func (a *Mpint) Add(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Add")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Add(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		yyerror("constant addition overflow")
-	}
-}
-
-func (a *Mpint) Sub(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Sub")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Sub(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		yyerror("constant subtraction overflow")
-	}
-}
-
-func (a *Mpint) Mul(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Mul")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Mul(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		yyerror("constant multiplication overflow")
-	}
-}
-
-func (a *Mpint) Quo(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Quo")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Quo(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		// can only happen for div-0, which should be checked elsewhere
-		yyerror("constant division overflow")
-	}
-}
-
-func (a *Mpint) Rem(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Rem")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Rem(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		// should never happen
-		yyerror("constant modulo overflow")
-	}
-}
-
-func (a *Mpint) Or(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Or")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Or(&a.Val, &b.Val)
-}
-
-func (a *Mpint) And(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint And")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.And(&a.Val, &b.Val)
-}
-
-func (a *Mpint) AndNot(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint AndNot")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.AndNot(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Xor(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Xor")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Xor(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Lsh(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Lsh")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	s := b.Int64()
-	if s < 0 || s >= Mpprec {
-		msg := "shift count too large"
-		if s < 0 {
-			msg = "invalid negative shift count"
-		}
-		yyerror("%s: %d", msg, s)
-		a.SetInt64(0)
-		return
-	}
-
-	if a.checkOverflow(int(s)) {
-		yyerror("constant shift overflow")
-		return
-	}
-	a.Val.Lsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Rsh(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("ovf in Mpint Rsh")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	s := b.Int64()
-	if s < 0 {
-		yyerror("invalid negative shift count: %d", s)
-		if a.Val.Sign() < 0 {
-			a.SetInt64(-1)
-		} else {
-			a.SetInt64(0)
-		}
-		return
-	}
-
-	a.Val.Rsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Cmp(b *Mpint) int {
-	return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpint) CmpInt64(c int64) int {
-	if c == 0 {
-		return a.Val.Sign() // common case shortcut
-	}
-	return a.Val.Cmp(big.NewInt(c))
-}
-
-func (a *Mpint) Neg() {
-	a.Val.Neg(&a.Val)
-}
-
-func (a *Mpint) Int64() int64 {
-	if a.Ovf {
-		if nsavederrors+nerrors == 0 {
-			Fatalf("constant overflow")
-		}
-		return 0
-	}
-
-	return a.Val.Int64()
-}
-
-func (a *Mpint) SetInt64(c int64) {
-	a.Val.SetInt64(c)
-}
-
-func (a *Mpint) SetString(as string) {
-	_, ok := a.Val.SetString(as, 0)
-	if !ok {
-		// The lexer checks for correct syntax of the literal
-		// and reports detailed errors. Thus SetString should
-		// never fail (in theory it might run out of memory,
-		// but that wouldn't be reported as an error here).
-		Fatalf("malformed integer constant: %s", as)
-		return
-	}
-	if a.checkOverflow(0) {
-		yyerror("constant too large: %s", as)
-	}
-}
-
-func (a *Mpint) GoString() string {
-	return a.Val.String()
-}
-
-func (a *Mpint) String() string {
-	return fmt.Sprintf("%#x", &a.Val)
-}
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
deleted file mode 100644
index 7494c3e..0000000
--- a/src/cmd/compile/internal/gc/noder.go
+++ /dev/null
@@ -1,1756 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-
-	"cmd/compile/internal/syntax"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-)
-
-// parseFiles concurrently parses files into *syntax.File structures.
-// Each declaration in every *syntax.File is converted to a syntax tree
-// and its root, represented by a *Node, is appended to xtop.
-// Returns the total count of parsed lines.
-func parseFiles(filenames []string) uint {
-	noders := make([]*noder, 0, len(filenames))
-	// Limit the number of simultaneously open files.
-	sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
-
-	for _, filename := range filenames {
-		p := &noder{
-			basemap: make(map[*syntax.PosBase]*src.PosBase),
-			err:     make(chan syntax.Error),
-		}
-		noders = append(noders, p)
-
-		go func(filename string) {
-			sem <- struct{}{}
-			defer func() { <-sem }()
-			defer close(p.err)
-			base := syntax.NewFileBase(filename)
-
-			f, err := os.Open(filename)
-			if err != nil {
-				p.error(syntax.Error{Msg: err.Error()})
-				return
-			}
-			defer f.Close()
-
-			p.file, _ = syntax.Parse(base, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error
-		}(filename)
-	}
-
-	var lines uint
-	for _, p := range noders {
-		for e := range p.err {
-			p.yyerrorpos(e.Pos, "%s", e.Msg)
-		}
-
-		p.node()
-		lines += p.file.Lines
-		p.file = nil // release memory
-
-		if nsyntaxerrors != 0 {
-			errorexit()
-		}
-		// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
-		testdclstack()
-	}
-
-	localpkg.Height = myheight
-
-	return lines
-}
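
The GOMAXPROCS(0)+10 buffered channel above is a general counting-semaphore pattern for bounding concurrency; a minimal standalone sketch (the job loop is a hypothetical stand-in for opening and parsing files):

	package main

	import (
		"fmt"
		"runtime"
		"sync"
	)

	func main() {
		// Buffered channel as a counting semaphore: at most cap(sem)
		// goroutines hold a slot at once, just as parseFiles limits
		// the number of simultaneously open files.
		sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)

		var wg sync.WaitGroup
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func(i int) {
				defer wg.Done()
				sem <- struct{}{}        // acquire a slot
				defer func() { <-sem }() // release it
				_ = fmt.Sprintf("job %d", i) // stand-in for real work
			}(i)
		}
		wg.Wait()
		fmt.Println("done")
	}
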
-
-// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase.
-func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
-	// fast path: most likely PosBase hasn't changed
-	if p.basecache.last == b0 {
-		return p.basecache.base
-	}
-
-	b1, ok := p.basemap[b0]
-	if !ok {
-		fn := b0.Filename()
-		if b0.IsFileBase() {
-			b1 = src.NewFileBase(fn, absFilename(fn))
-		} else {
-			// line directive base
-			p0 := b0.Pos()
-			p0b := p0.Base()
-			if p0b == b0 {
-				panic("infinite recursion in makeSrcPosBase")
-			}
-			p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col())
-			b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
-		}
-		p.basemap[b0] = b1
-	}
-
-	// update cache
-	p.basecache.last = b0
-	p.basecache.base = b1
-
-	return b1
-}
-
-func (p *noder) makeXPos(pos syntax.Pos) (_ src.XPos) {
-	return Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col()))
-}
-
-func (p *noder) yyerrorpos(pos syntax.Pos, format string, args ...interface{}) {
-	yyerrorl(p.makeXPos(pos), format, args...)
-}
-
-var pathPrefix string
-
-// TODO(gri) Can we eliminate fileh in favor of absFilename?
-func fileh(name string) string {
-	return objabi.AbsFile("", name, pathPrefix)
-}
-
-func absFilename(name string) string {
-	return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix)
-}
-
-// noder transforms package syntax's AST into a Node tree.
-type noder struct {
-	basemap   map[*syntax.PosBase]*src.PosBase
-	basecache struct {
-		last *syntax.PosBase
-		base *src.PosBase
-	}
-
-	file           *syntax.File
-	linknames      []linkname
-	pragcgobuf     [][]string
-	err            chan syntax.Error
-	scope          ScopeID
-	importedUnsafe bool
-	importedEmbed  bool
-
-	// scopeVars is a stack tracking the number of variables declared in the
-	// current function at the moment each open scope was opened.
-	scopeVars []int
-
-	lastCloseScopePos syntax.Pos
-}
-
-func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) {
-	oldScope := p.scope
-	p.scope = 0
-	funchdr(fn)
-
-	if block != nil {
-		body := p.stmts(block.List)
-		if body == nil {
-			body = []*Node{nod(OEMPTY, nil, nil)}
-		}
-		fn.Nbody.Set(body)
-
-		lineno = p.makeXPos(block.Rbrace)
-		fn.Func.Endlineno = lineno
-	}
-
-	funcbody()
-	p.scope = oldScope
-}
-
-func (p *noder) openScope(pos syntax.Pos) {
-	types.Markdcl()
-
-	if trackScopes {
-		Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope)
-		p.scopeVars = append(p.scopeVars, len(Curfn.Func.Dcl))
-		p.scope = ScopeID(len(Curfn.Func.Parents))
-
-		p.markScope(pos)
-	}
-}
-
-func (p *noder) closeScope(pos syntax.Pos) {
-	p.lastCloseScopePos = pos
-	types.Popdcl()
-
-	if trackScopes {
-		scopeVars := p.scopeVars[len(p.scopeVars)-1]
-		p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
-		if scopeVars == len(Curfn.Func.Dcl) {
-			// no variables were declared in this scope, so we can retract it.
-
-			if int(p.scope) != len(Curfn.Func.Parents) {
-				Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
-			}
-
-			p.scope = Curfn.Func.Parents[p.scope-1]
-			Curfn.Func.Parents = Curfn.Func.Parents[:len(Curfn.Func.Parents)-1]
-
-			nmarks := len(Curfn.Func.Marks)
-			Curfn.Func.Marks[nmarks-1].Scope = p.scope
-			prevScope := ScopeID(0)
-			if nmarks >= 2 {
-				prevScope = Curfn.Func.Marks[nmarks-2].Scope
-			}
-			if Curfn.Func.Marks[nmarks-1].Scope == prevScope {
-				Curfn.Func.Marks = Curfn.Func.Marks[:nmarks-1]
-			}
-			return
-		}
-
-		p.scope = Curfn.Func.Parents[p.scope-1]
-
-		p.markScope(pos)
-	}
-}
-
-func (p *noder) markScope(pos syntax.Pos) {
-	xpos := p.makeXPos(pos)
-	if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos {
-		Curfn.Func.Marks[i-1].Scope = p.scope
-	} else {
-		Curfn.Func.Marks = append(Curfn.Func.Marks, Mark{xpos, p.scope})
-	}
-}
-
-// closeAnotherScope is like closeScope, but it reuses the same mark
-// position as the last closeScope call. This is useful for "for" and
-// "if" statements, as their implicit blocks always end at the same
-// position as an explicit block.
-func (p *noder) closeAnotherScope() {
-	p.closeScope(p.lastCloseScopePos)
-}
-
-// linkname records a //go:linkname directive.
-type linkname struct {
-	pos    syntax.Pos
-	local  string
-	remote string
-}
-
-func (p *noder) node() {
-	types.Block = 1
-	p.importedUnsafe = false
-	p.importedEmbed = false
-
-	p.setlineno(p.file.PkgName)
-	mkpackage(p.file.PkgName.Value)
-
-	if pragma, ok := p.file.Pragma.(*Pragma); ok {
-		pragma.Flag &^= GoBuildPragma
-		p.checkUnused(pragma)
-	}
-
-	xtop = append(xtop, p.decls(p.file.DeclList)...)
-
-	for _, n := range p.linknames {
-		if !p.importedUnsafe {
-			p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
-			continue
-		}
-		s := lookup(n.local)
-		if n.remote != "" {
-			s.Linkname = n.remote
-		} else {
-			// Use the default object symbol name if the
-			// user didn't provide one.
-			if myimportpath == "" {
-				p.yyerrorpos(n.pos, "//go:linkname requires linkname argument or -p compiler flag")
-			} else {
-				s.Linkname = objabi.PathToPrefix(myimportpath) + "." + n.local
-			}
-		}
-	}
-
-	// The linker expects an ABI0 wrapper for all cgo-exported
-	// functions.
-	for _, prag := range p.pragcgobuf {
-		switch prag[0] {
-		case "cgo_export_static", "cgo_export_dynamic":
-			if symabiRefs == nil {
-				symabiRefs = make(map[string]obj.ABI)
-			}
-			symabiRefs[prag[1]] = obj.ABI0
-		}
-	}
-
-	pragcgobuf = append(pragcgobuf, p.pragcgobuf...)
-	lineno = src.NoXPos
-	clearImports()
-}
-
-func (p *noder) decls(decls []syntax.Decl) (l []*Node) {
-	var cs constState
-
-	for _, decl := range decls {
-		p.setlineno(decl)
-		switch decl := decl.(type) {
-		case *syntax.ImportDecl:
-			p.importDecl(decl)
-
-		case *syntax.VarDecl:
-			l = append(l, p.varDecl(decl)...)
-
-		case *syntax.ConstDecl:
-			l = append(l, p.constDecl(decl, &cs)...)
-
-		case *syntax.TypeDecl:
-			l = append(l, p.typeDecl(decl))
-
-		case *syntax.FuncDecl:
-			l = append(l, p.funcDecl(decl))
-
-		default:
-			panic("unhandled Decl")
-		}
-	}
-
-	return
-}
-
-func (p *noder) importDecl(imp *syntax.ImportDecl) {
-	if imp.Path.Bad {
-		return // avoid follow-on errors if there was a syntax error
-	}
-
-	if pragma, ok := imp.Pragma.(*Pragma); ok {
-		p.checkUnused(pragma)
-	}
-
-	val := p.basicLit(imp.Path)
-	ipkg := importfile(&val)
-	if ipkg == nil {
-		if nerrors == 0 {
-			Fatalf("phase error in import")
-		}
-		return
-	}
-
-	if ipkg == unsafepkg {
-		p.importedUnsafe = true
-	}
-	if ipkg.Path == "embed" {
-		p.importedEmbed = true
-	}
-
-	ipkg.Direct = true
-
-	var my *types.Sym
-	if imp.LocalPkgName != nil {
-		my = p.name(imp.LocalPkgName)
-	} else {
-		my = lookup(ipkg.Name)
-	}
-
-	pack := p.nod(imp, OPACK, nil, nil)
-	pack.Sym = my
-	pack.Name.Pkg = ipkg
-
-	switch my.Name {
-	case ".":
-		importdot(ipkg, pack)
-		return
-	case "init":
-		yyerrorl(pack.Pos, "cannot import package as init - init must be a func")
-		return
-	case "_":
-		return
-	}
-	if my.Def != nil {
-		redeclare(pack.Pos, my, "as imported package name")
-	}
-	my.Def = asTypesNode(pack)
-	my.Lastlineno = pack.Pos
-	my.Block = 1 // at top level
-}
-
-func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
-	names := p.declNames(decl.NameList)
-	typ := p.typeExprOrNil(decl.Type)
-
-	var exprs []*Node
-	if decl.Values != nil {
-		exprs = p.exprList(decl.Values)
-	}
-
-	if pragma, ok := decl.Pragma.(*Pragma); ok {
-		if len(pragma.Embeds) > 0 {
-			if !p.importedEmbed {
-				// This check can't be done when building the list pragma.Embeds
-				// because that list is created before the noder starts walking over the file,
-				// so at that point it hasn't seen the imports.
-				// We're left to check now, just before applying the //go:embed lines.
-				for _, e := range pragma.Embeds {
-					p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
-				}
-			} else {
-				varEmbed(p, names, typ, exprs, pragma.Embeds)
-			}
-			pragma.Embeds = nil
-		}
-		p.checkUnused(pragma)
-	}
-
-	p.setlineno(decl)
-	return variter(names, typ, exprs)
-}
-
-// constState tracks state between constant specifiers within a
-// declaration group. This state is kept separate from noder so nested
-// constant declarations are handled correctly (e.g., issue 15550).
-type constState struct {
-	group  *syntax.Group
-	typ    *Node
-	values []*Node
-	iota   int64
-}
-
-func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
-	if decl.Group == nil || decl.Group != cs.group {
-		*cs = constState{
-			group: decl.Group,
-		}
-	}
-
-	if pragma, ok := decl.Pragma.(*Pragma); ok {
-		p.checkUnused(pragma)
-	}
-
-	names := p.declNames(decl.NameList)
-	typ := p.typeExprOrNil(decl.Type)
-
-	var values []*Node
-	if decl.Values != nil {
-		values = p.exprList(decl.Values)
-		cs.typ, cs.values = typ, values
-	} else {
-		if typ != nil {
-			yyerror("const declaration cannot have type without expression")
-		}
-		typ, values = cs.typ, cs.values
-	}
-
-	nn := make([]*Node, 0, len(names))
-	for i, n := range names {
-		if i >= len(values) {
-			yyerror("missing value in const declaration")
-			break
-		}
-		v := values[i]
-		if decl.Values == nil {
-			v = treecopy(v, n.Pos)
-		}
-
-		n.Op = OLITERAL
-		declare(n, dclcontext)
-
-		n.Name.Param.Ntype = typ
-		n.Name.Defn = v
-		n.SetIota(cs.iota)
-
-		nn = append(nn, p.nod(decl, ODCLCONST, n, nil))
-	}
-
-	if len(values) > len(names) {
-		yyerror("extra expression in const declaration")
-	}
-
-	cs.iota++
-
-	return nn
-}
-
-func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
-	n := p.declName(decl.Name)
-	n.Op = OTYPE
-	declare(n, dclcontext)
-
-	// decl.Type may be nil but in that case we got a syntax error during parsing
-	typ := p.typeExprOrNil(decl.Type)
-
-	param := n.Name.Param
-	param.Ntype = typ
-	param.SetAlias(decl.Alias)
-	if pragma, ok := decl.Pragma.(*Pragma); ok {
-		if !decl.Alias {
-			param.SetPragma(pragma.Flag & TypePragmas)
-			pragma.Flag &^= TypePragmas
-		}
-		p.checkUnused(pragma)
-	}
-
-	nod := p.nod(decl, ODCLTYPE, n, nil)
-	if param.Alias() && !langSupported(1, 9, localpkg) {
-		yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
-	}
-	return nod
-}
-
-func (p *noder) declNames(names []*syntax.Name) []*Node {
-	nodes := make([]*Node, 0, len(names))
-	for _, name := range names {
-		nodes = append(nodes, p.declName(name))
-	}
-	return nodes
-}
-
-func (p *noder) declName(name *syntax.Name) *Node {
-	n := dclname(p.name(name))
-	n.Pos = p.pos(name)
-	return n
-}
-
-func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
-	name := p.name(fun.Name)
-	t := p.signature(fun.Recv, fun.Type)
-	f := p.nod(fun, ODCLFUNC, nil, nil)
-
-	if fun.Recv == nil {
-		if name.Name == "init" {
-			name = renameinit()
-			if t.List.Len() > 0 || t.Rlist.Len() > 0 {
-				yyerrorl(f.Pos, "func init must have no arguments and no return values")
-			}
-		}
-
-		if localpkg.Name == "main" && name.Name == "main" {
-			if t.List.Len() > 0 || t.Rlist.Len() > 0 {
-				yyerrorl(f.Pos, "func main must have no arguments and no return values")
-			}
-		}
-	} else {
-		f.Func.Shortname = name
-		name = nblank.Sym // filled in by typecheckfunc
-	}
-
-	f.Func.Nname = newfuncnamel(p.pos(fun.Name), name)
-	f.Func.Nname.Name.Defn = f
-	f.Func.Nname.Name.Param.Ntype = t
-
-	if pragma, ok := fun.Pragma.(*Pragma); ok {
-		f.Func.Pragma = pragma.Flag & FuncPragmas
-		if pragma.Flag&Systemstack != 0 && pragma.Flag&Nosplit != 0 {
-			yyerrorl(f.Pos, "go:nosplit and go:systemstack cannot be combined")
-		}
-		pragma.Flag &^= FuncPragmas
-		p.checkUnused(pragma)
-	}
-
-	if fun.Recv == nil {
-		declare(f.Func.Nname, PFUNC)
-	}
-
-	p.funcBody(f, fun.Body)
-
-	if fun.Body != nil {
-		if f.Func.Pragma&Noescape != 0 {
-			yyerrorl(f.Pos, "can only use //go:noescape with external func implementations")
-		}
-	} else {
-		if pure_go || strings.HasPrefix(f.funcname(), "init.") {
-			// Linknamed functions are allowed to have no body. Hopefully
-			// the linkname target has a body. See issue 23311.
-			isLinknamed := false
-			for _, n := range p.linknames {
-				if f.funcname() == n.local {
-					isLinknamed = true
-					break
-				}
-			}
-			if !isLinknamed {
-				yyerrorl(f.Pos, "missing function body")
-			}
-		}
-	}
-
-	return f
-}
-
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node {
-	n := p.nod(typ, OTFUNC, nil, nil)
-	if recv != nil {
-		n.Left = p.param(recv, false, false)
-	}
-	n.List.Set(p.params(typ.ParamList, true))
-	n.Rlist.Set(p.params(typ.ResultList, false))
-	return n
-}
-
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
-	nodes := make([]*Node, 0, len(params))
-	for i, param := range params {
-		p.setlineno(param)
-		nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
-	}
-	return nodes
-}
-
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node {
-	var name *types.Sym
-	if param.Name != nil {
-		name = p.name(param.Name)
-	}
-
-	typ := p.typeExpr(param.Type)
-	n := p.nodSym(param, ODCLFIELD, typ, name)
-
-	// rewrite ...T parameter
-	if typ.Op == ODDD {
-		if !dddOk {
-			// We mark these as syntax errors to get automatic elimination
-			// of multiple such errors per line (see yyerrorl in subr.go).
-			yyerror("syntax error: cannot use ... in receiver or result parameter list")
-		} else if !final {
-			if param.Name == nil {
-				yyerror("syntax error: cannot use ... with non-final parameter")
-			} else {
-				p.yyerrorpos(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
-			}
-		}
-		typ.Op = OTARRAY
-		typ.Right = typ.Left
-		typ.Left = nil
-		n.SetIsDDD(true)
-		if n.Left != nil {
-			n.Left.SetIsDDD(true)
-		}
-	}
-
-	return n
-}
-
-func (p *noder) exprList(expr syntax.Expr) []*Node {
-	if list, ok := expr.(*syntax.ListExpr); ok {
-		return p.exprs(list.ElemList)
-	}
-	return []*Node{p.expr(expr)}
-}
-
-func (p *noder) exprs(exprs []syntax.Expr) []*Node {
-	nodes := make([]*Node, 0, len(exprs))
-	for _, expr := range exprs {
-		nodes = append(nodes, p.expr(expr))
-	}
-	return nodes
-}
-
-func (p *noder) expr(expr syntax.Expr) *Node {
-	p.setlineno(expr)
-	switch expr := expr.(type) {
-	case nil, *syntax.BadExpr:
-		return nil
-	case *syntax.Name:
-		return p.mkname(expr)
-	case *syntax.BasicLit:
-		n := nodlit(p.basicLit(expr))
-		n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
-		return n
-	case *syntax.CompositeLit:
-		n := p.nod(expr, OCOMPLIT, nil, nil)
-		if expr.Type != nil {
-			n.Right = p.expr(expr.Type)
-		}
-		l := p.exprs(expr.ElemList)
-		for i, e := range l {
-			l[i] = p.wrapname(expr.ElemList[i], e)
-		}
-		n.List.Set(l)
-		lineno = p.makeXPos(expr.Rbrace)
-		return n
-	case *syntax.KeyValueExpr:
-		// use position of expr.Key rather than of expr (which has position of ':')
-		return p.nod(expr.Key, OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
-	case *syntax.FuncLit:
-		return p.funcLit(expr)
-	case *syntax.ParenExpr:
-		return p.nod(expr, OPAREN, p.expr(expr.X), nil)
-	case *syntax.SelectorExpr:
-		// parser.new_dotname
-		obj := p.expr(expr.X)
-		if obj.Op == OPACK {
-			obj.Name.SetUsed(true)
-			return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
-		}
-		n := nodSym(OXDOT, obj, p.name(expr.Sel))
-		n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
-		return n
-	case *syntax.IndexExpr:
-		return p.nod(expr, OINDEX, p.expr(expr.X), p.expr(expr.Index))
-	case *syntax.SliceExpr:
-		op := OSLICE
-		if expr.Full {
-			op = OSLICE3
-		}
-		n := p.nod(expr, op, p.expr(expr.X), nil)
-		var index [3]*Node
-		for i, x := range &expr.Index {
-			if x != nil {
-				index[i] = p.expr(x)
-			}
-		}
-		n.SetSliceBounds(index[0], index[1], index[2])
-		return n
-	case *syntax.AssertExpr:
-		return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
-	case *syntax.Operation:
-		if expr.Op == syntax.Add && expr.Y != nil {
-			return p.sum(expr)
-		}
-		x := p.expr(expr.X)
-		if expr.Y == nil {
-			return p.nod(expr, p.unOp(expr.Op), x, nil)
-		}
-		return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
-	case *syntax.CallExpr:
-		n := p.nod(expr, OCALL, p.expr(expr.Fun), nil)
-		n.List.Set(p.exprs(expr.ArgList))
-		n.SetIsDDD(expr.HasDots)
-		return n
-
-	case *syntax.ArrayType:
-		var len *Node
-		if expr.Len != nil {
-			len = p.expr(expr.Len)
-		} else {
-			len = p.nod(expr, ODDD, nil, nil)
-		}
-		return p.nod(expr, OTARRAY, len, p.typeExpr(expr.Elem))
-	case *syntax.SliceType:
-		return p.nod(expr, OTARRAY, nil, p.typeExpr(expr.Elem))
-	case *syntax.DotsType:
-		return p.nod(expr, ODDD, p.typeExpr(expr.Elem), nil)
-	case *syntax.StructType:
-		return p.structType(expr)
-	case *syntax.InterfaceType:
-		return p.interfaceType(expr)
-	case *syntax.FuncType:
-		return p.signature(nil, expr)
-	case *syntax.MapType:
-		return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
-	case *syntax.ChanType:
-		n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil)
-		n.SetTChanDir(p.chanDir(expr.Dir))
-		return n
-
-	case *syntax.TypeSwitchGuard:
-		n := p.nod(expr, OTYPESW, nil, p.expr(expr.X))
-		if expr.Lhs != nil {
-			n.Left = p.declName(expr.Lhs)
-			if n.Left.isBlank() {
-				yyerror("invalid variable name %v in type switch", n.Left)
-			}
-		}
-		return n
-	}
-	panic("unhandled Expr")
-}
-
-// sum efficiently handles very large summation expressions (such as
-// in issue #16394). In particular, it avoids left recursion and
-// collapses string literals.
-func (p *noder) sum(x syntax.Expr) *Node {
-	// While we need to handle long sums with asymptotic
-	// efficiency, the vast majority of sums are very small: ~95%
-	// have only 2 or 3 operands, and ~99% of string literals are
-	// never concatenated.
-
-	adds := make([]*syntax.Operation, 0, 2)
-	for {
-		add, ok := x.(*syntax.Operation)
-		if !ok || add.Op != syntax.Add || add.Y == nil {
-			break
-		}
-		adds = append(adds, add)
-		x = add.X
-	}
-
-	// nstr is the current rightmost string literal in the
-	// summation (if any), and chunks holds its accumulated
-	// substrings.
-	//
-	// Consider the expression x + "a" + "b" + "c" + y. When we
-	// reach the string literal "a", we assign nstr to point to
-	// its corresponding Node and initialize chunks to {"a"}.
-	// Visiting the subsequent string literals "b" and "c", we
-	// simply append their values to chunks. Finally, when we
-	// reach the non-constant operand y, we'll join chunks to form
-	// "abc" and reassign the "a" string literal's value.
-	//
-	// N.B., we need to be careful about named string constants
-	// (indicated by Sym != nil) because 1) we can't modify their
-	// value, as doing so would affect other uses of the string
-	// constant, and 2) they may have types, which we need to
-	// handle correctly. For now, we avoid these problems by
-	// treating named string constants the same as non-constant
-	// operands.
-	var nstr *Node
-	chunks := make([]string, 0, 1)
-
-	n := p.expr(x)
-	if Isconst(n, CTSTR) && n.Sym == nil {
-		nstr = n
-		chunks = append(chunks, nstr.StringVal())
-	}
-
-	for i := len(adds) - 1; i >= 0; i-- {
-		add := adds[i]
-
-		r := p.expr(add.Y)
-		if Isconst(r, CTSTR) && r.Sym == nil {
-			if nstr != nil {
-				// Collapse r into nstr instead of adding to n.
-				chunks = append(chunks, r.StringVal())
-				continue
-			}
-
-			nstr = r
-			chunks = append(chunks, nstr.StringVal())
-		} else {
-			if len(chunks) > 1 {
-				nstr.SetVal(Val{U: strings.Join(chunks, "")})
-			}
-			nstr = nil
-			chunks = chunks[:0]
-		}
-		n = p.nod(add, OADD, n, r)
-	}
-	if len(chunks) > 1 {
-		nstr.SetVal(Val{U: strings.Join(chunks, "")})
-	}
-
-	return n
-}
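
A toy version of the literal-collapsing idea, over a hypothetical expression type rather than the compiler's nodes:

	package main

	import (
		"fmt"
		"strings"
	)

	// expr is either a string literal or an opaque non-constant operand.
	type expr struct {
		lit   string // literal value, if isLit is set
		isLit bool
		name  string // for non-literals, e.g. a variable name
	}

	// collapse joins runs of adjacent string literals in a summation,
	// leaving non-constant operands in place: x + "a" + "b" + y + "c"
	// becomes x + "ab" + y + "c".
	func collapse(operands []expr) []expr {
		var out []expr
		var chunks []string
		flush := func() {
			if len(chunks) > 0 {
				out = append(out, expr{lit: strings.Join(chunks, ""), isLit: true})
				chunks = chunks[:0]
			}
		}
		for _, op := range operands {
			if op.isLit {
				chunks = append(chunks, op.lit)
			} else {
				flush()
				out = append(out, op)
			}
		}
		flush()
		return out
	}

	func main() {
		in := []expr{{name: "x"}, {lit: "a", isLit: true}, {lit: "b", isLit: true}, {name: "y"}, {lit: "c", isLit: true}}
		for _, op := range collapse(in) {
			if op.isLit {
				fmt.Printf("%q ", op.lit)
			} else {
				fmt.Printf("%s ", op.name)
			}
		}
		fmt.Println() // x "ab" y "c"
	}
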
-
-func (p *noder) typeExpr(typ syntax.Expr) *Node {
-	// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
-	return p.expr(typ)
-}
-
-func (p *noder) typeExprOrNil(typ syntax.Expr) *Node {
-	if typ != nil {
-		return p.expr(typ)
-	}
-	return nil
-}
-
-func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
-	switch dir {
-	case 0:
-		return types.Cboth
-	case syntax.SendOnly:
-		return types.Csend
-	case syntax.RecvOnly:
-		return types.Crecv
-	}
-	panic("unhandled ChanDir")
-}
-
-func (p *noder) structType(expr *syntax.StructType) *Node {
-	l := make([]*Node, 0, len(expr.FieldList))
-	for i, field := range expr.FieldList {
-		p.setlineno(field)
-		var n *Node
-		if field.Name == nil {
-			n = p.embedded(field.Type)
-		} else {
-			n = p.nodSym(field, ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name))
-		}
-		if i < len(expr.TagList) && expr.TagList[i] != nil {
-			n.SetVal(p.basicLit(expr.TagList[i]))
-		}
-		l = append(l, n)
-	}
-
-	p.setlineno(expr)
-	n := p.nod(expr, OTSTRUCT, nil, nil)
-	n.List.Set(l)
-	return n
-}
-
-func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
-	l := make([]*Node, 0, len(expr.MethodList))
-	for _, method := range expr.MethodList {
-		p.setlineno(method)
-		var n *Node
-		if method.Name == nil {
-			n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil)
-		} else {
-			mname := p.name(method.Name)
-			sig := p.typeExpr(method.Type)
-			sig.Left = fakeRecv()
-			n = p.nodSym(method, ODCLFIELD, sig, mname)
-			ifacedcl(n)
-		}
-		l = append(l, n)
-	}
-
-	n := p.nod(expr, OTINTER, nil, nil)
-	n.List.Set(l)
-	return n
-}
-
-func (p *noder) packname(expr syntax.Expr) *types.Sym {
-	switch expr := expr.(type) {
-	case *syntax.Name:
-		name := p.name(expr)
-		if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
-			n.Name.Pack.Name.SetUsed(true)
-		}
-		return name
-	case *syntax.SelectorExpr:
-		name := p.name(expr.X.(*syntax.Name))
-		def := asNode(name.Def)
-		if def == nil {
-			yyerror("undefined: %v", name)
-			return name
-		}
-		var pkg *types.Pkg
-		if def.Op != OPACK {
-			yyerror("%v is not a package", name)
-			pkg = localpkg
-		} else {
-			def.Name.SetUsed(true)
-			pkg = def.Name.Pkg
-		}
-		return pkg.Lookup(expr.Sel.Value)
-	}
-	panic(fmt.Sprintf("unexpected packname: %#v", expr))
-}
-
-func (p *noder) embedded(typ syntax.Expr) *Node {
-	op, isStar := typ.(*syntax.Operation)
-	if isStar {
-		if op.Op != syntax.Mul || op.Y != nil {
-			panic("unexpected Operation")
-		}
-		typ = op.X
-	}
-
-	sym := p.packname(typ)
-	n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name))
-	n.SetEmbedded(true)
-
-	if isStar {
-		n.Left = p.nod(op, ODEREF, n.Left, nil)
-	}
-	return n
-}
-
-func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
-	return p.stmtsFall(stmts, false)
-}
-
-func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node {
-	var nodes []*Node
-	for i, stmt := range stmts {
-		s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
-		if s == nil {
-		} else if s.Op == OBLOCK && s.Ninit.Len() == 0 {
-			nodes = append(nodes, s.List.Slice()...)
-		} else {
-			nodes = append(nodes, s)
-		}
-	}
-	return nodes
-}
-
-func (p *noder) stmt(stmt syntax.Stmt) *Node {
-	return p.stmtFall(stmt, false)
-}
-
-func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
-	p.setlineno(stmt)
-	switch stmt := stmt.(type) {
-	case *syntax.EmptyStmt:
-		return nil
-	case *syntax.LabeledStmt:
-		return p.labeledStmt(stmt, fallOK)
-	case *syntax.BlockStmt:
-		l := p.blockStmt(stmt)
-		if len(l) == 0 {
-			// TODO(mdempsky): Line number?
-			return nod(OEMPTY, nil, nil)
-		}
-		return liststmt(l)
-	case *syntax.ExprStmt:
-		return p.wrapname(stmt, p.expr(stmt.X))
-	case *syntax.SendStmt:
-		return p.nod(stmt, OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
-	case *syntax.DeclStmt:
-		return liststmt(p.decls(stmt.DeclList))
-	case *syntax.AssignStmt:
-		if stmt.Op != 0 && stmt.Op != syntax.Def {
-			n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
-			n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
-			n.SetSubOp(p.binOp(stmt.Op))
-			return n
-		}
-
-		n := p.nod(stmt, OAS, nil, nil) // assume common case
-
-		rhs := p.exprList(stmt.Rhs)
-		lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)
-
-		if len(lhs) == 1 && len(rhs) == 1 {
-			// common case
-			n.Left = lhs[0]
-			n.Right = rhs[0]
-		} else {
-			n.Op = OAS2
-			n.List.Set(lhs)
-			n.Rlist.Set(rhs)
-		}
-		return n
-
-	case *syntax.BranchStmt:
-		var op Op
-		switch stmt.Tok {
-		case syntax.Break:
-			op = OBREAK
-		case syntax.Continue:
-			op = OCONTINUE
-		case syntax.Fallthrough:
-			if !fallOK {
-				yyerror("fallthrough statement out of place")
-			}
-			op = OFALL
-		case syntax.Goto:
-			op = OGOTO
-		default:
-			panic("unhandled BranchStmt")
-		}
-		n := p.nod(stmt, op, nil, nil)
-		if stmt.Label != nil {
-			n.Sym = p.name(stmt.Label)
-		}
-		return n
-	case *syntax.CallStmt:
-		var op Op
-		switch stmt.Tok {
-		case syntax.Defer:
-			op = ODEFER
-		case syntax.Go:
-			op = OGO
-		default:
-			panic("unhandled CallStmt")
-		}
-		return p.nod(stmt, op, p.expr(stmt.Call), nil)
-	case *syntax.ReturnStmt:
-		var results []*Node
-		if stmt.Results != nil {
-			results = p.exprList(stmt.Results)
-		}
-		n := p.nod(stmt, ORETURN, nil, nil)
-		n.List.Set(results)
-		if n.List.Len() == 0 && Curfn != nil {
-			for _, ln := range Curfn.Func.Dcl {
-				if ln.Class() == PPARAM {
-					continue
-				}
-				if ln.Class() != PPARAMOUT {
-					break
-				}
-				if asNode(ln.Sym.Def) != ln {
-					yyerror("%s is shadowed during return", ln.Sym.Name)
-				}
-			}
-		}
-		return n
-	case *syntax.IfStmt:
-		return p.ifStmt(stmt)
-	case *syntax.ForStmt:
-		return p.forStmt(stmt)
-	case *syntax.SwitchStmt:
-		return p.switchStmt(stmt)
-	case *syntax.SelectStmt:
-		return p.selectStmt(stmt)
-	}
-	panic("unhandled Stmt")
-}
-
-func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
-	if !colas {
-		return p.exprList(expr)
-	}
-
-	defn.SetColas(true)
-
-	var exprs []syntax.Expr
-	if list, ok := expr.(*syntax.ListExpr); ok {
-		exprs = list.ElemList
-	} else {
-		exprs = []syntax.Expr{expr}
-	}
-
-	res := make([]*Node, len(exprs))
-	seen := make(map[*types.Sym]bool, len(exprs))
-
-	newOrErr := false
-	for i, expr := range exprs {
-		p.setlineno(expr)
-		res[i] = nblank
-
-		name, ok := expr.(*syntax.Name)
-		if !ok {
-			p.yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
-			newOrErr = true
-			continue
-		}
-
-		sym := p.name(name)
-		if sym.IsBlank() {
-			continue
-		}
-
-		if seen[sym] {
-			p.yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym)
-			newOrErr = true
-			continue
-		}
-		seen[sym] = true
-
-		if sym.Block == types.Block {
-			res[i] = oldname(sym)
-			continue
-		}
-
-		newOrErr = true
-		n := newname(sym)
-		declare(n, dclcontext)
-		n.Name.Defn = defn
-		defn.Ninit.Append(nod(ODCL, n, nil))
-		res[i] = n
-	}
-
-	if !newOrErr {
-		yyerrorl(defn.Pos, "no new variables on left side of :=")
-	}
-	return res
-}
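
assignList's bookkeeping mirrors the spec's rules for short variable
declarations. A rough standalone sketch of the same checks over plain name
strings (checkShortDecl and its scope map are illustrative, not compiler API):

package main

import "fmt"

// checkShortDecl applies the := left-hand-side rules the way assignList
// does: "_" is always allowed, a repeated name is an error, and at least
// one name must be new in the current scope.
func checkShortDecl(names []string, inScope map[string]bool) []error {
	var errs []error
	seen := make(map[string]bool, len(names))
	newOrErr := false
	for _, name := range names {
		if name == "_" {
			continue
		}
		if seen[name] {
			errs = append(errs, fmt.Errorf("%s repeated on left side of :=", name))
			newOrErr = true
			continue
		}
		seen[name] = true
		if inScope[name] {
			continue // reusing an existing variable is fine
		}
		newOrErr = true // a genuinely new declaration
	}
	if !newOrErr {
		errs = append(errs, fmt.Errorf("no new variables on left side of :="))
	}
	return errs
}

func main() {
	fmt.Println(checkShortDecl([]string{"a", "b"}, map[string]bool{"a": true})) // []
	fmt.Println(checkShortDecl([]string{"a"}, map[string]bool{"a": true}))     // no new variables
}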
-
-func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node {
-	p.openScope(stmt.Pos())
-	nodes := p.stmts(stmt.List)
-	p.closeScope(stmt.Rbrace)
-	return nodes
-}
-
-func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node {
-	p.openScope(stmt.Pos())
-	n := p.nod(stmt, OIF, nil, nil)
-	if stmt.Init != nil {
-		n.Ninit.Set1(p.stmt(stmt.Init))
-	}
-	if stmt.Cond != nil {
-		n.Left = p.expr(stmt.Cond)
-	}
-	n.Nbody.Set(p.blockStmt(stmt.Then))
-	if stmt.Else != nil {
-		e := p.stmt(stmt.Else)
-		if e.Op == OBLOCK && e.Ninit.Len() == 0 {
-			n.Rlist.Set(e.List.Slice())
-		} else {
-			n.Rlist.Set1(e)
-		}
-	}
-	p.closeAnotherScope()
-	return n
-}
-
-func (p *noder) forStmt(stmt *syntax.ForStmt) *Node {
-	p.openScope(stmt.Pos())
-	var n *Node
-	if r, ok := stmt.Init.(*syntax.RangeClause); ok {
-		if stmt.Cond != nil || stmt.Post != nil {
-			panic("unexpected RangeClause")
-		}
-
-		n = p.nod(r, ORANGE, nil, p.expr(r.X))
-		if r.Lhs != nil {
-			n.List.Set(p.assignList(r.Lhs, n, r.Def))
-		}
-	} else {
-		n = p.nod(stmt, OFOR, nil, nil)
-		if stmt.Init != nil {
-			n.Ninit.Set1(p.stmt(stmt.Init))
-		}
-		if stmt.Cond != nil {
-			n.Left = p.expr(stmt.Cond)
-		}
-		if stmt.Post != nil {
-			n.Right = p.stmt(stmt.Post)
-		}
-	}
-	n.Nbody.Set(p.blockStmt(stmt.Body))
-	p.closeAnotherScope()
-	return n
-}
-
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
-	p.openScope(stmt.Pos())
-	n := p.nod(stmt, OSWITCH, nil, nil)
-	if stmt.Init != nil {
-		n.Ninit.Set1(p.stmt(stmt.Init))
-	}
-	if stmt.Tag != nil {
-		n.Left = p.expr(stmt.Tag)
-	}
-
-	tswitch := n.Left
-	if tswitch != nil && tswitch.Op != OTYPESW {
-		tswitch = nil
-	}
-	n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
-
-	p.closeScope(stmt.Rbrace)
-	return n
-}
-
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace syntax.Pos) []*Node {
-	nodes := make([]*Node, 0, len(clauses))
-	for i, clause := range clauses {
-		p.setlineno(clause)
-		if i > 0 {
-			p.closeScope(clause.Pos())
-		}
-		p.openScope(clause.Pos())
-
-		n := p.nod(clause, OCASE, nil, nil)
-		if clause.Cases != nil {
-			n.List.Set(p.exprList(clause.Cases))
-		}
-		if tswitch != nil && tswitch.Left != nil {
-			nn := newname(tswitch.Left.Sym)
-			declare(nn, dclcontext)
-			n.Rlist.Set1(nn)
-			// keep track of the instances for reporting unused variables
-			nn.Name.Defn = tswitch
-		}
-
-		// Trim trailing empty statements. We omit them from
-		// the Node AST anyway, and it's easier to identify
-		// out-of-place fallthrough statements without them.
-		body := clause.Body
-		for len(body) > 0 {
-			if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok {
-				break
-			}
-			body = body[:len(body)-1]
-		}
-
-		n.Nbody.Set(p.stmtsFall(body, true))
-		if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL {
-			if tswitch != nil {
-				yyerror("cannot fallthrough in type switch")
-			}
-			if i+1 == len(clauses) {
-				yyerror("cannot fallthrough final case in switch")
-			}
-		}
-
-		nodes = append(nodes, n)
-	}
-	if len(clauses) > 0 {
-		p.closeScope(rbrace)
-	}
-	return nodes
-}
-
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node {
-	n := p.nod(stmt, OSELECT, nil, nil)
-	n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace))
-	return n
-}
-
-func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*Node {
-	nodes := make([]*Node, 0, len(clauses))
-	for i, clause := range clauses {
-		p.setlineno(clause)
-		if i > 0 {
-			p.closeScope(clause.Pos())
-		}
-		p.openScope(clause.Pos())
-
-		n := p.nod(clause, OCASE, nil, nil)
-		if clause.Comm != nil {
-			n.List.Set1(p.stmt(clause.Comm))
-		}
-		n.Nbody.Set(p.stmts(clause.Body))
-		nodes = append(nodes, n)
-	}
-	if len(clauses) > 0 {
-		p.closeScope(rbrace)
-	}
-	return nodes
-}
-
-func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node {
-	lhs := p.nodSym(label, OLABEL, nil, p.name(label.Label))
-
-	var ls *Node
-	if label.Stmt != nil { // TODO(mdempsky): Should always be present.
-		ls = p.stmtFall(label.Stmt, fallOK)
-	}
-
-	lhs.Name.Defn = ls
-	l := []*Node{lhs}
-	if ls != nil {
-		if ls.Op == OBLOCK && ls.Ninit.Len() == 0 {
-			l = append(l, ls.List.Slice()...)
-		} else {
-			l = append(l, ls)
-		}
-	}
-	return liststmt(l)
-}
-
-var unOps = [...]Op{
-	syntax.Recv: ORECV,
-	syntax.Mul:  ODEREF,
-	syntax.And:  OADDR,
-
-	syntax.Not: ONOT,
-	syntax.Xor: OBITNOT,
-	syntax.Add: OPLUS,
-	syntax.Sub: ONEG,
-}
-
-func (p *noder) unOp(op syntax.Operator) Op {
-	if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
-		panic("invalid Operator")
-	}
-	return unOps[op]
-}
-
-var binOps = [...]Op{
-	syntax.OrOr:   OOROR,
-	syntax.AndAnd: OANDAND,
-
-	syntax.Eql: OEQ,
-	syntax.Neq: ONE,
-	syntax.Lss: OLT,
-	syntax.Leq: OLE,
-	syntax.Gtr: OGT,
-	syntax.Geq: OGE,
-
-	syntax.Add: OADD,
-	syntax.Sub: OSUB,
-	syntax.Or:  OOR,
-	syntax.Xor: OXOR,
-
-	syntax.Mul:    OMUL,
-	syntax.Div:    ODIV,
-	syntax.Rem:    OMOD,
-	syntax.And:    OAND,
-	syntax.AndNot: OANDNOT,
-	syntax.Shl:    OLSH,
-	syntax.Shr:    ORSH,
-}
-
-func (p *noder) binOp(op syntax.Operator) Op {
-	if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
-		panic("invalid Operator")
-	}
-	return binOps[op]
-}
-
-// checkLangCompat reports an error if the representation of a numeric
-// literal is not compatible with the current language version.
-func checkLangCompat(lit *syntax.BasicLit) {
-	s := lit.Value
-	if len(s) <= 2 || langSupported(1, 13, localpkg) {
-		return
-	}
-	// len(s) > 2
-	if strings.Contains(s, "_") {
-		yyerrorv("go1.13", "underscores in numeric literals")
-		return
-	}
-	if s[0] != '0' {
-		return
-	}
-	base := s[1]
-	if base == 'b' || base == 'B' {
-		yyerrorv("go1.13", "binary literals")
-		return
-	}
-	if base == 'o' || base == 'O' {
-		yyerrorv("go1.13", "0o/0O-style octal literals")
-		return
-	}
-	if lit.Kind != syntax.IntLit && (base == 'x' || base == 'X') {
-		yyerrorv("go1.13", "hexadecimal floating-point literals")
-	}
-}
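
For quick reference, the literal shapes checkLangCompat gates behind go1.13
can be classified with a few string tests. A standalone sketch (go113Feature
is a hypothetical helper; it skips the hex-float check, which also needs the
literal's kind):

package main

import (
	"fmt"
	"strings"
)

// go113Feature names the Go 1.13 number-literal feature a literal uses,
// or returns "" if it is valid under earlier language versions.
func go113Feature(s string) string {
	if len(s) <= 2 {
		return ""
	}
	if strings.Contains(s, "_") {
		return "underscores in numeric literals"
	}
	if s[0] != '0' {
		return ""
	}
	switch s[1] {
	case 'b', 'B':
		return "binary literals"
	case 'o', 'O':
		return "0o/0O-style octal literals"
	}
	return ""
}

func main() {
	for _, lit := range []string{"1_000", "0b101", "0o755", "0755", "42"} {
		fmt.Printf("%-6s -> %q\n", lit, go113Feature(lit))
	}
}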
-
-func (p *noder) basicLit(lit *syntax.BasicLit) Val {
-	// We don't use the errors of the conversion routines to determine
-	// if a literal string is valid because the conversion routines may
-	// accept a wider syntax than the language permits. Rely on lit.Bad
-	// instead.
-	switch s := lit.Value; lit.Kind {
-	case syntax.IntLit:
-		checkLangCompat(lit)
-		x := new(Mpint)
-		if !lit.Bad {
-			x.SetString(s)
-		}
-		return Val{U: x}
-
-	case syntax.FloatLit:
-		checkLangCompat(lit)
-		x := newMpflt()
-		if !lit.Bad {
-			x.SetString(s)
-		}
-		return Val{U: x}
-
-	case syntax.ImagLit:
-		checkLangCompat(lit)
-		x := newMpcmplx()
-		if !lit.Bad {
-			x.Imag.SetString(strings.TrimSuffix(s, "i"))
-		}
-		return Val{U: x}
-
-	case syntax.RuneLit:
-		x := new(Mpint)
-		x.Rune = true
-		if !lit.Bad {
-			u, _ := strconv.Unquote(s)
-			var r rune
-			if len(u) == 1 {
-				r = rune(u[0])
-			} else {
-				r, _ = utf8.DecodeRuneInString(u)
-			}
-			x.SetInt64(int64(r))
-		}
-		return Val{U: x}
-
-	case syntax.StringLit:
-		var x string
-		if !lit.Bad {
-			if len(s) > 0 && s[0] == '`' {
-				// strip carriage returns from raw string
-				s = strings.Replace(s, "\r", "", -1)
-			}
-			x, _ = strconv.Unquote(s)
-		}
-		return Val{U: x}
-
-	default:
-		panic("unhandled BasicLit kind")
-	}
-}
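
The RuneLit arm unquotes first and then decodes, taking the lone byte when
len(u) == 1 so that non-UTF-8 escapes such as '\xff' survive intact. The same
two-step decode as a standalone sketch (runeValue is hypothetical):

package main

import (
	"fmt"
	"strconv"
	"unicode/utf8"
)

// runeValue decodes a single-quoted rune literal the way basicLit does:
// unquote, then take the single byte or the leading UTF-8 rune.
func runeValue(lit string) rune {
	u, err := strconv.Unquote(lit)
	if err != nil {
		return utf8.RuneError // basicLit relies on lit.Bad instead
	}
	if len(u) == 1 {
		return rune(u[0]) // preserves byte escapes like '\xff'
	}
	r, _ := utf8.DecodeRuneInString(u)
	return r
}

func main() {
	fmt.Printf("%c %c %c\n", runeValue(`'a'`), runeValue(`'\u00e9'`), runeValue(`'世'`))
}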
-
-func (p *noder) name(name *syntax.Name) *types.Sym {
-	return lookup(name.Value)
-}
-
-func (p *noder) mkname(name *syntax.Name) *Node {
-	// TODO(mdempsky): Set line number?
-	return mkname(p.name(name))
-}
-
-func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
-	// These nodes do not carry line numbers.
-	// Introduce a wrapper node to give them the correct line.
-	switch x.Op {
-	case OTYPE, OLITERAL:
-		if x.Sym == nil {
-			break
-		}
-		fallthrough
-	case ONAME, ONONAME, OPACK:
-		x = p.nod(n, OPAREN, x, nil)
-		x.SetImplicit(true)
-	}
-	return x
-}
-
-func (p *noder) nod(orig syntax.Node, op Op, left, right *Node) *Node {
-	return nodl(p.pos(orig), op, left, right)
-}
-
-func (p *noder) nodSym(orig syntax.Node, op Op, left *Node, sym *types.Sym) *Node {
-	n := nodSym(op, left, sym)
-	n.Pos = p.pos(orig)
-	return n
-}
-
-func (p *noder) pos(n syntax.Node) src.XPos {
-	// TODO(gri): orig.Pos() should always be known - fix package syntax
-	xpos := lineno
-	if pos := n.Pos(); pos.IsKnown() {
-		xpos = p.makeXPos(pos)
-	}
-	return xpos
-}
-
-func (p *noder) setlineno(n syntax.Node) {
-	if n != nil {
-		lineno = p.pos(n)
-	}
-}
-
-// error is called concurrently if files are parsed concurrently.
-func (p *noder) error(err error) {
-	p.err <- err.(syntax.Error)
-}
-
-// pragmas that are allowed in the std lib, but don't have
-// a syntax.Pragma value (see lex.go) associated with them.
-var allowedStdPragmas = map[string]bool{
-	"go:cgo_export_static":  true,
-	"go:cgo_export_dynamic": true,
-	"go:cgo_import_static":  true,
-	"go:cgo_import_dynamic": true,
-	"go:cgo_ldflag":         true,
-	"go:cgo_dynamic_linker": true,
-	"go:embed":              true,
-	"go:generate":           true,
-}
-
-// *Pragma is the value stored in a syntax.Pragma during parsing.
-type Pragma struct {
-	Flag   PragmaFlag  // collected bits
-	Pos    []PragmaPos // position of each individual flag
-	Embeds []PragmaEmbed
-}
-
-type PragmaPos struct {
-	Flag PragmaFlag
-	Pos  syntax.Pos
-}
-
-type PragmaEmbed struct {
-	Pos      syntax.Pos
-	Patterns []string
-}
-
-func (p *noder) checkUnused(pragma *Pragma) {
-	for _, pos := range pragma.Pos {
-		if pos.Flag&pragma.Flag != 0 {
-			p.yyerrorpos(pos.Pos, "misplaced compiler directive")
-		}
-	}
-	if len(pragma.Embeds) > 0 {
-		for _, e := range pragma.Embeds {
-			p.yyerrorpos(e.Pos, "misplaced go:embed directive")
-		}
-	}
-}
-
-func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
-	for _, pos := range pragma.Pos {
-		if pos.Flag&pragma.Flag != 0 {
-			p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
-		}
-	}
-	if len(pragma.Embeds) > 0 {
-		for _, e := range pragma.Embeds {
-			p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
-		}
-	}
-}
-
-// pragma is called concurrently if files are parsed concurrently.
-func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
-	pragma, _ := old.(*Pragma)
-	if pragma == nil {
-		pragma = new(Pragma)
-	}
-
-	if text == "" {
-		// unused pragma; only called with old != nil.
-		p.checkUnusedDuringParse(pragma)
-		return nil
-	}
-
-	if strings.HasPrefix(text, "line ") {
-		// line directives are handled by syntax package
-		panic("unreachable")
-	}
-
-	if !blankLine {
-		// directive must be on line by itself
-		p.error(syntax.Error{Pos: pos, Msg: "misplaced compiler directive"})
-		return pragma
-	}
-
-	switch {
-	case strings.HasPrefix(text, "go:linkname "):
-		f := strings.Fields(text)
-		if !(2 <= len(f) && len(f) <= 3) {
-			p.error(syntax.Error{Pos: pos, Msg: "usage: //go:linkname localname [linkname]"})
-			break
-		}
-		// The second argument is optional. If omitted, we use
-		// the default object symbol name for this symbol, and
-		// linkname only serves to mark it as
-		// something that may be referenced via the object
-		// symbol name from another package.
-		var target string
-		if len(f) == 3 {
-			target = f[2]
-		}
-		p.linknames = append(p.linknames, linkname{pos, f[1], target})
-
-	case text == "go:embed", strings.HasPrefix(text, "go:embed "):
-		args, err := parseGoEmbed(text[len("go:embed"):])
-		if err != nil {
-			p.error(syntax.Error{Pos: pos, Msg: err.Error()})
-		}
-		if len(args) == 0 {
-			p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
-			break
-		}
-		pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
-
-	case strings.HasPrefix(text, "go:cgo_import_dynamic "):
-		// This is permitted for general use because Solaris
-		// code relies on it in golang.org/x/sys/unix and others.
-		fields := pragmaFields(text)
-		if len(fields) >= 4 {
-			lib := strings.Trim(fields[3], `"`)
-			if lib != "" && !safeArg(lib) && !isCgoGeneratedFile(pos) {
-				p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("invalid library name %q in cgo_import_dynamic directive", lib)})
-			}
-			p.pragcgo(pos, text)
-			pragma.Flag |= pragmaFlag("go:cgo_import_dynamic")
-			break
-		}
-		fallthrough
-	case strings.HasPrefix(text, "go:cgo_"):
-		// For security, we disallow //go:cgo_* directives other
-		// than cgo_import_dynamic outside cgo-generated files.
-		// Exception: they are allowed in the standard library, for runtime and syscall.
-		if !isCgoGeneratedFile(pos) && !compiling_std {
-			p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
-		}
-		p.pragcgo(pos, text)
-		fallthrough // because of //go:cgo_unsafe_args
-	default:
-		verb := text
-		if i := strings.Index(text, " "); i >= 0 {
-			verb = verb[:i]
-		}
-		flag := pragmaFlag(verb)
-		const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec
-		if !compiling_runtime && flag&runtimePragmas != 0 {
-			p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
-		}
-		if flag == 0 && !allowedStdPragmas[verb] && compiling_std {
-			p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
-		}
-		pragma.Flag |= flag
-		pragma.Pos = append(pragma.Pos, PragmaPos{flag, pos})
-	}
-
-	return pragma
-}
-
-// isCgoGeneratedFile reports whether pos is in a file
-// generated by cgo, which is to say a file with name
-// beginning with "_cgo_". Such files are allowed to
-// contain cgo directives, and for security reasons
-// (primarily misuse of linker flags), other files are not.
-// See golang.org/issue/23672.
-func isCgoGeneratedFile(pos syntax.Pos) bool {
-	return strings.HasPrefix(filepath.Base(filepath.Clean(fileh(pos.Base().Filename()))), "_cgo_")
-}
-
-// safeArg reports whether arg is a "safe" command-line argument,
-// meaning that when it appears in a command-line, it probably
-// doesn't have some special meaning other than its own name.
-// This is copied from SafeArg in cmd/go/internal/load/pkg.go.
-func safeArg(name string) bool {
-	if name == "" {
-		return false
-	}
-	c := name[0]
-	return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
-}
-
-func mkname(sym *types.Sym) *Node {
-	n := oldname(sym)
-	if n.Name != nil && n.Name.Pack != nil {
-		n.Name.Pack.Name.SetUsed(true)
-	}
-	return n
-}
-
-// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
-// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
-// go/build/read.go also processes these strings and contains similar logic.
-func parseGoEmbed(args string) ([]string, error) {
-	var list []string
-	for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
-		var path string
-	Switch:
-		switch args[0] {
-		default:
-			i := len(args)
-			for j, c := range args {
-				if unicode.IsSpace(c) {
-					i = j
-					break
-				}
-			}
-			path = args[:i]
-			args = args[i:]
-
-		case '`':
-			i := strings.Index(args[1:], "`")
-			if i < 0 {
-				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
-			}
-			path = args[1 : 1+i]
-			args = args[1+i+1:]
-
-		case '"':
-			i := 1
-			for ; i < len(args); i++ {
-				if args[i] == '\\' {
-					i++
-					continue
-				}
-				if args[i] == '"' {
-					q, err := strconv.Unquote(args[:i+1])
-					if err != nil {
-						return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
-					}
-					path = q
-					args = args[i+1:]
-					break Switch
-				}
-			}
-			if i >= len(args) {
-				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
-			}
-		}
-
-		if args != "" {
-			r, _ := utf8.DecodeRuneInString(args)
-			if !unicode.IsSpace(r) {
-				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
-			}
-		}
-		list = append(list, path)
-	}
-	return list, nil
-}
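
A quick driver shows the three argument spellings parseGoEmbed accepts in a
single directive (assuming the function above is lifted into a standalone
package together with its fmt/strconv/strings/unicode/utf8 imports):

func main() {
	args, err := parseGoEmbed(" a.txt \"b c.txt\" `d e.txt`")
	fmt.Println(args, err) // [a.txt b c.txt d e.txt] <nil>
}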
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 32aa7c5..0472af7 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -5,28 +5,21 @@
 package gc
 
 import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
+	"cmd/internal/archive"
 	"cmd/internal/bio"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
-	"cmd/internal/src"
-	"crypto/sha256"
 	"encoding/json"
 	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"sort"
-	"strconv"
 )
 
-// architecture-independent object file output
-const ArhdrSize = 60
-
-func formathdr(arhdr []byte, name string, size int64) {
-	copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
-}
-
 // These modes say which kind of object file to generate.
 // The default use of the toolchain is to set both bits,
 // generating a combined compiler+linker object, one that
@@ -46,20 +39,20 @@
 )
 
 func dumpobj() {
-	if linkobj == "" {
-		dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
+	if base.Flag.LinkObj == "" {
+		dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
 		return
 	}
-	dumpobj1(outfile, modeCompilerObj)
-	dumpobj1(linkobj, modeLinkerObj)
+	dumpobj1(base.Flag.LowerO, modeCompilerObj)
+	dumpobj1(base.Flag.LinkObj, modeLinkerObj)
 }
 
 func dumpobj1(outfile string, mode int) {
 	bout, err := bio.Create(outfile)
 	if err != nil {
-		flusherrors()
+		base.FlushErrors()
 		fmt.Printf("can't create %s: %v\n", outfile, err)
-		errorexit()
+		base.ErrorExit()
 	}
 	defer bout.Close()
 	bout.WriteString("!<arch>\n")
@@ -78,17 +71,17 @@
 
 func printObjHeader(bout *bio.Writer) {
 	fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
-	if buildid != "" {
-		fmt.Fprintf(bout, "build id %q\n", buildid)
+	if base.Flag.BuildID != "" {
+		fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
 	}
-	if localpkg.Name == "main" {
+	if types.LocalPkg.Name == "main" {
 		fmt.Fprintf(bout, "main\n")
 	}
 	fmt.Fprintf(bout, "\n") // header ends with blank line
 }
 
 func startArchiveEntry(bout *bio.Writer) int64 {
-	var arhdr [ArhdrSize]byte
+	var arhdr [archive.HeaderSize]byte
 	bout.Write(arhdr[:])
 	return bout.Offset()
 }
@@ -99,10 +92,10 @@
 	if size&1 != 0 {
 		bout.WriteByte(0)
 	}
-	bout.MustSeek(start-ArhdrSize, 0)
+	bout.MustSeek(start-archive.HeaderSize, 0)
 
-	var arhdr [ArhdrSize]byte
-	formathdr(arhdr[:], name, size)
+	var arhdr [archive.HeaderSize]byte
+	archive.FormatHeader(arhdr[:], name, size)
 	bout.Write(arhdr[:])
 	bout.Flush()
 	bout.MustSeek(start+size+(size&1), 0)
@@ -114,22 +107,21 @@
 }
 
 func dumpdata() {
-	externs := len(externdcl)
-	xtops := len(xtop)
+	numExterns := len(typecheck.Target.Externs)
+	numDecls := len(typecheck.Target.Decls)
 
-	dumpglobls()
-	addptabs()
-	exportlistLen := len(exportlist)
-	addsignats(externdcl)
-	dumpsignats()
-	dumptabs()
-	ptabsLen := len(ptabs)
-	itabsLen := len(itabs)
-	dumpimportstrings()
-	dumpbasictypes()
+	dumpglobls(typecheck.Target.Externs)
+	reflectdata.CollectPTabs()
+	numExports := len(typecheck.Target.Exports)
+	addsignats(typecheck.Target.Externs)
+	reflectdata.WriteRuntimeTypes()
+	reflectdata.WriteTabs()
+	numPTabs, numITabs := reflectdata.CountTabs()
+	reflectdata.WriteImportStrings()
+	reflectdata.WriteBasicTypes()
 	dumpembeds()
 
-	// Calls to dumpsignats can generate functions,
+	// Calls to WriteRuntimeTypes can generate functions,
 	// like method wrappers and hash and equality routines.
 	// Compile any generated functions, process any new resulting types, repeat.
 	// This can't loop forever, because there is no way to generate an infinite
@@ -137,169 +129,108 @@
 	// In the typical case, we loop 0 or 1 times.
 	// It was not until issue 24761 that we found any code that required a loop at all.
 	for {
-		for i := xtops; i < len(xtop); i++ {
-			n := xtop[i]
-			if n.Op == ODCLFUNC {
-				funccompile(n)
+		for i := numDecls; i < len(typecheck.Target.Decls); i++ {
+			if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
+				enqueueFunc(n)
 			}
 		}
-		xtops = len(xtop)
+		numDecls = len(typecheck.Target.Decls)
 		compileFunctions()
-		dumpsignats()
-		if xtops == len(xtop) {
+		reflectdata.WriteRuntimeTypes()
+		if numDecls == len(typecheck.Target.Decls) {
 			break
 		}
 	}
 
 	// Dump extra globals.
-	tmp := externdcl
+	dumpglobls(typecheck.Target.Externs[numExterns:])
 
-	if externdcl != nil {
-		externdcl = externdcl[externs:]
-	}
-	dumpglobls()
-	externdcl = tmp
-
-	if zerosize > 0 {
-		zero := mappkg.Lookup("zero")
-		ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+	if reflectdata.ZeroSize > 0 {
+		zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
+		objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
 	}
 
+	staticdata.WriteFuncSyms()
 	addGCLocals()
 
-	if exportlistLen != len(exportlist) {
-		Fatalf("exportlist changed after compile functions loop")
+	if numExports != len(typecheck.Target.Exports) {
+		base.Fatalf("Target.Exports changed after compile functions loop")
 	}
-	if ptabsLen != len(ptabs) {
-		Fatalf("ptabs changed after compile functions loop")
+	newNumPTabs, newNumITabs := reflectdata.CountTabs()
+	if newNumPTabs != numPTabs {
+		base.Fatalf("ptabs changed after compile functions loop")
 	}
-	if itabsLen != len(itabs) {
-		Fatalf("itabs changed after compile functions loop")
+	if newNumITabs != numITabs {
+		base.Fatalf("itabs changed after compile functions loop")
 	}
 }
 
 func dumpLinkerObj(bout *bio.Writer) {
 	printObjHeader(bout)
 
-	if len(pragcgobuf) != 0 {
+	if len(typecheck.Target.CgoPragmas) != 0 {
 		// write empty export section; must be before cgo section
 		fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
 		fmt.Fprintf(bout, "\n$$  // cgo\n")
-		if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
-			Fatalf("serializing pragcgobuf: %v", err)
+		if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
+			base.Fatalf("serializing pragcgobuf: %v", err)
 		}
 		fmt.Fprintf(bout, "\n$$\n\n")
 	}
 
 	fmt.Fprintf(bout, "\n!\n")
 
-	obj.WriteObjFile(Ctxt, bout)
+	obj.WriteObjFile(base.Ctxt, bout)
 }
 
-func addptabs() {
-	if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
+func dumpGlobal(n *ir.Name) {
+	if n.Type() == nil {
+		base.Fatalf("external %v nil type\n", n)
+	}
+	if n.Class == ir.PFUNC {
 		return
 	}
-	for _, exportn := range exportlist {
-		s := exportn.Sym
-		n := asNode(s.Def)
-		if n == nil {
-			continue
-		}
-		if n.Op != ONAME {
-			continue
-		}
-		if !types.IsExported(s.Name) {
-			continue
-		}
-		if s.Pkg.Name != "main" {
-			continue
-		}
-		if n.Type.Etype == TFUNC && n.Class() == PFUNC {
-			// function
-			ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
-		} else {
-			// variable
-			ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
-		}
-	}
-}
-
-func dumpGlobal(n *Node) {
-	if n.Type == nil {
-		Fatalf("external %v nil type\n", n)
-	}
-	if n.Class() == PFUNC {
+	if n.Sym().Pkg != types.LocalPkg {
 		return
 	}
-	if n.Sym.Pkg != localpkg {
-		return
-	}
-	dowidth(n.Type)
+	types.CalcSize(n.Type())
 	ggloblnod(n)
 }
 
-func dumpGlobalConst(n *Node) {
+func dumpGlobalConst(n ir.Node) {
 	// only export typed constants
-	t := n.Type
+	t := n.Type()
 	if t == nil {
 		return
 	}
-	if n.Sym.Pkg != localpkg {
+	if n.Sym().Pkg != types.LocalPkg {
 		return
 	}
 	// only export integer constants for now
-	switch t.Etype {
-	case TINT8:
-	case TINT16:
-	case TINT32:
-	case TINT64:
-	case TINT:
-	case TUINT8:
-	case TUINT16:
-	case TUINT32:
-	case TUINT64:
-	case TUINT:
-	case TUINTPTR:
-		// ok
-	case TIDEAL:
-		if !Isconst(n, CTINT) {
-			return
-		}
-		x := n.Val().U.(*Mpint)
-		if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
-			return
-		}
-		// Ideal integers we export as int (if they fit).
-		t = types.Types[TINT]
-	default:
+	if !t.IsInteger() {
 		return
 	}
-	Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
+	v := n.Val()
+	if t.IsUntyped() {
+		// Export untyped integers as int (if they fit).
+		t = types.Types[types.TINT]
+		if ir.ConstOverflow(v, t) {
+			return
+		}
+	}
+	base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
 }
 
-func dumpglobls() {
+func dumpglobls(externs []ir.Node) {
 	// add globals
-	for _, n := range externdcl {
-		switch n.Op {
-		case ONAME:
-			dumpGlobal(n)
-		case OLITERAL:
+	for _, n := range externs {
+		switch n.Op() {
+		case ir.ONAME:
+			dumpGlobal(n.(*ir.Name))
+		case ir.OLITERAL:
 			dumpGlobalConst(n)
 		}
 	}
-
-	sort.Slice(funcsyms, func(i, j int) bool {
-		return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
-	})
-	for _, s := range funcsyms {
-		sf := s.Pkg.Lookup(funcsymname(s)).Linksym()
-		dsymptr(sf, 0, s.Linksym(), 0)
-		ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	}
-
-	// Do not reprocess funcsyms on next dumpglobls call.
-	funcsyms = nil
 }
 
 // addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
@@ -307,332 +238,60 @@
 // This is done during the sequential phase after compilation, since
 // global symbols can't be declared during parallel compilation.
 func addGCLocals() {
-	for _, s := range Ctxt.Text {
+	for _, s := range base.Ctxt.Text {
 		fn := s.Func()
 		if fn == nil {
 			continue
 		}
 		for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
 			if gcsym != nil && !gcsym.OnList() {
-				ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+				objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
 			}
 		}
 		if x := fn.StackObjects; x != nil {
 			attr := int16(obj.RODATA)
-			ggloblsym(x, int32(len(x.P)), attr)
+			objw.Global(x, int32(len(x.P)), attr)
 			x.Set(obj.AttrStatic, true)
 		}
 		if x := fn.OpenCodedDeferInfo; x != nil {
-			ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+			objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
 		}
 	}
 }
 
-func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
-	if off&(wid-1) != 0 {
-		Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+func ggloblnod(nam *ir.Name) {
+	s := nam.Linksym()
+	s.Gotype = reflectdata.TypeLinksym(nam.Type())
+	flags := 0
+	if nam.Readonly() {
+		flags = obj.RODATA
 	}
-	s.WriteInt(Ctxt, int64(off), wid, int64(v))
-	return off + wid
+	if nam.Type() != nil && !nam.Type().HasPointers() {
+		flags |= obj.NOPTR
+	}
+	base.Ctxt.Globl(s, nam.Type().Width, flags)
+	if nam.LibfuzzerExtraCounter() {
+		s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
+	}
+	if nam.Sym().Linkname != "" {
+		// Make sure linkname'd symbol is non-package. When a symbol is
+	// both imported and linkname'd, s.Pkg may not be set to "_" in
+		// types.Sym.Linksym because LSym already exists. Set it here.
+		s.Pkg = "_"
+	}
 }
 
-func duint8(s *obj.LSym, off int, v uint8) int {
-	return duintxx(s, off, uint64(v), 1)
+func dumpembeds() {
+	for _, v := range typecheck.Target.Embeds {
+		staticdata.WriteEmbed(v)
+	}
 }
 
-func duint16(s *obj.LSym, off int, v uint16) int {
-	return duintxx(s, off, uint64(v), 2)
-}
-
-func duint32(s *obj.LSym, off int, v uint32) int {
-	return duintxx(s, off, uint64(v), 4)
-}
-
-func duintptr(s *obj.LSym, off int, v uint64) int {
-	return duintxx(s, off, v, Widthptr)
-}
-
-func dbvec(s *obj.LSym, off int, bv bvec) int {
-	// Runtime reads the bitmaps as byte arrays. Oblige.
-	for j := 0; int32(j) < bv.n; j += 8 {
-		word := bv.b[j/32]
-		off = duint8(s, off, uint8(word>>(uint(j)%32)))
-	}
-	return off
-}
-
-const (
-	stringSymPrefix  = "go.string."
-	stringSymPattern = ".gostring.%d.%x"
-)
-
-// stringsym returns a symbol containing the string s.
-// The symbol contains the string data, not a string header.
-func stringsym(pos src.XPos, s string) (data *obj.LSym) {
-	var symname string
-	if len(s) > 100 {
-		// Huge strings are hashed to avoid long names in object files.
-		// Indulge in some paranoia by writing the length of s, too,
-		// as protection against length extension attacks.
-		// The same naming pattern is used by fileStringSym below.
-		h := sha256.New()
-		io.WriteString(h, s)
-		symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
-	} else {
-		// Small strings get named directly by their contents.
-		symname = strconv.Quote(s)
-	}
-
-	symdata := Ctxt.Lookup(stringSymPrefix + symname)
-	if !symdata.OnList() {
-		off := dstringdata(symdata, 0, s, pos, "string")
-		ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
-		symdata.Set(obj.AttrContentAddressable, true)
-	}
-
-	return symdata
-}
-
-// fileStringSym returns a symbol for the contents and the size of file.
-// If readonly is true, the symbol shares storage with any literal string
-// or other file with the same content and is placed in a read-only section.
-// If readonly is false, the symbol is a read-write copy separate from any other,
-// for use as the backing store of a []byte.
-// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
-// The returned symbol contains the data itself, not a string header.
-func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
-	f, err := os.Open(file)
-	if err != nil {
-		return nil, 0, err
-	}
-	defer f.Close()
-	info, err := f.Stat()
-	if err != nil {
-		return nil, 0, err
-	}
-	if !info.Mode().IsRegular() {
-		return nil, 0, fmt.Errorf("not a regular file")
-	}
-	size := info.Size()
-	if size <= 1*1024 {
-		data, err := ioutil.ReadAll(f)
-		if err != nil {
-			return nil, 0, err
+func addsignats(dcls []ir.Node) {
+	// copy types from dcl list to signatset
+	for _, n := range dcls {
+		if n.Op() == ir.OTYPE {
+			reflectdata.NeedRuntimeType(n.Type())
 		}
-		if int64(len(data)) != size {
-			return nil, 0, fmt.Errorf("file changed between reads")
-		}
-		var sym *obj.LSym
-		if readonly {
-			sym = stringsym(pos, string(data))
-		} else {
-			sym = slicedata(pos, string(data)).Sym.Linksym()
-		}
-		if len(hash) > 0 {
-			sum := sha256.Sum256(data)
-			copy(hash, sum[:])
-		}
-		return sym, size, nil
-	}
-	if size > 2e9 {
-		// ggloblsym takes an int32,
-		// and probably the rest of the toolchain
-		// can't handle such big symbols either.
-		// See golang.org/issue/9862.
-		return nil, 0, fmt.Errorf("file too large")
-	}
-
-	// File is too big to read and keep in memory.
-	// Compute hash if needed for read-only content hashing or if the caller wants it.
-	var sum []byte
-	if readonly || len(hash) > 0 {
-		h := sha256.New()
-		n, err := io.Copy(h, f)
-		if err != nil {
-			return nil, 0, err
-		}
-		if n != size {
-			return nil, 0, fmt.Errorf("file changed between reads")
-		}
-		sum = h.Sum(nil)
-		copy(hash, sum)
-	}
-
-	var symdata *obj.LSym
-	if readonly {
-		symname := fmt.Sprintf(stringSymPattern, size, sum)
-		symdata = Ctxt.Lookup(stringSymPrefix + symname)
-		if !symdata.OnList() {
-			info := symdata.NewFileInfo()
-			info.Name = file
-			info.Size = size
-			ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
-			// Note: AttrContentAddressable cannot be set here,
-			// because the content-addressable-handling code
-			// does not know about file symbols.
-		}
-	} else {
-		// Emit a zero-length data symbol
-		// and then fix up length and content to use file.
-		symdata = slicedata(pos, "").Sym.Linksym()
-		symdata.Size = size
-		symdata.Type = objabi.SNOPTRDATA
-		info := symdata.NewFileInfo()
-		info.Name = file
-		info.Size = size
-	}
-
-	return symdata, size, nil
-}
-
-var slicedataGen int
-
-func slicedata(pos src.XPos, s string) *Node {
-	slicedataGen++
-	symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
-	sym := localpkg.Lookup(symname)
-	symnode := newname(sym)
-	sym.Def = asTypesNode(symnode)
-
-	lsym := sym.Linksym()
-	off := dstringdata(lsym, 0, s, pos, "slice")
-	ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
-
-	return symnode
-}
-
-func slicebytes(nam *Node, s string) {
-	if nam.Op != ONAME {
-		Fatalf("slicebytes %v", nam)
-	}
-	slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
-}
-
-func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
-	// Objects that are too large will cause the data section to overflow right away,
-	// causing a cryptic error message by the linker. Check for oversize objects here
-	// causing a cryptic error message from the linker. Check for oversize objects here
-	if int64(len(t)) > 2e9 {
-		yyerrorl(pos, "%v with length %v is too big", what, len(t))
-		return 0
-	}
-
-	s.WriteString(Ctxt, int64(off), len(t), t)
-	return off + len(t)
-}
-
-func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
-	off = int(Rnd(int64(off), int64(Widthptr)))
-	s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
-	off += Widthptr
-	return off
-}
-
-func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
-	s.WriteOff(Ctxt, int64(off), x, 0)
-	off += 4
-	return off
-}
-
-func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
-	s.WriteWeakOff(Ctxt, int64(off), x, 0)
-	off += 4
-	return off
-}
-
-// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
-// arr must be an ONAME. slicesym does not modify n.
-func slicesym(n, arr *Node, lencap int64) {
-	s := n.Sym.Linksym()
-	base := n.Xoffset
-	if arr.Op != ONAME {
-		Fatalf("slicesym non-name arr %v", arr)
-	}
-	s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
-	s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
-	s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
-}
-
-// addrsym writes the static address of a to n. a must be an ONAME.
-// Neither n nor a is modified.
-func addrsym(n, a *Node) {
-	if n.Op != ONAME {
-		Fatalf("addrsym n op %v", n.Op)
-	}
-	if n.Sym == nil {
-		Fatalf("addrsym nil n sym")
-	}
-	if a.Op != ONAME {
-		Fatalf("addrsym a op %v", a.Op)
-	}
-	s := n.Sym.Linksym()
-	s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
-}
-
-// pfuncsym writes the static address of f to n. f must be a global function.
-// Neither n nor f is modified.
-func pfuncsym(n, f *Node) {
-	if n.Op != ONAME {
-		Fatalf("pfuncsym n op %v", n.Op)
-	}
-	if n.Sym == nil {
-		Fatalf("pfuncsym nil n sym")
-	}
-	if f.Class() != PFUNC {
-		Fatalf("pfuncsym class not PFUNC %d", f.Class())
-	}
-	s := n.Sym.Linksym()
-	s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
-}
-
-// litsym writes the static literal c to n.
-// Neither n nor c is modified.
-func litsym(n, c *Node, wid int) {
-	if n.Op != ONAME {
-		Fatalf("litsym n op %v", n.Op)
-	}
-	if c.Op != OLITERAL {
-		Fatalf("litsym c op %v", c.Op)
-	}
-	if n.Sym == nil {
-		Fatalf("litsym nil n sym")
-	}
-	s := n.Sym.Linksym()
-	switch u := c.Val().U.(type) {
-	case bool:
-		i := int64(obj.Bool2int(u))
-		s.WriteInt(Ctxt, n.Xoffset, wid, i)
-
-	case *Mpint:
-		s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
-
-	case *Mpflt:
-		f := u.Float64()
-		switch n.Type.Etype {
-		case TFLOAT32:
-			s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
-		case TFLOAT64:
-			s.WriteFloat64(Ctxt, n.Xoffset, f)
-		}
-
-	case *Mpcplx:
-		r := u.Real.Float64()
-		i := u.Imag.Float64()
-		switch n.Type.Etype {
-		case TCOMPLEX64:
-			s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
-			s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
-		case TCOMPLEX128:
-			s.WriteFloat64(Ctxt, n.Xoffset, r)
-			s.WriteFloat64(Ctxt, n.Xoffset+8, i)
-		}
-
-	case string:
-		symdata := stringsym(n.Pos, u)
-		s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
-		s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
-
-	default:
-		Fatalf("litsym unhandled OLITERAL %v", c)
 	}
 }
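
The deleted formathdr (now archive.FormatHeader in cmd/internal/archive)
emits the classic 60-byte Unix ar member header: name, mtime, uid, gid,
octal mode, size, and the "`\n" terminator. A sketch of the same layout,
with the zero uid/gid/mtime the compiler always wrote:

package main

import "fmt"

// arHeader renders one 60-byte archive member header.
func arHeader(name string, size int64) string {
	// 16 + 12 + 6 + 6 + 8 + 10 bytes of fields, then "`\n".
	return fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size)
}

func main() {
	h := arHeader("__.PKGDEF", 1234)
	fmt.Printf("%q len=%d\n", h, len(h)) // len is always 60
}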
diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go
deleted file mode 100644
index 41d5883..0000000
--- a/src/cmd/compile/internal/gc/op_string.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
-
-package gc
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[OXXX-0]
-	_ = x[ONAME-1]
-	_ = x[ONONAME-2]
-	_ = x[OTYPE-3]
-	_ = x[OPACK-4]
-	_ = x[OLITERAL-5]
-	_ = x[OADD-6]
-	_ = x[OSUB-7]
-	_ = x[OOR-8]
-	_ = x[OXOR-9]
-	_ = x[OADDSTR-10]
-	_ = x[OADDR-11]
-	_ = x[OANDAND-12]
-	_ = x[OAPPEND-13]
-	_ = x[OBYTES2STR-14]
-	_ = x[OBYTES2STRTMP-15]
-	_ = x[ORUNES2STR-16]
-	_ = x[OSTR2BYTES-17]
-	_ = x[OSTR2BYTESTMP-18]
-	_ = x[OSTR2RUNES-19]
-	_ = x[OAS-20]
-	_ = x[OAS2-21]
-	_ = x[OAS2DOTTYPE-22]
-	_ = x[OAS2FUNC-23]
-	_ = x[OAS2MAPR-24]
-	_ = x[OAS2RECV-25]
-	_ = x[OASOP-26]
-	_ = x[OCALL-27]
-	_ = x[OCALLFUNC-28]
-	_ = x[OCALLMETH-29]
-	_ = x[OCALLINTER-30]
-	_ = x[OCALLPART-31]
-	_ = x[OCAP-32]
-	_ = x[OCLOSE-33]
-	_ = x[OCLOSURE-34]
-	_ = x[OCOMPLIT-35]
-	_ = x[OMAPLIT-36]
-	_ = x[OSTRUCTLIT-37]
-	_ = x[OARRAYLIT-38]
-	_ = x[OSLICELIT-39]
-	_ = x[OPTRLIT-40]
-	_ = x[OCONV-41]
-	_ = x[OCONVIFACE-42]
-	_ = x[OCONVNOP-43]
-	_ = x[OCOPY-44]
-	_ = x[ODCL-45]
-	_ = x[ODCLFUNC-46]
-	_ = x[ODCLFIELD-47]
-	_ = x[ODCLCONST-48]
-	_ = x[ODCLTYPE-49]
-	_ = x[ODELETE-50]
-	_ = x[ODOT-51]
-	_ = x[ODOTPTR-52]
-	_ = x[ODOTMETH-53]
-	_ = x[ODOTINTER-54]
-	_ = x[OXDOT-55]
-	_ = x[ODOTTYPE-56]
-	_ = x[ODOTTYPE2-57]
-	_ = x[OEQ-58]
-	_ = x[ONE-59]
-	_ = x[OLT-60]
-	_ = x[OLE-61]
-	_ = x[OGE-62]
-	_ = x[OGT-63]
-	_ = x[ODEREF-64]
-	_ = x[OINDEX-65]
-	_ = x[OINDEXMAP-66]
-	_ = x[OKEY-67]
-	_ = x[OSTRUCTKEY-68]
-	_ = x[OLEN-69]
-	_ = x[OMAKE-70]
-	_ = x[OMAKECHAN-71]
-	_ = x[OMAKEMAP-72]
-	_ = x[OMAKESLICE-73]
-	_ = x[OMAKESLICECOPY-74]
-	_ = x[OMUL-75]
-	_ = x[ODIV-76]
-	_ = x[OMOD-77]
-	_ = x[OLSH-78]
-	_ = x[ORSH-79]
-	_ = x[OAND-80]
-	_ = x[OANDNOT-81]
-	_ = x[ONEW-82]
-	_ = x[ONEWOBJ-83]
-	_ = x[ONOT-84]
-	_ = x[OBITNOT-85]
-	_ = x[OPLUS-86]
-	_ = x[ONEG-87]
-	_ = x[OOROR-88]
-	_ = x[OPANIC-89]
-	_ = x[OPRINT-90]
-	_ = x[OPRINTN-91]
-	_ = x[OPAREN-92]
-	_ = x[OSEND-93]
-	_ = x[OSLICE-94]
-	_ = x[OSLICEARR-95]
-	_ = x[OSLICESTR-96]
-	_ = x[OSLICE3-97]
-	_ = x[OSLICE3ARR-98]
-	_ = x[OSLICEHEADER-99]
-	_ = x[ORECOVER-100]
-	_ = x[ORECV-101]
-	_ = x[ORUNESTR-102]
-	_ = x[OSELRECV-103]
-	_ = x[OSELRECV2-104]
-	_ = x[OIOTA-105]
-	_ = x[OREAL-106]
-	_ = x[OIMAG-107]
-	_ = x[OCOMPLEX-108]
-	_ = x[OALIGNOF-109]
-	_ = x[OOFFSETOF-110]
-	_ = x[OSIZEOF-111]
-	_ = x[OBLOCK-112]
-	_ = x[OBREAK-113]
-	_ = x[OCASE-114]
-	_ = x[OCONTINUE-115]
-	_ = x[ODEFER-116]
-	_ = x[OEMPTY-117]
-	_ = x[OFALL-118]
-	_ = x[OFOR-119]
-	_ = x[OFORUNTIL-120]
-	_ = x[OGOTO-121]
-	_ = x[OIF-122]
-	_ = x[OLABEL-123]
-	_ = x[OGO-124]
-	_ = x[ORANGE-125]
-	_ = x[ORETURN-126]
-	_ = x[OSELECT-127]
-	_ = x[OSWITCH-128]
-	_ = x[OTYPESW-129]
-	_ = x[OTCHAN-130]
-	_ = x[OTMAP-131]
-	_ = x[OTSTRUCT-132]
-	_ = x[OTINTER-133]
-	_ = x[OTFUNC-134]
-	_ = x[OTARRAY-135]
-	_ = x[ODDD-136]
-	_ = x[OINLCALL-137]
-	_ = x[OEFACE-138]
-	_ = x[OITAB-139]
-	_ = x[OIDATA-140]
-	_ = x[OSPTR-141]
-	_ = x[OCLOSUREVAR-142]
-	_ = x[OCFUNC-143]
-	_ = x[OCHECKNIL-144]
-	_ = x[OVARDEF-145]
-	_ = x[OVARKILL-146]
-	_ = x[OVARLIVE-147]
-	_ = x[ORESULT-148]
-	_ = x[OINLMARK-149]
-	_ = x[ORETJMP-150]
-	_ = x[OGETG-151]
-	_ = x[OEND-152]
-}
-
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
-
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}
-
-func (i Op) String() string {
-	if i >= Op(len(_Op_index)-1) {
-		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _Op_name[_Op_index[i]:_Op_index[i+1]]
-}
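
The generated stringer avoids allocating one string per Op: all names live in
a single concatenated constant, and a uint16 table marks the boundaries, so
String is two table lookups and a slice. The same layout in miniature for a
hypothetical three-value type:

package main

import (
	"fmt"
	"strconv"
)

type Color int

const (
	Red Color = iota
	Green
	Blue
)

// One shared backing string plus an index table, the exact layout
// stringer emits for Op above.
const _Color_name = "RedGreenBlue"

var _Color_index = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(_Color_index)-1 {
		return "Color(" + strconv.FormatInt(int64(c), 10) + ")"
	}
	return _Color_name[_Color_index[c]:_Color_index[c+1]]
}

func main() {
	fmt.Println(Red, Green, Blue, Color(7)) // Red Green Blue Color(7)
}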
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
deleted file mode 100644
index 30e1535..0000000
--- a/src/cmd/compile/internal/gc/order.go
+++ /dev/null
@@ -1,1441 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-)
-
-// Rewrite tree to use separate statements to enforce
-// order of evaluation. Makes walk easier, because it
-// can (after this runs) reorder at will within an expression.
-//
-// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
-//
-// Introduce temporaries as needed by runtime routines.
-// For example, the map runtime routines take the map key
-// by reference, so make sure all map keys are addressable
-// by copying them to temporaries as needed.
-// The same is true for channel operations.
-//
-// Arrange that map index expressions only appear in direct
-// assignments x = m[k] or m[k] = x, never in larger expressions.
-//
-// Arrange that receive expressions only appear in direct assignments
-// x = <-c or as standalone statements <-c, never in larger expressions.
-
-// TODO(rsc): The temporary introduction during multiple assignments
-// should be moved into this file, so that the temporaries can be cleaned
-// and so that conversions implicit in the OAS2FUNC and OAS2RECV
-// nodes can be made explicit and then have their temporaries cleaned.
-
-// TODO(rsc): Goto and multilevel break/continue can jump over
-// inserted VARKILL annotations. Work out a way to handle these.
-// The current implementation is safe, in that it will execute correctly.
-// But it won't reuse temporaries as aggressively as it might, and
-// it can result in unnecessary zeroing of those variables in the function
-// prologue.
-
-// Order holds state during the ordering process.
-type Order struct {
-	out  []*Node            // list of generated statements
-	temp []*Node            // stack of temporary variables
-	free map[string][]*Node // free list of unused temporaries, by type.LongString().
-}
-
-// Order rewrites fn.Nbody to apply the ordering constraints
-// described in the comment at the top of the file.
-func order(fn *Node) {
-	if Debug.W > 1 {
-		s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
-		dumplist(s, fn.Nbody)
-	}
-
-	orderBlock(&fn.Nbody, map[string][]*Node{})
-}
-
-// newTemp allocates a new temporary with the given type,
-// pushes it onto the temp stack, and returns it.
-// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *Node {
-	var v *Node
-	// Note: LongString is close to the type equality we want,
-	// but not exactly. We still need to double-check with types.Identical.
-	key := t.LongString()
-	a := o.free[key]
-	for i, n := range a {
-		if types.Identical(t, n.Type) {
-			v = a[i]
-			a[i] = a[len(a)-1]
-			a = a[:len(a)-1]
-			o.free[key] = a
-			break
-		}
-	}
-	if v == nil {
-		v = temp(t)
-	}
-	if clear {
-		a := nod(OAS, v, nil)
-		a = typecheck(a, ctxStmt)
-		o.out = append(o.out, a)
-	}
-
-	o.temp = append(o.temp, v)
-	return v
-}
-
-// copyExpr behaves like newTemp but also emits
-// code to initialize the temporary to the value n.
-//
-// The clear argument is provided for use when the evaluation
-// of tmp = n turns into a function call that is passed a pointer
-// to the temporary as the output space. If the call blocks before
-// tmp has been written, the garbage collector will still treat the
-// temporary as live, so we must zero it before entering that call.
-// Today, this only happens for channel receive operations.
-// (The other candidate would be map access, but map access
-// returns a pointer to the result data instead of taking a pointer
-// to be filled in.)
-func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node {
-	v := o.newTemp(t, clear)
-	a := nod(OAS, v, n)
-	a = typecheck(a, ctxStmt)
-	o.out = append(o.out, a)
-	return v
-}
-
-// cheapExpr returns a cheap version of n.
-// The definition of cheap is that n is a variable or constant.
-// If not, cheapExpr allocates a new tmp, emits tmp = n,
-// and then returns tmp.
-func (o *Order) cheapExpr(n *Node) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-	case OLEN, OCAP:
-		l := o.cheapExpr(n.Left)
-		if l == n.Left {
-			return n
-		}
-		a := n.sepcopy()
-		a.Left = l
-		return typecheck(a, ctxExpr)
-	}
-
-	return o.copyExpr(n, n.Type, false)
-}
-
-// safeExpr returns a safe version of n.
-// The definition of safe is that n can appear multiple times
-// without violating the semantics of the original program,
-// and that assigning to the safe version has the same effect
-// as assigning to the original n.
-//
-// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n *Node) *Node {
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-
-	case ODOT, OLEN, OCAP:
-		l := o.safeExpr(n.Left)
-		if l == n.Left {
-			return n
-		}
-		a := n.sepcopy()
-		a.Left = l
-		return typecheck(a, ctxExpr)
-
-	case ODOTPTR, ODEREF:
-		l := o.cheapExpr(n.Left)
-		if l == n.Left {
-			return n
-		}
-		a := n.sepcopy()
-		a.Left = l
-		return typecheck(a, ctxExpr)
-
-	case OINDEX, OINDEXMAP:
-		var l *Node
-		if n.Left.Type.IsArray() {
-			l = o.safeExpr(n.Left)
-		} else {
-			l = o.cheapExpr(n.Left)
-		}
-		r := o.cheapExpr(n.Right)
-		if l == n.Left && r == n.Right {
-			return n
-		}
-		a := n.sepcopy()
-		a.Left = l
-		a.Right = r
-		return typecheck(a, ctxExpr)
-
-	default:
-		Fatalf("order.safeExpr %v", n.Op)
-		return nil // not reached
-	}
-}
-
-// isaddrokay reports whether it is okay to pass n's address to runtime routines.
-// Taking the address of a variable makes the liveness and optimization analyses
-// lose track of where the variable's lifetime ends. To avoid hurting the analyses
-// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
-// because we emit explicit VARKILL instructions marking the end of those
-// temporaries' lifetimes.
-func isaddrokay(n *Node) bool {
-	return islvalue(n) && (n.Op != ONAME || n.Class() == PEXTERN || n.IsAutoTmp())
-}
-
-// addrTemp ensures that n is okay to pass by address to runtime routines.
-// If the original argument n is not okay, addrTemp creates a tmp, emits
-// tmp = n, and then returns tmp.
-// The result of addrTemp MUST be assigned back to n, e.g.
-// 	n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n *Node) *Node {
-	if consttype(n) != CTxxx {
-		// TODO: expand this to all static composite literal nodes?
-		n = defaultlit(n, nil)
-		dowidth(n.Type)
-		vstat := readonlystaticname(n.Type)
-		var s InitSchedule
-		s.staticassign(vstat, n)
-		if s.out != nil {
-			Fatalf("staticassign of const generated code: %+v", n)
-		}
-		vstat = typecheck(vstat, ctxExpr)
-		return vstat
-	}
-	if isaddrokay(n) {
-		return n
-	}
-	return o.copyExpr(n, n.Type, false)
-}
-
-// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
-// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node {
-	// Most map calls need to take the address of the key.
-	// Exception: map*_fast* calls. See golang.org/issue/19015.
-	if mapfast(t) == mapslow {
-		return o.addrTemp(n)
-	}
-	return n
-}
-
-// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
-// in n to avoid string allocations for keys in map lookups.
-// Returns a bool that signals if a modification was made.
-//
-// For:
-//  x = m[string(k)]
-//  x = m[T1{... Tn{..., string(k), ...}}]
-// where k is []byte, T1 to Tn is a nesting of struct and array literals,
-// the allocation of backing bytes for the string can be avoided
-// by reusing the []byte backing array. These are special cases
-// for avoiding allocations when converting byte slices to strings.
-// It would be nice to handle these generally, but because
-// []byte keys are not allowed in maps, the use of string(k)
-// comes up in important cases in practice. See issue 3512.
-func mapKeyReplaceStrConv(n *Node) bool {
-	var replaced bool
-	switch n.Op {
-	case OBYTES2STR:
-		n.Op = OBYTES2STRTMP
-		replaced = true
-	case OSTRUCTLIT:
-		for _, elem := range n.List.Slice() {
-			if mapKeyReplaceStrConv(elem.Left) {
-				replaced = true
-			}
-		}
-	case OARRAYLIT:
-		for _, elem := range n.List.Slice() {
-			if elem.Op == OKEY {
-				elem = elem.Right
-			}
-			if mapKeyReplaceStrConv(elem) {
-				replaced = true
-			}
-		}
-	}
-	return replaced
-}
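
The OBYTES2STR to OBYTES2STRTMP rewrite is what makes the lookup idiom below
allocation-free: the temporary string can share the []byte backing array
because it cannot outlive the index expression. A small illustration:

package main

import "fmt"

func main() {
	m := map[string]int{"hello": 1}
	k := []byte("hello")

	// Recognized by mapKeyReplaceStrConv: the conversion result is used
	// only as a map key, so no string copy is allocated.
	fmt.Println(m[string(k)]) // 1

	// By contrast, s outlives the expression, so this copy must allocate.
	s := string(k)
	fmt.Println(m[s]) // 1
}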
-
-type ordermarker int
-
-// markTemp returns the top of the temporary variable stack.
-func (o *Order) markTemp() ordermarker {
-	return ordermarker(len(o.temp))
-}
-
-// popTemp pops temporaries off the stack until reaching the mark,
-// which must have been returned by markTemp.
-func (o *Order) popTemp(mark ordermarker) {
-	for _, n := range o.temp[mark:] {
-		key := n.Type.LongString()
-		o.free[key] = append(o.free[key], n)
-	}
-	o.temp = o.temp[:mark]
-}
-
-// cleanTempNoPop emits VARKILL instructions to *out
-// for each temporary above the mark on the temporary stack.
-// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
-	var out []*Node
-	for i := len(o.temp) - 1; i >= int(mark); i-- {
-		n := o.temp[i]
-		kill := nod(OVARKILL, n, nil)
-		kill = typecheck(kill, ctxStmt)
-		out = append(out, kill)
-	}
-	return out
-}
-
-// cleanTemp emits VARKILL instructions for each temporary above the
-// mark on the temporary stack and removes them from the stack.
-func (o *Order) cleanTemp(top ordermarker) {
-	o.out = append(o.out, o.cleanTempNoPop(top)...)
-	o.popTemp(top)
-}
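markTemp/popTemp implement the classic stack-mark pattern; a stand-alone sketch under hypothetical names:

	// pool recycles temporaries the way Order.markTemp/popTemp do:
	// remember the depth, allocate freely, then release back to the mark.
	type pool struct{ temps []int }

	func (p *pool) mark() int    { return len(p.temps) }
	func (p *pool) pop(mark int) { p.temps = p.temps[:mark] }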
-
-// stmtList orders each of the statements in the list.
-func (o *Order) stmtList(l Nodes) {
-	s := l.Slice()
-	for i := range s {
-		orderMakeSliceCopy(s[i:])
-		o.stmt(s[i])
-	}
-}
-
-// orderMakeSliceCopy matches the pattern:
-//  m = OMAKESLICE([]T, x); OCOPY(m, s)
-// and rewrites it to:
-//  m = OMAKESLICECOPY([]T, x, s); nil
-func orderMakeSliceCopy(s []*Node) {
-	if Debug.N != 0 || instrumenting {
-		return
-	}
-
-	if len(s) < 2 {
-		return
-	}
-
-	asn := s[0]
-	copyn := s[1]
-
-	if asn == nil || asn.Op != OAS {
-		return
-	}
-	if asn.Left.Op != ONAME {
-		return
-	}
-	if asn.Left.isBlank() {
-		return
-	}
-	maken := asn.Right
-	if maken == nil || maken.Op != OMAKESLICE {
-		return
-	}
-	if maken.Esc == EscNone {
-		return
-	}
-	if maken.Left == nil || maken.Right != nil {
-		return
-	}
-	if copyn.Op != OCOPY {
-		return
-	}
-	if copyn.Left.Op != ONAME {
-		return
-	}
-	if asn.Left.Sym != copyn.Left.Sym {
-		return
-	}
-	if copyn.Right.Op != ONAME {
-		return
-	}
-
-	if copyn.Left.Sym == copyn.Right.Sym {
-		return
-	}
-
-	maken.Op = OMAKESLICECOPY
-	maken.Right = copyn.Right
-	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
-	maken.SetBounded(maken.Left.Op == OLEN && samesafeexpr(maken.Left.Left, copyn.Right))
-
-	maken = typecheck(maken, ctxExpr)
-
-	s[1] = nil // remove separate copy call
-
-	return
-}
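At the source level the fusion recognizes the familiar clone idiom; per the checks above, it only fires for escaping slices (a hedged sketch):

	// make followed immediately by copy is fused into one
	// OMAKESLICECOPY, so the runtime allocates and fills in one step.
	func clone(s []byte) []byte {
		b := make([]byte, len(s)) // the len(s) form also sets Bounded above
		copy(b, s)
		return b
	}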
-
-// edge inserts coverage instrumentation for libfuzzer.
-func (o *Order) edge() {
-	if Debug_libfuzzer == 0 {
-		return
-	}
-
-	// Create a new uint8 counter to be allocated in section
-	// __libfuzzer_extra_counters.
-	counter := staticname(types.Types[TUINT8])
-	counter.Name.SetLibfuzzerExtraCounter(true)
-
-	// counter += 1
-	incr := nod(OASOP, counter, nodintconst(1))
-	incr.SetSubOp(OADD)
-	incr = typecheck(incr, ctxStmt)
-
-	o.out = append(o.out, incr)
-}
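Conceptually, each instrumented point bumps a dedicated byte counter that libFuzzer scans for coverage; a stand-alone sketch (the placement in __libfuzzer_extra_counters is handled by the toolchain and not shown):

	// Per-edge 8-bit counters in the spirit of libFuzzer's extra counters.
	var edgeCounters [1 << 12]uint8

	func hitEdge(i int) {
		edgeCounters[i]++ // wrap-around is fine; only coverage matters
	}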
-
-// orderBlock orders the block of statements in n into a new slice,
-// and then replaces the old slice in n with the new slice.
-// free is a map that can be used to obtain temporary variables by type.
-func orderBlock(n *Nodes, free map[string][]*Node) {
-	var order Order
-	order.free = free
-	mark := order.markTemp()
-	order.edge()
-	order.stmtList(*n)
-	order.cleanTemp(mark)
-	n.Set(order.out)
-}
-
-// exprInPlace orders the side effects in *np and
-// leaves them as the init list of the final *np.
-// The result of exprInPlace MUST be assigned back to n, e.g.
-// 	n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n *Node) *Node {
-	var order Order
-	order.free = o.free
-	n = order.expr(n, nil)
-	n = addinit(n, order.out)
-
-	// insert new temporaries from order
-	// at head of outer list.
-	o.temp = append(o.temp, order.temp...)
-	return n
-}
-
-// orderStmtInPlace orders the side effects of the single statement *np
-// and replaces it with the resulting statement list.
-// The result of orderStmtInPlace MUST be assigned back to n, e.g.
-// 	n.Left = orderStmtInPlace(n.Left)
-// free is a map that can be used to obtain temporary variables by type.
-func orderStmtInPlace(n *Node, free map[string][]*Node) *Node {
-	var order Order
-	order.free = free
-	mark := order.markTemp()
-	order.stmt(n)
-	order.cleanTemp(mark)
-	return liststmt(order.out)
-}
-
-// init moves n's init list to o.out.
-func (o *Order) init(n *Node) {
-	if n.mayBeShared() {
-		// For concurrency safety, don't mutate potentially shared nodes.
-		// First, ensure that no work is required here.
-		if n.Ninit.Len() > 0 {
-			Fatalf("order.init shared node with ninit")
-		}
-		return
-	}
-	o.stmtList(n.Ninit)
-	n.Ninit.Set(nil)
-}
-
-// call orders the call expression n.
-// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(n *Node) {
-	if n.Ninit.Len() > 0 {
-		// Caller should have already called o.init(n).
-		Fatalf("%v with unexpected ninit", n.Op)
-	}
-
-	// Builtin functions.
-	if n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER {
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-		o.exprList(n.List)
-		return
-	}
-
-	fixVariadicCall(n)
-	n.Left = o.expr(n.Left, nil)
-	o.exprList(n.List)
-
-	if n.Op == OCALLINTER {
-		return
-	}
-	keepAlive := func(arg *Node) {
-		// If the argument is really a pointer being converted to uintptr,
-		// arrange for the pointer to be kept alive until the call returns,
-		// by copying it into a temp and marking that temp
-		// still alive when we pop the temp stack.
-		if arg.Op == OCONVNOP && arg.Left.Type.IsUnsafePtr() {
-			x := o.copyExpr(arg.Left, arg.Left.Type, false)
-			arg.Left = x
-			x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable
-			n.Nbody.Append(typecheck(nod(OVARLIVE, x, nil), ctxStmt))
-		}
-	}
-
-	// Check for "unsafe-uintptr" tag provided by escape analysis.
-	for i, param := range n.Left.Type.Params().FieldSlice() {
-		if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag {
-			if arg := n.List.Index(i); arg.Op == OSLICELIT {
-				for _, elt := range arg.List.Slice() {
-					keepAlive(elt)
-				}
-			} else {
-				keepAlive(arg)
-			}
-		}
-	}
-}
-
-// mapAssign appends n to o.out, introducing temporaries
-// to make sure that all map assignments have the form m[k] = x.
-// (Note: expr has already been called on n, so we know k is addressable.)
-//
-// If n is the multiple assignment form ..., m[k], ... = ..., x, ..., the rewrite is
-//	t1 = m
-//	t2 = k
-//	...., t3, ... = ..., x, ...
-//	t1[t2] = t3
-//
-// The temporaries t1, t2 are needed in case the ... being assigned
-// contain m or k. They are usually unnecessary, but even when they
-// are, they are typically registerizable, so not much harm is done.
-// And this only applies to the multiple-assignment form.
-// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n *Node) {
-	switch n.Op {
-	default:
-		Fatalf("order.mapAssign %v", n.Op)
-
-	case OAS, OASOP:
-		if n.Left.Op == OINDEXMAP {
-			// Make sure we evaluate the RHS before starting the map insert.
-			// We need to make sure the RHS won't panic.  See issue 22881.
-			if n.Right.Op == OAPPEND {
-				s := n.Right.List.Slice()[1:]
-				for i, n := range s {
-					s[i] = o.cheapExpr(n)
-				}
-			} else {
-				n.Right = o.cheapExpr(n.Right)
-			}
-		}
-		o.out = append(o.out, n)
-
-	case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
-		var post []*Node
-		for i, m := range n.List.Slice() {
-			switch {
-			case m.Op == OINDEXMAP:
-				if !m.Left.IsAutoTmp() {
-					m.Left = o.copyExpr(m.Left, m.Left.Type, false)
-				}
-				if !m.Right.IsAutoTmp() {
-					m.Right = o.copyExpr(m.Right, m.Right.Type, false)
-				}
-				fallthrough
-			case instrumenting && n.Op == OAS2FUNC && !m.isBlank():
-				t := o.newTemp(m.Type, false)
-				n.List.SetIndex(i, t)
-				a := nod(OAS, m, t)
-				a = typecheck(a, ctxStmt)
-				post = append(post, a)
-			}
-		}
-
-		o.out = append(o.out, n)
-		o.out = append(o.out, post...)
-	}
-}
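A runnable illustration of the multiple-assignment form described above (hypothetical helper; the temporaries stay invisible at the source level):

	// Ordered as: t1, t2 := m, k; t3, ok = f(); t1[t2] = t3, so f's
	// results are fully evaluated before the map insert begins.
	func assign(m map[string]int, k string, f func() (int, bool)) bool {
		var ok bool
		m[k], ok = f()
		return ok
	}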
-
-// stmt orders the statement n, appending to o.out.
-// Temporaries created during the statement are cleaned
-// up using VARKILL instructions as possible.
-func (o *Order) stmt(n *Node) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-	o.init(n)
-
-	switch n.Op {
-	default:
-		Fatalf("order.stmt %v", n.Op)
-
-	case OVARKILL, OVARLIVE, OINLMARK:
-		o.out = append(o.out, n)
-
-	case OAS:
-		t := o.markTemp()
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, n.Left)
-		o.mapAssign(n)
-		o.cleanTemp(t)
-
-	case OASOP:
-		t := o.markTemp()
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-
-		if instrumenting || n.Left.Op == OINDEXMAP && (n.SubOp() == ODIV || n.SubOp() == OMOD) {
-			// Rewrite m[k] op= r into m[k] = m[k] op r so
-			// that we can ensure that if op panics
-			// because r is zero, the panic happens before
-			// the map assignment.
-
-			n.Left = o.safeExpr(n.Left)
-
-			l := treecopy(n.Left, src.NoXPos)
-			if l.Op == OINDEXMAP {
-				l.SetIndexMapLValue(false)
-			}
-			l = o.copyExpr(l, n.Left.Type, false)
-			n.Right = nod(n.SubOp(), l, n.Right)
-			n.Right = typecheck(n.Right, ctxExpr)
-			n.Right = o.expr(n.Right, nil)
-
-			n.Op = OAS
-			n.ResetAux()
-		}
-
-		o.mapAssign(n)
-		o.cleanTemp(t)
-
-	case OAS2:
-		t := o.markTemp()
-		o.exprList(n.List)
-		o.exprList(n.Rlist)
-		o.mapAssign(n)
-		o.cleanTemp(t)
-
-	// Special: avoid copy of func call n.Right
-	case OAS2FUNC:
-		t := o.markTemp()
-		o.exprList(n.List)
-		o.init(n.Right)
-		o.call(n.Right)
-		o.as2(n)
-		o.cleanTemp(t)
-
-	// Special: use temporary variables to hold result,
-	// so that runtime can take address of temporary.
-	// No temporary for blank assignment.
-	//
-	// OAS2MAPR: make sure key is addressable if needed,
-	//           and make sure OINDEXMAP is not copied out.
-	case OAS2DOTTYPE, OAS2RECV, OAS2MAPR:
-		t := o.markTemp()
-		o.exprList(n.List)
-
-		switch r := n.Right; r.Op {
-		case ODOTTYPE2, ORECV:
-			r.Left = o.expr(r.Left, nil)
-		case OINDEXMAP:
-			r.Left = o.expr(r.Left, nil)
-			r.Right = o.expr(r.Right, nil)
-			// See similar conversion for OINDEXMAP below.
-			_ = mapKeyReplaceStrConv(r.Right)
-			r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
-		default:
-			Fatalf("order.stmt: %v", r.Op)
-		}
-
-		o.okAs2(n)
-		o.cleanTemp(t)
-
-	// Special: does not save n onto out.
-	case OBLOCK, OEMPTY:
-		o.stmtList(n.List)
-
-	// Special: n->left is not an expression; save as is.
-	case OBREAK,
-		OCONTINUE,
-		ODCL,
-		ODCLCONST,
-		ODCLTYPE,
-		OFALL,
-		OGOTO,
-		OLABEL,
-		ORETJMP:
-		o.out = append(o.out, n)
-
-	// Special: handle call arguments.
-	case OCALLFUNC, OCALLINTER, OCALLMETH:
-		t := o.markTemp()
-		o.call(n)
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	case OCLOSE,
-		OCOPY,
-		OPRINT,
-		OPRINTN,
-		ORECOVER,
-		ORECV:
-		t := o.markTemp()
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-		o.exprList(n.List)
-		o.exprList(n.Rlist)
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	// Special: order arguments to inner call but not call itself.
-	case ODEFER, OGO:
-		t := o.markTemp()
-		o.init(n.Left)
-		o.call(n.Left)
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	case ODELETE:
-		t := o.markTemp()
-		n.List.SetFirst(o.expr(n.List.First(), nil))
-		n.List.SetSecond(o.expr(n.List.Second(), nil))
-		n.List.SetSecond(o.mapKeyTemp(n.List.First().Type, n.List.Second()))
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	// Clean temporaries from condition evaluation at
-	// beginning of loop body and after for statement.
-	case OFOR:
-		t := o.markTemp()
-		n.Left = o.exprInPlace(n.Left)
-		n.Nbody.Prepend(o.cleanTempNoPop(t)...)
-		orderBlock(&n.Nbody, o.free)
-		n.Right = orderStmtInPlace(n.Right, o.free)
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	// Clean temporaries from condition at
-	// beginning of both branches.
-	case OIF:
-		t := o.markTemp()
-		n.Left = o.exprInPlace(n.Left)
-		n.Nbody.Prepend(o.cleanTempNoPop(t)...)
-		n.Rlist.Prepend(o.cleanTempNoPop(t)...)
-		o.popTemp(t)
-		orderBlock(&n.Nbody, o.free)
-		orderBlock(&n.Rlist, o.free)
-		o.out = append(o.out, n)
-
-	// Special: argument will be converted to interface using convT2E
-	// so make sure it is an addressable temporary.
-	case OPANIC:
-		t := o.markTemp()
-		n.Left = o.expr(n.Left, nil)
-		if !n.Left.Type.IsInterface() {
-			n.Left = o.addrTemp(n.Left)
-		}
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	case ORANGE:
-		// n.Right is the expression being ranged over.
-		// order it, and then make a copy if we need one.
-		// We almost always do, to ensure that we don't
-		// see any value changes made during the loop.
-		// Usually the copy is cheap (e.g., array pointer,
-		// chan, slice, string are all tiny).
-		// The exception is ranging over an array value
-		// (not a slice, not a pointer to array),
-		// which must make a copy to avoid seeing updates made during
-		// the range body. Ranging over an array value is uncommon though.
-
-		// Mark []byte(str) range expression to reuse string backing storage.
-		// It is safe because the storage cannot be mutated.
-		if n.Right.Op == OSTR2BYTES {
-			n.Right.Op = OSTR2BYTESTMP
-		}
-
-		t := o.markTemp()
-		n.Right = o.expr(n.Right, nil)
-
-		orderBody := true
-		switch n.Type.Etype {
-		default:
-			Fatalf("order.stmt range %v", n.Type)
-
-		case TARRAY, TSLICE:
-			if n.List.Len() < 2 || n.List.Second().isBlank() {
-				// for i := range x will only use x once, to compute len(x).
-				// No need to copy it.
-				break
-			}
-			fallthrough
-
-		case TCHAN, TSTRING:
-			// chan, string, slice, array ranges use value multiple times.
-			// make copy.
-			r := n.Right
-
-			if r.Type.IsString() && r.Type != types.Types[TSTRING] {
-				r = nod(OCONV, r, nil)
-				r.Type = types.Types[TSTRING]
-				r = typecheck(r, ctxExpr)
-			}
-
-			n.Right = o.copyExpr(r, r.Type, false)
-
-		case TMAP:
-			if isMapClear(n) {
-				// Preserve the body of the map clear pattern so it can
-				// be detected during walk. The loop body will not be used
-				// when optimizing away the range loop to a runtime call.
-				orderBody = false
-				break
-			}
-
-			// copy the map value in case it is a map literal.
-			// TODO(rsc): Make tmp = literal expressions reuse tmp.
-			// For maps tmp is just one word so it hardly matters.
-			r := n.Right
-			n.Right = o.copyExpr(r, r.Type, false)
-
-			// prealloc[n] is the temp for the iterator.
-			// hiter contains pointers and needs to be zeroed.
-			prealloc[n] = o.newTemp(hiter(n.Type), true)
-		}
-		o.exprListInPlace(n.List)
-		if orderBody {
-			orderBlock(&n.Nbody, o.free)
-		}
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	case ORETURN:
-		o.exprList(n.List)
-		o.out = append(o.out, n)
-
-	// Special: clean case temporaries in each block entry.
-	// Select must enter one of its blocks, so there is no
-	// need for a cleaning at the end.
-	// Doubly special: evaluation order for select is stricter
-	// than ordinary expressions. Even something like p.c
-	// has to be hoisted into a temporary, so that it cannot be
-	// reordered after the channel evaluation for a different
-	// case (if p were nil, then the timing of the fault would
-	// give this away).
-	case OSELECT:
-		t := o.markTemp()
-
-		for _, n2 := range n.List.Slice() {
-			if n2.Op != OCASE {
-				Fatalf("order select case %v", n2.Op)
-			}
-			r := n2.Left
-			setlineno(n2)
-
-			// Append any new body prologue to ninit.
-			// The next loop will insert ninit into nbody.
-			if n2.Ninit.Len() != 0 {
-				Fatalf("order select ninit")
-			}
-			if r == nil {
-				continue
-			}
-			switch r.Op {
-			default:
-				Dump("select case", r)
-				Fatalf("unknown op in select %v", r.Op)
-
-			// If this is case x := <-ch or case x, y := <-ch, the case has
-			// the ODCL nodes to declare x and y. We want to delay that
-			// declaration (and possible allocation) until inside the case body.
-			// Delete the ODCL nodes here and recreate them inside the body below.
-			case OSELRECV, OSELRECV2:
-				if r.Colas() {
-					i := 0
-					if r.Ninit.Len() != 0 && r.Ninit.First().Op == ODCL && r.Ninit.First().Left == r.Left {
-						i++
-					}
-					if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() {
-						i++
-					}
-					if i >= r.Ninit.Len() {
-						r.Ninit.Set(nil)
-					}
-				}
-
-				if r.Ninit.Len() != 0 {
-					dumplist("ninit", r.Ninit)
-					Fatalf("ninit on select recv")
-				}
-
-				// case x = <-c
-				// case x, ok = <-c
-				// r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
-				// r->left == N means 'case <-c'.
-				// c is always evaluated; x and ok are only evaluated when assigned.
-				r.Right.Left = o.expr(r.Right.Left, nil)
-
-				if !r.Right.Left.IsAutoTmp() {
-					r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false)
-				}
-
-				// Introduce a temporary for the receive and move the actual copy into the case body.
-				// This avoids problems with the target being addressed, as usual.
-				// NOTE: If we wanted to be clever, we could arrange for just one
-				// temporary per distinct type, sharing the temp among all receives
-				// with that temp. Similarly one ok bool could be shared among all
-				// the x,ok receives. Not worth doing until there's a clear need.
-				if r.Left != nil && r.Left.isBlank() {
-					r.Left = nil
-				}
-				if r.Left != nil {
-					// use channel element type for temporary to avoid conversions,
-					// such as in case interfacevalue = <-intchan.
-					// the conversion happens in the OAS instead.
-					tmp1 := r.Left
-
-					if r.Colas() {
-						tmp2 := nod(ODCL, tmp1, nil)
-						tmp2 = typecheck(tmp2, ctxStmt)
-						n2.Ninit.Append(tmp2)
-					}
-
-					r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
-					tmp2 := nod(OAS, tmp1, r.Left)
-					tmp2 = typecheck(tmp2, ctxStmt)
-					n2.Ninit.Append(tmp2)
-				}
-
-				if r.List.Len() != 0 && r.List.First().isBlank() {
-					r.List.Set(nil)
-				}
-				if r.List.Len() != 0 {
-					tmp1 := r.List.First()
-					if r.Colas() {
-						tmp2 := nod(ODCL, tmp1, nil)
-						tmp2 = typecheck(tmp2, ctxStmt)
-						n2.Ninit.Append(tmp2)
-					}
-
-					r.List.Set1(o.newTemp(types.Types[TBOOL], false))
-					tmp2 := okas(tmp1, r.List.First())
-					tmp2 = typecheck(tmp2, ctxStmt)
-					n2.Ninit.Append(tmp2)
-				}
-				orderBlock(&n2.Ninit, o.free)
-
-			case OSEND:
-				if r.Ninit.Len() != 0 {
-					dumplist("ninit", r.Ninit)
-					Fatalf("ninit on select send")
-				}
-
-				// case c <- x
-				// r->left is c, r->right is x, both are always evaluated.
-				r.Left = o.expr(r.Left, nil)
-
-				if !r.Left.IsAutoTmp() {
-					r.Left = o.copyExpr(r.Left, r.Left.Type, false)
-				}
-				r.Right = o.expr(r.Right, nil)
-				if !r.Right.IsAutoTmp() {
-					r.Right = o.copyExpr(r.Right, r.Right.Type, false)
-				}
-			}
-		}
-		// Now that we have accumulated all the temporaries, clean them.
-		// Also insert any ninit queued during the previous loop.
-		// (The temporary cleaning must follow that ninit work.)
-		for _, n3 := range n.List.Slice() {
-			orderBlock(&n3.Nbody, o.free)
-			n3.Nbody.Prepend(o.cleanTempNoPop(t)...)
-
-			// TODO(mdempsky): Is this actually necessary?
-			// walkselect appears to walk Ninit.
-			n3.Nbody.Prepend(n3.Ninit.Slice()...)
-			n3.Ninit.Set(nil)
-		}
-
-		o.out = append(o.out, n)
-		o.popTemp(t)
-
-	// Special: value being sent is passed as a pointer; make it addressable.
-	case OSEND:
-		t := o.markTemp()
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-		if instrumenting {
-			// Force copying to the stack so that (chan T)(nil) <- x
-			// is still instrumented as a read of x.
-			n.Right = o.copyExpr(n.Right, n.Right.Type, false)
-		} else {
-			n.Right = o.addrTemp(n.Right)
-		}
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-
-	// TODO(rsc): Clean temporaries more aggressively.
-	// Note that because walkswitch will rewrite some of the
-	// switch into a binary search, this is not as easy as it looks.
-	// (If we ran that code here we could invoke order.stmt on
-	// the if-else chain instead.)
-	// For now just clean all the temporaries at the end.
-	// In practice that's fine.
-	case OSWITCH:
-		if Debug_libfuzzer != 0 && !hasDefaultCase(n) {
-			// Add empty "default:" case for instrumentation.
-			n.List.Append(nod(OCASE, nil, nil))
-		}
-
-		t := o.markTemp()
-		n.Left = o.expr(n.Left, nil)
-		for _, ncas := range n.List.Slice() {
-			if ncas.Op != OCASE {
-				Fatalf("order switch case %v", ncas.Op)
-			}
-			o.exprListInPlace(ncas.List)
-			orderBlock(&ncas.Nbody, o.free)
-		}
-
-		o.out = append(o.out, n)
-		o.cleanTemp(t)
-	}
-
-	lineno = lno
-}
-
-func hasDefaultCase(n *Node) bool {
-	for _, ncas := range n.List.Slice() {
-		if ncas.Op != OCASE {
-			Fatalf("expected case, found %v", ncas.Op)
-		}
-		if ncas.List.Len() == 0 {
-			return true
-		}
-	}
-	return false
-}
-
-// exprList orders the expression list l into o.
-func (o *Order) exprList(l Nodes) {
-	s := l.Slice()
-	for i := range s {
-		s[i] = o.expr(s[i], nil)
-	}
-}
-
-// exprListInPlace orders the expression list l but saves
-// the side effects on the individual expression ninit lists.
-func (o *Order) exprListInPlace(l Nodes) {
-	s := l.Slice()
-	for i := range s {
-		s[i] = o.exprInPlace(s[i])
-	}
-}
-
-// prealloc[x] records the allocation to use for x.
-var prealloc = map[*Node]*Node{}
-
-// expr orders a single expression, appending side
-// effects to o.out as needed.
-// If this is part of an assignment lhs = *np, lhs is given.
-// Otherwise lhs == nil. (When lhs != nil it may be possible
-// to avoid copying the result of the expression to a temporary.)
-// The result of expr MUST be assigned back to n, e.g.
-// 	n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs *Node) *Node {
-	if n == nil {
-		return n
-	}
-
-	lno := setlineno(n)
-	o.init(n)
-
-	switch n.Op {
-	default:
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-		o.exprList(n.List)
-		o.exprList(n.Rlist)
-
-	// Addition of strings turns into a function call.
-	// Allocate a temporary to hold the strings.
-	// Fewer than 5 strings use direct runtime helpers.
-	case OADDSTR:
-		o.exprList(n.List)
-
-		if n.List.Len() > 5 {
-			t := types.NewArray(types.Types[TSTRING], int64(n.List.Len()))
-			prealloc[n] = o.newTemp(t, false)
-		}
-
-		// Mark string(byteSlice) arguments to reuse byteSlice backing
-		// buffer during conversion. String concatenation does not
-		// retain the strings for later use, so it is safe.
-		// However, we can do it only if there is at least one non-empty string literal.
-		// Otherwise, if all the other arguments are empty strings,
-		// concatstrings would return a reference to the temp string
-		// to the caller.
-		hasbyte := false
-
-		haslit := false
-		for _, n1 := range n.List.Slice() {
-			hasbyte = hasbyte || n1.Op == OBYTES2STR
-			haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0
-		}
-
-		if haslit && hasbyte {
-			for _, n2 := range n.List.Slice() {
-				if n2.Op == OBYTES2STR {
-					n2.Op = OBYTES2STRTMP
-				}
-			}
-		}
-
-	case OINDEXMAP:
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-		needCopy := false
-
-		if !n.IndexMapLValue() {
-			// Enforce that any []byte slices we are not copying
-			// cannot be changed before the map index by forcing
-			// the map index to happen immediately following the
-			// conversions. See copyExpr a few lines below.
-			needCopy = mapKeyReplaceStrConv(n.Right)
-
-			if instrumenting {
-				// Race detector needs the copy so it can
-				// call treecopy on the result.
-				needCopy = true
-			}
-		}
-
-		// key must be addressable
-		n.Right = o.mapKeyTemp(n.Left.Type, n.Right)
-		if needCopy {
-			n = o.copyExpr(n, n.Type, false)
-		}
-
-	// concrete type (not interface) argument might need an addressable
-	// temporary to pass to the runtime conversion routine.
-	case OCONVIFACE:
-		n.Left = o.expr(n.Left, nil)
-		if n.Left.Type.IsInterface() {
-			break
-		}
-		if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || isStaticCompositeLiteral(n.Left) {
-			// Need a temp if we need to pass the address to the conversion function.
-			// We also process static composite literal nodes here, making a named static global
-			// whose address we can put directly in an interface (see OCONVIFACE case in walk).
-			n.Left = o.addrTemp(n.Left)
-		}
-
-	case OCONVNOP:
-		if n.Type.IsKind(TUNSAFEPTR) && n.Left.Type.IsKind(TUINTPTR) && (n.Left.Op == OCALLFUNC || n.Left.Op == OCALLINTER || n.Left.Op == OCALLMETH) {
-			// When reordering unsafe.Pointer(f()) into a separate
-			// statement, the conversion and function call must stay
-			// together. See golang.org/issue/15329.
-			o.init(n.Left)
-			o.call(n.Left)
-			if lhs == nil || lhs.Op != ONAME || instrumenting {
-				n = o.copyExpr(n, n.Type, false)
-			}
-		} else {
-			n.Left = o.expr(n.Left, nil)
-		}
-
-	case OANDAND, OOROR:
-		// ... = LHS && RHS
-		//
-		// var r bool
-		// r = LHS
-		// if r {       // or !r, for OROR
-		//     r = RHS
-		// }
-		// ... = r
-
-		r := o.newTemp(n.Type, false)
-
-		// Evaluate left-hand side.
-		lhs := o.expr(n.Left, nil)
-		o.out = append(o.out, typecheck(nod(OAS, r, lhs), ctxStmt))
-
-		// Evaluate right-hand side, save generated code.
-		saveout := o.out
-		o.out = nil
-		t := o.markTemp()
-		o.edge()
-		rhs := o.expr(n.Right, nil)
-		o.out = append(o.out, typecheck(nod(OAS, r, rhs), ctxStmt))
-		o.cleanTemp(t)
-		gen := o.out
-		o.out = saveout
-
-		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
-		nif := nod(OIF, r, nil)
-		if n.Op == OANDAND {
-			nif.Nbody.Set(gen)
-		} else {
-			nif.Rlist.Set(gen)
-		}
-		o.out = append(o.out, nif)
-		n = r
-
-	case OCALLFUNC,
-		OCALLINTER,
-		OCALLMETH,
-		OCAP,
-		OCOMPLEX,
-		OCOPY,
-		OIMAG,
-		OLEN,
-		OMAKECHAN,
-		OMAKEMAP,
-		OMAKESLICE,
-		OMAKESLICECOPY,
-		ONEW,
-		OREAL,
-		ORECOVER,
-		OSTR2BYTES,
-		OSTR2BYTESTMP,
-		OSTR2RUNES:
-
-		if isRuneCount(n) {
-			// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
-			n.Left.Left = o.expr(n.Left.Left, nil)
-		} else {
-			o.call(n)
-		}
-
-		if lhs == nil || lhs.Op != ONAME || instrumenting {
-			n = o.copyExpr(n, n.Type, false)
-		}
-
-	case OAPPEND:
-		// Check for append(x, make([]T, y)...) .
-		if isAppendOfMake(n) {
-			n.List.SetFirst(o.expr(n.List.First(), nil))             // order x
-			n.List.Second().Left = o.expr(n.List.Second().Left, nil) // order y
-		} else {
-			o.exprList(n.List)
-		}
-
-		if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) {
-			n = o.copyExpr(n, n.Type, false)
-		}
-
-	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
-		n.Left = o.expr(n.Left, nil)
-		low, high, max := n.SliceBounds()
-		low = o.expr(low, nil)
-		low = o.cheapExpr(low)
-		high = o.expr(high, nil)
-		high = o.cheapExpr(high)
-		max = o.expr(max, nil)
-		max = o.cheapExpr(max)
-		n.SetSliceBounds(low, high, max)
-		if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
-			n = o.copyExpr(n, n.Type, false)
-		}
-
-	case OCLOSURE:
-		if n.Transient() && n.Func.Closure.Func.Cvars.Len() > 0 {
-			prealloc[n] = o.newTemp(closureType(n), false)
-		}
-
-	case OSLICELIT, OCALLPART:
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-		o.exprList(n.List)
-		o.exprList(n.Rlist)
-		if n.Transient() {
-			var t *types.Type
-			switch n.Op {
-			case OSLICELIT:
-				t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
-			case OCALLPART:
-				t = partialCallType(n)
-			}
-			prealloc[n] = o.newTemp(t, false)
-		}
-
-	case ODOTTYPE, ODOTTYPE2:
-		n.Left = o.expr(n.Left, nil)
-		if !isdirectiface(n.Type) || instrumenting {
-			n = o.copyExpr(n, n.Type, true)
-		}
-
-	case ORECV:
-		n.Left = o.expr(n.Left, nil)
-		n = o.copyExpr(n, n.Type, true)
-
-	case OEQ, ONE, OLT, OLE, OGT, OGE:
-		n.Left = o.expr(n.Left, nil)
-		n.Right = o.expr(n.Right, nil)
-
-		t := n.Left.Type
-		switch {
-		case t.IsString():
-			// Mark string(byteSlice) arguments to reuse byteSlice backing
-			// buffer during conversion. String comparison does not
-			// retain the strings for later use, so it is safe.
-			if n.Left.Op == OBYTES2STR {
-				n.Left.Op = OBYTES2STRTMP
-			}
-			if n.Right.Op == OBYTES2STR {
-				n.Right.Op = OBYTES2STRTMP
-			}
-
-		case t.IsStruct() || t.IsArray():
-			// for complex comparisons, we need both args to be
-			// addressable so we can pass them to the runtime.
-			n.Left = o.addrTemp(n.Left)
-			n.Right = o.addrTemp(n.Right)
-		}
-	case OMAPLIT:
-		// Order map by converting:
-		//   map[int]int{
-		//     a(): b(),
-		//     c(): d(),
-		//     e(): f(),
-		//   }
-		// to
-		//   m := map[int]int{}
-		//   m[a()] = b()
-		//   m[c()] = d()
-		//   m[e()] = f()
-		// Then order the result.
-		// Without this special case, order would otherwise compute all
-		// the keys and values before storing any of them to the map.
-		// See issue 26552.
-		entries := n.List.Slice()
-		statics := entries[:0]
-		var dynamics []*Node
-		for _, r := range entries {
-			if r.Op != OKEY {
-				Fatalf("OMAPLIT entry not OKEY: %v\n", r)
-			}
-
-			if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
-				dynamics = append(dynamics, r)
-				continue
-			}
-
-			// Recursively ordering some static entries can change them to dynamic;
-			// e.g., OCONVIFACE nodes. See #31777.
-			r = o.expr(r, nil)
-			if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
-				dynamics = append(dynamics, r)
-				continue
-			}
-
-			statics = append(statics, r)
-		}
-		n.List.Set(statics)
-
-		if len(dynamics) == 0 {
-			break
-		}
-
-		// Emit the creation of the map (with all its static entries).
-		m := o.newTemp(n.Type, false)
-		as := nod(OAS, m, n)
-		typecheck(as, ctxStmt)
-		o.stmt(as)
-		n = m
-
-		// Emit eval+insert of dynamic entries, one at a time.
-		for _, r := range dynamics {
-			as := nod(OAS, nod(OINDEX, n, r.Left), r.Right)
-			typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
-			o.stmt(as)
-		}
-	}
-
-	lineno = lno
-	return n
-}
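Among the expression cases above, OANDAND/OOROR is the one that manufactures control flow; its rewrite at the source level is simply (sketch):

	// z := lhs() && rhs() is ordered as r := lhs(); if r { r = rhs() }.
	// For ||, the generated assignments go on the else branch instead.
	func and(lhs, rhs func() bool) bool {
		r := lhs()
		if r {
			r = rhs() // evaluated only when lhs does not short-circuit
		}
		return r
	}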
-
-// okas creates and returns an assignment of val to ok,
-// including an explicit conversion if necessary.
-func okas(ok, val *Node) *Node {
-	if !ok.isBlank() {
-		val = conv(val, ok.Type)
-	}
-	return nod(OAS, ok, val)
-}
-
-// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
-// The caller should order the right-hand side of the assignment before calling order.as2.
-// It rewrites,
-// 	a, b, a = ...
-// as
-//	tmp1, tmp2, tmp3 = ...
-// 	a, b, a = tmp1, tmp2, tmp3
-// This is necessary to ensure left-to-right assignment order.
-func (o *Order) as2(n *Node) {
-	tmplist := []*Node{}
-	left := []*Node{}
-	for ni, l := range n.List.Slice() {
-		if !l.isBlank() {
-			tmp := o.newTemp(l.Type, l.Type.HasPointers())
-			n.List.SetIndex(ni, tmp)
-			tmplist = append(tmplist, tmp)
-			left = append(left, l)
-		}
-	}
-
-	o.out = append(o.out, n)
-
-	as := nod(OAS2, nil, nil)
-	as.List.Set(left)
-	as.Rlist.Set(tmplist)
-	as = typecheck(as, ctxStmt)
-	o.stmt(as)
-}
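The familiar swap is the canonical source-level case that depends on exactly these temporaries:

	// Both old values are read into compiler temps before either
	// variable is written, preserving left-to-right assignment order.
	func swap(a, b int) (int, int) {
		a, b = b, a
		return a, b
	}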
-
-// okAs2 orders OAS2XXX with ok.
-// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *Node) {
-	var tmp1, tmp2 *Node
-	if !n.List.First().isBlank() {
-		typ := n.Right.Type
-		tmp1 = o.newTemp(typ, typ.HasPointers())
-	}
-
-	if !n.List.Second().isBlank() {
-		tmp2 = o.newTemp(types.Types[TBOOL], false)
-	}
-
-	o.out = append(o.out, n)
-
-	if tmp1 != nil {
-		r := nod(OAS, n.List.First(), tmp1)
-		r = typecheck(r, ctxStmt)
-		o.mapAssign(r)
-		n.List.SetFirst(tmp1)
-	}
-	if tmp2 != nil {
-		r := okas(n.List.Second(), tmp2)
-		r = typecheck(r, ctxStmt)
-		o.mapAssign(r)
-		n.List.SetSecond(tmp2)
-	}
-}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
deleted file mode 100644
index 353f4b0..0000000
--- a/src/cmd/compile/internal/gc/pgen.go
+++ /dev/null
@@ -1,798 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/ssa"
-	"cmd/compile/internal/types"
-	"cmd/internal/dwarf"
-	"cmd/internal/obj"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-	"cmd/internal/sys"
-	"internal/race"
-	"math/rand"
-	"sort"
-	"sync"
-	"time"
-)
-
-// "Portable" code generation.
-
-var (
-	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
-	compilequeue    []*Node // functions waiting to be compiled
-)
-
-func emitptrargsmap(fn *Node) {
-	if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
-		return
-	}
-	lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
-
-	nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
-	bv := bvalloc(int32(nptr) * 2)
-	nbitmap := 1
-	if fn.Type.NumResults() > 0 {
-		nbitmap = 2
-	}
-	off := duint32(lsym, 0, uint32(nbitmap))
-	off = duint32(lsym, off, uint32(bv.n))
-
-	if fn.IsMethod() {
-		onebitwalktype1(fn.Type.Recvs(), 0, bv)
-	}
-	if fn.Type.NumParams() > 0 {
-		onebitwalktype1(fn.Type.Params(), 0, bv)
-	}
-	off = dbvec(lsym, off, bv)
-
-	if fn.Type.NumResults() > 0 {
-		onebitwalktype1(fn.Type.Results(), 0, bv)
-		off = dbvec(lsym, off, bv)
-	}
-
-	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
-}
-
-// cmpstackvarlt reports whether the stack variable a sorts before b.
-//
-// Sort the list of stack variables: autos after anything else;
-// within autos, unused after used; within used, things with
-// pointers first, zeroed things first, and then decreasing size.
-// Because autos are laid out at decreasing addresses on the stack,
-// "pointers first, zeroed things first, decreasing size" really
-// means, in memory, things with pointers needing zeroing at the
-// top of the stack, with sizes increasing from there.
-// Non-autos sort on offset.
-func cmpstackvarlt(a, b *Node) bool {
-	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
-		return b.Class() == PAUTO
-	}
-
-	if a.Class() != PAUTO {
-		return a.Xoffset < b.Xoffset
-	}
-
-	if a.Name.Used() != b.Name.Used() {
-		return a.Name.Used()
-	}
-
-	ap := a.Type.HasPointers()
-	bp := b.Type.HasPointers()
-	if ap != bp {
-		return ap
-	}
-
-	ap = a.Name.Needzero()
-	bp = b.Name.Needzero()
-	if ap != bp {
-		return ap
-	}
-
-	if a.Type.Width != b.Type.Width {
-		return a.Type.Width > b.Type.Width
-	}
-
-	return a.Sym.Name < b.Sym.Name
-}
-
-// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*Node
-
-func (s byStackVar) Len() int           { return len(s) }
-func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
-func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-func (s *ssafn) AllocFrame(f *ssa.Func) {
-	s.stksize = 0
-	s.stkptrsize = 0
-	fn := s.curfn.Func
-
-	// Mark the PAUTO's unused.
-	for _, ln := range fn.Dcl {
-		if ln.Class() == PAUTO {
-			ln.Name.SetUsed(false)
-		}
-	}
-
-	for _, l := range f.RegAlloc {
-		if ls, ok := l.(ssa.LocalSlot); ok {
-			ls.N.(*Node).Name.SetUsed(true)
-		}
-	}
-
-	scratchUsed := false
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if n, ok := v.Aux.(*Node); ok {
-				switch n.Class() {
-				case PPARAM, PPARAMOUT:
-					// Don't modify nodfp; it is a global.
-					if n != nodfp {
-						n.Name.SetUsed(true)
-					}
-				case PAUTO:
-					n.Name.SetUsed(true)
-				}
-			}
-			if !scratchUsed {
-				scratchUsed = v.Op.UsesScratch()
-			}
-
-		}
-	}
-
-	if f.Config.NeedsFpScratch && scratchUsed {
-		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
-	}
-
-	sort.Sort(byStackVar(fn.Dcl))
-
-	// Reassign stack offsets of the locals that are used.
-	lastHasPtr := false
-	for i, n := range fn.Dcl {
-		if n.Op != ONAME || n.Class() != PAUTO {
-			continue
-		}
-		if !n.Name.Used() {
-			fn.Dcl = fn.Dcl[:i]
-			break
-		}
-
-		dowidth(n.Type)
-		w := n.Type.Width
-		if w >= thearch.MAXWIDTH || w < 0 {
-			Fatalf("bad width")
-		}
-		if w == 0 && lastHasPtr {
-			// Pad between a pointer-containing object and a zero-sized object.
-			// This prevents a pointer to the zero-sized object from being interpreted
-			// as a pointer to the pointer-containing object (and causing it
-			// to be scanned when it shouldn't be). See issue 24993.
-			w = 1
-		}
-		s.stksize += w
-		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
-		if n.Type.HasPointers() {
-			s.stkptrsize = s.stksize
-			lastHasPtr = true
-		} else {
-			lastHasPtr = false
-		}
-		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
-			s.stksize = Rnd(s.stksize, int64(Widthptr))
-		}
-		n.Xoffset = -s.stksize
-	}
-
-	s.stksize = Rnd(s.stksize, int64(Widthreg))
-	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
-}
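Rnd rounds an offset up to an alignment boundary; a stand-alone sketch of that arithmetic, assuming a power-of-two alignment (the helper name is hypothetical):

	// roundUp mirrors the Rnd calls above for power-of-two alignments.
	func roundUp(n, align int64) int64 {
		return (n + align - 1) &^ (align - 1)
	}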
-
-func funccompile(fn *Node) {
-	if Curfn != nil {
-		Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
-	}
-
-	if fn.Type == nil {
-		if nerrors == 0 {
-			Fatalf("funccompile missing type")
-		}
-		return
-	}
-
-	// assign parameter offsets
-	dowidth(fn.Type)
-
-	if fn.Nbody.Len() == 0 {
-		// Initialize ABI wrappers if necessary.
-		fn.Func.initLSym(false)
-		emitptrargsmap(fn)
-		return
-	}
-
-	dclcontext = PAUTO
-	Curfn = fn
-
-	compile(fn)
-
-	Curfn = nil
-	dclcontext = PEXTERN
-}
-
-func compile(fn *Node) {
-	saveerrors()
-
-	order(fn)
-	if nerrors != 0 {
-		return
-	}
-
-	// Set up the function's LSym early to avoid data races with the assemblers.
-	// Do this before walk, as walk needs the LSym to set attributes/relocations
-	// (e.g. in markTypeUsedInInterface).
-	fn.Func.initLSym(true)
-
-	walk(fn)
-	if nerrors != 0 {
-		return
-	}
-	if instrumenting {
-		instrument(fn)
-	}
-
-	// From this point, there should be no uses of Curfn. Enforce that.
-	Curfn = nil
-
-	if fn.funcname() == "_" {
-		// We don't need to generate code for this function, just report errors in its body.
-		// At this point we've generated any errors needed.
-		// (Beyond here we generate only non-spec errors, like "stack frame too large".)
-		// See issue 29870.
-		return
-	}
-
-	// Make sure type syms are declared for all types that might
-	// be types of stack objects. We need to do this here
-	// because symbols must be allocated before the parallel
-	// phase of the compiler.
-	for _, n := range fn.Func.Dcl {
-		switch n.Class() {
-		case PPARAM, PPARAMOUT, PAUTO:
-			if livenessShouldTrack(n) && n.Name.Addrtaken() {
-				dtypesym(n.Type)
-				// Also make sure we allocate a linker symbol
-				// for the stack object data, for the same reason.
-				if fn.Func.lsym.Func().StackObjects == nil {
-					fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
-				}
-			}
-		}
-	}
-
-	if compilenow(fn) {
-		compileSSA(fn, 0)
-	} else {
-		compilequeue = append(compilequeue, fn)
-	}
-}
-
-// compilenow reports whether to compile immediately.
-// If functions are not compiled immediately,
-// they are enqueued in compilequeue,
-// which is drained by compileFunctions.
-func compilenow(fn *Node) bool {
-	// Issue 38068: if this function is a method AND an inline
-	// candidate AND was not inlined (yet), put it onto the compile
-	// queue instead of compiling it immediately. This is in case we
-	// wind up inlining it into a method wrapper that is generated by
-	// compiling a function later on in the xtop list.
-	if fn.IsMethod() && isInlinableButNotInlined(fn) {
-		return false
-	}
-	return nBackendWorkers == 1 && Debug_compilelater == 0
-}
-
-// isInlinableButNotInlined returns true if 'fn' was marked as an
-// inline candidate but then never inlined (presumably because we
-// found no call sites).
-func isInlinableButNotInlined(fn *Node) bool {
-	if fn.Func.Nname.Func.Inl == nil {
-		return false
-	}
-	if fn.Sym == nil {
-		return true
-	}
-	return !fn.Sym.Linksym().WasInlined()
-}
-
-const maxStackSize = 1 << 30
-
-// compileSSA builds an SSA backend function,
-// uses it to generate a plist,
-// and flushes that plist to machine code.
-// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *Node, worker int) {
-	f := buildssa(fn, worker)
-	// Note: check arg size to fix issue 25507.
-	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
-		largeStackFramesMu.Lock()
-		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
-		largeStackFramesMu.Unlock()
-		return
-	}
-	pp := newProgs(fn, worker)
-	defer pp.Free()
-	genssa(f, pp)
-	// Check frame size again.
-	// The check above included only the space needed for local variables.
-	// After genssa, the space needed includes local variables and the callee arg region.
-	// We must do this check prior to calling pp.Flush.
-	// If there are any oversized stack frames,
-	// the assembler may emit inscrutable complaints about invalid instructions.
-	if pp.Text.To.Offset >= maxStackSize {
-		largeStackFramesMu.Lock()
-		locals := f.Frontend().(*ssafn).stksize
-		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
-		largeStackFramesMu.Unlock()
-		return
-	}
-
-	pp.Flush() // assemble, fill in boilerplate, etc.
-	// fieldtrack must be called after pp.Flush. See issue 20014.
-	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
-}
-
-func init() {
-	if race.Enabled {
-		rand.Seed(time.Now().UnixNano())
-	}
-}
-
-// compileFunctions compiles all functions in compilequeue.
-// It fans out nBackendWorkers to do the work
-// and waits for them to complete.
-func compileFunctions() {
-	if len(compilequeue) != 0 {
-		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
-		if race.Enabled {
-			// Randomize compilation order to try to shake out races.
-			tmp := make([]*Node, len(compilequeue))
-			perm := rand.Perm(len(compilequeue))
-			for i, v := range perm {
-				tmp[v] = compilequeue[i]
-			}
-			copy(compilequeue, tmp)
-		} else {
-			// Compile the longest functions first,
-			// since they're most likely to be the slowest.
-			// This helps avoid stragglers.
-			sort.Slice(compilequeue, func(i, j int) bool {
-				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
-			})
-		}
-		var wg sync.WaitGroup
-		Ctxt.InParallel = true
-		c := make(chan *Node, nBackendWorkers)
-		for i := 0; i < nBackendWorkers; i++ {
-			wg.Add(1)
-			go func(worker int) {
-				for fn := range c {
-					compileSSA(fn, worker)
-				}
-				wg.Done()
-			}(i)
-		}
-		for _, fn := range compilequeue {
-			c <- fn
-		}
-		close(c)
-		compilequeue = nil
-		wg.Wait()
-		Ctxt.InParallel = false
-		sizeCalculationDisabled = false
-	}
-}
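The same bounded fan-out, extracted into a stand-alone sketch (hypothetical names; requires import "sync"):

	// fanOut runs do over jobs with a fixed number of workers, the
	// exact shape compileFunctions uses for its backend workers.
	func fanOut(jobs []string, workers int, do func(string)) {
		var wg sync.WaitGroup
		c := make(chan string, workers)
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := range c {
					do(j)
				}
			}()
		}
		for _, j := range jobs {
			c <- j
		}
		close(c)
		wg.Wait()
	}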
-
-func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
-	fn := curfn.(*Node)
-	if fn.Func.Nname != nil {
-		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
-			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
-		}
-	}
-
-	var apdecls []*Node
-	// Populate decls for fn.
-	for _, n := range fn.Func.Dcl {
-		if n.Op != ONAME { // might be OTYPE or OLITERAL
-			continue
-		}
-		switch n.Class() {
-		case PAUTO:
-			if !n.Name.Used() {
-				// Text == nil -> generating abstract function
-				if fnsym.Func().Text != nil {
-					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
-				}
-				continue
-			}
-		case PPARAM, PPARAMOUT:
-		default:
-			continue
-		}
-		apdecls = append(apdecls, n)
-		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
-	}
-
-	decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
-
-	// For each type referenced by the function's auto vars but not
-	// already referenced by a dwarf var, attach a dummy relocation to
-	// the function symbol to ensure that the type is included in DWARF
-	// processing during linking.
-	typesyms := []*obj.LSym{}
-	for t := range fnsym.Func().Autot {
-		typesyms = append(typesyms, t)
-	}
-	sort.Sort(obj.BySymName(typesyms))
-	for _, sym := range typesyms {
-		r := obj.Addrel(infosym)
-		r.Sym = sym
-		r.Type = objabi.R_USETYPE
-	}
-	fnsym.Func().Autot = nil
-
-	var varScopes []ScopeID
-	for _, decl := range decls {
-		pos := declPos(decl)
-		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
-	}
-
-	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
-	var inlcalls dwarf.InlCalls
-	if genDwarfInline > 0 {
-		inlcalls = assembleInlines(fnsym, dwarfVars)
-	}
-	return scopes, inlcalls
-}
-
-func declPos(decl *Node) src.XPos {
-	if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
-		// It's not clear which position is correct for captured variables here:
-		// * decl.Pos is the wrong position for captured variables, in the inner
-		//   function, but it is the right position in the outer function.
-		// * decl.Name.Defn is nil for captured variables that were arguments
-		//   on the outer function, however the decl.Pos for those seems to be
-		//   correct.
-		// * decl.Name.Defn is the "wrong" thing for variables declared in the
-		//   header of a type switch, it's their position in the header, rather
-		//   than the position of the case statement. In principle this is the
-		//   right thing, but here we prefer the latter because it makes each
-		//   instance of the header variable local to the lexical block of its
-		//   case statement.
-		// This code is probably wrong for type switch variables that are also
-		// captured.
-		return decl.Name.Defn.Pos
-	}
-	return decl.Pos
-}
-
-// createSimpleVars creates a DWARF entry for every variable declared in the
-// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
-	var vars []*dwarf.Var
-	var decls []*Node
-	selected := make(map[*Node]bool)
-	for _, n := range apDecls {
-		if n.IsAutoTmp() {
-			continue
-		}
-
-		decls = append(decls, n)
-		vars = append(vars, createSimpleVar(fnsym, n))
-		selected[n] = true
-	}
-	return decls, vars, selected
-}
-
-func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
-	var abbrev int
-	offs := n.Xoffset
-
-	switch n.Class() {
-	case PAUTO:
-		abbrev = dwarf.DW_ABRV_AUTO
-		if Ctxt.FixedFrameSize() == 0 {
-			offs -= int64(Widthptr)
-		}
-		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
-			// There is a word space for FP on ARM64 even if the frame pointer is disabled
-			offs -= int64(Widthptr)
-		}
-
-	case PPARAM, PPARAMOUT:
-		abbrev = dwarf.DW_ABRV_PARAM
-		offs += Ctxt.FixedFrameSize()
-	default:
-		Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
-	}
-
-	typename := dwarf.InfoPrefix + typesymname(n.Type)
-	delete(fnsym.Func().Autot, ngotype(n).Linksym())
-	inlIndex := 0
-	if genDwarfInline > 1 {
-		if n.Name.InlFormal() || n.Name.InlLocal() {
-			inlIndex = posInlIndex(n.Pos) + 1
-			if n.Name.InlFormal() {
-				abbrev = dwarf.DW_ABRV_PARAM
-			}
-		}
-	}
-	declpos := Ctxt.InnermostPos(declPos(n))
-	return &dwarf.Var{
-		Name:          n.Sym.Name,
-		IsReturnValue: n.Class() == PPARAMOUT,
-		IsInlFormal:   n.Name.InlFormal(),
-		Abbrev:        abbrev,
-		StackOffset:   int32(offs),
-		Type:          Ctxt.Lookup(typename),
-		DeclFile:      declpos.RelFilename(),
-		DeclLine:      declpos.RelLine(),
-		DeclCol:       declpos.Col(),
-		InlIndex:      int32(inlIndex),
-		ChildIndex:    -1,
-	}
-}
-
-// createComplexVars creates recomposed DWARF vars with location lists,
-// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
-	debugInfo := fn.DebugInfo
-
-	// Produce a DWARF variable entry for each user variable.
-	var decls []*Node
-	var vars []*dwarf.Var
-	ssaVars := make(map[*Node]bool)
-
-	for varID, dvar := range debugInfo.Vars {
-		n := dvar.(*Node)
-		ssaVars[n] = true
-		for _, slot := range debugInfo.VarSlots[varID] {
-			ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
-		}
-
-		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
-			decls = append(decls, n)
-			vars = append(vars, dvar)
-		}
-	}
-
-	return decls, vars, ssaVars
-}
-
-// createDwarfVars processes fn, returning a list of DWARF variables and the
-// Nodes they represent.
-func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
-	// Collect a raw list of DWARF vars.
-	var vars []*dwarf.Var
-	var decls []*Node
-	var selected map[*Node]bool
-	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
-		decls, vars, selected = createComplexVars(fnsym, fn)
-	} else {
-		decls, vars, selected = createSimpleVars(fnsym, apDecls)
-	}
-
-	dcl := apDecls
-	if fnsym.WasInlined() {
-		dcl = preInliningDcls(fnsym)
-	}
-
-	// If optimization is enabled, the list above will typically be
-	// missing some of the original pre-optimization variables in the
-	// function (they may have been promoted to registers, folded into
-	// constants, eliminated as dead code, etc.). Input arguments not eligible
-	// for SSA optimization are also missing.  Here we add back in entries
-	// for selected missing vars. Note that the recipe below creates a
-	// conservative location. The idea here is that we want to
-	// communicate to the user that "yes, there is a variable named X
-	// in this function, but no, I don't have enough information to
-	// reliably report its contents."
-	// For non-SSA-able arguments, however, the correct information
-	// is known -- they have a single home on the stack.
-	for _, n := range dcl {
-		if _, found := selected[n]; found {
-			continue
-		}
-		c := n.Sym.Name[0]
-		if c == '.' || n.Type.IsUntyped() {
-			continue
-		}
-		if n.Class() == PPARAM && !canSSAType(n.Type) {
-			// SSA-able args get location lists, and may move in and
-			// out of registers, so those are handled elsewhere.
-			// Autos and named output params seem to get handled
-			// with VARDEF, which creates location lists.
-			// Args not of SSA-able type are treated here; they
-			// are homed on the stack in a single place for the
-			// entire call.
-			vars = append(vars, createSimpleVar(fnsym, n))
-			decls = append(decls, n)
-			continue
-		}
-		typename := dwarf.InfoPrefix + typesymname(n.Type)
-		decls = append(decls, n)
-		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
-		isReturnValue := (n.Class() == PPARAMOUT)
-		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
-			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-		} else if n.Class() == PAUTOHEAP {
-			// If the dcl in question has been promoted to the heap, do a
-			// bit of extra work to recover the original class (auto or param);
-			// see issue 30908. This ensures that we get the proper
-			// signature in the abstract function DIE, but leaves a
-			// misleading location for the param (we want pointer-to-heap
-			// and not stack).
-			// TODO(thanm): generate a better location expression
-			stackcopy := n.Name.Param.Stackcopy
-			if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
-				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-				isReturnValue = (stackcopy.Class() == PPARAMOUT)
-			}
-		}
-		inlIndex := 0
-		if genDwarfInline > 1 {
-			if n.Name.InlFormal() || n.Name.InlLocal() {
-				inlIndex = posInlIndex(n.Pos) + 1
-				if n.Name.InlFormal() {
-					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-				}
-			}
-		}
-		declpos := Ctxt.InnermostPos(n.Pos)
-		vars = append(vars, &dwarf.Var{
-			Name:          n.Sym.Name,
-			IsReturnValue: isReturnValue,
-			Abbrev:        abbrev,
-			StackOffset:   int32(n.Xoffset),
-			Type:          Ctxt.Lookup(typename),
-			DeclFile:      declpos.RelFilename(),
-			DeclLine:      declpos.RelLine(),
-			DeclCol:       declpos.Col(),
-			InlIndex:      int32(inlIndex),
-			ChildIndex:    -1,
-		})
-		// Record the go type of n to ensure that it gets emitted by the linker.
-		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
-	}
-
-	return decls, vars
-}
-
-// Given a function that was inlined at some point during the
-// compilation, return a sorted list of nodes corresponding to the
-// autos/locals in that function prior to inlining. If this is a
-// function that is not local to the package being compiled, then the
-// names of the variables may have been "versioned" to avoid conflicts
-// with local vars; disregard this versioning when sorting.
-func preInliningDcls(fnsym *obj.LSym) []*Node {
-	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
-	var rdcl []*Node
-	for _, n := range fn.Func.Inl.Dcl {
-		c := n.Sym.Name[0]
-		// Avoid reporting "_" parameters, since if there are more than
-		// one, it can result in a collision later on, as in #23179.
-		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
-			continue
-		}
-		rdcl = append(rdcl, n)
-	}
-	return rdcl
-}
-
-// stackOffset returns the stack location of a LocalSlot relative to the
-// stack pointer, suitable for use in a DWARF location entry. This has nothing
-// to do with its offset in the user variable.
-func stackOffset(slot ssa.LocalSlot) int32 {
-	n := slot.N.(*Node)
-	var base int64
-	switch n.Class() {
-	case PAUTO:
-		if Ctxt.FixedFrameSize() == 0 {
-			base -= int64(Widthptr)
-		}
-		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
-			// There is a word space for FP on ARM64 even if the frame pointer is disabled
-			base -= int64(Widthptr)
-		}
-	case PPARAM, PPARAMOUT:
-		base += Ctxt.FixedFrameSize()
-	}
-	return int32(base + n.Xoffset + slot.Off)
-}
-
-// createComplexVar builds a single DWARF variable entry and location list.
-func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
-	debug := fn.DebugInfo
-	n := debug.Vars[varID].(*Node)
-
-	var abbrev int
-	switch n.Class() {
-	case PAUTO:
-		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
-	case PPARAM, PPARAMOUT:
-		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-	default:
-		return nil
-	}
-
-	gotype := ngotype(n).Linksym()
-	delete(fnsym.Func().Autot, gotype)
-	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
-	inlIndex := 0
-	if genDwarfInline > 1 {
-		if n.Name.InlFormal() || n.Name.InlLocal() {
-			inlIndex = posInlIndex(n.Pos) + 1
-			if n.Name.InlFormal() {
-				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
-			}
-		}
-	}
-	declpos := Ctxt.InnermostPos(n.Pos)
-	dvar := &dwarf.Var{
-		Name:          n.Sym.Name,
-		IsReturnValue: n.Class() == PPARAMOUT,
-		IsInlFormal:   n.Name.InlFormal(),
-		Abbrev:        abbrev,
-		Type:          Ctxt.Lookup(typename),
-		// The stack offset is used as a sorting key, so for decomposed
-		// variables just give it the first one. It's not used otherwise.
-		// This won't work well if the first slot hasn't been assigned a stack
-		// location, but it's not obvious how to do better.
-		StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
-		DeclFile:    declpos.RelFilename(),
-		DeclLine:    declpos.RelLine(),
-		DeclCol:     declpos.Col(),
-		InlIndex:    int32(inlIndex),
-		ChildIndex:  -1,
-	}
-	list := debug.LocationLists[varID]
-	if len(list) != 0 {
-		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
-			debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
-		}
-	}
-	return dvar
-}
-
-// fieldtrack adds R_USEFIELD relocations to fnsym to record any
-// struct fields that it used.
-func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
-	if fnsym == nil {
-		return
-	}
-	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
-		return
-	}
-
-	trackSyms := make([]*types.Sym, 0, len(tracked))
-	for sym := range tracked {
-		trackSyms = append(trackSyms, sym)
-	}
-	sort.Sort(symByName(trackSyms))
-	for _, sym := range trackSyms {
-		r := obj.Addrel(fnsym)
-		r.Sym = sym.Linksym()
-		r.Type = objabi.R_USEFIELD
-	}
-}
-
-type symByName []*types.Sym
-
-func (a symByName) Len() int           { return len(a) }
-func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
-func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go
deleted file mode 100644
index b1db298..0000000
--- a/src/cmd/compile/internal/gc/pgen_test.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"reflect"
-	"sort"
-	"testing"
-)
-
-func typeWithoutPointers() *types.Type {
-	t := types.New(TSTRUCT)
-	f := &types.Field{Type: types.New(TINT)}
-	t.SetFields([]*types.Field{f})
-	return t
-}
-
-func typeWithPointers() *types.Type {
-	t := types.New(TSTRUCT)
-	f := &types.Field{Type: types.NewPtr(types.New(TINT))}
-	t.SetFields([]*types.Field{f})
-	return t
-}
-
-func markUsed(n *Node) *Node {
-	n.Name.SetUsed(true)
-	return n
-}
-
-func markNeedZero(n *Node) *Node {
-	n.Name.SetNeedzero(true)
-	return n
-}
-
-func nodeWithClass(n Node, c Class) *Node {
-	n.SetClass(c)
-	n.Name = new(Name)
-	return &n
-}
-
-// Test all code paths for cmpstackvarlt.
-func TestCmpstackvar(t *testing.T) {
-	testdata := []struct {
-		a, b *Node
-		lt   bool
-	}{
-		{
-			nodeWithClass(Node{}, PAUTO),
-			nodeWithClass(Node{}, PFUNC),
-			false,
-		},
-		{
-			nodeWithClass(Node{}, PFUNC),
-			nodeWithClass(Node{}, PAUTO),
-			true,
-		},
-		{
-			nodeWithClass(Node{Xoffset: 0}, PFUNC),
-			nodeWithClass(Node{Xoffset: 10}, PFUNC),
-			true,
-		},
-		{
-			nodeWithClass(Node{Xoffset: 20}, PFUNC),
-			nodeWithClass(Node{Xoffset: 10}, PFUNC),
-			false,
-		},
-		{
-			nodeWithClass(Node{Xoffset: 10}, PFUNC),
-			nodeWithClass(Node{Xoffset: 10}, PFUNC),
-			false,
-		},
-		{
-			nodeWithClass(Node{Xoffset: 10}, PPARAM),
-			nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
-			true,
-		},
-		{
-			nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
-			nodeWithClass(Node{Xoffset: 20}, PPARAM),
-			true,
-		},
-		{
-			markUsed(nodeWithClass(Node{}, PAUTO)),
-			nodeWithClass(Node{}, PAUTO),
-			true,
-		},
-		{
-			nodeWithClass(Node{}, PAUTO),
-			markUsed(nodeWithClass(Node{}, PAUTO)),
-			false,
-		},
-		{
-			nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
-			nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
-			false,
-		},
-		{
-			nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
-			nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
-			true,
-		},
-		{
-			markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
-			nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
-			true,
-		},
-		{
-			nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
-			markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
-			false,
-		},
-		{
-			nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
-			nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
-			false,
-		},
-		{
-			nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
-			nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
-			true,
-		},
-		{
-			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
-			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
-			true,
-		},
-		{
-			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
-			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
-			false,
-		},
-		{
-			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
-			nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
-			false,
-		},
-	}
-	for _, d := range testdata {
-		got := cmpstackvarlt(d.a, d.b)
-		if got != d.lt {
-			t.Errorf("want %#v < %#v", d.a, d.b)
-		}
-		// If we expect a < b to be true, check that b < a is false.
-		if d.lt && cmpstackvarlt(d.b, d.a) {
-			t.Errorf("unexpected %#v < %#v", d.b, d.a)
-		}
-	}
-}
-
-func TestStackvarSort(t *testing.T) {
-	inp := []*Node{
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
-		nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
-		markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
-		nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
-	}
-	want := []*Node{
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
-		markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
-		markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
-		nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
-		nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
-		nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
-	}
-	sort.Sort(byStackVar(inp))
-	if !reflect.DeepEqual(want, inp) {
-		t.Error("sort failed")
-		for i := range inp {
-			g := inp[i]
-			w := want[i]
-			eq := reflect.DeepEqual(w, g)
-			if !eq {
-				t.Log(i, w, g)
-			}
-		}
-	}
-}
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
deleted file mode 100644
index 3552617..0000000
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"cmd/internal/sys"
-)
-
-// The racewalk pass is currently handled in three parts.
-//
-// First, for flag_race, it inserts calls to racefuncenter and
-// racefuncexit at the start and end (respectively) of each
-// function. This is handled below.
-//
-// Second, during buildssa, it inserts appropriate instrumentation
-// calls immediately before each memory load or store. This is handled
-// by the (*state).instrument method in ssa.go, so here we just set
-// the Func.InstrumentBody flag as needed. For background on why this
-// is done during SSA construction rather than a separate SSA pass,
-// see issue #19054.
-//
-// Third, we remove calls to racefuncenter and racefuncexit for leaf
-// functions without instrumented operations. This is done as part of
-// the ssa opt pass via a special rewrite rule.
-
-// TODO(dvyukov): do not instrument initialization as writes:
-// a := make([]int, 10)
-
-// Do not instrument the following packages at all,
-// at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{
-	"runtime/internal/atomic",
-	"runtime/internal/sys",
-	"runtime/internal/math",
-	"runtime",
-	"runtime/race",
-	"runtime/msan",
-	"internal/cpu",
-}
-
-// Don't insert racefuncenterfp/racefuncexit into the following packages.
-// Memory accesses in these packages either are uninteresting or would cause false positives.
-var norace_inst_pkgs = []string{"sync", "sync/atomic"}
-
-func ispkgin(pkgs []string) bool {
-	if myimportpath != "" {
-		for _, p := range pkgs {
-			if myimportpath == p {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-func instrument(fn *Node) {
-	if fn.Func.Pragma&Norace != 0 {
-		return
-	}
-
-	if !flag_race || !ispkgin(norace_inst_pkgs) {
-		fn.Func.SetInstrumentBody(true)
-	}
-
-	if flag_race {
-		lno := lineno
-		lineno = src.NoXPos
-
-		if thearch.LinkArch.Arch.Family != sys.AMD64 {
-			fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
-			fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
-		} else {
-
-			// nodpc is the PC of the caller as extracted by
-			// getcallerpc. We use -widthptr(FP) for x86.
-			// This only works for amd64. This will not
-			// work on arm or others that might support
-			// race in the future.
-			nodpc := nodfp.copy()
-			nodpc.Type = types.Types[TUINTPTR]
-			nodpc.Xoffset = int64(-Widthptr)
-			fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
-			fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
-			fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
-		}
-		lineno = lno
-	}
-}
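
// A minimal sketch of what this instrumentation buys: the enter/exit calls
// prepended above, plus the load/store instrumentation added during buildssa,
// are what let `go run -race` report the conflicting accesses below.
package main

import "sync"

func main() {
	var x int
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { x = 1; wg.Done() }() // instrumented write
	go func() { x = 2; wg.Done() }() // conflicting instrumented write
	wg.Wait()
	_ = x
}
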
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
deleted file mode 100644
index 1b4d765..0000000
--- a/src/cmd/compile/internal/gc/range.go
+++ /dev/null
@@ -1,628 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/sys"
-	"unicode/utf8"
-)
-
-// range
-func typecheckrange(n *Node) {
-	// Typechecking order is important here:
-	// 0. first typecheck range expression (slice/map/chan),
-	//	it is evaluated only once and so logically it is not part of the loop.
-	// 1. typecheck produced values,
-	//	this part can declare new vars and so it must be typechecked before body,
-	//	because body can contain a closure that captures the vars.
-	// 2. decldepth++ to denote loop body.
-	// 3. typecheck body.
-	// 4. decldepth--.
-	typecheckrangeExpr(n)
-
-	// second half of dance, the first half being typecheckrangeExpr
-	n.SetTypecheck(1)
-	ls := n.List.Slice()
-	for i1, n1 := range ls {
-		if n1.Typecheck() == 0 {
-			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
-		}
-	}
-
-	decldepth++
-	typecheckslice(n.Nbody.Slice(), ctxStmt)
-	decldepth--
-}
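
// The ordering above matters because the body may capture the produced
// values in a closure; a sketch of the code shape that forces step 1
// to run before step 3 (per-iteration copies are the usual idiom):
package main

import "fmt"

func main() {
	var prints []func()
	for i, s := range []string{"a", "b"} {
		i, s := i, s // capture per-iteration copies
		prints = append(prints, func() { fmt.Println(i, s) })
	}
	for _, p := range prints {
		p() // prints "0 a" then "1 b"
	}
}
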
-
-func typecheckrangeExpr(n *Node) {
-	n.Right = typecheck(n.Right, ctxExpr)
-
-	t := n.Right.Type
-	if t == nil {
-		return
-	}
-	// delicate little dance.  see typecheckas2
-	ls := n.List.Slice()
-	for i1, n1 := range ls {
-		if n1.Name == nil || n1.Name.Defn != n {
-			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
-		}
-	}
-
-	if t.IsPtr() && t.Elem().IsArray() {
-		t = t.Elem()
-	}
-	n.Type = t
-
-	var t1, t2 *types.Type
-	toomany := false
-	switch t.Etype {
-	default:
-		yyerrorl(n.Pos, "cannot range over %L", n.Right)
-		return
-
-	case TARRAY, TSLICE:
-		t1 = types.Types[TINT]
-		t2 = t.Elem()
-
-	case TMAP:
-		t1 = t.Key()
-		t2 = t.Elem()
-
-	case TCHAN:
-		if !t.ChanDir().CanRecv() {
-			yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
-			return
-		}
-
-		t1 = t.Elem()
-		t2 = nil
-		if n.List.Len() == 2 {
-			toomany = true
-		}
-
-	case TSTRING:
-		t1 = types.Types[TINT]
-		t2 = types.Runetype
-	}
-
-	if n.List.Len() > 2 || toomany {
-		yyerrorl(n.Pos, "too many variables in range")
-	}
-
-	var v1, v2 *Node
-	if n.List.Len() != 0 {
-		v1 = n.List.First()
-	}
-	if n.List.Len() > 1 {
-		v2 = n.List.Second()
-	}
-
-	// this is not only an optimization but also a requirement in the spec.
-	// "if the second iteration variable is the blank identifier, the range
-	// clause is equivalent to the same clause with only the first variable
-	// present."
-	if v2.isBlank() {
-		if v1 != nil {
-			n.List.Set1(v1)
-		}
-		v2 = nil
-	}
-
-	if v1 != nil {
-		if v1.Name != nil && v1.Name.Defn == n {
-			v1.Type = t1
-		} else if v1.Type != nil {
-			if op, why := assignop(t1, v1.Type); op == OXXX {
-				yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
-			}
-		}
-		checkassign(n, v1)
-	}
-
-	if v2 != nil {
-		if v2.Name != nil && v2.Name.Defn == n {
-			v2.Type = t2
-		} else if v2.Type != nil {
-			if op, why := assignop(t2, v2.Type); op == OXXX {
-				yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
-			}
-		}
-		checkassign(n, v2)
-	}
-}
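
// A sketch of the spec rule applied above: a blank second iteration
// variable is dropped, leaving an equivalent single-variable loop.
package main

import "fmt"

func main() {
	s := []int{10, 20, 30}
	for i, _ := range s { // rewritten as: for i := range s
		fmt.Println(i)
	}
}
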
-
-func cheapComputableIndex(width int64) bool {
-	switch thearch.LinkArch.Family {
-	// MIPS does not have R+R addressing.
-	// ARM64 may lack the ability to generate this code in our assembler,
-	// but the architecture supports it.
-	case sys.PPC64, sys.S390X:
-		return width == 1
-	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
-		switch width {
-		case 1, 2, 4, 8:
-			return true
-		}
-	}
-	return false
-}
-
-// walkrange transforms various forms of ORANGE into
-// simpler forms.  The result must be assigned back to n.
-// Node n may also be modified in place, and may also be
-// the returned node.
-func walkrange(n *Node) *Node {
-	if isMapClear(n) {
-		m := n.Right
-		lno := setlineno(m)
-		n = mapClear(m)
-		lineno = lno
-		return n
-	}
-
-	// variable name conventions:
-	//	ohv1, hv1, hv2: hidden (old) val 1, 2
-	//	ha, hit: hidden aggregate, iterator
-	//	hn, hp: hidden len, pointer
-	//	hb: hidden bool
-	//	a, v1, v2: not hidden aggregate, val 1, 2
-
-	t := n.Type
-
-	a := n.Right
-	lno := setlineno(a)
-	n.Right = nil
-
-	var v1, v2 *Node
-	l := n.List.Len()
-	if l > 0 {
-		v1 = n.List.First()
-	}
-
-	if l > 1 {
-		v2 = n.List.Second()
-	}
-
-	if v2.isBlank() {
-		v2 = nil
-	}
-
-	if v1.isBlank() && v2 == nil {
-		v1 = nil
-	}
-
-	if v1 == nil && v2 != nil {
-		Fatalf("walkrange: v2 != nil while v1 == nil")
-	}
-
-	// n.List has no meaning anymore, clear it
-	// to avoid erroneous processing by racewalk.
-	n.List.Set(nil)
-
-	var ifGuard *Node
-
-	translatedLoopOp := OFOR
-
-	var body []*Node
-	var init []*Node
-	switch t.Etype {
-	default:
-		Fatalf("walkrange")
-
-	case TARRAY, TSLICE:
-		if arrayClear(n, v1, v2, a) {
-			lineno = lno
-			return n
-		}
-
-		// order.stmt arranged for a copy of the array/slice variable if needed.
-		ha := a
-
-		hv1 := temp(types.Types[TINT])
-		hn := temp(types.Types[TINT])
-
-		init = append(init, nod(OAS, hv1, nil))
-		init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
-
-		n.Left = nod(OLT, hv1, hn)
-		n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
-
-		// for range ha { body }
-		if v1 == nil {
-			break
-		}
-
-		// for v1 := range ha { body }
-		if v2 == nil {
-			body = []*Node{nod(OAS, v1, hv1)}
-			break
-		}
-
-		// for v1, v2 := range ha { body }
-		if cheapComputableIndex(n.Type.Elem().Width) {
-			// v1, v2 = hv1, ha[hv1]
-			tmp := nod(OINDEX, ha, hv1)
-			tmp.SetBounded(true)
-			// Use OAS2 to correctly handle assignments
-			// of the form "v1, a[v1] := range".
-			a := nod(OAS2, nil, nil)
-			a.List.Set2(v1, v2)
-			a.Rlist.Set2(hv1, tmp)
-			body = []*Node{a}
-			break
-		}
-
-		// TODO(austin): OFORUNTIL is a strange beast, but is
-		// necessary for expressing the control flow we need
-		// while also making "break" and "continue" work. It
-		// would be nice to just lower ORANGE during SSA, but
-		// racewalk needs to see many of the operations
-		// involved in ORANGE's implementation. If racewalk
-		// moves into SSA, consider moving ORANGE into SSA and
-		// eliminating OFORUNTIL.
-
-		// TODO(austin): OFORUNTIL inhibits bounds-check
-		// elimination on the index variable (see #20711).
-		// Enhance the prove pass to understand this.
-		ifGuard = nod(OIF, nil, nil)
-		ifGuard.Left = nod(OLT, hv1, hn)
-		translatedLoopOp = OFORUNTIL
-
-		hp := temp(types.NewPtr(n.Type.Elem()))
-		tmp := nod(OINDEX, ha, nodintconst(0))
-		tmp.SetBounded(true)
-		init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
-
-		// Use OAS2 to correctly handle assignments
-		// of the form "v1, a[v1] := range".
-		a := nod(OAS2, nil, nil)
-		a.List.Set2(v1, v2)
-		a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
-		body = append(body, a)
-
-		// Advance pointer as part of the late increment.
-		//
-		// This runs *after* the condition check, so we know
-		// advancing the pointer is safe and won't go past the
-		// end of the allocation.
-		a = nod(OAS, hp, addptr(hp, t.Elem().Width))
-		a = typecheck(a, ctxStmt)
-		n.List.Set1(a)
-
-	case TMAP:
-		// order.stmt allocated the iterator for us.
-		// we only use a once, so no copy needed.
-		ha := a
-
-		hit := prealloc[n]
-		th := hit.Type
-		n.Left = nil
-		keysym := th.Field(0).Sym  // depends on layout of iterator struct.  See reflect.go:hiter
-		elemsym := th.Field(1).Sym // ditto
-
-		fn := syslook("mapiterinit")
-
-		fn = substArgTypes(fn, t.Key(), t.Elem(), th)
-		init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
-		n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
-
-		fn = syslook("mapiternext")
-		fn = substArgTypes(fn, th)
-		n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
-
-		key := nodSym(ODOT, hit, keysym)
-		key = nod(ODEREF, key, nil)
-		if v1 == nil {
-			body = nil
-		} else if v2 == nil {
-			body = []*Node{nod(OAS, v1, key)}
-		} else {
-			elem := nodSym(ODOT, hit, elemsym)
-			elem = nod(ODEREF, elem, nil)
-			a := nod(OAS2, nil, nil)
-			a.List.Set2(v1, v2)
-			a.Rlist.Set2(key, elem)
-			body = []*Node{a}
-		}
-
-	case TCHAN:
-		// order.stmt arranged for a copy of the channel variable.
-		ha := a
-
-		n.Left = nil
-
-		hv1 := temp(t.Elem())
-		hv1.SetTypecheck(1)
-		if t.Elem().HasPointers() {
-			init = append(init, nod(OAS, hv1, nil))
-		}
-		hb := temp(types.Types[TBOOL])
-
-		n.Left = nod(ONE, hb, nodbool(false))
-		a := nod(OAS2RECV, nil, nil)
-		a.SetTypecheck(1)
-		a.List.Set2(hv1, hb)
-		a.Right = nod(ORECV, ha, nil)
-		n.Left.Ninit.Set1(a)
-		if v1 == nil {
-			body = nil
-		} else {
-			body = []*Node{nod(OAS, v1, hv1)}
-		}
-		// Zero hv1. This prevents hv1 from being the sole, inaccessible
-		// reference to an otherwise GC-able value during the next channel receive.
-		// See issue 15281.
-		body = append(body, nod(OAS, hv1, nil))
-
-	case TSTRING:
-		// Transform string range statements like "for v1, v2 = range a" into
-		//
-		// ha := a
-		// for hv1 := 0; hv1 < len(ha); {
-		//   hv1t := hv1
-		//   hv2 := rune(ha[hv1])
-		//   if hv2 < utf8.RuneSelf {
-		//      hv1++
-		//   } else {
-		//      hv2, hv1 = decoderune(ha, hv1)
-		//   }
-		//   v1, v2 = hv1t, hv2
-		//   // original body
-		// }
-
-		// order.stmt arranged for a copy of the string variable.
-		ha := a
-
-		hv1 := temp(types.Types[TINT])
-		hv1t := temp(types.Types[TINT])
-		hv2 := temp(types.Runetype)
-
-		// hv1 := 0
-		init = append(init, nod(OAS, hv1, nil))
-
-		// hv1 < len(ha)
-		n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
-
-		if v1 != nil {
-			// hv1t = hv1
-			body = append(body, nod(OAS, hv1t, hv1))
-		}
-
-		// hv2 := rune(ha[hv1])
-		nind := nod(OINDEX, ha, hv1)
-		nind.SetBounded(true)
-		body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
-
-		// if hv2 < utf8.RuneSelf
-		nif := nod(OIF, nil, nil)
-		nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))
-
-		// hv1++
-		nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
-
-		// } else {
-		eif := nod(OAS2, nil, nil)
-		nif.Rlist.Set1(eif)
-
-		// hv2, hv1 = decoderune(ha, hv1)
-		eif.List.Set2(hv2, hv1)
-		fn := syslook("decoderune")
-		eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
-
-		body = append(body, nif)
-
-		if v1 != nil {
-			if v2 != nil {
-				// v1, v2 = hv1t, hv2
-				a := nod(OAS2, nil, nil)
-				a.List.Set2(v1, v2)
-				a.Rlist.Set2(hv1t, hv2)
-				body = append(body, a)
-			} else {
-				// v1 = hv1t
-				body = append(body, nod(OAS, v1, hv1t))
-			}
-		}
-	}
-
-	n.Op = translatedLoopOp
-	typecheckslice(init, ctxStmt)
-
-	if ifGuard != nil {
-		ifGuard.Ninit.Append(init...)
-		ifGuard = typecheck(ifGuard, ctxStmt)
-	} else {
-		n.Ninit.Append(init...)
-	}
-
-	typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
-
-	n.Left = typecheck(n.Left, ctxExpr)
-	n.Left = defaultlit(n.Left, nil)
-	n.Right = typecheck(n.Right, ctxStmt)
-	typecheckslice(body, ctxStmt)
-	n.Nbody.Prepend(body...)
-
-	if ifGuard != nil {
-		ifGuard.Nbody.Set1(n)
-		n = ifGuard
-	}
-
-	n = walkstmt(n)
-
-	lineno = lno
-	return n
-}
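
// The TSTRING lowering above, sketched at the source level with the
// exported unicode/utf8 API standing in for the runtime's decoderune
// (an illustration only; the compiler calls the runtime helper directly):
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	ha := "héllo"
	for hv1 := 0; hv1 < len(ha); {
		hv1t := hv1
		hv2 := rune(ha[hv1])
		if hv2 < utf8.RuneSelf {
			hv1++ // ASCII fast path: one byte, no decode call
		} else {
			var size int
			hv2, size = utf8.DecodeRuneInString(ha[hv1:])
			hv1 += size
		}
		fmt.Printf("%d %q\n", hv1t, hv2) // v1, v2 = hv1t, hv2
	}
}
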
-
-// isMapClear checks if n is of the form:
-//
-// for k := range m {
-//   delete(m, k)
-// }
-//
-// where == for keys of map m is reflexive.
-func isMapClear(n *Node) bool {
-	if Debug.N != 0 || instrumenting {
-		return false
-	}
-
-	if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
-		return false
-	}
-
-	k := n.List.First()
-	if k == nil || k.isBlank() {
-		return false
-	}
-
-	// Require k to be a new variable name.
-	if k.Name == nil || k.Name.Defn != n {
-		return false
-	}
-
-	if n.Nbody.Len() != 1 {
-		return false
-	}
-
-	stmt := n.Nbody.First() // only stmt in body
-	if stmt == nil || stmt.Op != ODELETE {
-		return false
-	}
-
-	m := n.Right
-	if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
-		return false
-	}
-
-	// Keys where equality is not reflexive cannot be deleted from maps.
-	if !isreflexive(m.Type.Key()) {
-		return false
-	}
-
-	return true
-}
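
// For reference, the exact source pattern isMapClear accepts; it is
// lowered to one runtime.mapclear call instead of a per-key loop.
package p

func clearMap(m map[string]int) {
	for k := range m {
		delete(m, k)
	}
}
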
-
-// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m *Node) *Node {
-	t := m.Type
-
-	// instantiate mapclear(typ *type, hmap map[any]any)
-	fn := syslook("mapclear")
-	fn = substArgTypes(fn, t.Key(), t.Elem())
-	n := mkcall1(fn, nil, nil, typename(t), m)
-
-	n = typecheck(n, ctxStmt)
-	n = walkstmt(n)
-
-	return n
-}
-
-// Lower n into runtime·memclr if possible, for
-// fast zeroing of slices and arrays (issue 5373).
-// Look for instances of
-//
-// for i := range a {
-// 	a[i] = zero
-// }
-//
-// in which the evaluation of a is side-effect-free.
-//
-// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(n, v1, v2, a *Node) bool {
-	if Debug.N != 0 || instrumenting {
-		return false
-	}
-
-	if v1 == nil || v2 != nil {
-		return false
-	}
-
-	if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
-		return false
-	}
-
-	stmt := n.Nbody.First() // only stmt in body
-	if stmt.Op != OAS || stmt.Left.Op != OINDEX {
-		return false
-	}
-
-	if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
-		return false
-	}
-
-	elemsize := n.Type.Elem().Width
-	if elemsize <= 0 || !isZero(stmt.Right) {
-		return false
-	}
-
-	// Convert to
-	// if len(a) != 0 {
-	// 	hp = &a[0]
-	// 	hn = len(a)*sizeof(elem(a))
-	// 	memclr{NoHeap,Has}Pointers(hp, hn)
-	// 	i = len(a) - 1
-	// }
-	n.Op = OIF
-
-	n.Nbody.Set(nil)
-	n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
-
-	// hp = &a[0]
-	hp := temp(types.Types[TUNSAFEPTR])
-
-	tmp := nod(OINDEX, a, nodintconst(0))
-	tmp.SetBounded(true)
-	tmp = nod(OADDR, tmp, nil)
-	tmp = convnop(tmp, types.Types[TUNSAFEPTR])
-	n.Nbody.Append(nod(OAS, hp, tmp))
-
-	// hn = len(a) * sizeof(elem(a))
-	hn := temp(types.Types[TUINTPTR])
-
-	tmp = nod(OLEN, a, nil)
-	tmp = nod(OMUL, tmp, nodintconst(elemsize))
-	tmp = conv(tmp, types.Types[TUINTPTR])
-	n.Nbody.Append(nod(OAS, hn, tmp))
-
-	var fn *Node
-	if a.Type.Elem().HasPointers() {
-		// memclrHasPointers(hp, hn)
-		Curfn.Func.setWBPos(stmt.Pos)
-		fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
-	} else {
-		// memclrNoHeapPointers(hp, hn)
-		fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
-	}
-
-	n.Nbody.Append(fn)
-
-	// i = len(a) - 1
-	v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
-
-	n.Nbody.Append(v1)
-
-	n.Left = typecheck(n.Left, ctxExpr)
-	n.Left = defaultlit(n.Left, nil)
-	typecheckslice(n.Nbody.Slice(), ctxStmt)
-	n = walkstmt(n)
-	return true
-}
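
// The corresponding source pattern for arrayClear: a range loop whose
// body only stores the zero value through the index is rewritten into a
// single memclr call guarded by len(a) != 0.
package p

func zeroSlice(a []int) {
	for i := range a {
		a[i] = 0
	}
}
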
-
-// addptr returns (*T)(uintptr(p) + n).
-func addptr(p *Node, n int64) *Node {
-	t := p.Type
-
-	p = nod(OCONVNOP, p, nil)
-	p.Type = types.Types[TUINTPTR]
-
-	p = nod(OADD, p, nodintconst(n))
-
-	p = nod(OCONVNOP, p, nil)
-	p.Type = t
-
-	return p
-}
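
// A user-level analogue of addptr, assuming Go 1.17's unsafe.Add; the
// compiler's OCONVNOP/OADD form is safe only because the enclosing loop
// guarantees the result stays within the backing allocation.
package p

import "unsafe"

func addInt(p *int, n int64) *int {
	return (*int)(unsafe.Add(unsafe.Pointer(p), n))
}
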
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
deleted file mode 100644
index 9401eba..0000000
--- a/src/cmd/compile/internal/gc/reflect.go
+++ /dev/null
@@ -1,1901 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/gcprog"
-	"cmd/internal/obj"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-	"fmt"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-)
-
-type itabEntry struct {
-	t, itype *types.Type
-	lsym     *obj.LSym // symbol of the itab itself
-
-	// symbols of each method in
-	// the itab, sorted by byte offset;
-	// filled in by peekitabs
-	entries []*obj.LSym
-}
-
-type ptabEntry struct {
-	s *types.Sym
-	t *types.Type
-}
-
-// runtime interface and reflection data structures
-var (
-	signatmu    sync.Mutex // protects signatset and signatslice
-	signatset   = make(map[*types.Type]struct{})
-	signatslice []*types.Type
-
-	itabs []itabEntry
-	ptabs []ptabEntry
-)
-
-type Sig struct {
-	name  *types.Sym
-	isym  *types.Sym
-	tsym  *types.Sym
-	type_ *types.Type
-	mtype *types.Type
-}
-
-// Builds a type representing a Bucket structure for
-// the given map type. This type is not visible to users -
-// we include only enough information to generate a correct GC
-// program for it.
-// Make sure this stays in sync with runtime/map.go.
-const (
-	BUCKETSIZE  = 8
-	MAXKEYSIZE  = 128
-	MAXELEMSIZE = 128
-)
-
-func structfieldSize() int { return 3 * Widthptr }       // Sizeof(runtime.structfield{})
-func imethodSize() int     { return 4 + 4 }              // Sizeof(runtime.imethod{})
-func commonSize() int      { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{})
-
-func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
-	if t.Sym == nil && len(methods(t)) == 0 {
-		return 0
-	}
-	return 4 + 2 + 2 + 4 + 4
-}
-
-func makefield(name string, t *types.Type) *types.Field {
-	f := types.NewField()
-	f.Type = t
-	f.Sym = (*types.Pkg)(nil).Lookup(name)
-	return f
-}
-
-// bmap makes the map bucket type given the type of the map.
-func bmap(t *types.Type) *types.Type {
-	if t.MapType().Bucket != nil {
-		return t.MapType().Bucket
-	}
-
-	bucket := types.New(TSTRUCT)
-	keytype := t.Key()
-	elemtype := t.Elem()
-	dowidth(keytype)
-	dowidth(elemtype)
-	if keytype.Width > MAXKEYSIZE {
-		keytype = types.NewPtr(keytype)
-	}
-	if elemtype.Width > MAXELEMSIZE {
-		elemtype = types.NewPtr(elemtype)
-	}
-
-	field := make([]*types.Field, 0, 5)
-
-	// The first field is: uint8 topbits[BUCKETSIZE].
-	arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
-	field = append(field, makefield("topbits", arr))
-
-	arr = types.NewArray(keytype, BUCKETSIZE)
-	arr.SetNoalg(true)
-	keys := makefield("keys", arr)
-	field = append(field, keys)
-
-	arr = types.NewArray(elemtype, BUCKETSIZE)
-	arr.SetNoalg(true)
-	elems := makefield("elems", arr)
-	field = append(field, elems)
-
-	// If keys and elems have no pointers, the map implementation
-	// can keep a list of overflow pointers on the side so that
-	// buckets can be marked as having no pointers.
-	// Arrange for the bucket to have no pointers by changing
-	// the type of the overflow field to uintptr in this case.
-	// See comment on hmap.overflow in runtime/map.go.
-	otyp := types.NewPtr(bucket)
-	if !elemtype.HasPointers() && !keytype.HasPointers() {
-		otyp = types.Types[TUINTPTR]
-	}
-	overflow := makefield("overflow", otyp)
-	field = append(field, overflow)
-
-	// link up fields
-	bucket.SetNoalg(true)
-	bucket.SetFields(field[:])
-	dowidth(bucket)
-
-	// Check invariants that map code depends on.
-	if !IsComparable(t.Key()) {
-		Fatalf("unsupported map key type for %v", t)
-	}
-	if BUCKETSIZE < 8 {
-		Fatalf("bucket size too small for proper alignment")
-	}
-	if keytype.Align > BUCKETSIZE {
-		Fatalf("key align too big for %v", t)
-	}
-	if elemtype.Align > BUCKETSIZE {
-		Fatalf("elem align too big for %v", t)
-	}
-	if keytype.Width > MAXKEYSIZE {
-		Fatalf("key size to large for %v", t)
-	}
-	if elemtype.Width > MAXELEMSIZE {
-		Fatalf("elem size to large for %v", t)
-	}
-	if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
-		Fatalf("key indirect incorrect for %v", t)
-	}
-	if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
-		Fatalf("elem indirect incorrect for %v", t)
-	}
-	if keytype.Width%int64(keytype.Align) != 0 {
-		Fatalf("key size not a multiple of key align for %v", t)
-	}
-	if elemtype.Width%int64(elemtype.Align) != 0 {
-		Fatalf("elem size not a multiple of elem align for %v", t)
-	}
-	if bucket.Align%keytype.Align != 0 {
-		Fatalf("bucket align not multiple of key align %v", t)
-	}
-	if bucket.Align%elemtype.Align != 0 {
-		Fatalf("bucket align not multiple of elem align %v", t)
-	}
-	if keys.Offset%int64(keytype.Align) != 0 {
-		Fatalf("bad alignment of keys in bmap for %v", t)
-	}
-	if elems.Offset%int64(elemtype.Align) != 0 {
-		Fatalf("bad alignment of elems in bmap for %v", t)
-	}
-
-	// Double-check that overflow field is final memory in struct,
-	// with no padding at end.
-	if overflow.Offset != bucket.Width-int64(Widthptr) {
-		Fatalf("bad offset of overflow in bmap for %v", t)
-	}
-
-	t.MapType().Bucket = bucket
-
-	bucket.StructType().Map = t
-	return bucket
-}
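
// Concretely, for map[string]int the type built above amounts to the
// struct below: BUCKETSIZE == 8 entries, keys and elems stored inline
// since both fit MAXKEYSIZE/MAXELEMSIZE, and overflow kept a real
// pointer because string contains one.
package p

type bucket struct {
	topbits  [8]uint8
	keys     [8]string
	elems    [8]int
	overflow *bucket
}
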
-
-// hmap builds a type representing a Hmap structure for the given map type.
-// Make sure this stays in sync with runtime/map.go.
-func hmap(t *types.Type) *types.Type {
-	if t.MapType().Hmap != nil {
-		return t.MapType().Hmap
-	}
-
-	bmap := bmap(t)
-
-	// build a struct:
-	// type hmap struct {
-	//    count      int
-	//    flags      uint8
-	//    B          uint8
-	//    noverflow  uint16
-	//    hash0      uint32
-	//    buckets    *bmap
-	//    oldbuckets *bmap
-	//    nevacuate  uintptr
-	//    extra      unsafe.Pointer // *mapextra
-	// }
-	// must match runtime/map.go:hmap.
-	fields := []*types.Field{
-		makefield("count", types.Types[TINT]),
-		makefield("flags", types.Types[TUINT8]),
-		makefield("B", types.Types[TUINT8]),
-		makefield("noverflow", types.Types[TUINT16]),
-		makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
-		makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
-		makefield("oldbuckets", types.NewPtr(bmap)),
-		makefield("nevacuate", types.Types[TUINTPTR]),
-		makefield("extra", types.Types[TUNSAFEPTR]),
-	}
-
-	hmap := types.New(TSTRUCT)
-	hmap.SetNoalg(true)
-	hmap.SetFields(fields)
-	dowidth(hmap)
-
-	// The size of hmap should be 48 bytes on 64-bit
-	// and 28 bytes on 32-bit platforms.
-	if size := int64(8 + 5*Widthptr); hmap.Width != size {
-		Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
-	}
-
-	t.MapType().Hmap = hmap
-	hmap.StructType().Map = t
-	return hmap
-}
-
-// hiter builds a type representing an Hiter structure for the given map type.
-// Make sure this stays in sync with runtime/map.go.
-func hiter(t *types.Type) *types.Type {
-	if t.MapType().Hiter != nil {
-		return t.MapType().Hiter
-	}
-
-	hmap := hmap(t)
-	bmap := bmap(t)
-
-	// build a struct:
-	// type hiter struct {
-	//    key         *Key
-	//    elem        *Elem
-	//    t           unsafe.Pointer // *MapType
-	//    h           *hmap
-	//    buckets     *bmap
-	//    bptr        *bmap
-	//    overflow    unsafe.Pointer // *[]*bmap
-	//    oldoverflow unsafe.Pointer // *[]*bmap
-	//    startBucket uintptr
-	//    offset      uint8
-	//    wrapped     bool
-	//    B           uint8
-	//    i           uint8
-	//    bucket      uintptr
-	//    checkBucket uintptr
-	// }
-	// must match runtime/map.go:hiter.
-	fields := []*types.Field{
-		makefield("key", types.NewPtr(t.Key())),   // Used in range.go for TMAP.
-		makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
-		makefield("t", types.Types[TUNSAFEPTR]),
-		makefield("h", types.NewPtr(hmap)),
-		makefield("buckets", types.NewPtr(bmap)),
-		makefield("bptr", types.NewPtr(bmap)),
-		makefield("overflow", types.Types[TUNSAFEPTR]),
-		makefield("oldoverflow", types.Types[TUNSAFEPTR]),
-		makefield("startBucket", types.Types[TUINTPTR]),
-		makefield("offset", types.Types[TUINT8]),
-		makefield("wrapped", types.Types[TBOOL]),
-		makefield("B", types.Types[TUINT8]),
-		makefield("i", types.Types[TUINT8]),
-		makefield("bucket", types.Types[TUINTPTR]),
-		makefield("checkBucket", types.Types[TUINTPTR]),
-	}
-
-	// build iterator struct holding the above fields
-	hiter := types.New(TSTRUCT)
-	hiter.SetNoalg(true)
-	hiter.SetFields(fields)
-	dowidth(hiter)
-	if hiter.Width != int64(12*Widthptr) {
-		Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
-	}
-	t.MapType().Hiter = hiter
-	hiter.StructType().Map = t
-	return hiter
-}
-
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
-	makefield := func(name string, typ *types.Type) *types.Field {
-		f := types.NewField()
-		f.Type = typ
-		// Unlike the global makefield function, this one needs to set Pkg
-		// because these types might be compared (in SSA CSE sorting).
-		// TODO: unify this makefield and the global one above.
-		f.Sym = &types.Sym{Name: name, Pkg: localpkg}
-		return f
-	}
-	argtype := types.NewArray(types.Types[TUINT8], stksize)
-	argtype.Width = stksize
-	argtype.Align = 1
-	// These fields must match the ones in runtime/runtime2.go:_defer and
-	// cmd/compile/internal/gc/ssa.go:(*state).call.
-	fields := []*types.Field{
-		makefield("siz", types.Types[TUINT32]),
-		makefield("started", types.Types[TBOOL]),
-		makefield("heap", types.Types[TBOOL]),
-		makefield("openDefer", types.Types[TBOOL]),
-		makefield("sp", types.Types[TUINTPTR]),
-		makefield("pc", types.Types[TUINTPTR]),
-		// Note: the types here don't really matter. Defer structures
-		// are always scanned explicitly during stack copying and GC,
-		// so we make them uintptr type even though they are real pointers.
-		makefield("fn", types.Types[TUINTPTR]),
-		makefield("_panic", types.Types[TUINTPTR]),
-		makefield("link", types.Types[TUINTPTR]),
-		makefield("framepc", types.Types[TUINTPTR]),
-		makefield("varp", types.Types[TUINTPTR]),
-		makefield("fd", types.Types[TUINTPTR]),
-		makefield("args", argtype),
-	}
-
-	// build struct holding the above fields
-	s := types.New(TSTRUCT)
-	s.SetNoalg(true)
-	s.SetFields(fields)
-	s.Width = widstruct(s, s, 0, 1)
-	s.Align = uint8(Widthptr)
-	return s
-}
-
-// methodfunc builds the function type for method type f, with the
-// receiver, if non-nil, prepended as the first argument.
-func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
-	inLen := f.Params().Fields().Len()
-	if receiver != nil {
-		inLen++
-	}
-	in := make([]*Node, 0, inLen)
-
-	if receiver != nil {
-		d := anonfield(receiver)
-		in = append(in, d)
-	}
-
-	for _, t := range f.Params().Fields().Slice() {
-		d := anonfield(t.Type)
-		d.SetIsDDD(t.IsDDD())
-		in = append(in, d)
-	}
-
-	outLen := f.Results().Fields().Len()
-	out := make([]*Node, 0, outLen)
-	for _, t := range f.Results().Fields().Slice() {
-		d := anonfield(t.Type)
-		out = append(out, d)
-	}
-
-	t := functype(nil, in, out)
-	if f.Nname() != nil {
-		// Link to name of original method function.
-		t.SetNname(f.Nname())
-	}
-
-	return t
-}
-
-// methods returns the methods of the non-interface type t, sorted by name.
-// Generates stub functions as needed.
-func methods(t *types.Type) []*Sig {
-	// method type
-	mt := methtype(t)
-
-	if mt == nil {
-		return nil
-	}
-	expandmeth(mt)
-
-	// type stored in interface word
-	it := t
-
-	if !isdirectiface(it) {
-		it = types.NewPtr(t)
-	}
-
-	// make list of methods for t,
-	// generating code if necessary.
-	var ms []*Sig
-	for _, f := range mt.AllMethods().Slice() {
-		if !f.IsMethod() {
-			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
-		}
-		if f.Type.Recv() == nil {
-			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
-		}
-		if f.Nointerface() {
-			continue
-		}
-
-		method := f.Sym
-		if method == nil {
-			break
-		}
-
-		// Get the receiver type for this particular method.
-		// If the method has a pointer receiver but t is not a
-		// pointer, and this is not an embedded pointer inside a
-		// struct, the method does not apply.
-		if !isMethodApplicable(t, f) {
-			continue
-		}
-
-		sig := &Sig{
-			name:  method,
-			isym:  methodSym(it, method),
-			tsym:  methodSym(t, method),
-			type_: methodfunc(f.Type, t),
-			mtype: methodfunc(f.Type, nil),
-		}
-		ms = append(ms, sig)
-
-		this := f.Type.Recv().Type
-
-		if !sig.isym.Siggen() {
-			sig.isym.SetSiggen(true)
-			if !types.Identical(this, it) {
-				genwrapper(it, f, sig.isym)
-			}
-		}
-
-		if !sig.tsym.Siggen() {
-			sig.tsym.SetSiggen(true)
-			if !types.Identical(this, t) {
-				genwrapper(t, f, sig.tsym)
-			}
-		}
-	}
-
-	return ms
-}
-
-// imethods returns the methods of the interface type t, sorted by name.
-func imethods(t *types.Type) []*Sig {
-	var methods []*Sig
-	for _, f := range t.Fields().Slice() {
-		if f.Type.Etype != TFUNC || f.Sym == nil {
-			continue
-		}
-		if f.Sym.IsBlank() {
-			Fatalf("unexpected blank symbol in interface method set")
-		}
-		if n := len(methods); n > 0 {
-			last := methods[n-1]
-			if !last.name.Less(f.Sym) {
-				Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
-			}
-		}
-
-		sig := &Sig{
-			name:  f.Sym,
-			mtype: f.Type,
-			type_: methodfunc(f.Type, nil),
-		}
-		methods = append(methods, sig)
-
-		// NOTE(rsc): Perhaps an oversight that
-		// IfaceType.Method is not in the reflect data.
-		// Generate the method body, so that compiled
-		// code can refer to it.
-		isym := methodSym(t, f.Sym)
-		if !isym.Siggen() {
-			isym.SetSiggen(true)
-			genwrapper(t, f, isym)
-		}
-	}
-
-	return methods
-}
-
-func dimportpath(p *types.Pkg) {
-	if p.Pathsym != nil {
-		return
-	}
-
-	// If we are compiling the runtime package, there are two runtime packages around
-	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
-	// both of them, so just produce one for localpkg.
-	if myimportpath == "runtime" && p == Runtimepkg {
-		return
-	}
-
-	str := p.Path
-	if p == localpkg {
-		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
-		str = myimportpath
-	}
-
-	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
-	ot := dnameData(s, 0, str, "", nil, false)
-	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
-	s.Set(obj.AttrContentAddressable, true)
-	p.Pathsym = s
-}
-
-func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
-	if pkg == nil {
-		return duintptr(s, ot, 0)
-	}
-
-	if pkg == localpkg && myimportpath == "" {
-		// If we don't know the full import path of the package being compiled
-		// (i.e. -p was not passed on the compiler command line), emit a reference to
-		// type..importpath.""., which the linker will rewrite using the correct import path.
-		// Every package that imports this one directly defines the symbol.
-		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
-		ns := Ctxt.Lookup(`type..importpath."".`)
-		return dsymptr(s, ot, ns, 0)
-	}
-
-	dimportpath(pkg)
-	return dsymptr(s, ot, pkg.Pathsym, 0)
-}
-
-// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
-func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
-	if pkg == nil {
-		return duint32(s, ot, 0)
-	}
-	if pkg == localpkg && myimportpath == "" {
-		// If we don't know the full import path of the package being compiled
-		// (i.e. -p was not passed on the compiler command line), emit a reference to
-		// type..importpath.""., which the linker will rewrite using the correct import path.
-		// Every package that imports this one directly defines the symbol.
-		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
-		ns := Ctxt.Lookup(`type..importpath."".`)
-		return dsymptrOff(s, ot, ns)
-	}
-
-	dimportpath(pkg)
-	return dsymptrOff(s, ot, pkg.Pathsym)
-}
-
-// dnameField dumps a reflect.name for a struct field.
-func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
-	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
-		Fatalf("package mismatch for %v", ft.Sym)
-	}
-	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
-	return dsymptr(lsym, ot, nsym, 0)
-}
-
-// dnameData writes the contents of a reflect.name into s at offset ot.
-func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
-	if len(name) > 1<<16-1 {
-		Fatalf("name too long: %s", name)
-	}
-	if len(tag) > 1<<16-1 {
-		Fatalf("tag too long: %s", tag)
-	}
-
-	// Encode name and tag. See reflect/type.go for details.
-	var bits byte
-	l := 1 + 2 + len(name)
-	if exported {
-		bits |= 1 << 0
-	}
-	if len(tag) > 0 {
-		l += 2 + len(tag)
-		bits |= 1 << 1
-	}
-	if pkg != nil {
-		bits |= 1 << 2
-	}
-	b := make([]byte, l)
-	b[0] = bits
-	b[1] = uint8(len(name) >> 8)
-	b[2] = uint8(len(name))
-	copy(b[3:], name)
-	if len(tag) > 0 {
-		tb := b[3+len(name):]
-		tb[0] = uint8(len(tag) >> 8)
-		tb[1] = uint8(len(tag))
-		copy(tb[2:], tag)
-	}
-
-	ot = int(s.WriteBytes(Ctxt, int64(ot), b))
-
-	if pkg != nil {
-		ot = dgopkgpathOff(s, ot, pkg)
-	}
-
-	return ot
-}
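
// A sketch of the inverse operation, unpacking the layout dnameData
// writes (the pkg-path offset, when present, follows the tag bytes):
package p

func decodeName(b []byte) (name, tag string, exported, hasPkg bool) {
	bits := b[0]
	exported = bits&(1<<0) != 0
	nlen := int(b[1])<<8 | int(b[2])
	name = string(b[3 : 3+nlen])
	if bits&(1<<1) != 0 { // tag present
		tb := b[3+nlen:]
		tlen := int(tb[0])<<8 | int(tb[1])
		tag = string(tb[2 : 2+tlen])
	}
	hasPkg = bits&(1<<2) != 0
	return
}
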
-
-var dnameCount int
-
-// dname creates a reflect.name for a struct field or method.
-func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
-	// Write out data as "type.." to signal two things to the
-	// linker: first, that when dynamically linking, the symbol
-	// should be moved to a relro section, and second, that the
-	// contents should not be decoded as a type.
-	sname := "type..namedata."
-	if pkg == nil {
-		// In the common case, share data with other packages.
-		if name == "" {
-			if exported {
-				sname += "-noname-exported." + tag
-			} else {
-				sname += "-noname-unexported." + tag
-			}
-		} else {
-			if exported {
-				sname += name + "." + tag
-			} else {
-				sname += name + "-" + tag
-			}
-		}
-	} else {
-		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
-		dnameCount++
-	}
-	s := Ctxt.Lookup(sname)
-	if len(s.P) > 0 {
-		return s
-	}
-	ot := dnameData(s, 0, name, tag, pkg, exported)
-	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
-	s.Set(obj.AttrContentAddressable, true)
-	return s
-}
-
-// dextratype dumps the fields of a runtime.uncommontype.
-// dataAdd is the offset in bytes after the header where the
-// backing array of the []method field is written (by dextratypeData).
-func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
-	m := methods(t)
-	if t.Sym == nil && len(m) == 0 {
-		return ot
-	}
-	noff := int(Rnd(int64(ot), int64(Widthptr)))
-	if noff != ot {
-		Fatalf("unexpected alignment in dextratype for %v", t)
-	}
-
-	for _, a := range m {
-		dtypesym(a.type_)
-	}
-
-	ot = dgopkgpathOff(lsym, ot, typePkg(t))
-
-	dataAdd += uncommonSize(t)
-	mcount := len(m)
-	if mcount != int(uint16(mcount)) {
-		Fatalf("too many methods on %v: %d", t, mcount)
-	}
-	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
-	if dataAdd != int(uint32(dataAdd)) {
-		Fatalf("methods are too far away on %v: %d", t, dataAdd)
-	}
-
-	ot = duint16(lsym, ot, uint16(mcount))
-	ot = duint16(lsym, ot, uint16(xcount))
-	ot = duint32(lsym, ot, uint32(dataAdd))
-	ot = duint32(lsym, ot, 0)
-	return ot
-}
-
-func typePkg(t *types.Type) *types.Pkg {
-	tsym := t.Sym
-	if tsym == nil {
-		switch t.Etype {
-		case TARRAY, TSLICE, TPTR, TCHAN:
-			if t.Elem() != nil {
-				tsym = t.Elem().Sym
-			}
-		}
-	}
-	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
-		return tsym.Pkg
-	}
-	return nil
-}
-
-// dextratypeData dumps the backing array for the []method field of
-// runtime.uncommontype.
-func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
-	for _, a := range methods(t) {
-		// ../../../../runtime/type.go:/method
-		exported := types.IsExported(a.name.Name)
-		var pkg *types.Pkg
-		if !exported && a.name.Pkg != typePkg(t) {
-			pkg = a.name.Pkg
-		}
-		nsym := dname(a.name.Name, "", pkg, exported)
-
-		ot = dsymptrOff(lsym, ot, nsym)
-		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
-		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
-		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
-	}
-	return ot
-}
-
-func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
-	duint32(s, ot, 0)
-	r := obj.Addrel(s)
-	r.Off = int32(ot)
-	r.Siz = 4
-	r.Sym = x
-	r.Type = objabi.R_METHODOFF
-	return ot + 4
-}
-
-var kinds = []int{
-	TINT:        objabi.KindInt,
-	TUINT:       objabi.KindUint,
-	TINT8:       objabi.KindInt8,
-	TUINT8:      objabi.KindUint8,
-	TINT16:      objabi.KindInt16,
-	TUINT16:     objabi.KindUint16,
-	TINT32:      objabi.KindInt32,
-	TUINT32:     objabi.KindUint32,
-	TINT64:      objabi.KindInt64,
-	TUINT64:     objabi.KindUint64,
-	TUINTPTR:    objabi.KindUintptr,
-	TFLOAT32:    objabi.KindFloat32,
-	TFLOAT64:    objabi.KindFloat64,
-	TBOOL:       objabi.KindBool,
-	TSTRING:     objabi.KindString,
-	TPTR:        objabi.KindPtr,
-	TSTRUCT:     objabi.KindStruct,
-	TINTER:      objabi.KindInterface,
-	TCHAN:       objabi.KindChan,
-	TMAP:        objabi.KindMap,
-	TARRAY:      objabi.KindArray,
-	TSLICE:      objabi.KindSlice,
-	TFUNC:       objabi.KindFunc,
-	TCOMPLEX64:  objabi.KindComplex64,
-	TCOMPLEX128: objabi.KindComplex128,
-	TUNSAFEPTR:  objabi.KindUnsafePointer,
-}
-
-// typeptrdata returns the length in bytes of the prefix of t
-// containing pointer data. Anything after this offset is scalar data.
-func typeptrdata(t *types.Type) int64 {
-	if !t.HasPointers() {
-		return 0
-	}
-
-	switch t.Etype {
-	case TPTR,
-		TUNSAFEPTR,
-		TFUNC,
-		TCHAN,
-		TMAP:
-		return int64(Widthptr)
-
-	case TSTRING:
-		// struct { byte *str; intgo len; }
-		return int64(Widthptr)
-
-	case TINTER:
-		// struct { Itab *tab;	void *data; } or
-		// struct { Type *type; void *data; }
-		// Note: see comment in plive.go:onebitwalktype1.
-		return 2 * int64(Widthptr)
-
-	case TSLICE:
-		// struct { byte *array; uintgo len; uintgo cap; }
-		return int64(Widthptr)
-
-	case TARRAY:
-		// haspointers already eliminated t.NumElem() == 0.
-		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())
-
-	case TSTRUCT:
-		// Find the last field that has pointers.
-		var lastPtrField *types.Field
-		for _, t1 := range t.Fields().Slice() {
-			if t1.Type.HasPointers() {
-				lastPtrField = t1
-			}
-		}
-		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
-
-	default:
-		Fatalf("typeptrdata: unexpected type, %v", t)
-		return 0
-	}
-}
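
// A worked instance of the TSTRUCT case, assuming a 64-bit target: the
// last pointer-bearing field determines the pointer-data prefix.
package p

import "unsafe"

type T struct {
	p *int
	x int64
	q *int  // last field with pointers
	y int64 // scalar tail, excluded from ptrdata
}

var t T

// typeptrdata(T) = Offsetof(q) + typeptrdata(*int) = 16 + 8 = 24.
const ptrdataT = unsafe.Offsetof(t.q) + unsafe.Sizeof(uintptr(0))
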
-
-// tflag is documented in reflect/type.go.
-//
-// tflag values must be kept in sync with copies in:
-//	cmd/compile/internal/gc/reflect.go
-//	cmd/link/internal/ld/decodesym.go
-//	reflect/type.go
-//	runtime/type.go
-const (
-	tflagUncommon      = 1 << 0
-	tflagExtraStar     = 1 << 1
-	tflagNamed         = 1 << 2
-	tflagRegularMemory = 1 << 3
-)
-
-var (
-	memhashvarlen  *obj.LSym
-	memequalvarlen *obj.LSym
-)
-
-// dcommontype dumps the contents of a reflect.rtype (runtime._type).
-func dcommontype(lsym *obj.LSym, t *types.Type) int {
-	dowidth(t)
-	eqfunc := geneq(t)
-
-	sptrWeak := true
-	var sptr *obj.LSym
-	if !t.IsPtr() || t.IsPtrElem() {
-		tptr := types.NewPtr(t)
-		if t.Sym != nil || methods(tptr) != nil {
-			sptrWeak = false
-		}
-		sptr = dtypesym(tptr)
-	}
-
-	gcsym, useGCProg, ptrdata := dgcsym(t)
-
-	// ../../../../reflect/type.go:/^type.rtype
-	// actual type structure
-	//	type rtype struct {
-	//		size          uintptr
-	//		ptrdata       uintptr
-	//		hash          uint32
-	//		tflag         tflag
-	//		align         uint8
-	//		fieldAlign    uint8
-	//		kind          uint8
-	//		equal         func(unsafe.Pointer, unsafe.Pointer) bool
-	//		gcdata        *byte
-	//		str           nameOff
-	//		ptrToThis     typeOff
-	//	}
-	ot := 0
-	ot = duintptr(lsym, ot, uint64(t.Width))
-	ot = duintptr(lsym, ot, uint64(ptrdata))
-	ot = duint32(lsym, ot, typehash(t))
-
-	var tflag uint8
-	if uncommonSize(t) != 0 {
-		tflag |= tflagUncommon
-	}
-	if t.Sym != nil && t.Sym.Name != "" {
-		tflag |= tflagNamed
-	}
-	if IsRegularMemory(t) {
-		tflag |= tflagRegularMemory
-	}
-
-	exported := false
-	p := t.LongString()
-	// If we're writing out type T,
-	// we are very likely to write out type *T as well.
-	// Use the string "*T"[1:] for "T", so that the two
-	// share storage. This is a cheap way to reduce the
-	// amount of space taken up by reflect strings.
-	if !strings.HasPrefix(p, "*") {
-		p = "*" + p
-		tflag |= tflagExtraStar
-		if t.Sym != nil {
-			exported = types.IsExported(t.Sym.Name)
-		}
-	} else {
-		if t.Elem() != nil && t.Elem().Sym != nil {
-			exported = types.IsExported(t.Elem().Sym.Name)
-		}
-	}
-
-	ot = duint8(lsym, ot, tflag)
-
-	// runtime (and common sense) expects alignment to be a power of two.
-	i := int(t.Align)
-
-	if i == 0 {
-		i = 1
-	}
-	if i&(i-1) != 0 {
-		Fatalf("invalid alignment %d for %v", t.Align, t)
-	}
-	ot = duint8(lsym, ot, t.Align) // align
-	ot = duint8(lsym, ot, t.Align) // fieldAlign
-
-	i = kinds[t.Etype]
-	if isdirectiface(t) {
-		i |= objabi.KindDirectIface
-	}
-	if useGCProg {
-		i |= objabi.KindGCProg
-	}
-	ot = duint8(lsym, ot, uint8(i)) // kind
-	if eqfunc != nil {
-		ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
-	} else {
-		ot = duintptr(lsym, ot, 0) // type we can't do == with
-	}
-	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
-
-	nsym := dname(p, "", nil, exported)
-	ot = dsymptrOff(lsym, ot, nsym) // str
-	// ptrToThis
-	if sptr == nil {
-		ot = duint32(lsym, ot, 0)
-	} else if sptrWeak {
-		ot = dsymptrWeakOff(lsym, ot, sptr)
-	} else {
-		ot = dsymptrOff(lsym, ot, sptr)
-	}
-
-	return ot
-}
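
// The storage-sharing trick above relies on substrings aliasing their
// parent's bytes; "mypkg.T" is a hypothetical name for illustration.
package main

import "fmt"

func main() {
	star := "*mypkg.T" // emit only the pointer form...
	plain := star[1:]  // ...and derive the base name with no extra bytes
	fmt.Println(plain) // mypkg.T
}
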
-
-// typeHasNoAlg reports whether t does not have any associated hash/eq
-// algorithms because t, or some component of t, is marked Noalg.
-func typeHasNoAlg(t *types.Type) bool {
-	a, bad := algtype1(t)
-	return a == ANOEQ && bad.Noalg()
-}
-
-func typesymname(t *types.Type) string {
-	name := t.ShortString()
-	// Use a separate symbol name for Noalg types for #17752.
-	if typeHasNoAlg(t) {
-		name = "noalg." + name
-	}
-	return name
-}
-
-// Fake package for runtime type info (headers)
-// Don't access directly, use typeLookup below.
-var (
-	typepkgmu sync.Mutex // protects typepkg lookups
-	typepkg   = types.NewPkg("type", "type")
-)
-
-func typeLookup(name string) *types.Sym {
-	typepkgmu.Lock()
-	s := typepkg.Lookup(name)
-	typepkgmu.Unlock()
-	return s
-}
-
-func typesym(t *types.Type) *types.Sym {
-	return typeLookup(typesymname(t))
-}
-
-// tracksym returns the symbol for tracking use of field/method f, assumed
-// to be a member of struct/interface type t.
-func tracksym(t *types.Type, f *types.Field) *types.Sym {
-	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
-}
-
-func typesymprefix(prefix string, t *types.Type) *types.Sym {
-	p := prefix + "." + t.ShortString()
-	s := typeLookup(p)
-
-	// This function is for looking up type-related generated functions
-	// (e.g. eq and hash). Make sure they are indeed generated.
-	signatmu.Lock()
-	addsignat(t)
-	signatmu.Unlock()
-
-	//print("algsym: %s -> %+S\n", p, s);
-
-	return s
-}
-
-func typenamesym(t *types.Type) *types.Sym {
-	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
-		Fatalf("typenamesym %v", t)
-	}
-	s := typesym(t)
-	signatmu.Lock()
-	addsignat(t)
-	signatmu.Unlock()
-	return s
-}
-
-func typename(t *types.Type) *Node {
-	s := typenamesym(t)
-	if s.Def == nil {
-		n := newnamel(src.NoXPos, s)
-		n.Type = types.Types[TUINT8]
-		n.SetClass(PEXTERN)
-		n.SetTypecheck(1)
-		s.Def = asTypesNode(n)
-	}
-
-	n := nod(OADDR, asNode(s.Def), nil)
-	n.Type = types.NewPtr(asNode(s.Def).Type)
-	n.SetTypecheck(1)
-	return n
-}
-
-func itabname(t, itype *types.Type) *Node {
-	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
-		Fatalf("itabname(%v, %v)", t, itype)
-	}
-	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
-	if s.Def == nil {
-		n := newname(s)
-		n.Type = types.Types[TUINT8]
-		n.SetClass(PEXTERN)
-		n.SetTypecheck(1)
-		s.Def = asTypesNode(n)
-		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
-	}
-
-	n := nod(OADDR, asNode(s.Def), nil)
-	n.Type = types.NewPtr(asNode(s.Def).Type)
-	n.SetTypecheck(1)
-	return n
-}
-
-// isreflexive reports whether t has a reflexive equality operator.
-// That is, if x==x for all x of type t.
-func isreflexive(t *types.Type) bool {
-	switch t.Etype {
-	case TBOOL,
-		TINT,
-		TUINT,
-		TINT8,
-		TUINT8,
-		TINT16,
-		TUINT16,
-		TINT32,
-		TUINT32,
-		TINT64,
-		TUINT64,
-		TUINTPTR,
-		TPTR,
-		TUNSAFEPTR,
-		TSTRING,
-		TCHAN:
-		return true
-
-	case TFLOAT32,
-		TFLOAT64,
-		TCOMPLEX64,
-		TCOMPLEX128,
-		TINTER:
-		return false
-
-	case TARRAY:
-		return isreflexive(t.Elem())
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			if !isreflexive(t1.Type) {
-				return false
-			}
-		}
-		return true
-
-	default:
-		Fatalf("bad type for map key: %v", t)
-		return false
-	}
-}
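
// Floats land in the non-reflexive branch because of NaN; the effect is
// observable at the language level:
package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	m := map[float64]bool{nan: true}
	fmt.Println(nan == nan, m[nan]) // false false: the entry is unreachable
}
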
-
-// needkeyupdate reports whether map updates with t as a key
-// need the key to be updated.
-func needkeyupdate(t *types.Type) bool {
-	switch t.Etype {
-	case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
-		TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
-		return false
-
-	case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
-		TINTER,
-		TSTRING: // strings might have smaller backing stores
-		return true
-
-	case TARRAY:
-		return needkeyupdate(t.Elem())
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			if needkeyupdate(t1.Type) {
-				return true
-			}
-		}
-		return false
-
-	default:
-		Fatalf("bad type for map key: %v", t)
-		return true
-	}
-}
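
// The float case here is the ±0 subtlety: +0 and -0 compare equal but
// differ in representation, so an update must overwrite the stored key,
// not just the value.
package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]string{}
	m[0.0] = "a"
	m[math.Copysign(0, -1)] = "b" // same entry; stored key becomes -0
	for k, v := range m {
		fmt.Println(math.Signbit(k), v) // true b
	}
}
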
-
-// hashMightPanic reports whether the hash of a map key of type t might panic.
-func hashMightPanic(t *types.Type) bool {
-	switch t.Etype {
-	case TINTER:
-		return true
-
-	case TARRAY:
-		return hashMightPanic(t.Elem())
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			if hashMightPanic(t1.Type) {
-				return true
-			}
-		}
-		return false
-
-	default:
-		return false
-	}
-}
-
-// formalType replaces byte and rune aliases with real types.
-// They've been separate internally to make error messages
-// better, but we have to merge them in the reflect tables.
-func formalType(t *types.Type) *types.Type {
-	if t == types.Bytetype || t == types.Runetype {
-		return types.Types[t.Etype]
-	}
-	return t
-}
-
-func dtypesym(t *types.Type) *obj.LSym {
-	t = formalType(t)
-	if t.IsUntyped() {
-		Fatalf("dtypesym %v", t)
-	}
-
-	s := typesym(t)
-	lsym := s.Linksym()
-	if s.Siggen() {
-		return lsym
-	}
-	s.SetSiggen(true)
-
-	// special case (look for runtime below):
-	// when compiling package runtime,
-	// emit the type structures for int, float, etc.
-	tbase := t
-
-	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
-		tbase = t.Elem()
-	}
-	dupok := 0
-	if tbase.Sym == nil {
-		dupok = obj.DUPOK
-	}
-
-	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
-		// named types from other files are defined only by those files
-		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
-			if i, ok := typeSymIdx[tbase]; ok {
-				lsym.Pkg = tbase.Sym.Pkg.Prefix
-				if t != tbase {
-					lsym.SymIdx = int32(i[1])
-				} else {
-					lsym.SymIdx = int32(i[0])
-				}
-				lsym.Set(obj.AttrIndexed, true)
-			}
-			return lsym
-		}
-		// TODO(mdempsky): Investigate whether this can happen.
-		if tbase.Etype == TFORW {
-			return lsym
-		}
-	}
-
-	ot := 0
-	switch t.Etype {
-	default:
-		ot = dcommontype(lsym, t)
-		ot = dextratype(lsym, ot, t, 0)
-
-	case TARRAY:
-		// ../../../../runtime/type.go:/arrayType
-		s1 := dtypesym(t.Elem())
-		t2 := types.NewSlice(t.Elem())
-		s2 := dtypesym(t2)
-		ot = dcommontype(lsym, t)
-		ot = dsymptr(lsym, ot, s1, 0)
-		ot = dsymptr(lsym, ot, s2, 0)
-		ot = duintptr(lsym, ot, uint64(t.NumElem()))
-		ot = dextratype(lsym, ot, t, 0)
-
-	case TSLICE:
-		// ../../../../runtime/type.go:/sliceType
-		s1 := dtypesym(t.Elem())
-		ot = dcommontype(lsym, t)
-		ot = dsymptr(lsym, ot, s1, 0)
-		ot = dextratype(lsym, ot, t, 0)
-
-	case TCHAN:
-		// ../../../../runtime/type.go:/chanType
-		s1 := dtypesym(t.Elem())
-		ot = dcommontype(lsym, t)
-		ot = dsymptr(lsym, ot, s1, 0)
-		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
-		ot = dextratype(lsym, ot, t, 0)
-
-	case TFUNC:
-		for _, t1 := range t.Recvs().Fields().Slice() {
-			dtypesym(t1.Type)
-		}
-		isddd := false
-		for _, t1 := range t.Params().Fields().Slice() {
-			isddd = t1.IsDDD()
-			dtypesym(t1.Type)
-		}
-		for _, t1 := range t.Results().Fields().Slice() {
-			dtypesym(t1.Type)
-		}
-
-		ot = dcommontype(lsym, t)
-		inCount := t.NumRecvs() + t.NumParams()
-		outCount := t.NumResults()
-		if isddd {
-			outCount |= 1 << 15
-		}
-		ot = duint16(lsym, ot, uint16(inCount))
-		ot = duint16(lsym, ot, uint16(outCount))
-		if Widthptr == 8 {
-			ot += 4 // align for *rtype
-		}
-
-		dataAdd := (inCount + t.NumResults()) * Widthptr
-		ot = dextratype(lsym, ot, t, dataAdd)
-
-		// Array of rtype pointers follows funcType.
-		for _, t1 := range t.Recvs().Fields().Slice() {
-			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
-		}
-		for _, t1 := range t.Params().Fields().Slice() {
-			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
-		}
-		for _, t1 := range t.Results().Fields().Slice() {
-			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
-		}
-
-	case TINTER:
-		m := imethods(t)
-		n := len(m)
-		for _, a := range m {
-			dtypesym(a.type_)
-		}
-
-		// ../../../../runtime/type.go:/interfaceType
-		ot = dcommontype(lsym, t)
-
-		var tpkg *types.Pkg
-		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
-			tpkg = t.Sym.Pkg
-		}
-		ot = dgopkgpath(lsym, ot, tpkg)
-
-		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
-		ot = duintptr(lsym, ot, uint64(n))
-		ot = duintptr(lsym, ot, uint64(n))
-		dataAdd := imethodSize() * n
-		ot = dextratype(lsym, ot, t, dataAdd)
-
-		for _, a := range m {
-			// ../../../../runtime/type.go:/imethod
-			exported := types.IsExported(a.name.Name)
-			var pkg *types.Pkg
-			if !exported && a.name.Pkg != tpkg {
-				pkg = a.name.Pkg
-			}
-			nsym := dname(a.name.Name, "", pkg, exported)
-
-			ot = dsymptrOff(lsym, ot, nsym)
-			ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
-		}
-
-	// ../../../../runtime/type.go:/mapType
-	case TMAP:
-		s1 := dtypesym(t.Key())
-		s2 := dtypesym(t.Elem())
-		s3 := dtypesym(bmap(t))
-		hasher := genhash(t.Key())
-
-		ot = dcommontype(lsym, t)
-		ot = dsymptr(lsym, ot, s1, 0)
-		ot = dsymptr(lsym, ot, s2, 0)
-		ot = dsymptr(lsym, ot, s3, 0)
-		ot = dsymptr(lsym, ot, hasher, 0)
-		var flags uint32
-		// Note: flags must match maptype accessors in ../../../../runtime/type.go
-		// and maptype builder in ../../../../reflect/type.go:MapOf.
-		if t.Key().Width > MAXKEYSIZE {
-			ot = duint8(lsym, ot, uint8(Widthptr))
-			flags |= 1 // indirect key
-		} else {
-			ot = duint8(lsym, ot, uint8(t.Key().Width))
-		}
-
-		if t.Elem().Width > MAXELEMSIZE {
-			ot = duint8(lsym, ot, uint8(Widthptr))
-			flags |= 2 // indirect value
-		} else {
-			ot = duint8(lsym, ot, uint8(t.Elem().Width))
-		}
-		ot = duint16(lsym, ot, uint16(bmap(t).Width))
-		if isreflexive(t.Key()) {
-			flags |= 4 // reflexive key
-		}
-		if needkeyupdate(t.Key()) {
-			flags |= 8 // need key update
-		}
-		if hashMightPanic(t.Key()) {
-			flags |= 16 // hash might panic
-		}
-		ot = duint32(lsym, ot, flags)
-		ot = dextratype(lsym, ot, t, 0)
-
-	case TPTR:
-		if t.Elem().Etype == TANY {
-			// ../../../../runtime/type.go:/UnsafePointerType
-			ot = dcommontype(lsym, t)
-			ot = dextratype(lsym, ot, t, 0)
-
-			break
-		}
-
-		// ../../../../runtime/type.go:/ptrType
-		s1 := dtypesym(t.Elem())
-
-		ot = dcommontype(lsym, t)
-		ot = dsymptr(lsym, ot, s1, 0)
-		ot = dextratype(lsym, ot, t, 0)
-
-	// ../../../../runtime/type.go:/structType
-	// for security, only the exported fields.
-	case TSTRUCT:
-		fields := t.Fields().Slice()
-		for _, t1 := range fields {
-			dtypesym(t1.Type)
-		}
-
-		// All non-exported struct field names within a struct
-		// type must originate from a single package. By
-		// identifying and recording that package within the
-		// struct type descriptor, we can omit that
-		// information from the field descriptors.
-		var spkg *types.Pkg
-		for _, f := range fields {
-			if !types.IsExported(f.Sym.Name) {
-				spkg = f.Sym.Pkg
-				break
-			}
-		}
-
-		ot = dcommontype(lsym, t)
-		ot = dgopkgpath(lsym, ot, spkg)
-		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
-		ot = duintptr(lsym, ot, uint64(len(fields)))
-		ot = duintptr(lsym, ot, uint64(len(fields)))
-
-		dataAdd := len(fields) * structfieldSize()
-		ot = dextratype(lsym, ot, t, dataAdd)
-
-		for _, f := range fields {
-			// ../../../../runtime/type.go:/structField
-			ot = dnameField(lsym, ot, spkg, f)
-			ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
-			offsetAnon := uint64(f.Offset) << 1
-			if offsetAnon>>1 != uint64(f.Offset) {
-				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
-			}
-			if f.Embedded != 0 {
-				offsetAnon |= 1
-			}
-			ot = duintptr(lsym, ot, offsetAnon)
-		}
-	}
-
-	ot = dextratypeData(lsym, ot, t)
-	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))
-
-	// The linker will leave a table of all the typelinks for
-	// types in the binary, so the runtime can find them.
-	//
-	// When buildmode=shared, all types are in typelinks so the
-	// runtime can deduplicate type pointers.
-	keep := Ctxt.Flag_dynlink
-	if !keep && t.Sym == nil {
-		// For an unnamed type, we only need the link if the type can
-		// be created at run time by reflect.PtrTo and similar
-		// functions. If the type exists in the program, those
-		// functions must return the existing type structure rather
-		// than creating a new one.
-		switch t.Etype {
-		case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
-			keep = true
-		}
-	}
-	// Do not put Noalg types in typelinks.  See issue #22605.
-	if typeHasNoAlg(t) {
-		keep = false
-	}
-	lsym.Set(obj.AttrMakeTypelink, keep)
-
-	return lsym
-}
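-
-// As a sketch of the consumer side: on a little-endian target, a
-// hypothetical reader positioned just past the common header could
-// recover what the TFUNC case above packs into its two uint16 words
-// (data here is an assumed byte slice of the descriptor contents):
-//
-//	inCount := binary.LittleEndian.Uint16(data[0:]) // recvs + params
-//	outWord := binary.LittleEndian.Uint16(data[2:])
-//	numOut := int(outWord & (1<<15 - 1)) // low 15 bits
-//	variadic := outWord&(1<<15) != 0     // top bit, set for isddd above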
-
-// ifaceMethodOffset returns the offset of the i-th method in the interface
-// type descriptor, ityp.
-func ifaceMethodOffset(ityp *types.Type, i int64) int64 {
-	// interface type descriptor layout is struct {
-	//   _type        // commonSize
-	//   pkgpath      // 1 word
-	//   []imethod    // 3 words (pointing to [...]imethod below)
-	//   uncommontype // uncommonSize
-	//   [...]imethod
-	// }
-	// The size of imethod is 8.
-	return int64(commonSize()+4*Widthptr+uncommonSize(ityp)) + i*8
-}
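-
-// For example, assuming a 64-bit target where commonSize() is 48 and
-// the descriptor carries a 16-byte uncommontype, method 0 starts at
-// 48 + 4*8 + 16 = 96 bytes into the descriptor, and each further
-// method adds 8 bytes (an imethod is two 4-byte offsets).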
-
-// for each itabEntry, gather the methods on
-// the concrete type that implement the interface
-func peekitabs() {
-	for i := range itabs {
-		tab := &itabs[i]
-		methods := genfun(tab.t, tab.itype)
-		if len(methods) == 0 {
-			continue
-		}
-		tab.entries = methods
-	}
-}
-
-// for the given concrete type and interface
-// type, return the (sorted) set of methods
-// on the concrete type that implement the interface
-func genfun(t, it *types.Type) []*obj.LSym {
-	if t == nil || it == nil {
-		return nil
-	}
-	sigs := imethods(it)
-	methods := methods(t)
-	out := make([]*obj.LSym, 0, len(sigs))
-	// TODO(mdempsky): Short circuit before calling methods(t)?
-	// See discussion on CL 105039.
-	if len(sigs) == 0 {
-		return nil
-	}
-
-	// both sigs and methods are sorted by name,
-	// so we can find the intersect in a single pass
-	for _, m := range methods {
-		if m.name == sigs[0].name {
-			out = append(out, m.isym.Linksym())
-			sigs = sigs[1:]
-			if len(sigs) == 0 {
-				break
-			}
-		}
-	}
-
-	if len(sigs) != 0 {
-		Fatalf("incomplete itab")
-	}
-
-	return out
-}
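-
-// The single-pass merge above works because both sigs and methods are
-// sorted by name. A standalone sketch of the same idea, using plain
-// string slices rather than this package's types:
-//
-//	func intersect(sigs, methods []string) []string {
-//		out := make([]string, 0, len(sigs))
-//		for _, m := range methods {
-//			if len(sigs) == 0 {
-//				break
-//			}
-//			if m == sigs[0] {
-//				out = append(out, m)
-//				sigs = sigs[1:]
-//			}
-//		}
-//		return out // anything left in sigs is an unimplemented method
-//	}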
-
-// itabsym uses the information gathered in
-// peekitabs to de-virtualize interface methods.
-// Since this is called by the SSA backend, it shouldn't
-// generate additional Nodes, Syms, etc.
-func itabsym(it *obj.LSym, offset int64) *obj.LSym {
-	var syms []*obj.LSym
-	if it == nil {
-		return nil
-	}
-
-	for i := range itabs {
-		e := &itabs[i]
-		if e.lsym == it {
-			syms = e.entries
-			break
-		}
-	}
-	if syms == nil {
-		return nil
-	}
-
-	// keep this arithmetic in sync with *itab layout
-	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
-	if methodnum >= len(syms) {
-		return nil
-	}
-	return syms[methodnum]
-}
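-
-// The arithmetic mirrors the itab layout shown in dumptabs below:
-// inter and _type take one word each, hash plus its padding take 8
-// bytes, and fun[0] follows. On a 64-bit target fun[0] is therefore
-// at offset 2*8+8 = 24, which maps to methodnum 0; offset 32 maps to
-// methodnum 1, and so on.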
-
-// addsignat ensures that a runtime type descriptor is emitted for t.
-func addsignat(t *types.Type) {
-	if _, ok := signatset[t]; !ok {
-		signatset[t] = struct{}{}
-		signatslice = append(signatslice, t)
-	}
-}
-
-func addsignats(dcls []*Node) {
-	// copy types from dcl list to signatset
-	for _, n := range dcls {
-		if n.Op == OTYPE {
-			addsignat(n.Type)
-		}
-	}
-}
-
-func dumpsignats() {
-	// Process signatset. Use a loop, as dtypesym adds
-	// entries to signatset while it is being processed.
-	signats := make([]typeAndStr, len(signatslice))
-	for len(signatslice) > 0 {
-		signats = signats[:0]
-		// Transfer entries to a slice and sort, for reproducible builds.
-		for _, t := range signatslice {
-			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
-			delete(signatset, t)
-		}
-		signatslice = signatslice[:0]
-		sort.Sort(typesByString(signats))
-		for _, ts := range signats {
-			t := ts.t
-			dtypesym(t)
-			if t.Sym != nil {
-				dtypesym(types.NewPtr(t))
-			}
-		}
-	}
-}
-
-func dumptabs() {
-	// process itabs
-	for _, i := range itabs {
-		// dump empty itab symbol into i.sym
-		// type itab struct {
-		//   inter  *interfacetype
-		//   _type  *_type
-		//   hash   uint32
-		//   _      [4]byte
-		//   fun    [1]uintptr // variable sized
-		// }
-		o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
-		o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
-		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
-		o += 4                                // skip unused field
-		for _, fn := range genfun(i.t, i.itype) {
-			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
-		}
-		// Nothing writes static itabs, so they are read only.
-		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
-		i.lsym.Set(obj.AttrContentAddressable, true)
-	}
-
-	// process ptabs
-	if localpkg.Name == "main" && len(ptabs) > 0 {
-		ot := 0
-		s := Ctxt.Lookup("go.plugin.tabs")
-		for _, p := range ptabs {
-			// Dump ptab entry into the go.plugin.tabs symbol.
-			//
-			// type ptab struct {
-			//	name nameOff
-			//	typ  typeOff // pointer to symbol
-			// }
-			nsym := dname(p.s.Name, "", nil, true)
-			tsym := dtypesym(p.t)
-			ot = dsymptrOff(s, ot, nsym)
-			ot = dsymptrOff(s, ot, tsym)
-			// Plugin exports symbols as interfaces. Mark their types
-			// as UsedInIface.
-			tsym.Set(obj.AttrUsedInIface, true)
-		}
-		ggloblsym(s, int32(ot), int16(obj.RODATA))
-
-		ot = 0
-		s = Ctxt.Lookup("go.plugin.exports")
-		for _, p := range ptabs {
-			ot = dsymptr(s, ot, p.s.Linksym(), 0)
-		}
-		ggloblsym(s, int32(ot), int16(obj.RODATA))
-	}
-}
-
-func dumpimportstrings() {
-	// generate import strings for imported packages
-	for _, p := range types.ImportedPkgList() {
-		dimportpath(p)
-	}
-}
-
-func dumpbasictypes() {
-	// do basic types if compiling package runtime.
-	// they have to be in at least one package,
-	// and runtime is always loaded implicitly,
-	// so this is as good as any.
-	// another possible choice would be package main,
-	// but using runtime means fewer copies in object files.
-	if myimportpath == "runtime" {
-		for i := types.EType(1); i <= TBOOL; i++ {
-			dtypesym(types.NewPtr(types.Types[i]))
-		}
-		dtypesym(types.NewPtr(types.Types[TSTRING]))
-		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))
-
-		// emit type structs for error and func(error) string.
-		// The latter is the type of an auto-generated wrapper.
-		dtypesym(types.NewPtr(types.Errortype))
-
-		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))
-
-		// add paths for runtime and main, which the linker imports implicitly.
-		dimportpath(Runtimepkg)
-
-		if flag_race {
-			dimportpath(racepkg)
-		}
-		if flag_msan {
-			dimportpath(msanpkg)
-		}
-		dimportpath(types.NewPkg("main", ""))
-	}
-}
-
-type typeAndStr struct {
-	t       *types.Type
-	short   string
-	regular string
-}
-
-type typesByString []typeAndStr
-
-func (a typesByString) Len() int { return len(a) }
-func (a typesByString) Less(i, j int) bool {
-	if a[i].short != a[j].short {
-		return a[i].short < a[j].short
-	}
-	// When the only difference between the types is whether
-	// they refer to byte or uint8, such as **byte vs **uint8,
-	// the types' ShortStrings can be identical.
-	// To preserve deterministic sort ordering, sort these by String().
-	if a[i].regular != a[j].regular {
-		return a[i].regular < a[j].regular
-	}
-	// Identical anonymous interfaces defined in different locations
-	// will be equal for the above checks, but different in DWARF output.
-	// Sort by source position to ensure deterministic order.
-	// See issues 27013 and 30202.
-	if a[i].t.Etype == types.TINTER && a[i].t.Methods().Len() > 0 {
-		return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos)
-	}
-	return false
-}
-func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
-// which holds 1-bit entries describing where pointers are in a given type.
-// Above this length, the GC information is recorded as a GC program,
-// which can express repetition compactly. In either form, the
-// information is used by the runtime to initialize the heap bitmap,
-// and for large types (like 128 or more words), they are roughly the
-// same speed. GC programs are never much larger and often more
-// compact. (If large arrays are involved, they can be arbitrarily
-// more compact.)
-//
-// The cutoff must be large enough that any allocation large enough to
-// use a GC program is large enough that it does not share heap bitmap
-// bytes with any other objects, allowing the GC program execution to
-// assume an aligned start and not use atomic operations. In the current
-// runtime, this means all malloc size classes larger than the cutoff must
-// be multiples of four words. On 32-bit systems that's 16 bytes, and
-// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
-// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
-// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
-// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
-// must be >= 4.
-//
-// We used to use 16 because the GC programs do have some constant overhead
-// to get started, and processing 128 pointers seems to be enough to
-// amortize that overhead well.
-//
-// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
-// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
-// use bitmaps for objects up to 64 kB in size.
-//
-// Also known to reflect/type.go.
-//
-const maxPtrmaskBytes = 2048
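-
-// At 2048 bytes the mask covers 2048*8 = 16384 pointer-sized words,
-// i.e. 64 kB of data on 32-bit targets (16384*4 bytes) and 128 kB on
-// 64-bit targets; beyond that, dgcsym below switches to a GC program.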
-
-// dgcsym emits and returns a data symbol containing GC information for type t,
-// along with a boolean reporting whether the UseGCProg bit should be set in
-// the type kind, and the ptrdata field to record in the reflect type information.
-func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
-	ptrdata = typeptrdata(t)
-	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
-		lsym = dgcptrmask(t)
-		return
-	}
-
-	useGCProg = true
-	lsym, ptrdata = dgcprog(t)
-	return
-}
-
-// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
-func dgcptrmask(t *types.Type) *obj.LSym {
-	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
-	fillptrmask(t, ptrmask)
-	p := fmt.Sprintf("gcbits.%x", ptrmask)
-
-	sym := Runtimepkg.Lookup(p)
-	lsym := sym.Linksym()
-	if !sym.Uniq() {
-		sym.SetUniq(true)
-		for i, x := range ptrmask {
-			duint8(lsym, i, x)
-		}
-		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
-		lsym.Set(obj.AttrContentAddressable, true)
-	}
-	return lsym
-}
-
-// fillptrmask fills in ptrmask with 1s corresponding to the
-// word offsets in t that hold pointers.
-// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
-func fillptrmask(t *types.Type, ptrmask []byte) {
-	for i := range ptrmask {
-		ptrmask[i] = 0
-	}
-	if !t.HasPointers() {
-		return
-	}
-
-	vec := bvalloc(8 * int32(len(ptrmask)))
-	onebitwalktype1(t, 0, vec)
-
-	nptr := typeptrdata(t) / int64(Widthptr)
-	for i := int64(0); i < nptr; i++ {
-		if vec.Get(int32(i)) {
-			ptrmask[i/8] |= 1 << (uint(i) % 8)
-		}
-	}
-}
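-
-// For example, for a type laid out like struct{ p *int; x uintptr; q *int },
-// the pointer words are 0 and 2, so fillptrmask produces the single
-// byte 0b00000101 (0x05) and dgcptrmask emits it under the
-// deduplicated symbol name "gcbits.05".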
-
-// dgcprog emits and returns the symbol containing a GC program for type t
-// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
-// In practice, the size is typeptrdata(t) except for non-trivial arrays.
-// For non-trivial arrays, the program describes the full t.Width size.
-func dgcprog(t *types.Type) (*obj.LSym, int64) {
-	dowidth(t)
-	if t.Width == BADWIDTH {
-		Fatalf("dgcprog: %v badwidth", t)
-	}
-	lsym := typesymprefix(".gcprog", t).Linksym()
-	var p GCProg
-	p.init(lsym)
-	p.emit(t, 0)
-	offset := p.w.BitIndex() * int64(Widthptr)
-	p.end()
-	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
-		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
-	}
-	return lsym, offset
-}
-
-type GCProg struct {
-	lsym   *obj.LSym
-	symoff int
-	w      gcprog.Writer
-}
-
-var Debug_gcprog int // set by -d gcprog
-
-func (p *GCProg) init(lsym *obj.LSym) {
-	p.lsym = lsym
-	p.symoff = 4 // first 4 bytes hold program length
-	p.w.Init(p.writeByte)
-	if Debug_gcprog > 0 {
-		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
-		p.w.Debug(os.Stderr)
-	}
-}
-
-func (p *GCProg) writeByte(x byte) {
-	p.symoff = duint8(p.lsym, p.symoff, x)
-}
-
-func (p *GCProg) end() {
-	p.w.End()
-	duint32(p.lsym, 0, uint32(p.symoff-4))
-	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
-	if Debug_gcprog > 0 {
-		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
-	}
-}
-
-func (p *GCProg) emit(t *types.Type, offset int64) {
-	dowidth(t)
-	if !t.HasPointers() {
-		return
-	}
-	if t.Width == int64(Widthptr) {
-		p.w.Ptr(offset / int64(Widthptr))
-		return
-	}
-	switch t.Etype {
-	default:
-		Fatalf("GCProg.emit: unexpected type %v", t)
-
-	case TSTRING:
-		p.w.Ptr(offset / int64(Widthptr))
-
-	case TINTER:
-		// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
-		p.w.Ptr(offset/int64(Widthptr) + 1)
-
-	case TSLICE:
-		p.w.Ptr(offset / int64(Widthptr))
-
-	case TARRAY:
-		if t.NumElem() == 0 {
-			// should have been handled by the HasPointers check above
-			Fatalf("GCProg.emit: empty array")
-		}
-
-		// Flatten array-of-array-of-array to just a big array by multiplying counts.
-		count := t.NumElem()
-		elem := t.Elem()
-		for elem.IsArray() {
-			count *= elem.NumElem()
-			elem = elem.Elem()
-		}
-
-		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
-			// Cheaper to just emit the bits.
-			for i := int64(0); i < count; i++ {
-				p.emit(elem, offset+i*elem.Width)
-			}
-			return
-		}
-		p.emit(elem, offset)
-		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
-		p.w.Repeat(elem.Width/int64(Widthptr), count-1)
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			p.emit(t1.Type, offset+t1.Offset)
-		}
-	}
-}
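-
-// As an example of the TARRAY case, a [4][5][6]T value flattens to
-// count = 120 elements of T; when repeating is worthwhile, the
-// program emits T's pointer bits once and then a single
-// Repeat(widthOfTInWords, 119) instead of 119 more copies.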
-
-// zeroaddr returns the address of a symbol with at least
-// size bytes of zeros.
-func zeroaddr(size int64) *Node {
-	if size >= 1<<31 {
-		Fatalf("map elem too big %d", size)
-	}
-	if zerosize < size {
-		zerosize = size
-	}
-	s := mappkg.Lookup("zero")
-	if s.Def == nil {
-		x := newname(s)
-		x.Type = types.Types[TUINT8]
-		x.SetClass(PEXTERN)
-		x.SetTypecheck(1)
-		s.Def = asTypesNode(x)
-	}
-	z := nod(OADDR, asNode(s.Def), nil)
-	z.Type = types.NewPtr(types.Types[TUINT8])
-	z.SetTypecheck(1)
-	return z
-}
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
deleted file mode 100644
index 97e0424..0000000
--- a/src/cmd/compile/internal/gc/select.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "cmd/compile/internal/types"
-
-// select
-func typecheckselect(sel *Node) {
-	var def *Node
-	lno := setlineno(sel)
-	typecheckslice(sel.Ninit.Slice(), ctxStmt)
-	for _, ncase := range sel.List.Slice() {
-		if ncase.Op != OCASE {
-			setlineno(ncase)
-			Fatalf("typecheckselect %v", ncase.Op)
-		}
-
-		if ncase.List.Len() == 0 {
-			// default
-			if def != nil {
-				yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
-			} else {
-				def = ncase
-			}
-		} else if ncase.List.Len() > 1 {
-			yyerrorl(ncase.Pos, "select cases cannot be lists")
-		} else {
-			ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
-			n := ncase.List.First()
-			ncase.Left = n
-			ncase.List.Set(nil)
-			switch n.Op {
-			default:
-				pos := n.Pos
-				if n.Op == ONAME {
-					// We don't have the right position for ONAME nodes (see #15459 and
-					// others). Using ncase.Pos for now as it will provide the correct
-					// line number (assuming the expression follows the "case" keyword
-					// on the same line). This matches the approach before 1.10.
-					pos = ncase.Pos
-				}
-				yyerrorl(pos, "select case must be receive, send or assign recv")
-
-			// convert x = <-c into OSELRECV(x, <-c).
-			// remove implicit conversions; the eventual assignment
-			// will reintroduce them.
-			case OAS:
-				if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() {
-					n.Right = n.Right.Left
-				}
-
-				if n.Right.Op != ORECV {
-					yyerrorl(n.Pos, "select assignment must have receive on right hand side")
-					break
-				}
-
-				n.Op = OSELRECV
-
-				// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
-			case OAS2RECV:
-				if n.Right.Op != ORECV {
-					yyerrorl(n.Pos, "select assignment must have receive on right hand side")
-					break
-				}
-
-				n.Op = OSELRECV2
-				n.Left = n.List.First()
-				n.List.Set1(n.List.Second())
-
-				// convert <-c into OSELRECV(N, <-c)
-			case ORECV:
-				n = nodl(n.Pos, OSELRECV, nil, n)
-
-				n.SetTypecheck(1)
-				ncase.Left = n
-
-			case OSEND:
-				break
-			}
-		}
-
-		typecheckslice(ncase.Nbody.Slice(), ctxStmt)
-	}
-
-	lineno = lno
-}
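-
-// In source terms, the rewrites above normalize the receive forms:
-//
-//	case <-c:          // ORECV    -> OSELRECV(N, <-c)
-//	case x = <-c:      // OAS      -> OSELRECV(x, <-c)
-//	case x, ok = <-c:  // OAS2RECV -> OSELRECV2(x, <-c), ok kept in List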
-
-func walkselect(sel *Node) {
-	lno := setlineno(sel)
-	if sel.Nbody.Len() != 0 {
-		Fatalf("double walkselect")
-	}
-
-	init := sel.Ninit.Slice()
-	sel.Ninit.Set(nil)
-
-	init = append(init, walkselectcases(&sel.List)...)
-	sel.List.Set(nil)
-
-	sel.Nbody.Set(init)
-	walkstmtlist(sel.Nbody.Slice())
-
-	lineno = lno
-}
-
-func walkselectcases(cases *Nodes) []*Node {
-	ncas := cases.Len()
-	sellineno := lineno
-
-	// optimization: zero-case select
-	if ncas == 0 {
-		return []*Node{mkcall("block", nil, nil)}
-	}
-
-	// optimization: one-case select: single op.
-	if ncas == 1 {
-		cas := cases.First()
-		setlineno(cas)
-		l := cas.Ninit.Slice()
-		if cas.Left != nil { // not default:
-			n := cas.Left
-			l = append(l, n.Ninit.Slice()...)
-			n.Ninit.Set(nil)
-			switch n.Op {
-			default:
-				Fatalf("select %v", n.Op)
-
-			case OSEND:
-				// already ok
-
-			case OSELRECV, OSELRECV2:
-				if n.Op == OSELRECV || n.List.Len() == 0 {
-					if n.Left == nil {
-						n = n.Right
-					} else {
-						n.Op = OAS
-					}
-					break
-				}
-
-				if n.Left == nil {
-					nblank = typecheck(nblank, ctxExpr|ctxAssign)
-					n.Left = nblank
-				}
-
-				n.Op = OAS2
-				n.List.Prepend(n.Left)
-				n.Rlist.Set1(n.Right)
-				n.Right = nil
-				n.Left = nil
-				n.SetTypecheck(0)
-				n = typecheck(n, ctxStmt)
-			}
-
-			l = append(l, n)
-		}
-
-		l = append(l, cas.Nbody.Slice()...)
-		l = append(l, nod(OBREAK, nil, nil))
-		return l
-	}
-
-	// convert case value arguments to addresses.
-	// this rewrite is used by both the general code and the next optimization.
-	var dflt *Node
-	for _, cas := range cases.Slice() {
-		setlineno(cas)
-		n := cas.Left
-		if n == nil {
-			dflt = cas
-			continue
-		}
-		switch n.Op {
-		case OSEND:
-			n.Right = nod(OADDR, n.Right, nil)
-			n.Right = typecheck(n.Right, ctxExpr)
-
-		case OSELRECV, OSELRECV2:
-			if n.Op == OSELRECV2 && n.List.Len() == 0 {
-				n.Op = OSELRECV
-			}
-
-			if n.Left != nil {
-				n.Left = nod(OADDR, n.Left, nil)
-				n.Left = typecheck(n.Left, ctxExpr)
-			}
-		}
-	}
-
-	// optimization: two-case select but one is default: single non-blocking op.
-	if ncas == 2 && dflt != nil {
-		cas := cases.First()
-		if cas == dflt {
-			cas = cases.Second()
-		}
-
-		n := cas.Left
-		setlineno(n)
-		r := nod(OIF, nil, nil)
-		r.Ninit.Set(cas.Ninit.Slice())
-		switch n.Op {
-		default:
-			Fatalf("select %v", n.Op)
-
-		case OSEND:
-			// if selectnbsend(c, v) { body } else { default body }
-			ch := n.Left
-			r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
-
-		case OSELRECV:
-			// if selectnbrecv(&v, c) { body } else { default body }
-			ch := n.Right.Left
-			elem := n.Left
-			if elem == nil {
-				elem = nodnil()
-			}
-			r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch)
-
-		case OSELRECV2:
-			// if selectnbrecv2(&v, &received, c) { body } else { default body }
-			ch := n.Right.Left
-			elem := n.Left
-			if elem == nil {
-				elem = nodnil()
-			}
-			receivedp := nod(OADDR, n.List.First(), nil)
-			receivedp = typecheck(receivedp, ctxExpr)
-			r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch)
-		}
-
-		r.Left = typecheck(r.Left, ctxExpr)
-		r.Nbody.Set(cas.Nbody.Slice())
-		r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
-		return []*Node{r, nod(OBREAK, nil, nil)}
-	}
-
-	if dflt != nil {
-		ncas--
-	}
-	casorder := make([]*Node, ncas)
-	nsends, nrecvs := 0, 0
-
-	var init []*Node
-
-	// generate sel-struct
-	lineno = sellineno
-	selv := temp(types.NewArray(scasetype(), int64(ncas)))
-	r := nod(OAS, selv, nil)
-	r = typecheck(r, ctxStmt)
-	init = append(init, r)
-
-	// No initialization for order; runtime.selectgo is responsible for that.
-	order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
-
-	var pc0, pcs *Node
-	if flag_race {
-		pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
-		pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
-	} else {
-		pc0 = nodnil()
-	}
-
-	// register cases
-	for _, cas := range cases.Slice() {
-		setlineno(cas)
-
-		init = append(init, cas.Ninit.Slice()...)
-		cas.Ninit.Set(nil)
-
-		n := cas.Left
-		if n == nil { // default:
-			continue
-		}
-
-		var i int
-		var c, elem *Node
-		switch n.Op {
-		default:
-			Fatalf("select %v", n.Op)
-		case OSEND:
-			i = nsends
-			nsends++
-			c = n.Left
-			elem = n.Right
-		case OSELRECV, OSELRECV2:
-			nrecvs++
-			i = ncas - nrecvs
-			c = n.Right.Left
-			elem = n.Left
-		}
-
-		casorder[i] = cas
-
-		setField := func(f string, val *Node) {
-			r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
-			r = typecheck(r, ctxStmt)
-			init = append(init, r)
-		}
-
-		c = convnop(c, types.Types[TUNSAFEPTR])
-		setField("c", c)
-		if elem != nil {
-			elem = convnop(elem, types.Types[TUNSAFEPTR])
-			setField("elem", elem)
-		}
-
-		// TODO(mdempsky): There should be a cleaner way to
-		// handle this.
-		if flag_race {
-			r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
-			init = append(init, r)
-		}
-	}
-	if nsends+nrecvs != ncas {
-		Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
-	}
-
-	// run the select
-	lineno = sellineno
-	chosen := temp(types.Types[TINT])
-	recvOK := temp(types.Types[TBOOL])
-	r = nod(OAS2, nil, nil)
-	r.List.Set2(chosen, recvOK)
-	fn := syslook("selectgo")
-	r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
-	r = typecheck(r, ctxStmt)
-	init = append(init, r)
-
-	// selv and order are no longer alive after selectgo.
-	init = append(init, nod(OVARKILL, selv, nil))
-	init = append(init, nod(OVARKILL, order, nil))
-	if flag_race {
-		init = append(init, nod(OVARKILL, pcs, nil))
-	}
-
-	// dispatch cases
-	dispatch := func(cond, cas *Node) {
-		cond = typecheck(cond, ctxExpr)
-		cond = defaultlit(cond, nil)
-
-		r := nod(OIF, cond, nil)
-
-		if n := cas.Left; n != nil && n.Op == OSELRECV2 {
-			x := nod(OAS, n.List.First(), recvOK)
-			x = typecheck(x, ctxStmt)
-			r.Nbody.Append(x)
-		}
-
-		r.Nbody.AppendNodes(&cas.Nbody)
-		r.Nbody.Append(nod(OBREAK, nil, nil))
-		init = append(init, r)
-	}
-
-	if dflt != nil {
-		setlineno(dflt)
-		dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
-	}
-	for i, cas := range casorder {
-		setlineno(cas)
-		dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
-	}
-
-	return init
-}
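-
-// For reference, the two-case-plus-default optimization above turns
-//
-//	select {
-//	case c <- v:
-//		body
-//	default:
-//		dflt
-//	}
-//
-// into roughly
-//
-//	if selectnbsend(c, &v) {
-//		body
-//	} else {
-//		dflt
-//	}
-//
-// with the receive forms using selectnbrecv/selectnbrecv2 analogously.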
-
-// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n *Node, i int64) *Node {
-	s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil)
-	t := types.NewPtr(types.Types[TUINT8])
-	return convnop(s, t)
-}
-
-var scase *types.Type
-
-// Keep in sync with src/runtime/select.go.
-func scasetype() *types.Type {
-	if scase == nil {
-		scase = tostruct([]*Node{
-			namedfield("c", types.Types[TUNSAFEPTR]),
-			namedfield("elem", types.Types[TUNSAFEPTR]),
-		})
-		scase.SetNoalg(true)
-	}
-	return scase
-}
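-
-// For reference, the runtime struct this mirrors is, in sketch form:
-//
-//	type scase struct {
-//		c    *hchan         // chan
-//		elem unsafe.Pointer // data element
-//	}
-//
-// which is why only the c and elem fields are materialized above.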
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
deleted file mode 100644
index 212fcc0..0000000
--- a/src/cmd/compile/internal/gc/sinit.go
+++ /dev/null
@@ -1,1172 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"fmt"
-)
-
-type InitEntry struct {
-	Xoffset int64 // struct, array only
-	Expr    *Node // bytes of run-time computed expressions
-}
-
-type InitPlan struct {
-	E []InitEntry
-}
-
-// An InitSchedule is used to decompose assignment statements into
-// static and dynamic initialization parts. Static initializations are
-// handled by populating variables' linker symbol data, while dynamic
-// initializations are accumulated to be executed in order.
-type InitSchedule struct {
-	// out is the ordered list of dynamic initialization
-	// statements.
-	out []*Node
-
-	initplans map[*Node]*InitPlan
-	inittemps map[*Node]*Node
-}
-
-func (s *InitSchedule) append(n *Node) {
-	s.out = append(s.out, n)
-}
-
-// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n *Node) {
-	if !s.tryStaticInit(n) {
-		if Debug.P != 0 {
-			Dump("nonstatic", n)
-		}
-		s.append(n)
-	}
-}
-
-// tryStaticInit attempts to statically execute an initialization
-// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(n *Node) bool {
-	// Only worry about simple "l = r" assignments. Multiple
-	// variable/expression OAS2 assignments have already been
-	// replaced by multiple simple OAS assignments, and the other
-	// OAS2* assignments mostly necessitate dynamic execution
-	// anyway.
-	if n.Op != OAS {
-		return false
-	}
-	if n.Left.isBlank() && candiscard(n.Right) {
-		return true
-	}
-	lno := setlineno(n)
-	defer func() { lineno = lno }()
-	return s.staticassign(n.Left, n.Right)
-}
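-
-// For example, given
-//
-//	var a = 1   // static: the value is written into a's linker symbol
-//	var b = f() // dynamic: the assignment is appended to s.out
-//
-// tryStaticInit succeeds for a and fails for b.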
-
-// like staticassign but we are copying an already
-// initialized value r.
-func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
-	if r.Op != ONAME {
-		return false
-	}
-	if r.Class() == PFUNC {
-		pfuncsym(l, r)
-		return true
-	}
-	if r.Class() != PEXTERN || r.Sym.Pkg != localpkg {
-		return false
-	}
-	if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
-		return false
-	}
-	if r.Name.Defn.Op != OAS {
-		return false
-	}
-	if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675)
-		return false
-	}
-	orig := r
-	r = r.Name.Defn.Right
-
-	for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) {
-		r = r.Left
-	}
-
-	switch r.Op {
-	case ONAME:
-		if s.staticcopy(l, r) {
-			return true
-		}
-		// We may have skipped past one or more OCONVNOPs, so
-		// use conv to ensure r is assignable to l (#13263).
-		s.append(nod(OAS, l, conv(r, l.Type)))
-		return true
-
-	case OLITERAL:
-		if isZero(r) {
-			return true
-		}
-		litsym(l, r, int(l.Type.Width))
-		return true
-
-	case OADDR:
-		if a := r.Left; a.Op == ONAME {
-			addrsym(l, a)
-			return true
-		}
-
-	case OPTRLIT:
-		switch r.Left.Op {
-		case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT:
-			// copy pointer
-			addrsym(l, s.inittemps[r])
-			return true
-		}
-
-	case OSLICELIT:
-		// copy slice
-		a := s.inittemps[r]
-		slicesym(l, a, r.Right.Int64Val())
-		return true
-
-	case OARRAYLIT, OSTRUCTLIT:
-		p := s.initplans[r]
-
-		n := l.copy()
-		for i := range p.E {
-			e := &p.E[i]
-			n.Xoffset = l.Xoffset + e.Xoffset
-			n.Type = e.Expr.Type
-			if e.Expr.Op == OLITERAL {
-				litsym(n, e.Expr, int(n.Type.Width))
-				continue
-			}
-			ll := n.sepcopy()
-			if s.staticcopy(ll, e.Expr) {
-				continue
-			}
-			// Requires computation, but we're
-			// copying someone else's computation.
-			rr := orig.sepcopy()
-			rr.Type = ll.Type
-			rr.Xoffset += e.Xoffset
-			setlineno(rr)
-			s.append(nod(OAS, ll, rr))
-		}
-
-		return true
-	}
-
-	return false
-}
-
-func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
-	for r.Op == OCONVNOP {
-		r = r.Left
-	}
-
-	switch r.Op {
-	case ONAME:
-		return s.staticcopy(l, r)
-
-	case OLITERAL:
-		if isZero(r) {
-			return true
-		}
-		litsym(l, r, int(l.Type.Width))
-		return true
-
-	case OADDR:
-		var nam Node
-		if stataddr(&nam, r.Left) {
-			addrsym(l, &nam)
-			return true
-		}
-		fallthrough
-
-	case OPTRLIT:
-		switch r.Left.Op {
-		case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT:
-			// Init pointer.
-			a := staticname(r.Left.Type)
-
-			s.inittemps[r] = a
-			addrsym(l, a)
-
-			// Init underlying literal.
-			if !s.staticassign(a, r.Left) {
-				s.append(nod(OAS, a, r.Left))
-			}
-			return true
-		}
-		// dump("not static ptrlit", r)
-
-	case OSTR2BYTES:
-		if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
-			sval := r.Left.StringVal()
-			slicebytes(l, sval)
-			return true
-		}
-
-	case OSLICELIT:
-		s.initplan(r)
-		// Init slice.
-		bound := r.Right.Int64Val()
-		ta := types.NewArray(r.Type.Elem(), bound)
-		ta.SetNoalg(true)
-		a := staticname(ta)
-		s.inittemps[r] = a
-		slicesym(l, a, bound)
-		// Fall through to init underlying array.
-		l = a
-		fallthrough
-
-	case OARRAYLIT, OSTRUCTLIT:
-		s.initplan(r)
-
-		p := s.initplans[r]
-		n := l.copy()
-		for i := range p.E {
-			e := &p.E[i]
-			n.Xoffset = l.Xoffset + e.Xoffset
-			n.Type = e.Expr.Type
-			if e.Expr.Op == OLITERAL {
-				litsym(n, e.Expr, int(n.Type.Width))
-				continue
-			}
-			setlineno(e.Expr)
-			a := n.sepcopy()
-			if !s.staticassign(a, e.Expr) {
-				s.append(nod(OAS, a, e.Expr))
-			}
-		}
-
-		return true
-
-	case OMAPLIT:
-		break
-
-	case OCLOSURE:
-		if hasemptycvars(r) {
-			if Debug_closure > 0 {
-				Warnl(r.Pos, "closure converted to global")
-			}
-			// Closures with no captured variables are globals,
-			// so the assignment can be done at link time.
-			pfuncsym(l, r.Func.Closure.Func.Nname)
-			return true
-		}
-		closuredebugruntimecheck(r)
-
-	case OCONVIFACE:
-		// This logic is mirrored in isStaticCompositeLiteral.
-		// If you change something here, change it there, and vice versa.
-
-		// Determine the underlying concrete type and value we are converting from.
-		val := r
-		for val.Op == OCONVIFACE {
-			val = val.Left
-		}
-		if val.Type.IsInterface() {
-			// val is an interface type.
-			// If val is nil, we can statically initialize l;
-		// both words are zero and so there is no work to do, so report success.
-			// If val is non-nil, we have no concrete type to record,
-			// and we won't be able to statically initialize its value, so report failure.
-			return Isconst(val, CTNIL)
-		}
-
-		markTypeUsedInInterface(val.Type, l.Sym.Linksym())
-
-		var itab *Node
-		if l.Type.IsEmptyInterface() {
-			itab = typename(val.Type)
-		} else {
-			itab = itabname(val.Type, l.Type)
-		}
-
-		// Create a copy of l to modify while we emit data.
-		n := l.copy()
-
-		// Emit itab, advance offset.
-		addrsym(n, itab.Left) // itab is an OADDR node
-		n.Xoffset += int64(Widthptr)
-
-		// Emit data.
-		if isdirectiface(val.Type) {
-			if Isconst(val, CTNIL) {
-				// Nil is zero, nothing to do.
-				return true
-			}
-			// Copy val directly into n.
-			n.Type = val.Type
-			setlineno(val)
-			a := n.sepcopy()
-			if !s.staticassign(a, val) {
-				s.append(nod(OAS, a, val))
-			}
-		} else {
-			// Construct temp to hold val, write pointer to temp into n.
-			a := staticname(val.Type)
-			s.inittemps[val] = a
-			if !s.staticassign(a, val) {
-				s.append(nod(OAS, a, val))
-			}
-			addrsym(n, a)
-		}
-
-		return true
-	}
-
-	// dump("not static", r)
-	return false
-}
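-
-// As a concrete OCONVIFACE example,
-//
-//	var e interface{} = 7
-//
-// is initialized entirely at link time: e's first word is pointed at
-// the type descriptor for int (typename, since the destination is an
-// empty interface), and because int is not pointer-shaped, a static
-// temp holding 7 is emitted and e's data word is pointed at it.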
-
-// initContext is the context in which static data is populated.
-// It is either in an init function or in any other function.
-// Static data populated in an init function will be written either
-// zero times (as a readonly, static data symbol) or
-// one time (during init function execution).
-// Either way, there is no opportunity for races or further modification,
-// so the data can be written to a (possibly readonly) data symbol.
-// Static data populated in any other function needs to be local to
-// that function to allow multiple instances of that function
-// to execute concurrently without clobbering each others' data.
-type initContext uint8
-
-const (
-	inInitFunction initContext = iota
-	inNonInitFunction
-)
-
-func (c initContext) String() string {
-	if c == inInitFunction {
-		return "inInitFunction"
-	}
-	return "inNonInitFunction"
-}
-
-// from here down is the walk analysis
-// of composite literals.
-// most of the work is to generate
-// data statements for the constant
-// part of the composite literal.
-
-var statuniqgen int // name generator for static temps
-
-// staticname returns a name backed by a (writable) static data symbol.
-// Use readonlystaticname for a read-only node.
-func staticname(t *types.Type) *Node {
-	// Don't use lookupN; it interns the resulting string, but these are all unique.
-	n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
-	statuniqgen++
-	addvar(n, t, PEXTERN)
-	n.Sym.Linksym().Set(obj.AttrLocal, true)
-	return n
-}
-
-// readonlystaticname returns a name backed by a read-only static data symbol.
-func readonlystaticname(t *types.Type) *Node {
-	n := staticname(t)
-	n.MarkReadonly()
-	n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
-	return n
-}
-
-func (n *Node) isSimpleName() bool {
-	return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
-}
-
-func litas(l *Node, r *Node, init *Nodes) {
-	a := nod(OAS, l, r)
-	a = typecheck(a, ctxStmt)
-	a = walkexpr(a, init)
-	init.Append(a)
-}
-
-// initGenType is a bitmap indicating the types of generation that will occur for a static value.
-type initGenType uint8
-
-const (
-	initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
-	initConst                           // contains some constant values, which may be written into data symbols
-)
-
-// getdyn calculates the initGenType for n.
-// If top is false, getdyn is recursing.
-func getdyn(n *Node, top bool) initGenType {
-	switch n.Op {
-	default:
-		if n.isGoConst() {
-			return initConst
-		}
-		return initDynamic
-
-	case OSLICELIT:
-		if !top {
-			return initDynamic
-		}
-		if n.Right.Int64Val()/4 > int64(n.List.Len()) {
-			// <25% of entries have explicit values.
-			// As a very rough estimate, it takes 4 bytes of instructions
-			// to initialize 1 byte of result, so don't use a static
-			// initializer when the dynamic initialization code would be
-			// smaller than the static value.
-			// See issue 23780.
-			return initDynamic
-		}
-
-	case OARRAYLIT, OSTRUCTLIT:
-	}
-
-	var mode initGenType
-	for _, n1 := range n.List.Slice() {
-		switch n1.Op {
-		case OKEY:
-			n1 = n1.Right
-		case OSTRUCTKEY:
-			n1 = n1.Left
-		}
-		mode |= getdyn(n1, false)
-		if mode == initDynamic|initConst {
-			break
-		}
-	}
-	return mode
-}
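-
-// Examples of the classification getdyn computes:
-//
-//	[3]int{1, 2, 3} // initConst: emitted entirely as data statements
-//	[3]int{x, y, z} // initDynamic: emitted entirely as init code
-//	[3]int{1, 2, x} // initConst|initDynamic: both passes run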
-
-// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n *Node) bool {
-	switch n.Op {
-	case OSLICELIT:
-		return false
-	case OARRAYLIT:
-		for _, r := range n.List.Slice() {
-			if r.Op == OKEY {
-				r = r.Right
-			}
-			if !isStaticCompositeLiteral(r) {
-				return false
-			}
-		}
-		return true
-	case OSTRUCTLIT:
-		for _, r := range n.List.Slice() {
-			if r.Op != OSTRUCTKEY {
-				Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
-			}
-			if !isStaticCompositeLiteral(r.Left) {
-				return false
-			}
-		}
-		return true
-	case OLITERAL:
-		return true
-	case OCONVIFACE:
-		// See staticassign's OCONVIFACE case for comments.
-		val := n
-		for val.Op == OCONVIFACE {
-			val = val.Left
-		}
-		if val.Type.IsInterface() {
-			return Isconst(val, CTNIL)
-		}
-		if isdirectiface(val.Type) && Isconst(val, CTNIL) {
-			return true
-		}
-		return isStaticCompositeLiteral(val)
-	}
-	return false
-}
-
-// initKind is a kind of static initialization: static, dynamic, or local.
-// Static initialization represents literals and
-// literal components of composite literals.
-// Dynamic initialization represents non-literals and
-// non-literal components of composite literals.
-// LocalCode initialization represents initialization
-// that occurs purely in generated code local to the function of use.
-// Initialization code is sometimes generated in passes,
-// first static then dynamic.
-type initKind uint8
-
-const (
-	initKindStatic initKind = iota + 1
-	initKindDynamic
-	initKindLocalCode
-)
-
-// fixedlit handles struct, array, and slice literals.
-// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
-	isBlank := var_ == nblank
-	var splitnode func(*Node) (a *Node, value *Node)
-	switch n.Op {
-	case OARRAYLIT, OSLICELIT:
-		var k int64
-		splitnode = func(r *Node) (*Node, *Node) {
-			if r.Op == OKEY {
-				k = indexconst(r.Left)
-				if k < 0 {
-					Fatalf("fixedlit: invalid index %v", r.Left)
-				}
-				r = r.Right
-			}
-			a := nod(OINDEX, var_, nodintconst(k))
-			k++
-			if isBlank {
-				a = nblank
-			}
-			return a, r
-		}
-	case OSTRUCTLIT:
-		splitnode = func(r *Node) (*Node, *Node) {
-			if r.Op != OSTRUCTKEY {
-				Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
-			}
-			if r.Sym.IsBlank() || isBlank {
-				return nblank, r.Left
-			}
-			setlineno(r)
-			return nodSym(ODOT, var_, r.Sym), r.Left
-		}
-	default:
-		Fatalf("fixedlit bad op: %v", n.Op)
-	}
-
-	for _, r := range n.List.Slice() {
-		a, value := splitnode(r)
-		if a == nblank && candiscard(value) {
-			continue
-		}
-
-		switch value.Op {
-		case OSLICELIT:
-			if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
-				slicelit(ctxt, value, a, init)
-				continue
-			}
-
-		case OARRAYLIT, OSTRUCTLIT:
-			fixedlit(ctxt, kind, value, a, init)
-			continue
-		}
-
-		islit := value.isGoConst()
-		if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
-			continue
-		}
-
-		// build list of assignments: var[index] = expr
-		setlineno(a)
-		a = nod(OAS, a, value)
-		a = typecheck(a, ctxStmt)
-		switch kind {
-		case initKindStatic:
-			genAsStatic(a)
-		case initKindDynamic, initKindLocalCode:
-			a = orderStmtInPlace(a, map[string][]*Node{})
-			a = walkstmt(a)
-			init.Append(a)
-		default:
-			Fatalf("fixedlit: bad kind %d", kind)
-		}
-
-	}
-}
-
-func isSmallSliceLit(n *Node) bool {
-	if n.Op != OSLICELIT {
-		return false
-	}
-
-	r := n.Right
-
-	return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
-}
-
-func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
-	// make an array type corresponding to the number of elements we have
-	t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
-	dowidth(t)
-
-	if ctxt == inNonInitFunction {
-		// put everything into static array
-		vstat := staticname(t)
-
-		fixedlit(ctxt, initKindStatic, n, vstat, init)
-		fixedlit(ctxt, initKindDynamic, n, vstat, init)
-
-		// copy static to slice
-		var_ = typecheck(var_, ctxExpr|ctxAssign)
-		var nam Node
-		if !stataddr(&nam, var_) || nam.Class() != PEXTERN {
-			Fatalf("slicelit: %v", var_)
-		}
-		slicesym(&nam, vstat, t.NumElem())
-		return
-	}
-
-	// recipe for var = []t{...}
-	// 1. make a static array
-	//	var vstat [...]t
-	// 2. assign (data statements) the constant part
-	//	vstat = constpart{}
-	// 3. make an auto pointer to array and allocate heap to it
-	//	var vauto *[...]t = new([...]t)
-	// 4. copy the static array to the auto array
-	//	*vauto = vstat
-	// 5. for each dynamic part assign to the array
-	//	vauto[i] = dynamic part
-	// 6. assign slice of allocated heap to var
-	//	var = vauto[:]
-	//
-	// an optimization is done if there is no constant part
-	//	3. var vauto *[...]t = new([...]t)
-	//	5. vauto[i] = dynamic part
-	//	6. var = vauto[:]
-
-	// if the literal contains constants,
-	// make static initialized array (1),(2)
-	var vstat *Node
-
-	mode := getdyn(n, true)
-	if mode&initConst != 0 && !isSmallSliceLit(n) {
-		if ctxt == inInitFunction {
-			vstat = readonlystaticname(t)
-		} else {
-			vstat = staticname(t)
-		}
-		fixedlit(ctxt, initKindStatic, n, vstat, init)
-	}
-
-	// make new auto *array (3 declare)
-	vauto := temp(types.NewPtr(t))
-
-	// set auto to point at new temp or heap (3 assign)
-	var a *Node
-	if x := prealloc[n]; x != nil {
-		// temp allocated during order.go for dddarg
-		if !types.Identical(t, x.Type) {
-			panic("dotdotdot base type does not match order's assigned type")
-		}
-
-		if vstat == nil {
-			a = nod(OAS, x, nil)
-			a = typecheck(a, ctxStmt)
-			init.Append(a) // zero new temp
-		} else {
-			// Declare that we're about to initialize all of x.
-			// (Which happens at the *vauto = vstat below.)
-			init.Append(nod(OVARDEF, x, nil))
-		}
-
-		a = nod(OADDR, x, nil)
-	} else if n.Esc == EscNone {
-		a = temp(t)
-		if vstat == nil {
-			a = nod(OAS, temp(t), nil)
-			a = typecheck(a, ctxStmt)
-			init.Append(a) // zero new temp
-			a = a.Left
-		} else {
-			init.Append(nod(OVARDEF, a, nil))
-		}
-
-		a = nod(OADDR, a, nil)
-	} else {
-		a = nod(ONEW, nil, nil)
-		a.List.Set1(typenod(t))
-	}
-
-	a = nod(OAS, vauto, a)
-	a = typecheck(a, ctxStmt)
-	a = walkexpr(a, init)
-	init.Append(a)
-
-	if vstat != nil {
-		// copy static to heap (4)
-		a = nod(ODEREF, vauto, nil)
-
-		a = nod(OAS, a, vstat)
-		a = typecheck(a, ctxStmt)
-		a = walkexpr(a, init)
-		init.Append(a)
-	}
-
-	// put dynamics into array (5)
-	var index int64
-	for _, value := range n.List.Slice() {
-		if value.Op == OKEY {
-			index = indexconst(value.Left)
-			if index < 0 {
-				Fatalf("slicelit: invalid index %v", value.Left)
-			}
-			value = value.Right
-		}
-		a := nod(OINDEX, vauto, nodintconst(index))
-		a.SetBounded(true)
-		index++
-
-		// TODO need to check bounds?
-
-		switch value.Op {
-		case OSLICELIT:
-			break
-
-		case OARRAYLIT, OSTRUCTLIT:
-			k := initKindDynamic
-			if vstat == nil {
-				// Generate both static and dynamic initializations.
-				// See issue #31987.
-				k = initKindLocalCode
-			}
-			fixedlit(ctxt, k, value, a, init)
-			continue
-		}
-
-		if vstat != nil && value.isGoConst() { // already set by copy from static value
-			continue
-		}
-
-		// build list of vauto[c] = expr
-		setlineno(value)
-		a = nod(OAS, a, value)
-
-		a = typecheck(a, ctxStmt)
-		a = orderStmtInPlace(a, map[string][]*Node{})
-		a = walkstmt(a)
-		init.Append(a)
-	}
-
-	// make slice out of heap (6)
-	a = nod(OAS, var_, nod(OSLICE, vauto, nil))
-
-	a = typecheck(a, ctxStmt)
-	a = orderStmtInPlace(a, map[string][]*Node{})
-	a = walkstmt(a)
-	init.Append(a)
-}
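-
-// As a worked example of the recipe above, the general path turns
-//
-//	s := []int{1, 2, x}
-//
-// into roughly:
-//
-//	vstat := [3]int{1, 2, 0} // static (possibly read-only) data symbol, steps 1-2
-//	vauto := new([3]int)     // step 3 (a stack temp if the slice doesn't escape)
-//	*vauto = vstat           // step 4
-//	vauto[2] = x             // step 5
-//	s = vauto[:]             // step 6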
-
-func maplit(n *Node, m *Node, init *Nodes) {
-	// make the map var
-	a := nod(OMAKE, nil, nil)
-	a.Esc = n.Esc
-	a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
-	litas(m, a, init)
-
-	entries := n.List.Slice()
-
-	// The order pass already removed any dynamic (runtime-computed) entries.
-	// All remaining entries are static. Double-check that.
-	for _, r := range entries {
-		if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
-			Fatalf("maplit: entry is not a literal: %v", r)
-		}
-	}
-
-	if len(entries) > 25 {
-		// For a large number of entries, put them in an array and loop.
-
-		// build types [count]Tindex and [count]Tvalue
-		tk := types.NewArray(n.Type.Key(), int64(len(entries)))
-		te := types.NewArray(n.Type.Elem(), int64(len(entries)))
-
-		tk.SetNoalg(true)
-		te.SetNoalg(true)
-
-		dowidth(tk)
-		dowidth(te)
-
-		// make and initialize static arrays
-		vstatk := readonlystaticname(tk)
-		vstate := readonlystaticname(te)
-
-		datak := nod(OARRAYLIT, nil, nil)
-		datae := nod(OARRAYLIT, nil, nil)
-		for _, r := range entries {
-			datak.List.Append(r.Left)
-			datae.List.Append(r.Right)
-		}
-		fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
-		fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
-
-		// loop adding structure elements to map
-		// for i = 0; i < len(vstatk); i++ {
-		//	map[vstatk[i]] = vstate[i]
-		// }
-		i := temp(types.Types[TINT])
-		rhs := nod(OINDEX, vstate, i)
-		rhs.SetBounded(true)
-
-		kidx := nod(OINDEX, vstatk, i)
-		kidx.SetBounded(true)
-		lhs := nod(OINDEX, m, kidx)
-
-		zero := nod(OAS, i, nodintconst(0))
-		cond := nod(OLT, i, nodintconst(tk.NumElem()))
-		incr := nod(OAS, i, nod(OADD, i, nodintconst(1)))
-		body := nod(OAS, lhs, rhs)
-
-		loop := nod(OFOR, cond, incr)
-		loop.Nbody.Set1(body)
-		loop.Ninit.Set1(zero)
-
-		loop = typecheck(loop, ctxStmt)
-		loop = walkstmt(loop)
-		init.Append(loop)
-		return
-	}
-	// For a small number of entries, just add them directly.
-
-	// Build list of var[c] = expr.
-	// Use temporaries so that mapassign1 can have addressable key, elem.
-	// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
-	tmpkey := temp(m.Type.Key())
-	tmpelem := temp(m.Type.Elem())
-
-	for _, r := range entries {
-		index, elem := r.Left, r.Right
-
-		setlineno(index)
-		a := nod(OAS, tmpkey, index)
-		a = typecheck(a, ctxStmt)
-		a = walkstmt(a)
-		init.Append(a)
-
-		setlineno(elem)
-		a = nod(OAS, tmpelem, elem)
-		a = typecheck(a, ctxStmt)
-		a = walkstmt(a)
-		init.Append(a)
-
-		setlineno(tmpelem)
-		a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem)
-		a = typecheck(a, ctxStmt)
-		a = walkstmt(a)
-		init.Append(a)
-	}
-
-	a = nod(OVARKILL, tmpkey, nil)
-	a = typecheck(a, ctxStmt)
-	init.Append(a)
-	a = nod(OVARKILL, tmpelem, nil)
-	a = typecheck(a, ctxStmt)
-	init.Append(a)
-}
-
-func anylit(n *Node, var_ *Node, init *Nodes) {
-	t := n.Type
-	switch n.Op {
-	default:
-		Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)
-
-	case ONAME:
-		a := nod(OAS, var_, n)
-		a = typecheck(a, ctxStmt)
-		init.Append(a)
-
-	case OPTRLIT:
-		if !t.IsPtr() {
-			Fatalf("anylit: not ptr")
-		}
-
-		var r *Node
-		if n.Right != nil {
-			// n.Right is a stack temporary used as backing store.
-			init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410)
-			r = nod(OADDR, n.Right, nil)
-			r = typecheck(r, ctxExpr)
-		} else {
-			r = nod(ONEW, nil, nil)
-			r.SetTypecheck(1)
-			r.Type = t
-			r.Esc = n.Esc
-		}
-
-		r = walkexpr(r, init)
-		a := nod(OAS, var_, r)
-
-		a = typecheck(a, ctxStmt)
-		init.Append(a)
-
-		var_ = nod(ODEREF, var_, nil)
-		var_ = typecheck(var_, ctxExpr|ctxAssign)
-		anylit(n.Left, var_, init)
-
-	case OSTRUCTLIT, OARRAYLIT:
-		if !t.IsStruct() && !t.IsArray() {
-			Fatalf("anylit: not struct/array")
-		}
-
-		if var_.isSimpleName() && n.List.Len() > 4 {
-			// lay out static data
-			vstat := readonlystaticname(t)
-
-			ctxt := inInitFunction
-			if n.Op == OARRAYLIT {
-				ctxt = inNonInitFunction
-			}
-			fixedlit(ctxt, initKindStatic, n, vstat, init)
-
-			// copy static to var
-			a := nod(OAS, var_, vstat)
-
-			a = typecheck(a, ctxStmt)
-			a = walkexpr(a, init)
-			init.Append(a)
-
-			// add expressions to automatic
-			fixedlit(inInitFunction, initKindDynamic, n, var_, init)
-			break
-		}
-
-		var components int64
-		if n.Op == OARRAYLIT {
-			components = t.NumElem()
-		} else {
-			components = int64(t.NumFields())
-		}
-		// initialization of an array or struct with unspecified components (missing fields or arrays)
-		if var_.isSimpleName() || int64(n.List.Len()) < components {
-			a := nod(OAS, var_, nil)
-			a = typecheck(a, ctxStmt)
-			a = walkexpr(a, init)
-			init.Append(a)
-		}
-
-		fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
-
-	case OSLICELIT:
-		slicelit(inInitFunction, n, var_, init)
-
-	case OMAPLIT:
-		if !t.IsMap() {
-			Fatalf("anylit: not map")
-		}
-		maplit(n, var_, init)
-	}
-}
-
-func oaslit(n *Node, init *Nodes) bool {
-	if n.Left == nil || n.Right == nil {
-		// not a special composite literal assignment
-		return false
-	}
-	if n.Left.Type == nil || n.Right.Type == nil {
-		// not a special composite literal assignment
-		return false
-	}
-	if !n.Left.isSimpleName() {
-		// not a special composite literal assignment
-		return false
-	}
-	if !types.Identical(n.Left.Type, n.Right.Type) {
-		// not a special composite literal assignment
-		return false
-	}
-
-	switch n.Right.Op {
-	default:
-		// not a special composite literal assignment
-		return false
-
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
-		if vmatch1(n.Left, n.Right) {
-			// not a special composite literal assignment
-			return false
-		}
-		anylit(n.Right, n.Left, init)
-	}
-
-	n.Op = OEMPTY
-	n.Right = nil
-	return true
-}
-
-func getlit(lit *Node) int {
-	if smallintconst(lit) {
-		return int(lit.Int64Val())
-	}
-	return -1
-}
-
-// stataddr sets nam to the static address of n and reports whether it succeeded.
-func stataddr(nam *Node, n *Node) bool {
-	if n == nil {
-		return false
-	}
-
-	switch n.Op {
-	case ONAME:
-		*nam = *n
-		return true
-
-	case ODOT:
-		if !stataddr(nam, n.Left) {
-			break
-		}
-		nam.Xoffset += n.Xoffset
-		nam.Type = n.Type
-		return true
-
-	case OINDEX:
-		if n.Left.Type.IsSlice() {
-			break
-		}
-		if !stataddr(nam, n.Left) {
-			break
-		}
-		l := getlit(n.Right)
-		if l < 0 {
-			break
-		}
-
-		// Check for overflow.
-		if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
-			break
-		}
-		nam.Xoffset += int64(l) * n.Type.Width
-		nam.Type = n.Type
-		return true
-	}
-
-	return false
-}
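-
-// For example, with var g struct{ a [4]int64; b int64 }, stataddr
-// resolves g.b through the ODOT case to g plus Xoffset 32, and g.a[3]
-// through the OINDEX case to g plus Xoffset 3*8 = 24 (on any target,
-// since int64 has a fixed width).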
-
-func (s *InitSchedule) initplan(n *Node) {
-	if s.initplans[n] != nil {
-		return
-	}
-	p := new(InitPlan)
-	s.initplans[n] = p
-	switch n.Op {
-	default:
-		Fatalf("initplan")
-
-	case OARRAYLIT, OSLICELIT:
-		var k int64
-		for _, a := range n.List.Slice() {
-			if a.Op == OKEY {
-				k = indexconst(a.Left)
-				if k < 0 {
-					Fatalf("initplan arraylit: invalid index %v", a.Left)
-				}
-				a = a.Right
-			}
-			s.addvalue(p, k*n.Type.Elem().Width, a)
-			k++
-		}
-
-	case OSTRUCTLIT:
-		for _, a := range n.List.Slice() {
-			if a.Op != OSTRUCTKEY {
-				Fatalf("initplan structlit")
-			}
-			if a.Sym.IsBlank() {
-				continue
-			}
-			s.addvalue(p, a.Xoffset, a.Left)
-		}
-
-	case OMAPLIT:
-		for _, a := range n.List.Slice() {
-			if a.Op != OKEY {
-				Fatalf("initplan maplit")
-			}
-			s.addvalue(p, -1, a.Right)
-		}
-	}
-}
-
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) {
-	// special case: zero can be dropped entirely
-	if isZero(n) {
-		return
-	}
-
-	// special case: inline struct and array (not slice) literals
-	if isvaluelit(n) {
-		s.initplan(n)
-		q := s.initplans[n]
-		for _, qe := range q.E {
-			// qe is a copy; we are not modifying entries in q.E
-			qe.Xoffset += xoffset
-			p.E = append(p.E, qe)
-		}
-		return
-	}
-
-	// add to plan
-	p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
-}
-
-func isZero(n *Node) bool {
-	switch n.Op {
-	case OLITERAL:
-		switch u := n.Val().U.(type) {
-		default:
-			Dump("unexpected literal", n)
-			Fatalf("isZero")
-		case *NilVal:
-			return true
-		case string:
-			return u == ""
-		case bool:
-			return !u
-		case *Mpint:
-			return u.CmpInt64(0) == 0
-		case *Mpflt:
-			return u.CmpFloat64(0) == 0
-		case *Mpcplx:
-			return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0
-		}
-
-	case OARRAYLIT:
-		for _, n1 := range n.List.Slice() {
-			if n1.Op == OKEY {
-				n1 = n1.Right
-			}
-			if !isZero(n1) {
-				return false
-			}
-		}
-		return true
-
-	case OSTRUCTLIT:
-		for _, n1 := range n.List.Slice() {
-			if !isZero(n1.Left) {
-				return false
-			}
-		}
-		return true
-	}
-
-	return false
-}
-
-func isvaluelit(n *Node) bool {
-	return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT
-}
-
-func genAsStatic(as *Node) {
-	if as.Left.Type == nil {
-		Fatalf("genAsStatic as.Left not typechecked")
-	}
-
-	var nam Node
-	if !stataddr(&nam, as.Left) || (nam.Class() != PEXTERN && as.Left != nblank) {
-		Fatalf("genAsStatic: lhs %v", as.Left)
-	}
-
-	switch {
-	case as.Right.Op == OLITERAL:
-		litsym(&nam, as.Right, int(as.Right.Type.Width))
-	case as.Right.Op == ONAME && as.Right.Class() == PFUNC:
-		pfuncsym(&nam, as.Right)
-	default:
-		Fatalf("genAsStatic: rhs %v", as.Right)
-	}
-}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
deleted file mode 100644
index 5b74754..0000000
--- a/src/cmd/compile/internal/gc/ssa.go
+++ /dev/null
@@ -1,7231 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"encoding/binary"
-	"fmt"
-	"html"
-	"os"
-	"path/filepath"
-	"sort"
-
-	"bufio"
-	"bytes"
-	"cmd/compile/internal/ssa"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/obj/x86"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-	"cmd/internal/sys"
-)
-
-var ssaConfig *ssa.Config
-var ssaCaches []ssa.Cache
-
-var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
-var ssaDir string      // optional destination for ssa dump file
-var ssaDumpStdout bool // whether to dump to stdout
-var ssaDumpCFG string  // generate CFGs for these phases
-const ssaDumpFile = "ssa.html"
-
-// The max number of defers in a function using open-coded defers. We enforce this
-// limit because the deferBits bitmask is currently a single byte (to minimize code size).
-const maxOpenDefers = 8
-
-// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []*Node
-
-func initssaconfig() {
-	types_ := ssa.NewTypes()
-
-	if thearch.SoftFloat {
-		softfloatInit()
-	}
-
-	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
-	// Caching is disabled in the backend, so generating these here avoids allocations.
-	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
-	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
-	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
-	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
-	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
-	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
-	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
-	_ = types.NewPtr(types.Types[TINT16])                             // *int16
-	_ = types.NewPtr(types.Types[TINT64])                             // *int64
-	_ = types.NewPtr(types.Errortype)                                 // *error
-	types.NewPtrCacheEnabled = false
-	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
-	ssaConfig.SoftFloat = thearch.SoftFloat
-	ssaConfig.Race = flag_race
-	ssaCaches = make([]ssa.Cache, nBackendWorkers)
-
-	// Set up some runtime functions we'll need to call.
-	assertE2I = sysfunc("assertE2I")
-	assertE2I2 = sysfunc("assertE2I2")
-	assertI2I = sysfunc("assertI2I")
-	assertI2I2 = sysfunc("assertI2I2")
-	deferproc = sysfunc("deferproc")
-	deferprocStack = sysfunc("deferprocStack")
-	Deferreturn = sysfunc("deferreturn")
-	Duffcopy = sysfunc("duffcopy")
-	Duffzero = sysfunc("duffzero")
-	gcWriteBarrier = sysfunc("gcWriteBarrier")
-	goschedguarded = sysfunc("goschedguarded")
-	growslice = sysfunc("growslice")
-	msanread = sysfunc("msanread")
-	msanwrite = sysfunc("msanwrite")
-	msanmove = sysfunc("msanmove")
-	newobject = sysfunc("newobject")
-	newproc = sysfunc("newproc")
-	panicdivide = sysfunc("panicdivide")
-	panicdottypeE = sysfunc("panicdottypeE")
-	panicdottypeI = sysfunc("panicdottypeI")
-	panicnildottype = sysfunc("panicnildottype")
-	panicoverflow = sysfunc("panicoverflow")
-	panicshift = sysfunc("panicshift")
-	raceread = sysfunc("raceread")
-	racereadrange = sysfunc("racereadrange")
-	racewrite = sysfunc("racewrite")
-	racewriterange = sysfunc("racewriterange")
-	x86HasPOPCNT = sysvar("x86HasPOPCNT")       // bool
-	x86HasSSE41 = sysvar("x86HasSSE41")         // bool
-	x86HasFMA = sysvar("x86HasFMA")             // bool
-	armHasVFPv4 = sysvar("armHasVFPv4")         // bool
-	arm64HasATOMICS = sysvar("arm64HasATOMICS") // bool
-	typedmemclr = sysfunc("typedmemclr")
-	typedmemmove = sysfunc("typedmemmove")
-	Udiv = sysvar("udiv")                 // asm func with special ABI
-	writeBarrier = sysvar("writeBarrier") // struct { bool; ... }
-	zerobaseSym = sysvar("zerobase")
-
-	// asm funcs with special ABI
-	if thearch.LinkArch.Name == "amd64" {
-		GCWriteBarrierReg = map[int16]*obj.LSym{
-			x86.REG_AX: sysfunc("gcWriteBarrier"),
-			x86.REG_CX: sysfunc("gcWriteBarrierCX"),
-			x86.REG_DX: sysfunc("gcWriteBarrierDX"),
-			x86.REG_BX: sysfunc("gcWriteBarrierBX"),
-			x86.REG_BP: sysfunc("gcWriteBarrierBP"),
-			x86.REG_SI: sysfunc("gcWriteBarrierSI"),
-			x86.REG_R8: sysfunc("gcWriteBarrierR8"),
-			x86.REG_R9: sysfunc("gcWriteBarrierR9"),
-		}
-	}
-
-	if thearch.LinkArch.Family == sys.Wasm {
-		BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex")
-		BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU")
-		BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen")
-		BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU")
-		BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap")
-		BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU")
-		BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB")
-		BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU")
-		BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen")
-		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU")
-		BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap")
-		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU")
-		BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B")
-		BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU")
-		BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C")
-		BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU")
-	} else {
-		BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex")
-		BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU")
-		BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen")
-		BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU")
-		BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap")
-		BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU")
-		BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB")
-		BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU")
-		BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen")
-		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU")
-		BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap")
-		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU")
-		BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B")
-		BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU")
-		BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C")
-		BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU")
-	}
-	if thearch.LinkArch.PtrSize == 4 {
-		ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
-		ExtendCheckFunc[ssa.BoundsIndexU] = sysvar("panicExtendIndexU")
-		ExtendCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicExtendSliceAlen")
-		ExtendCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicExtendSliceAlenU")
-		ExtendCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicExtendSliceAcap")
-		ExtendCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicExtendSliceAcapU")
-		ExtendCheckFunc[ssa.BoundsSliceB] = sysvar("panicExtendSliceB")
-		ExtendCheckFunc[ssa.BoundsSliceBU] = sysvar("panicExtendSliceBU")
-		ExtendCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicExtendSlice3Alen")
-		ExtendCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicExtendSlice3AlenU")
-		ExtendCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicExtendSlice3Acap")
-		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicExtendSlice3AcapU")
-		ExtendCheckFunc[ssa.BoundsSlice3B] = sysvar("panicExtendSlice3B")
-		ExtendCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicExtendSlice3BU")
-		ExtendCheckFunc[ssa.BoundsSlice3C] = sysvar("panicExtendSlice3C")
-		ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
-	}
-
-	// Wasm (all asm funcs with special ABIs)
-	WasmMove = sysvar("wasmMove")
-	WasmZero = sysvar("wasmZero")
-	WasmDiv = sysvar("wasmDiv")
-	WasmTruncS = sysvar("wasmTruncS")
-	WasmTruncU = sysvar("wasmTruncU")
-	SigPanic = sysfunc("sigpanic")
-}
-
-// getParam returns the Field of the i'th parameter of node n (which is a
-// function/method/interface call), where the receiver of a method call is
-// considered as the 0th parameter. This does not include the receiver of an
-// interface call.
-func getParam(n *Node, i int) *types.Field {
-	t := n.Left.Type
-	if n.Op == OCALLMETH {
-		if i == 0 {
-			return t.Recv()
-		}
-		return t.Params().Field(i - 1)
-	}
-	return t.Params().Field(i)
-}
-
-// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
-func dvarint(x *obj.LSym, off int, v int64) int {
-	if v < 0 || v > 1e9 {
-		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
-	}
-	if v < 1<<7 {
-		return duint8(x, off, uint8(v))
-	}
-	off = duint8(x, off, uint8((v&127)|128))
-	if v < 1<<14 {
-		return duint8(x, off, uint8(v>>7))
-	}
-	off = duint8(x, off, uint8(((v>>7)&127)|128))
-	if v < 1<<21 {
-		return duint8(x, off, uint8(v>>14))
-	}
-	off = duint8(x, off, uint8(((v>>14)&127)|128))
-	if v < 1<<28 {
-		return duint8(x, off, uint8(v>>21))
-	}
-	off = duint8(x, off, uint8(((v>>21)&127)|128))
-	return duint8(x, off, uint8(v>>28))
-}
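
For intuition, dvarint's byte-by-byte output above is the standard
little-endian base-128 unsigned varint, the same format encoding/binary
produces. A minimal standalone sketch (not compiler code) encoding 300:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 = 0b1_0010_1100. The low 7 bits go out first with the
	// continuation bit set (0xAC), then the remaining bits (0x02),
	// the same bytes dvarint would emit via duint8.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
}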
-
-// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
-// that is using open-coded defers.  This funcdata is used to determine the active
-// defers in a function and execute those defers during panic processing.
-//
-// The funcdata is all encoded in varints (since values will almost always be less than
-// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
-// for stack variables are specified as the number of bytes below varp (pointer to the
-// top of the local variables) for their starting address. The format is:
-//
-//  - Max total argument size among all the defers
-//  - Offset of the deferBits variable
-//  - Number of defers in the function
-//  - Information about each defer call, in reverse order of appearance in the function:
-//    - Total argument size of the call
-//    - Offset of the closure value to call
-//    - Number of arguments (including interface receiver or method receiver as first arg)
-//    - Information about each argument
-//      - Offset of the stored defer argument in this function's frame
-//      - Size of the argument
-//      - Offset of where argument should be placed in the args frame when making call
-func (s *state) emitOpenDeferInfo() {
-	x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
-	s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
-	off := 0
-
-	// Compute maxargsize (max size of arguments for all defers)
-	// first, so we can output it first to the funcdata
-	var maxargsize int64
-	for i := len(s.openDefers) - 1; i >= 0; i-- {
-		r := s.openDefers[i]
-		argsize := r.n.Left.Type.ArgWidth()
-		if argsize > maxargsize {
-			maxargsize = argsize
-		}
-	}
-	off = dvarint(x, off, maxargsize)
-	off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
-	off = dvarint(x, off, int64(len(s.openDefers)))
-
-	// Write in reverse-order, for ease of running in that order at runtime
-	for i := len(s.openDefers) - 1; i >= 0; i-- {
-		r := s.openDefers[i]
-		off = dvarint(x, off, r.n.Left.Type.ArgWidth())
-		off = dvarint(x, off, -r.closureNode.Xoffset)
-		numArgs := len(r.argNodes)
-		if r.rcvrNode != nil {
-			// If there's an interface receiver, treat/place it as the first
-			// arg. (If there is a method receiver, it's already included as
-			// first arg in r.argNodes.)
-			numArgs++
-		}
-		off = dvarint(x, off, int64(numArgs))
-		if r.rcvrNode != nil {
-			off = dvarint(x, off, -r.rcvrNode.Xoffset)
-			off = dvarint(x, off, s.config.PtrSize)
-			off = dvarint(x, off, 0)
-		}
-		for j, arg := range r.argNodes {
-			f := getParam(r.n, j)
-			off = dvarint(x, off, -arg.Xoffset)
-			off = dvarint(x, off, f.Type.Size())
-			off = dvarint(x, off, f.Offset)
-		}
-	}
-}
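
Because every field above is written as a plain unsigned varint, the record
can be walked back with encoding/binary. A minimal reader sketch for just the
three header fields (hypothetical helper, assuming the raw funcdata bytes are
in hand; the per-defer records follow in the same format):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// readOpenDeferHeader decodes the header written by emitOpenDeferInfo:
// max argument size, deferBits stack offset, and the defer count.
func readOpenDeferHeader(funcdata []byte) (maxArgSize, deferBitsOff, nDefers uint64, err error) {
	r := bytes.NewReader(funcdata)
	if maxArgSize, err = binary.ReadUvarint(r); err != nil {
		return
	}
	if deferBitsOff, err = binary.ReadUvarint(r); err != nil {
		return
	}
	nDefers, err = binary.ReadUvarint(r)
	return
}

func main() {
	// Example bytes: maxargsize=16, deferBits offset=8, 2 defers.
	fmt.Println(readOpenDeferHeader([]byte{16, 8, 2}))
}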
-
-// buildssa builds an SSA function for fn.
-// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *Node, worker int) *ssa.Func {
-	name := fn.funcname()
-	printssa := false
-	if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
-		printssa = name == ssaDump || myimportpath+"."+name == ssaDump
-	}
-	var astBuf *bytes.Buffer
-	if printssa {
-		astBuf = &bytes.Buffer{}
-		fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
-		fdumplist(astBuf, "buildssa-body", fn.Nbody)
-		fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
-		if ssaDumpStdout {
-			fmt.Println("generating SSA for", name)
-			fmt.Print(astBuf.String())
-		}
-	}
-
-	var s state
-	s.pushLine(fn.Pos)
-	defer s.popLine()
-
-	s.hasdefer = fn.Func.HasDefer()
-	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
-		s.cgoUnsafeArgs = true
-	}
-
-	fe := ssafn{
-		curfn: fn,
-		log:   printssa && ssaDumpStdout,
-	}
-	s.curfn = fn
-
-	s.f = ssa.NewFunc(&fe)
-	s.config = ssaConfig
-	s.f.Type = fn.Type
-	s.f.Config = ssaConfig
-	s.f.Cache = &ssaCaches[worker]
-	s.f.Cache.Reset()
-	s.f.Name = name
-	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
-	s.f.PrintOrHtmlSSA = printssa
-	if fn.Func.Pragma&Nosplit != 0 {
-		s.f.NoSplit = true
-	}
-	s.panics = map[funcLine]*ssa.Block{}
-	s.softFloat = s.config.SoftFloat
-
-	// Allocate starting block
-	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
-	s.f.Entry.Pos = fn.Pos
-
-	if printssa {
-		ssaDF := ssaDumpFile
-		if ssaDir != "" {
-			ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
-			ssaD := filepath.Dir(ssaDF)
-			os.MkdirAll(ssaD, 0755)
-		}
-		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
-		// TODO: generate and print a mapping from nodes to values and blocks
-		dumpSourcesColumn(s.f.HTMLWriter, fn)
-		s.f.HTMLWriter.WriteAST("AST", astBuf)
-	}
-
-	// Allocate starting values
-	s.labels = map[string]*ssaLabel{}
-	s.labeledNodes = map[*Node]*ssaLabel{}
-	s.fwdVars = map[*Node]*ssa.Value{}
-	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
-
-	s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
-	switch {
-	case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
-		// On 386, disable open-coded defers only when building shared
-		// libraries: rewriteToUseGot() inserts extra code ahead of the
-		// deferreturn/ret sequence generated by gencallret(), and we do
-		// not track that extra code correctly.
-		s.hasOpenDefers = false
-	}
-	if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
-		// Skip doing open defers if there is any extra exit code (likely
-		// copying heap-allocated return values or race detection), since
-		// we will not generate that code in the case of the extra
-		// deferreturn/ret segment.
-		s.hasOpenDefers = false
-	}
-	if s.hasOpenDefers &&
-		s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
-		// Since we are generating defer calls at every exit for
-		// open-coded defers, skip doing open-coded defers if there are
-		// too many returns (especially if there are multiple defers).
-		// Open-coded defers are most important for improving performance
-		// for smaller functions (which don't have many returns).
-		s.hasOpenDefers = false
-	}
-
-	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
-	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
-
-	s.startBlock(s.f.Entry)
-	s.vars[&memVar] = s.startmem
-	if s.hasOpenDefers {
-		// Create the deferBits variable and stack slot.  deferBits is a
-		// bitmask showing which of the open-coded defers in this function
-		// have been activated.
-		deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
-		s.deferBitsTemp = deferBitsTemp
-		// For this value, AuxInt is initialized to zero by default
-		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
-		s.vars[&deferBitsVar] = startDeferBits
-		s.deferBitsAddr = s.addr(deferBitsTemp)
-		s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
-		// Make sure that the deferBits stack slot is kept alive (for use
-		// by panics) and stores to deferBits are not eliminated, even if
-		// all checking code on deferBits in the function exit can be
-		// eliminated, because the defer statements were all
-		// unconditional.
-		s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
-	}
-
-	// Generate addresses of local declarations
-	s.decladdrs = map[*Node]*ssa.Value{}
-	var args []ssa.Param
-	var results []ssa.Param
-	for _, n := range fn.Func.Dcl {
-		switch n.Class() {
-		case PPARAM:
-			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
-			args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
-		case PPARAMOUT:
-			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
-			results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
-			if s.canSSA(n) {
-				// Save ssa-able PPARAMOUT variables so we can
-				// store them back to the stack at the end of
-				// the function.
-				s.returns = append(s.returns, n)
-			}
-		case PAUTO:
-			// processed at each use, to prevent Addr coming
-			// before the decl.
-		case PAUTOHEAP:
-			// moved to heap - already handled by frontend
-		case PFUNC:
-			// local function - already handled by frontend
-		default:
-			s.Fatalf("local variable with class %v unimplemented", n.Class())
-		}
-	}
-
-	// Populate SSAable arguments.
-	for _, n := range fn.Func.Dcl {
-		if n.Class() == PPARAM && s.canSSA(n) {
-			v := s.newValue0A(ssa.OpArg, n.Type, n)
-			s.vars[n] = v
-			s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
-		}
-	}
-
-	// Convert the AST-based IR to the SSA-based IR
-	s.stmtList(fn.Func.Enter)
-	s.stmtList(fn.Nbody)
-
-	// fallthrough to exit
-	if s.curBlock != nil {
-		s.pushLine(fn.Func.Endlineno)
-		s.exit()
-		s.popLine()
-	}
-
-	for _, b := range s.f.Blocks {
-		if b.Pos != src.NoXPos {
-			s.updateUnsetPredPos(b)
-		}
-	}
-
-	s.insertPhis()
-
-	// Main call to ssa package to compile function
-	ssa.Compile(s.f)
-
-	if s.hasOpenDefers {
-		s.emitOpenDeferInfo()
-	}
-
-	return s.f
-}
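
The GOSSAFUNC check at the top of buildssa accepts either a bare function name
or a package-qualified one. A small sketch of that match (hypothetical helper
name and inputs, not part of the compiler):

package main

import "fmt"

// matchSSADump mirrors the printssa condition in buildssa: dump when
// GOSSAFUNC names the function either bare or package-qualified.
func matchSSADump(ssaDump, importPath, name string) bool {
	return name == ssaDump || importPath+"."+name == ssaDump
}

func main() {
	fmt.Println(matchSSADump("(*Reader).Reset", "compress/gzip", "(*Reader).Reset"))               // true
	fmt.Println(matchSSADump("compress/gzip.(*Reader).Reset", "compress/gzip", "(*Reader).Reset")) // true
	fmt.Println(matchSSADump("Reset", "compress/gzip", "(*Reader).Reset"))                         // false
}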
-
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
-	// Read sources of target function fn.
-	fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
-	targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
-	if err != nil {
-		writer.Logf("cannot read sources for function %v: %v", fn, err)
-	}
-
-	// Read sources of inlined functions.
-	var inlFns []*ssa.FuncLines
-	for _, fi := range ssaDumpInlined {
-		var elno src.XPos
-		if fi.Name.Defn == nil {
-			// Endlineno is filled from exported data.
-			elno = fi.Func.Endlineno
-		} else {
-			elno = fi.Name.Defn.Func.Endlineno
-		}
-		fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
-		fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
-		if err != nil {
-			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
-			continue
-		}
-		inlFns = append(inlFns, fnLines)
-	}
-
-	sort.Sort(ssa.ByTopo(inlFns))
-	if targetFn != nil {
-		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
-	}
-
-	writer.WriteSources("sources", inlFns)
-}
-
-func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
-	f, err := os.Open(os.ExpandEnv(file))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	var lines []string
-	ln := uint(1)
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() && ln <= end {
-		if ln >= start {
-			lines = append(lines, scanner.Text())
-		}
-		ln++
-	}
-	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
-}
-
-// updateUnsetPredPos propagates the earliest-value position information for b
-// towards all of b's predecessors that need a position, and recurses on a
-// predecessor if its position is updated. b must have a non-empty position.
-func (s *state) updateUnsetPredPos(b *ssa.Block) {
-	if b.Pos == src.NoXPos {
-		s.Fatalf("Block %s should have a position", b)
-	}
-	bestPos := src.NoXPos
-	for _, e := range b.Preds {
-		p := e.Block()
-		if !p.LackingPos() {
-			continue
-		}
-		if bestPos == src.NoXPos {
-			bestPos = b.Pos
-			for _, v := range b.Values {
-				if v.LackingPos() {
-					continue
-				}
-				if v.Pos != src.NoXPos {
-					// Assume values are still in roughly textual order;
-					// TODO: could also seek minimum position?
-					bestPos = v.Pos
-					break
-				}
-			}
-		}
-		p.Pos = bestPos
-		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
-	}
-}
-
-// Information about each open-coded defer.
-type openDeferInfo struct {
-	// The ODEFER node representing the function call of the defer
-	n *Node
-	// If defer call is closure call, the address of the argtmp where the
-	// closure is stored.
-	closure *ssa.Value
-	// The node representing the argtmp where the closure is stored - used for
-	// function, method, or interface call, to store a closure that panic
-	// processing can use for this defer.
-	closureNode *Node
-	// If defer call is interface call, the address of the argtmp where the
-	// receiver is stored
-	rcvr *ssa.Value
-	// The node representing the argtmp where the receiver is stored
-	rcvrNode *Node
-	// The addresses of the argtmps where the evaluated arguments of the defer
-	// function call are stored.
-	argVals []*ssa.Value
-	// The nodes representing the argtmps where the args of the defer are stored
-	argNodes []*Node
-}
-
-type state struct {
-	// configuration (arch) information
-	config *ssa.Config
-
-	// function we're building
-	f *ssa.Func
-
-	// Node for function
-	curfn *Node
-
-	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
-	labels       map[string]*ssaLabel
-	labeledNodes map[*Node]*ssaLabel
-
-	// unlabeled break and continue statement tracking
-	breakTo    *ssa.Block // current target for plain break statement
-	continueTo *ssa.Block // current target for plain continue statement
-
-	// current location where we're interpreting the AST
-	curBlock *ssa.Block
-
-	// variable assignments in the current block (map from variable symbol to ssa value)
-	// *Node is the unique identifier (an ONAME Node) for the variable.
-	// TODO: keep a single varnum map, then make all of these maps slices instead?
-	vars map[*Node]*ssa.Value
-
-	// fwdVars are variables that are used before they are defined in the current block.
-	// This map exists just to coalesce multiple references into a single FwdRef op.
-	// *Node is the unique identifier (an ONAME Node) for the variable.
-	fwdVars map[*Node]*ssa.Value
-
-	// all defined variables at the end of each block. Indexed by block ID.
-	defvars []map[*Node]*ssa.Value
-
-	// addresses of PPARAM and PPARAMOUT variables.
-	decladdrs map[*Node]*ssa.Value
-
-	// starting values. Memory, stack pointer, and globals pointer
-	startmem *ssa.Value
-	sp       *ssa.Value
-	sb       *ssa.Value
-	// value representing address of where deferBits autotmp is stored
-	deferBitsAddr *ssa.Value
-	deferBitsTemp *Node
-
-	// line number stack. The current line number is the top of the stack.
-	line []src.XPos
-	// the last line number processed; it may have been popped
-	lastPos src.XPos
-
-	// list of panic calls by function name and line number.
-	// Used to deduplicate panic calls.
-	panics map[funcLine]*ssa.Block
-
-	// list of PPARAMOUT (return) variables.
-	returns []*Node
-
-	cgoUnsafeArgs bool
-	hasdefer      bool // whether the function contains a defer statement
-	softFloat     bool
-	hasOpenDefers bool // whether we are doing open-coded defers
-
-	// If doing open-coded defers, list of info about the defer calls in
-	// scanning order. Hence, at exit we should run these defers in reverse
-	// order of this list.
-	openDefers []*openDeferInfo
-	// For open-coded defers, this is the beginning and end blocks of the last
-	// defer exit code that we have generated so far. We use these to share
-	// code between exits if the shareDeferExits option (disabled by default)
-	// is on.
-	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
-	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
-	lastDeferCount      int        // Number of defers encountered at that point
-
-	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
-}
-
-type funcLine struct {
-	f    *obj.LSym
-	base *src.PosBase
-	line uint
-}
-
-type ssaLabel struct {
-	target         *ssa.Block // block identified by this label
-	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
-	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
-}
-
-// label returns the label associated with sym, creating it if necessary.
-func (s *state) label(sym *types.Sym) *ssaLabel {
-	lab := s.labels[sym.Name]
-	if lab == nil {
-		lab = new(ssaLabel)
-		s.labels[sym.Name] = lab
-	}
-	return lab
-}
-
-func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
-func (s *state) Log() bool                            { return s.f.Log() }
-func (s *state) Fatalf(msg string, args ...interface{}) {
-	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
-}
-func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
-func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
-
-var (
-	// dummy node for the memory variable
-	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
-
-	// dummy nodes for temporary variables
-	ptrVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
-	lenVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
-	newlenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
-	capVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
-	typVar       = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
-	okVar        = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
-	deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}}
-)
-
-// startBlock sets the current block we're generating code in to b.
-func (s *state) startBlock(b *ssa.Block) {
-	if s.curBlock != nil {
-		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
-	}
-	s.curBlock = b
-	s.vars = map[*Node]*ssa.Value{}
-	for n := range s.fwdVars {
-		delete(s.fwdVars, n)
-	}
-}
-
-// endBlock marks the end of generating code for the current block.
-// Returns the (former) current block. Returns nil if there is no current
-// block, i.e. if no code flows to the current execution point.
-func (s *state) endBlock() *ssa.Block {
-	b := s.curBlock
-	if b == nil {
-		return nil
-	}
-	for len(s.defvars) <= int(b.ID) {
-		s.defvars = append(s.defvars, nil)
-	}
-	s.defvars[b.ID] = s.vars
-	s.curBlock = nil
-	s.vars = nil
-	if b.LackingPos() {
-		// Empty plain blocks get the line of their successor (handled after all blocks created),
-		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
-		// and for blocks ending in GOTO/BREAK/CONTINUE.
-		b.Pos = src.NoXPos
-	} else {
-		b.Pos = s.lastPos
-	}
-	return b
-}
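
startBlock and endBlock enforce a strict open/close protocol: at most one
current block, whose variable map is snapshotted into defvars on close (later
consumed by phi insertion). A toy sketch of the same protocol, using
hypothetical simplified types in place of the compiler's:

package main

import "fmt"

type block struct{ id int }

type builder struct {
	cur     *block
	vars    map[string]int         // current block's assignments
	defvars map[int]map[string]int // per-block snapshots, like state.defvars
}

func (b *builder) startBlock(blk *block) {
	if b.cur != nil {
		panic("starting a block before ending the current one")
	}
	b.cur, b.vars = blk, map[string]int{}
}

func (b *builder) endBlock() *block {
	blk := b.cur
	if blk == nil {
		return nil // no live code path
	}
	b.defvars[blk.id] = b.vars
	b.cur, b.vars = nil, nil
	return blk
}

func main() {
	b := &builder{defvars: map[int]map[string]int{}}
	b.startBlock(&block{id: 1})
	b.vars["x"] = 42
	b.endBlock()
	fmt.Println(b.defvars[1]["x"]) // 42
}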
-
-// pushLine pushes a line number on the line number stack.
-func (s *state) pushLine(line src.XPos) {
-	if !line.IsKnown() {
-		// The frontend may emit a node with its line number missing;
-		// in that case, use the parent's line number.
-		line = s.peekPos()
-		if Debug.K != 0 {
-			Warn("buildssa: unknown position (line 0)")
-		}
-	} else {
-		s.lastPos = line
-	}
-
-	s.line = append(s.line, line)
-}
-
-// popLine pops the top of the line number stack.
-func (s *state) popLine() {
-	s.line = s.line[:len(s.line)-1]
-}
-
-// peekPos peeks the top of the line number stack.
-func (s *state) peekPos() src.XPos {
-	return s.line[len(s.line)-1]
-}
-
-// newValue0 adds a new value with no arguments to the current block.
-func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
-	return s.curBlock.NewValue0(s.peekPos(), op, t)
-}
-
-// newValue0A adds a new value with no arguments and an aux value to the current block.
-func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
-	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
-}
-
-// newValue0I adds a new value with no arguments and an auxint value to the current block.
-func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
-	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
-}
-
-// newValue1 adds a new value with one argument to the current block.
-func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
-}
-
-// newValue1A adds a new value with one argument and an aux value to the current block.
-func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
-}
-
-// newValue1Apos adds a new value with one argument and an aux value to the current block.
-// isStmt determines whether the created value may be a statement or not
-// (i.e., false means never, true means maybe).
-func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value {
-	if isStmt {
-		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
-	}
-	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
-}
-
-// newValue1I adds a new value with one argument and an auxint value to the current block.
-func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
-}
-
-// newValue2 adds a new value with two arguments to the current block.
-func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
-}
-
-// newValue2A adds a new value with two arguments and an aux value to the current block.
-func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
-}
-
-// newValue2Apos adds a new value with two arguments and an aux value to the current block.
-// isStmt determines whether the created value may be a statement or not
-// (i.e., false means never, true means maybe).
-func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
-	if isStmt {
-		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
-	}
-	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
-}
-
-// newValue2I adds a new value with two arguments and an auxint value to the current block.
-func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
-}
-
-// newValue3 adds a new value with three arguments to the current block.
-func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
-}
-
-// newValue3I adds a new value with three arguments and an auxint value to the current block.
-func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
-}
-
-// newValue3A adds a new value with three arguments and an aux value to the current block.
-func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
-}
-
-// newValue3Apos adds a new value with three arguments and an aux value to the current block.
-// isStmt determines whether the created value may be a statement or not
-// (i.e., false means never, true means maybe).
-func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
-	if isStmt {
-		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
-	}
-	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
-}
-
-// newValue4 adds a new value with four arguments to the current block.
-func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
-}
-
-// newValue4I adds a new value with four arguments and an auxint value to the current block.
-func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
-}
-
-// entryNewValue0 adds a new value with no arguments to the entry block.
-func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
-	return s.f.Entry.NewValue0(src.NoXPos, op, t)
-}
-
-// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
-func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
-	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
-}
-
-// entryNewValue1 adds a new value with one argument to the entry block.
-func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
-}
-
-// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
-func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
-}
-
-// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
-func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
-}
-
-// entryNewValue2 adds a new value with two arguments to the entry block.
-func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
-}
-
-// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
-func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
-}
-
-// const* routines add a new const value to the entry block.
-func (s *state) constSlice(t *types.Type) *ssa.Value {
-	return s.f.ConstSlice(t)
-}
-func (s *state) constInterface(t *types.Type) *ssa.Value {
-	return s.f.ConstInterface(t)
-}
-func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
-func (s *state) constEmptyString(t *types.Type) *ssa.Value {
-	return s.f.ConstEmptyString(t)
-}
-func (s *state) constBool(c bool) *ssa.Value {
-	return s.f.ConstBool(types.Types[TBOOL], c)
-}
-func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
-	return s.f.ConstInt8(t, c)
-}
-func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
-	return s.f.ConstInt16(t, c)
-}
-func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
-	return s.f.ConstInt32(t, c)
-}
-func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
-	return s.f.ConstInt64(t, c)
-}
-func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
-	return s.f.ConstFloat32(t, c)
-}
-func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
-	return s.f.ConstFloat64(t, c)
-}
-func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
-	if s.config.PtrSize == 8 {
-		return s.constInt64(t, c)
-	}
-	if int64(int32(c)) != c {
-		s.Fatalf("integer constant too big %d", c)
-	}
-	return s.constInt32(t, int32(c))
-}
-func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
-	return s.f.ConstOffPtrSP(t, c, s.sp)
-}
-
-// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
-// soft-float runtime function instead (when emitting soft-float code).
-func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
-	if s.softFloat {
-		if c, ok := s.sfcall(op, arg); ok {
-			return c
-		}
-	}
-	return s.newValue1(op, t, arg)
-}
-func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
-	if s.softFloat {
-		if c, ok := s.sfcall(op, arg0, arg1); ok {
-			return c
-		}
-	}
-	return s.newValue2(op, t, arg0, arg1)
-}
-
-type instrumentKind uint8
-
-const (
-	instrumentRead = iota
-	instrumentWrite
-	instrumentMove
-)
-
-func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
-	s.instrument2(t, addr, nil, kind)
-}
-
-// instrumentFields instruments a read/write operation on addr.
-// If it is instrumenting for MSAN and t is a struct type, it instruments
-// the operation for each field, instead of for the whole struct.
-func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
-	if !flag_msan || !t.IsStruct() {
-		s.instrument(t, addr, kind)
-		return
-	}
-	for _, f := range t.Fields().Slice() {
-		if f.Sym.IsBlank() {
-			continue
-		}
-		offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
-		s.instrumentFields(f.Type, offptr, kind)
-	}
-}
-
-func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
-	if flag_msan {
-		s.instrument2(t, dst, src, instrumentMove)
-	} else {
-		s.instrument(t, src, instrumentRead)
-		s.instrument(t, dst, instrumentWrite)
-	}
-}
-
-func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
-	if !s.curfn.Func.InstrumentBody() {
-		return
-	}
-
-	w := t.Size()
-	if w == 0 {
-		return // can't race on zero-sized things
-	}
-
-	if ssa.IsSanitizerSafeAddr(addr) {
-		return
-	}
-
-	var fn *obj.LSym
-	needWidth := false
-
-	if addr2 != nil && kind != instrumentMove {
-		panic("instrument2: non-nil addr2 for non-move instrumentation")
-	}
-
-	if flag_msan {
-		switch kind {
-		case instrumentRead:
-			fn = msanread
-		case instrumentWrite:
-			fn = msanwrite
-		case instrumentMove:
-			fn = msanmove
-		default:
-			panic("unreachable")
-		}
-		needWidth = true
-	} else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
-		// for composite objects we have to write every address
-		// because a write might happen to any subobject.
-		// composites with only one element don't have subobjects, though.
-		switch kind {
-		case instrumentRead:
-			fn = racereadrange
-		case instrumentWrite:
-			fn = racewriterange
-		default:
-			panic("unreachable")
-		}
-		needWidth = true
-	} else if flag_race {
-		// for non-composite objects we can write just the start
-		// address, as any write must write the first byte.
-		switch kind {
-		case instrumentRead:
-			fn = raceread
-		case instrumentWrite:
-			fn = racewrite
-		default:
-			panic("unreachable")
-		}
-	} else {
-		panic("unreachable")
-	}
-
-	args := []*ssa.Value{addr}
-	if addr2 != nil {
-		args = append(args, addr2)
-	}
-	if needWidth {
-		args = append(args, s.constInt(types.Types[TUINTPTR], w))
-	}
-	s.rtcall(fn, true, nil, args...)
-}
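
The hook selection above reduces to a small table: msan always appends a width
argument; race appends one only for composite objects, whose writes may touch
any subobject. A sketch of that decision (hypothetical simplified signature;
race never sees a move here because instrumentMove splits it into a read plus
a write):

package main

import "fmt"

// pickInstrumentFn mirrors instrument2's choice of runtime hook.
// kind is "read" or "write"; "move" occurs only under msan.
func pickInstrumentFn(msan, race, composite bool, kind string) (fn string, needWidth bool) {
	switch {
	case msan:
		return "msan" + kind, true // msanread/msanwrite/msanmove
	case race && composite:
		return "race" + kind + "range", true // racereadrange/racewriterange
	case race:
		return "race" + kind, false // raceread/racewrite, start address only
	}
	return "", false
}

func main() {
	fmt.Println(pickInstrumentFn(false, true, true, "read"))   // racereadrange true
	fmt.Println(pickInstrumentFn(false, true, false, "write")) // racewrite false
}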
-
-func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
-	s.instrumentFields(t, src, instrumentRead)
-	return s.rawLoad(t, src)
-}
-
-func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
-	return s.newValue2(ssa.OpLoad, t, src, s.mem())
-}
-
-func (s *state) store(t *types.Type, dst, val *ssa.Value) {
-	s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
-}
-
-func (s *state) zero(t *types.Type, dst *ssa.Value) {
-	s.instrument(t, dst, instrumentWrite)
-	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
-	store.Aux = t
-	s.vars[&memVar] = store
-}
-
-func (s *state) move(t *types.Type, dst, src *ssa.Value) {
-	s.instrumentMove(t, dst, src)
-	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
-	store.Aux = t
-	s.vars[&memVar] = store
-}
-
-// stmtList converts the statement list n to SSA and adds it to s.
-func (s *state) stmtList(l Nodes) {
-	for _, n := range l.Slice() {
-		s.stmt(n)
-	}
-}
-
-// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *Node) {
-	if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
-		// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
-		s.pushLine(n.Pos)
-		defer s.popLine()
-	}
-
-	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
-	// then this code is dead. Stop here.
-	if s.curBlock == nil && n.Op != OLABEL {
-		return
-	}
-
-	s.stmtList(n.Ninit)
-	switch n.Op {
-
-	case OBLOCK:
-		s.stmtList(n.List)
-
-	// No-ops
-	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
-
-	// Expression statements
-	case OCALLFUNC:
-		if isIntrinsicCall(n) {
-			s.intrinsicCall(n)
-			return
-		}
-		fallthrough
-
-	case OCALLMETH, OCALLINTER:
-		s.callResult(n, callNormal)
-		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
-			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
-				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
-				m := s.mem()
-				b := s.endBlock()
-				b.Kind = ssa.BlockExit
-				b.SetControl(m)
-				// TODO: never rewrite OPANIC to OCALLFUNC in the
-				// first place. Need to wait until all backends
-				// go through SSA.
-			}
-		}
-	case ODEFER:
-		if Debug_defer > 0 {
-			var defertype string
-			if s.hasOpenDefers {
-				defertype = "open-coded"
-			} else if n.Esc == EscNever {
-				defertype = "stack-allocated"
-			} else {
-				defertype = "heap-allocated"
-			}
-			Warnl(n.Pos, "%s defer", defertype)
-		}
-		if s.hasOpenDefers {
-			s.openDeferRecord(n.Left)
-		} else {
-			d := callDefer
-			if n.Esc == EscNever {
-				d = callDeferStack
-			}
-			s.callResult(n.Left, d)
-		}
-	case OGO:
-		s.callResult(n.Left, callGo)
-
-	case OAS2DOTTYPE:
-		res, resok := s.dottype(n.Right, true)
-		deref := false
-		if !canSSAType(n.Right.Type) {
-			if res.Op != ssa.OpLoad {
-				s.Fatalf("dottype of non-load")
-			}
-			mem := s.mem()
-			if mem.Op == ssa.OpVarKill {
-				mem = mem.Args[0]
-			}
-			if res.Args[1] != mem {
-				s.Fatalf("memory no longer live from 2-result dottype load")
-			}
-			deref = true
-			res = res.Args[0]
-		}
-		s.assign(n.List.First(), res, deref, 0)
-		s.assign(n.List.Second(), resok, false, 0)
-		return
-
-	case OAS2FUNC:
-		// We come here only when it is an intrinsic call returning two values.
-		if !isIntrinsicCall(n.Right) {
-			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
-		}
-		v := s.intrinsicCall(n.Right)
-		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
-		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
-		s.assign(n.List.First(), v1, false, 0)
-		s.assign(n.List.Second(), v2, false, 0)
-		return
-
-	case ODCL:
-		if n.Left.Class() == PAUTOHEAP {
-			s.Fatalf("DCL %v", n)
-		}
-
-	case OLABEL:
-		sym := n.Sym
-		lab := s.label(sym)
-
-		// Associate label with its control flow node, if any
-		if ctl := n.labeledControl(); ctl != nil {
-			s.labeledNodes[ctl] = lab
-		}
-
-		// The label might already have a target block via a goto.
-		if lab.target == nil {
-			lab.target = s.f.NewBlock(ssa.BlockPlain)
-		}
-
-		// Go to that label.
-		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
-		if s.curBlock != nil {
-			b := s.endBlock()
-			b.AddEdgeTo(lab.target)
-		}
-		s.startBlock(lab.target)
-
-	case OGOTO:
-		sym := n.Sym
-
-		lab := s.label(sym)
-		if lab.target == nil {
-			lab.target = s.f.NewBlock(ssa.BlockPlain)
-		}
-
-		b := s.endBlock()
-		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
-		b.AddEdgeTo(lab.target)
-
-	case OAS:
-		if n.Left == n.Right && n.Left.Op == ONAME {
-			// An x=x assignment. No point in doing anything
-			// here. In addition, skipping this assignment
-			// prevents generating:
-			//   VARDEF x
-			//   COPY x -> x
-			// which is bad because x is incorrectly considered
-			// dead before the vardef. See issue #14904.
-			return
-		}
-
-		// Evaluate RHS.
-		rhs := n.Right
-		if rhs != nil {
-			switch rhs.Op {
-			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
-				// All literals with nonzero fields have already been
-				// rewritten during walk. Any that remain are just T{}
-				// or equivalents. Use the zero value.
-				if !isZero(rhs) {
-					s.Fatalf("literal with nonzero value in SSA: %v", rhs)
-				}
-				rhs = nil
-			case OAPPEND:
-				// Check whether we're writing the result of an append back to the same slice.
-				// If so, we handle it specially to avoid write barriers on the fast
-				// (non-growth) path.
-				if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
-					break
-				}
-				// If the slice can be SSA'd, it'll be on the stack,
-				// so there will be no write barriers,
-				// so there's no need to attempt to prevent them.
-				if s.canSSA(n.Left) {
-					if Debug_append > 0 { // replicating old diagnostic message
-						Warnl(n.Pos, "append: len-only update (in local slice)")
-					}
-					break
-				}
-				if Debug_append > 0 {
-					Warnl(n.Pos, "append: len-only update")
-				}
-				s.append(rhs, true)
-				return
-			}
-		}
-
-		if n.Left.isBlank() {
-			// _ = rhs
-			// Just evaluate rhs for side-effects.
-			if rhs != nil {
-				s.expr(rhs)
-			}
-			return
-		}
-
-		var t *types.Type
-		if n.Right != nil {
-			t = n.Right.Type
-		} else {
-			t = n.Left.Type
-		}
-
-		var r *ssa.Value
-		deref := !canSSAType(t)
-		if deref {
-			if rhs == nil {
-				r = nil // Signal assign to use OpZero.
-			} else {
-				r = s.addr(rhs)
-			}
-		} else {
-			if rhs == nil {
-				r = s.zeroVal(t)
-			} else {
-				r = s.expr(rhs)
-			}
-		}
-
-		var skip skipMask
-		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
-			// We're assigning a slicing operation back to its source.
-			// Don't write back fields we aren't changing. See issue #14855.
-			i, j, k := rhs.SliceBounds()
-			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
-				// [0:...] is the same as [:...]
-				i = nil
-			}
-			// TODO: detect defaults for len/cap also.
-			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
-			//    tmp = len(*p)
-			//    (*p)[:tmp]
-			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
-			//      j = nil
-			//}
-			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
-			//      k = nil
-			//}
-			if i == nil {
-				skip |= skipPtr
-				if j == nil {
-					skip |= skipLen
-				}
-				if k == nil {
-					skip |= skipCap
-				}
-			}
-		}
-
-		s.assign(n.Left, r, deref, skip)
-
-	case OIF:
-		if Isconst(n.Left, CTBOOL) {
-			s.stmtList(n.Left.Ninit)
-			if n.Left.BoolVal() {
-				s.stmtList(n.Nbody)
-			} else {
-				s.stmtList(n.Rlist)
-			}
-			break
-		}
-
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-		var likely int8
-		if n.Likely() {
-			likely = 1
-		}
-		var bThen *ssa.Block
-		if n.Nbody.Len() != 0 {
-			bThen = s.f.NewBlock(ssa.BlockPlain)
-		} else {
-			bThen = bEnd
-		}
-		var bElse *ssa.Block
-		if n.Rlist.Len() != 0 {
-			bElse = s.f.NewBlock(ssa.BlockPlain)
-		} else {
-			bElse = bEnd
-		}
-		s.condBranch(n.Left, bThen, bElse, likely)
-
-		if n.Nbody.Len() != 0 {
-			s.startBlock(bThen)
-			s.stmtList(n.Nbody)
-			if b := s.endBlock(); b != nil {
-				b.AddEdgeTo(bEnd)
-			}
-		}
-		if n.Rlist.Len() != 0 {
-			s.startBlock(bElse)
-			s.stmtList(n.Rlist)
-			if b := s.endBlock(); b != nil {
-				b.AddEdgeTo(bEnd)
-			}
-		}
-		s.startBlock(bEnd)
-
-	case ORETURN:
-		s.stmtList(n.List)
-		b := s.exit()
-		b.Pos = s.lastPos.WithIsStmt()
-
-	case ORETJMP:
-		s.stmtList(n.List)
-		b := s.exit()
-		b.Kind = ssa.BlockRetJmp // override BlockRet
-		b.Aux = n.Sym.Linksym()
-
-	case OCONTINUE, OBREAK:
-		var to *ssa.Block
-		if n.Sym == nil {
-			// plain break/continue
-			switch n.Op {
-			case OCONTINUE:
-				to = s.continueTo
-			case OBREAK:
-				to = s.breakTo
-			}
-		} else {
-			// labeled break/continue; look up the target
-			sym := n.Sym
-			lab := s.label(sym)
-			switch n.Op {
-			case OCONTINUE:
-				to = lab.continueTarget
-			case OBREAK:
-				to = lab.breakTarget
-			}
-		}
-
-		b := s.endBlock()
-		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
-		b.AddEdgeTo(to)
-
-	case OFOR, OFORUNTIL:
-		// OFOR: for Ninit; Left; Right { Nbody }
-		// cond (Left); body (Nbody); incr (Right)
-		//
-		// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
-		// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
-		bCond := s.f.NewBlock(ssa.BlockPlain)
-		bBody := s.f.NewBlock(ssa.BlockPlain)
-		bIncr := s.f.NewBlock(ssa.BlockPlain)
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-
-		// ensure empty for loops have correct position; issue #30167
-		bBody.Pos = n.Pos
-
-		// first, jump to condition test (OFOR) or body (OFORUNTIL)
-		b := s.endBlock()
-		if n.Op == OFOR {
-			b.AddEdgeTo(bCond)
-			// generate code to test condition
-			s.startBlock(bCond)
-			if n.Left != nil {
-				s.condBranch(n.Left, bBody, bEnd, 1)
-			} else {
-				b := s.endBlock()
-				b.Kind = ssa.BlockPlain
-				b.AddEdgeTo(bBody)
-			}
-
-		} else {
-			b.AddEdgeTo(bBody)
-		}
-
-		// set up for continue/break in body
-		prevContinue := s.continueTo
-		prevBreak := s.breakTo
-		s.continueTo = bIncr
-		s.breakTo = bEnd
-		lab := s.labeledNodes[n]
-		if lab != nil {
-			// labeled for loop
-			lab.continueTarget = bIncr
-			lab.breakTarget = bEnd
-		}
-
-		// generate body
-		s.startBlock(bBody)
-		s.stmtList(n.Nbody)
-
-		// tear down continue/break
-		s.continueTo = prevContinue
-		s.breakTo = prevBreak
-		if lab != nil {
-			lab.continueTarget = nil
-			lab.breakTarget = nil
-		}
-
-		// done with body, goto incr
-		if b := s.endBlock(); b != nil {
-			b.AddEdgeTo(bIncr)
-		}
-
-		// generate incr (and, for OFORUNTIL, condition)
-		s.startBlock(bIncr)
-		if n.Right != nil {
-			s.stmt(n.Right)
-		}
-		if n.Op == OFOR {
-			if b := s.endBlock(); b != nil {
-				b.AddEdgeTo(bCond)
-				// It can happen that bIncr ends in a block containing only VARKILL,
-				// and that muddles the debugging experience.
-				if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
-					b.Pos = bCond.Pos
-				}
-			}
-		} else {
-			// bCond is unused in OFORUNTIL, so repurpose it.
-			bLateIncr := bCond
-			// test condition
-			s.condBranch(n.Left, bLateIncr, bEnd, 1)
-			// generate late increment
-			s.startBlock(bLateIncr)
-			s.stmtList(n.List)
-			s.endBlock().AddEdgeTo(bBody)
-		}
-
-		s.startBlock(bEnd)
-
-	case OSWITCH, OSELECT:
-		// These have been mostly rewritten by the front end into their Nbody fields.
-		// Our main task is to correctly hook up any break statements.
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-
-		prevBreak := s.breakTo
-		s.breakTo = bEnd
-		lab := s.labeledNodes[n]
-		if lab != nil {
-			// labeled
-			lab.breakTarget = bEnd
-		}
-
-		// generate body code
-		s.stmtList(n.Nbody)
-
-		s.breakTo = prevBreak
-		if lab != nil {
-			lab.breakTarget = nil
-		}
-
-		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
-		// If we still have a current block here, then mark it unreachable.
-		if s.curBlock != nil {
-			m := s.mem()
-			b := s.endBlock()
-			b.Kind = ssa.BlockExit
-			b.SetControl(m)
-		}
-		s.startBlock(bEnd)
-
-	case OVARDEF:
-		if !s.canSSA(n.Left) {
-			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
-		}
-	case OVARKILL:
-		// Insert a varkill op to record that a variable is no longer live.
-		// We only care about liveness info at call sites, so putting the
-		// varkill in the store chain is enough to keep it correctly ordered
-		// with respect to call ops.
-		if !s.canSSA(n.Left) {
-			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
-		}
-
-	case OVARLIVE:
-		// Insert a varlive op to record that a variable is still live.
-		if !n.Left.Name.Addrtaken() {
-			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
-		}
-		switch n.Left.Class() {
-		case PAUTO, PPARAM, PPARAMOUT:
-		default:
-			s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
-		}
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
-
-	case OCHECKNIL:
-		p := s.expr(n.Left)
-		s.nilCheck(p)
-
-	case OINLMARK:
-		s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
-
-	default:
-		s.Fatalf("unhandled stmt %v", n.Op)
-	}
-}
-
-// If true, share as many open-coded defer exits as possible (with the downside of
-// worse line-number information).
-const shareDeferExits = false
-
-// exit processes any code that needs to be generated just before returning.
-// It returns a BlockRet block that ends the control flow. Its control value
-// will be set to the final memory state.
-func (s *state) exit() *ssa.Block {
-	if s.hasdefer {
-		if s.hasOpenDefers {
-			if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
-				if s.curBlock.Kind != ssa.BlockPlain {
-					panic("Block for an exit should be BlockPlain")
-				}
-				s.curBlock.AddEdgeTo(s.lastDeferExit)
-				s.endBlock()
-				return s.lastDeferFinalBlock
-			}
-			s.openDeferExit()
-		} else {
-			s.rtcall(Deferreturn, true, nil)
-		}
-	}
-
-	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
-	// variables back to the stack.
-	s.stmtList(s.curfn.Func.Exit)
-
-	// Store SSAable PPARAMOUT variables back to stack locations.
-	for _, n := range s.returns {
-		addr := s.decladdrs[n]
-		val := s.variable(n, n.Type)
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
-		s.store(n.Type, addr, val)
-		// TODO: if val is ever spilled, we'd like to use the
-		// PPARAMOUT slot for spilling it. That won't happen
-		// currently.
-	}
-
-	// Do actual return.
-	m := s.mem()
-	b := s.endBlock()
-	b.Kind = ssa.BlockRet
-	b.SetControl(m)
-	if s.hasdefer && s.hasOpenDefers {
-		s.lastDeferFinalBlock = b
-	}
-	return b
-}
-
-type opAndType struct {
-	op    Op
-	etype types.EType
-}
-
-var opToSSA = map[opAndType]ssa.Op{
-	opAndType{OADD, TINT8}:    ssa.OpAdd8,
-	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
-	opAndType{OADD, TINT16}:   ssa.OpAdd16,
-	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
-	opAndType{OADD, TINT32}:   ssa.OpAdd32,
-	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
-	opAndType{OADD, TINT64}:   ssa.OpAdd64,
-	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
-	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
-	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
-
-	opAndType{OSUB, TINT8}:    ssa.OpSub8,
-	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
-	opAndType{OSUB, TINT16}:   ssa.OpSub16,
-	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
-	opAndType{OSUB, TINT32}:   ssa.OpSub32,
-	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
-	opAndType{OSUB, TINT64}:   ssa.OpSub64,
-	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
-	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
-	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
-
-	opAndType{ONOT, TBOOL}: ssa.OpNot,
-
-	opAndType{ONEG, TINT8}:    ssa.OpNeg8,
-	opAndType{ONEG, TUINT8}:   ssa.OpNeg8,
-	opAndType{ONEG, TINT16}:   ssa.OpNeg16,
-	opAndType{ONEG, TUINT16}:  ssa.OpNeg16,
-	opAndType{ONEG, TINT32}:   ssa.OpNeg32,
-	opAndType{ONEG, TUINT32}:  ssa.OpNeg32,
-	opAndType{ONEG, TINT64}:   ssa.OpNeg64,
-	opAndType{ONEG, TUINT64}:  ssa.OpNeg64,
-	opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
-	opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,
-
-	opAndType{OBITNOT, TINT8}:   ssa.OpCom8,
-	opAndType{OBITNOT, TUINT8}:  ssa.OpCom8,
-	opAndType{OBITNOT, TINT16}:  ssa.OpCom16,
-	opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
-	opAndType{OBITNOT, TINT32}:  ssa.OpCom32,
-	opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
-	opAndType{OBITNOT, TINT64}:  ssa.OpCom64,
-	opAndType{OBITNOT, TUINT64}: ssa.OpCom64,
-
-	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
-	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
-	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
-	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
-
-	opAndType{OMUL, TINT8}:    ssa.OpMul8,
-	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
-	opAndType{OMUL, TINT16}:   ssa.OpMul16,
-	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
-	opAndType{OMUL, TINT32}:   ssa.OpMul32,
-	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
-	opAndType{OMUL, TINT64}:   ssa.OpMul64,
-	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
-	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
-	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
-
-	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
-	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
-
-	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
-	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
-	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
-	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
-	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
-	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
-	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
-	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
-
-	opAndType{OMOD, TINT8}:   ssa.OpMod8,
-	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
-	opAndType{OMOD, TINT16}:  ssa.OpMod16,
-	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
-	opAndType{OMOD, TINT32}:  ssa.OpMod32,
-	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
-	opAndType{OMOD, TINT64}:  ssa.OpMod64,
-	opAndType{OMOD, TUINT64}: ssa.OpMod64u,
-
-	opAndType{OAND, TINT8}:   ssa.OpAnd8,
-	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
-	opAndType{OAND, TINT16}:  ssa.OpAnd16,
-	opAndType{OAND, TUINT16}: ssa.OpAnd16,
-	opAndType{OAND, TINT32}:  ssa.OpAnd32,
-	opAndType{OAND, TUINT32}: ssa.OpAnd32,
-	opAndType{OAND, TINT64}:  ssa.OpAnd64,
-	opAndType{OAND, TUINT64}: ssa.OpAnd64,
-
-	opAndType{OOR, TINT8}:   ssa.OpOr8,
-	opAndType{OOR, TUINT8}:  ssa.OpOr8,
-	opAndType{OOR, TINT16}:  ssa.OpOr16,
-	opAndType{OOR, TUINT16}: ssa.OpOr16,
-	opAndType{OOR, TINT32}:  ssa.OpOr32,
-	opAndType{OOR, TUINT32}: ssa.OpOr32,
-	opAndType{OOR, TINT64}:  ssa.OpOr64,
-	opAndType{OOR, TUINT64}: ssa.OpOr64,
-
-	opAndType{OXOR, TINT8}:   ssa.OpXor8,
-	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
-	opAndType{OXOR, TINT16}:  ssa.OpXor16,
-	opAndType{OXOR, TUINT16}: ssa.OpXor16,
-	opAndType{OXOR, TINT32}:  ssa.OpXor32,
-	opAndType{OXOR, TUINT32}: ssa.OpXor32,
-	opAndType{OXOR, TINT64}:  ssa.OpXor64,
-	opAndType{OXOR, TUINT64}: ssa.OpXor64,
-
-	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
-	opAndType{OEQ, TINT8}:      ssa.OpEq8,
-	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
-	opAndType{OEQ, TINT16}:     ssa.OpEq16,
-	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
-	opAndType{OEQ, TINT32}:     ssa.OpEq32,
-	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
-	opAndType{OEQ, TINT64}:     ssa.OpEq64,
-	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
-	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
-	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
-	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
-	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
-	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
-	opAndType{OEQ, TPTR}:       ssa.OpEqPtr,
-	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
-	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
-	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
-	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,
-
-	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
-	opAndType{ONE, TINT8}:      ssa.OpNeq8,
-	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
-	opAndType{ONE, TINT16}:     ssa.OpNeq16,
-	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
-	opAndType{ONE, TINT32}:     ssa.OpNeq32,
-	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
-	opAndType{ONE, TINT64}:     ssa.OpNeq64,
-	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
-	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
-	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
-	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
-	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
-	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
-	opAndType{ONE, TPTR}:       ssa.OpNeqPtr,
-	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
-	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
-	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
-	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,
-
-	opAndType{OLT, TINT8}:    ssa.OpLess8,
-	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
-	opAndType{OLT, TINT16}:   ssa.OpLess16,
-	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
-	opAndType{OLT, TINT32}:   ssa.OpLess32,
-	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
-	opAndType{OLT, TINT64}:   ssa.OpLess64,
-	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
-	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
-	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
-
-	opAndType{OLE, TINT8}:    ssa.OpLeq8,
-	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
-	opAndType{OLE, TINT16}:   ssa.OpLeq16,
-	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
-	opAndType{OLE, TINT32}:   ssa.OpLeq32,
-	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
-	opAndType{OLE, TINT64}:   ssa.OpLeq64,
-	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
-	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
-	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
-}
-
-func (s *state) concreteEtype(t *types.Type) types.EType {
-	e := t.Etype
-	switch e {
-	default:
-		return e
-	case TINT:
-		if s.config.PtrSize == 8 {
-			return TINT64
-		}
-		return TINT32
-	case TUINT:
-		if s.config.PtrSize == 8 {
-			return TUINT64
-		}
-		return TUINT32
-	case TUINTPTR:
-		if s.config.PtrSize == 8 {
-			return TUINT64
-		}
-		return TUINT32
-	}
-}
-
-func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
-	etype := s.concreteEtype(t)
-	x, ok := opToSSA[opAndType{op, etype}]
-	if !ok {
-		s.Fatalf("unhandled binary op %v %s", op, etype)
-	}
-	return x
-}
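-
-// For example, on a 64-bit target ssaOp(OADD, types.Types[TINT]) resolves
-// TINT to TINT64 via concreteEtype and then returns ssa.OpAdd64 from the
-// opToSSA table above.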
-
-func floatForComplex(t *types.Type) *types.Type {
-	switch t.Etype {
-	case TCOMPLEX64:
-		return types.Types[TFLOAT32]
-	case TCOMPLEX128:
-		return types.Types[TFLOAT64]
-	}
-	Fatalf("unexpected type: %v", t)
-	return nil
-}
-
-func complexForFloat(t *types.Type) *types.Type {
-	switch t.Etype {
-	case TFLOAT32:
-		return types.Types[TCOMPLEX64]
-	case TFLOAT64:
-		return types.Types[TCOMPLEX128]
-	}
-	Fatalf("unexpected type: %v", t)
-	return nil
-}
-
-type opAndTwoTypes struct {
-	op     Op
-	etype1 types.EType
-	etype2 types.EType
-}
-
-type twoTypes struct {
-	etype1 types.EType
-	etype2 types.EType
-}
-
-type twoOpsAndType struct {
-	op1              ssa.Op
-	op2              ssa.Op
-	intermediateType types.EType
-}
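-
-// A conversion runs as op1, producing a value of intermediateType, followed
-// by op2. ssa.OpCopy marks a step that needs no instruction, and
-// ssa.OpInvalid marks a pair that must instead use the branchy expansions
-// (uint64Tofloat64 and friends) below.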
-
-var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
-
-	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
-
-	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
-
-	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
-	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
-
-	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
-	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
-	// unsigned
-	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
-	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead
-
-	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
-	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead
-
-	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
-	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead
-
-	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
-	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
-
-	// float
-	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
-	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
-	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
-	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
-}
-
-// fpConvOpToSSA32 is used only on 32-bit archs and contains only the
-// entries that differ from fpConvOpToSSA: on 32-bit archs, don't use
-// int64<->float conversions for uint32.
-var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
-	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
-	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
-	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
-	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
-}
-
-// uint64<->float conversions, only on machines that have instructions for that
-var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
-	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
-	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
-	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
-	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
-}
-
-var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
-	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
-	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
-	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
-	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
-	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
-	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
-	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
-	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
-
-	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
-	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
-	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
-	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
-	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
-	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
-	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
-	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
-
-	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
-	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
-	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
-	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
-	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
-	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
-	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
-	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
-
-	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
-	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
-	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
-	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
-	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
-	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
-	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
-	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
-
-	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
-	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
-	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
-	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
-	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
-	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
-	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
-	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
-
-	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
-	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
-	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
-	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
-	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
-	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
-	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
-	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
-
-	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
-	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
-	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
-	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
-	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
-	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
-	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
-	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
-
-	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
-	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
-	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
-	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
-	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
-	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
-	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
-	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
-}
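-
-// Only unsigned shift-count types appear above: expr bounds-checks a signed
-// count and converts it to unsigned before the lookup. Signed right shifts
-// map to Rsh..x.. ops, unsigned ones to Rsh..Ux.. ops.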
-
-func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
-	etype1 := s.concreteEtype(t)
-	etype2 := s.concreteEtype(u)
-	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
-	if !ok {
-		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
-	}
-	return x
-}
-
-// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *Node) *ssa.Value {
-	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
-		// ONAMEs and named OLITERALs have the line number
-		// of the decl, not the use. See issue 14742.
-		s.pushLine(n.Pos)
-		defer s.popLine()
-	}
-
-	s.stmtList(n.Ninit)
-	switch n.Op {
-	case OBYTES2STRTMP:
-		slice := s.expr(n.Left)
-		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
-		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
-		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
-	case OSTR2BYTESTMP:
-		str := s.expr(n.Left)
-		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
-		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
-		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
-	case OCFUNC:
-		aux := n.Left.Sym.Linksym()
-		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
-	case ONAME:
-		if n.Class() == PFUNC {
-			// "value" of a function is the address of the function's closure
-			sym := funcsym(n.Sym).Linksym()
-			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
-		}
-		if s.canSSA(n) {
-			return s.variable(n, n.Type)
-		}
-		addr := s.addr(n)
-		return s.load(n.Type, addr)
-	case OCLOSUREVAR:
-		addr := s.addr(n)
-		return s.load(n.Type, addr)
-	case OLITERAL:
-		switch u := n.Val().U.(type) {
-		case *Mpint:
-			i := u.Int64()
-			switch n.Type.Size() {
-			case 1:
-				return s.constInt8(n.Type, int8(i))
-			case 2:
-				return s.constInt16(n.Type, int16(i))
-			case 4:
-				return s.constInt32(n.Type, int32(i))
-			case 8:
-				return s.constInt64(n.Type, i)
-			default:
-				s.Fatalf("bad integer size %d", n.Type.Size())
-				return nil
-			}
-		case string:
-			if u == "" {
-				return s.constEmptyString(n.Type)
-			}
-			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
-		case bool:
-			return s.constBool(u)
-		case *NilVal:
-			t := n.Type
-			switch {
-			case t.IsSlice():
-				return s.constSlice(t)
-			case t.IsInterface():
-				return s.constInterface(t)
-			default:
-				return s.constNil(t)
-			}
-		case *Mpflt:
-			switch n.Type.Size() {
-			case 4:
-				return s.constFloat32(n.Type, u.Float32())
-			case 8:
-				return s.constFloat64(n.Type, u.Float64())
-			default:
-				s.Fatalf("bad float size %d", n.Type.Size())
-				return nil
-			}
-		case *Mpcplx:
-			r := &u.Real
-			i := &u.Imag
-			switch n.Type.Size() {
-			case 8:
-				pt := types.Types[TFLOAT32]
-				return s.newValue2(ssa.OpComplexMake, n.Type,
-					s.constFloat32(pt, r.Float32()),
-					s.constFloat32(pt, i.Float32()))
-			case 16:
-				pt := types.Types[TFLOAT64]
-				return s.newValue2(ssa.OpComplexMake, n.Type,
-					s.constFloat64(pt, r.Float64()),
-					s.constFloat64(pt, i.Float64()))
-			default:
-				s.Fatalf("bad float size %d", n.Type.Size())
-				return nil
-			}
-
-		default:
-			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
-			return nil
-		}
-	case OCONVNOP:
-		to := n.Type
-		from := n.Left.Type
-
-		// Assume everything will work out, so set up our return value.
-		// Anything interesting that happens from here is a fatal.
-		x := s.expr(n.Left)
-
-		// Special case to avoid confusing GC and liveness.
-		// We don't want pointers accidentally classified
-		// as not-pointers or vice-versa because of copy
-		// elision.
-		if to.IsPtrShaped() != from.IsPtrShaped() {
-			return s.newValue2(ssa.OpConvert, to, x, s.mem())
-		}
-
-		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
-
-		// CONVNOP closure
-		if to.Etype == TFUNC && from.IsPtrShaped() {
-			return v
-		}
-
-		// named <--> unnamed type or typed <--> untyped const
-		if from.Etype == to.Etype {
-			return v
-		}
-
-		// unsafe.Pointer <--> *T
-		if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
-			return v
-		}
-
-		// map <--> *hmap
-		if to.Etype == TMAP && from.IsPtr() &&
-			to.MapType().Hmap == from.Elem() {
-			return v
-		}
-
-		dowidth(from)
-		dowidth(to)
-		if from.Width != to.Width {
-			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
-			return nil
-		}
-		if etypesign(from.Etype) != etypesign(to.Etype) {
-			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
-			return nil
-		}
-
-		if instrumenting {
-			// These appear to be fine, but they fail the
-			// integer constraint below, so okay them here.
-			// Sample non-integer conversion: map[string]string -> *uint8
-			return v
-		}
-
-		if etypesign(from.Etype) == 0 {
-			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
-			return nil
-		}
-
-		// integer, same width, same sign
-		return v
-
-	case OCONV:
-		x := s.expr(n.Left)
-		ft := n.Left.Type // from type
-		tt := n.Type      // to type
-		if ft.IsBoolean() && tt.IsKind(TUINT8) {
-			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
-			return s.newValue1(ssa.OpCopy, n.Type, x)
-		}
-		if ft.IsInteger() && tt.IsInteger() {
-			var op ssa.Op
-			if tt.Size() == ft.Size() {
-				op = ssa.OpCopy
-			} else if tt.Size() < ft.Size() {
-				// truncation
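-				// The switch key encodes the source and destination byte
-				// sizes as 10*from+to; e.g. int64 -> int32 is case 84.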
-				switch 10*ft.Size() + tt.Size() {
-				case 21:
-					op = ssa.OpTrunc16to8
-				case 41:
-					op = ssa.OpTrunc32to8
-				case 42:
-					op = ssa.OpTrunc32to16
-				case 81:
-					op = ssa.OpTrunc64to8
-				case 82:
-					op = ssa.OpTrunc64to16
-				case 84:
-					op = ssa.OpTrunc64to32
-				default:
-					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
-				}
-			} else if ft.IsSigned() {
-				// sign extension
-				switch 10*ft.Size() + tt.Size() {
-				case 12:
-					op = ssa.OpSignExt8to16
-				case 14:
-					op = ssa.OpSignExt8to32
-				case 18:
-					op = ssa.OpSignExt8to64
-				case 24:
-					op = ssa.OpSignExt16to32
-				case 28:
-					op = ssa.OpSignExt16to64
-				case 48:
-					op = ssa.OpSignExt32to64
-				default:
-					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
-				}
-			} else {
-				// zero extension
-				switch 10*ft.Size() + tt.Size() {
-				case 12:
-					op = ssa.OpZeroExt8to16
-				case 14:
-					op = ssa.OpZeroExt8to32
-				case 18:
-					op = ssa.OpZeroExt8to64
-				case 24:
-					op = ssa.OpZeroExt16to32
-				case 28:
-					op = ssa.OpZeroExt16to64
-				case 48:
-					op = ssa.OpZeroExt32to64
-				default:
-					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
-				}
-			}
-			return s.newValue1(op, n.Type, x)
-		}
-
-		if ft.IsFloat() || tt.IsFloat() {
-			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
-			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
-				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
-					conv = conv1
-				}
-			}
-			if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
-				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
-					conv = conv1
-				}
-			}
-
-			if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
-				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
-					// tt is float32 or float64, and ft is also unsigned
-					if tt.Size() == 4 {
-						return s.uint32Tofloat32(n, x, ft, tt)
-					}
-					if tt.Size() == 8 {
-						return s.uint32Tofloat64(n, x, ft, tt)
-					}
-				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
-					// ft is float32 or float64, and tt is unsigned integer
-					if ft.Size() == 4 {
-						return s.float32ToUint32(n, x, ft, tt)
-					}
-					if ft.Size() == 8 {
-						return s.float64ToUint32(n, x, ft, tt)
-					}
-				}
-			}
-
-			if !ok {
-				s.Fatalf("weird float conversion %v -> %v", ft, tt)
-			}
-			op1, op2, it := conv.op1, conv.op2, conv.intermediateType
-
-			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
-				// normal case, not tripping over unsigned 64
-				if op1 == ssa.OpCopy {
-					if op2 == ssa.OpCopy {
-						return x
-					}
-					return s.newValueOrSfCall1(op2, n.Type, x)
-				}
-				if op2 == ssa.OpCopy {
-					return s.newValueOrSfCall1(op1, n.Type, x)
-				}
-				return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
-			}
-			// Tricky 64-bit unsigned cases.
-			if ft.IsInteger() {
-				// tt is float32 or float64, and ft is also unsigned
-				if tt.Size() == 4 {
-					return s.uint64Tofloat32(n, x, ft, tt)
-				}
-				if tt.Size() == 8 {
-					return s.uint64Tofloat64(n, x, ft, tt)
-				}
-				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
-			}
-			// ft is float32 or float64, and tt is unsigned integer
-			if ft.Size() == 4 {
-				return s.float32ToUint64(n, x, ft, tt)
-			}
-			if ft.Size() == 8 {
-				return s.float64ToUint64(n, x, ft, tt)
-			}
-			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
-			return nil
-		}
-
-		if ft.IsComplex() && tt.IsComplex() {
-			var op ssa.Op
-			if ft.Size() == tt.Size() {
-				switch ft.Size() {
-				case 8:
-					op = ssa.OpRound32F
-				case 16:
-					op = ssa.OpRound64F
-				default:
-					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
-				}
-			} else if ft.Size() == 8 && tt.Size() == 16 {
-				op = ssa.OpCvt32Fto64F
-			} else if ft.Size() == 16 && tt.Size() == 8 {
-				op = ssa.OpCvt64Fto32F
-			} else {
-				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
-			}
-			ftp := floatForComplex(ft)
-			ttp := floatForComplex(tt)
-			return s.newValue2(ssa.OpComplexMake, tt,
-				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
-				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
-		}
-
-		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
-		return nil
-
-	case ODOTTYPE:
-		res, _ := s.dottype(n, false)
-		return res
-
-	// binary ops
-	case OLT, OEQ, ONE, OLE, OGE, OGT:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Left.Type.IsComplex() {
-			pt := floatForComplex(n.Left.Type)
-			op := s.ssaOp(OEQ, pt)
-			r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
-			i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
-			c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
-			switch n.Op {
-			case OEQ:
-				return c
-			case ONE:
-				return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
-			default:
-				s.Fatalf("ordered complex compare %v", n.Op)
-			}
-		}
-
-		// Convert OGE and OGT into OLE and OLT.
-		op := n.Op
-		switch op {
-		case OGE:
-			op, a, b = OLE, b, a
-		case OGT:
-			op, a, b = OLT, b, a
-		}
-		if n.Left.Type.IsFloat() {
-			// float comparison
-			return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
-		}
-		// integer comparison
-		return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
-	case OMUL:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Type.IsComplex() {
-			mulop := ssa.OpMul64F
-			addop := ssa.OpAdd64F
-			subop := ssa.OpSub64F
-			pt := floatForComplex(n.Type) // Could be Float32 or Float64
-			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancellation error
-
-			areal := s.newValue1(ssa.OpComplexReal, pt, a)
-			breal := s.newValue1(ssa.OpComplexReal, pt, b)
-			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
-			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
-
-			if pt != wt { // Widen for calculation
-				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
-				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
-				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
-				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
-			}
-
-			xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
-			ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
-
-			if pt != wt { // Narrow to store back
-				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
-				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
-			}
-
-			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
-		}
-
-		if n.Type.IsFloat() {
-			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-		}
-
-		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-
-	case ODIV:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Type.IsComplex() {
-			// TODO this is not executed because the front-end substitutes a runtime call.
-			// That probably ought to change; with modest optimization the widen/narrow
-			// conversions could all be elided in larger expression trees.
-			mulop := ssa.OpMul64F
-			addop := ssa.OpAdd64F
-			subop := ssa.OpSub64F
-			divop := ssa.OpDiv64F
-			pt := floatForComplex(n.Type) // Could be Float32 or Float64
-			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancellation error
-
-			areal := s.newValue1(ssa.OpComplexReal, pt, a)
-			breal := s.newValue1(ssa.OpComplexReal, pt, b)
-			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
-			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
-
-			if pt != wt { // Widen for calculation
-				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
-				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
-				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
-				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
-			}
-
-			denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
-			xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
-			ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
-
-			// TODO not sure if this is best done in wide precision or narrow
-			// Double-rounding might be an issue.
-			// Note that the pre-SSA implementation does the entire calculation
-			// in wide format, so wide is compatible.
-			xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
-			ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
-
-			if pt != wt { // Narrow to store back
-				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
-				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
-			}
-			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
-		}
-		if n.Type.IsFloat() {
-			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-		}
-		return s.intDivide(n, a, b)
-	case OMOD:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		return s.intDivide(n, a, b)
-	case OADD, OSUB:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Type.IsComplex() {
-			pt := floatForComplex(n.Type)
-			op := s.ssaOp(n.Op, pt)
-			return s.newValue2(ssa.OpComplexMake, n.Type,
-				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
-				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
-		}
-		if n.Type.IsFloat() {
-			return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-		}
-		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-	case OAND, OOR, OXOR:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-	case OANDNOT:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
-		return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
-	case OLSH, ORSH:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		bt := b.Type
-		if bt.IsSigned() {
-			cmp := s.newValue2(s.ssaOp(OLE, bt), types.Types[TBOOL], s.zeroVal(bt), b)
-			s.check(cmp, panicshift)
-			bt = bt.ToUnsigned()
-		}
-		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
-	case OANDAND, OOROR:
-		// To implement OANDAND (and OOROR), we introduce a
-		// new temporary variable to hold the result. The
-		// variable is associated with the OANDAND node in the
-		// s.vars table (normally variables are only
-		// associated with ONAME nodes). We convert
-		//     A && B
-		// to
-		//     var = A
-		//     if var {
-		//         var = B
-		//     }
-		// Using var in the subsequent block introduces the
-		// necessary phi variable.
-		el := s.expr(n.Left)
-		s.vars[n] = el
-
-		b := s.endBlock()
-		b.Kind = ssa.BlockIf
-		b.SetControl(el)
-		// In theory, we should set b.Likely here based on context.
-		// However, gc only gives us likeliness hints
-		// in a single place, for plain OIF statements,
-		// and passing around context is finicky, so don't bother for now.
-
-		bRight := s.f.NewBlock(ssa.BlockPlain)
-		bResult := s.f.NewBlock(ssa.BlockPlain)
-		if n.Op == OANDAND {
-			b.AddEdgeTo(bRight)
-			b.AddEdgeTo(bResult)
-		} else if n.Op == OOROR {
-			b.AddEdgeTo(bResult)
-			b.AddEdgeTo(bRight)
-		}
-
-		s.startBlock(bRight)
-		er := s.expr(n.Right)
-		s.vars[n] = er
-
-		b = s.endBlock()
-		b.AddEdgeTo(bResult)
-
-		s.startBlock(bResult)
-		return s.variable(n, types.Types[TBOOL])
-	case OCOMPLEX:
-		r := s.expr(n.Left)
-		i := s.expr(n.Right)
-		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
-
-	// unary ops
-	case ONEG:
-		a := s.expr(n.Left)
-		if n.Type.IsComplex() {
-			tp := floatForComplex(n.Type)
-			negop := s.ssaOp(n.Op, tp)
-			return s.newValue2(ssa.OpComplexMake, n.Type,
-				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
-				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
-		}
-		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
-	case ONOT, OBITNOT:
-		a := s.expr(n.Left)
-		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
-	case OIMAG, OREAL:
-		a := s.expr(n.Left)
-		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
-	case OPLUS:
-		return s.expr(n.Left)
-
-	case OADDR:
-		return s.addr(n.Left)
-
-	case ORESULT:
-		if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
-			// Do the old thing
-			addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
-			return s.rawLoad(n.Type, addr)
-		}
-		which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
-		if which == -1 {
-			// Do the old thing // TODO: Panic instead.
-			addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
-			return s.rawLoad(n.Type, addr)
-		}
-		if canSSAType(n.Type) {
-			return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
-		} else {
-			addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
-			return s.rawLoad(n.Type, addr)
-		}
-
-	case ODEREF:
-		p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
-		return s.load(n.Type, p)
-
-	case ODOT:
-		if n.Left.Op == OSTRUCTLIT {
-			// All literals with nonzero fields have already been
-			// rewritten during walk. Any that remain are just T{}
-			// or equivalents. Use the zero value.
-			if !isZero(n.Left) {
-				s.Fatalf("literal with nonzero value in SSA: %v", n.Left)
-			}
-			return s.zeroVal(n.Type)
-		}
-		// If n is addressable and can't be represented in
-		// SSA, then load just the selected field. This
-		// prevents false memory dependencies in race/msan
-		// instrumentation.
-		if islvalue(n) && !s.canSSA(n) {
-			p := s.addr(n)
-			return s.load(n.Type, p)
-		}
-		v := s.expr(n.Left)
-		return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
-
-	case ODOTPTR:
-		p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
-		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
-		return s.load(n.Type, p)
-
-	case OINDEX:
-		switch {
-		case n.Left.Type.IsString():
-			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
-				// Replace "abc"[1] with 'b'.
-				// Delayed until now because "abc"[1] is not an ideal constant.
-				// See test/fixedbugs/issue11370.go.
-				return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
-			}
-			a := s.expr(n.Left)
-			i := s.expr(n.Right)
-			len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
-			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
-			ptrtyp := s.f.Config.Types.BytePtr
-			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
-			if Isconst(n.Right, CTINT) {
-				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
-			} else {
-				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
-			}
-			return s.load(types.Types[TUINT8], ptr)
-		case n.Left.Type.IsSlice():
-			p := s.addr(n)
-			return s.load(n.Left.Type.Elem(), p)
-		case n.Left.Type.IsArray():
-			if canSSAType(n.Left.Type) {
-				// SSA can handle arrays of length at most 1.
-				bound := n.Left.Type.NumElem()
-				a := s.expr(n.Left)
-				i := s.expr(n.Right)
-				if bound == 0 {
-					// Bounds check will never succeed.  Might as well
-					// use constants for the bounds check.
-					z := s.constInt(types.Types[TINT], 0)
-					s.boundsCheck(z, z, ssa.BoundsIndex, false)
-					// The return value won't be live; return junk.
-					return s.newValue0(ssa.OpUnknown, n.Type)
-				}
-				len := s.constInt(types.Types[TINT], bound)
-				s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
-				return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
-			}
-			p := s.addr(n)
-			return s.load(n.Left.Type.Elem(), p)
-		default:
-			s.Fatalf("bad type for index %v", n.Left.Type)
-			return nil
-		}
-
-	case OLEN, OCAP:
-		switch {
-		case n.Left.Type.IsSlice():
-			op := ssa.OpSliceLen
-			if n.Op == OCAP {
-				op = ssa.OpSliceCap
-			}
-			return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
-		case n.Left.Type.IsString(): // string; not reachable for OCAP
-			return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
-		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
-			return s.referenceTypeBuiltin(n, s.expr(n.Left))
-		default: // array
-			return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
-		}
-
-	case OSPTR:
-		a := s.expr(n.Left)
-		if n.Left.Type.IsSlice() {
-			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
-		} else {
-			return s.newValue1(ssa.OpStringPtr, n.Type, a)
-		}
-
-	case OITAB:
-		a := s.expr(n.Left)
-		return s.newValue1(ssa.OpITab, n.Type, a)
-
-	case OIDATA:
-		a := s.expr(n.Left)
-		return s.newValue1(ssa.OpIData, n.Type, a)
-
-	case OEFACE:
-		tab := s.expr(n.Left)
-		data := s.expr(n.Right)
-		return s.newValue2(ssa.OpIMake, n.Type, tab, data)
-
-	case OSLICEHEADER:
-		p := s.expr(n.Left)
-		l := s.expr(n.List.First())
-		c := s.expr(n.List.Second())
-		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
-
-	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
-		v := s.expr(n.Left)
-		var i, j, k *ssa.Value
-		low, high, max := n.SliceBounds()
-		if low != nil {
-			i = s.expr(low)
-		}
-		if high != nil {
-			j = s.expr(high)
-		}
-		if max != nil {
-			k = s.expr(max)
-		}
-		p, l, c := s.slice(v, i, j, k, n.Bounded())
-		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
-
-	case OSLICESTR:
-		v := s.expr(n.Left)
-		var i, j *ssa.Value
-		low, high, _ := n.SliceBounds()
-		if low != nil {
-			i = s.expr(low)
-		}
-		if high != nil {
-			j = s.expr(high)
-		}
-		p, l, _ := s.slice(v, i, j, nil, n.Bounded())
-		return s.newValue2(ssa.OpStringMake, n.Type, p, l)
-
-	case OCALLFUNC:
-		if isIntrinsicCall(n) {
-			return s.intrinsicCall(n)
-		}
-		fallthrough
-
-	case OCALLINTER, OCALLMETH:
-		return s.callResult(n, callNormal)
-
-	case OGETG:
-		return s.newValue1(ssa.OpGetG, n.Type, s.mem())
-
-	case OAPPEND:
-		return s.append(n, false)
-
-	case OSTRUCTLIT, OARRAYLIT:
-		// All literals with nonzero fields have already been
-		// rewritten during walk. Any that remain are just T{}
-		// or equivalents. Use the zero value.
-		if !isZero(n) {
-			s.Fatalf("literal with nonzero value in SSA: %v", n)
-		}
-		return s.zeroVal(n.Type)
-
-	case ONEWOBJ:
-		if n.Type.Elem().Size() == 0 {
-			return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
-		}
-		typ := s.expr(n.Left)
-		vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
-		return vv[0]
-
-	default:
-		s.Fatalf("unhandled expr %v", n.Op)
-		return nil
-	}
-}
-
-// append converts an OAPPEND node to SSA.
-// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
-// adds it to s, and returns the Value.
-// If inplace is true, it writes the result of the OAPPEND expression n
-// back to the slice being appended to, and returns nil.
-// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *Node, inplace bool) *ssa.Value {
-	// If inplace is false, process as expression "append(s, e1, e2, e3)":
-	//
-	// ptr, len, cap := s
-	// newlen := len + 3
-	// if newlen > cap {
-	//     ptr, len, cap = growslice(s, newlen)
-	//     newlen = len + 3 // recalculate to avoid a spill
-	// }
-	// // with write barriers, if needed:
-	// *(ptr+len) = e1
-	// *(ptr+len+1) = e2
-	// *(ptr+len+2) = e3
-	// return makeslice(ptr, newlen, cap)
-	//
-	//
-	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
-	//
-	// a := &s
-	// ptr, len, cap := s
-	// newlen := len + 3
-	// if uint(newlen) > uint(cap) {
-	//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
-	//    vardef(a)       // if necessary, advise liveness we are writing a new a
-	//    *a.cap = newcap // write before ptr to avoid a spill
-	//    *a.ptr = newptr // with write barrier
-	// }
-	// newlen = len + 3 // recalculate to avoid a spill
-	// *a.len = newlen
-	// // with write barriers, if needed:
-	// *(ptr+len) = e1
-	// *(ptr+len+1) = e2
-	// *(ptr+len+2) = e3
-
-	et := n.Type.Elem()
-	pt := types.NewPtr(et)
-
-	// Evaluate slice
-	sn := n.List.First() // the slice node is the first in the list
-
-	var slice, addr *ssa.Value
-	if inplace {
-		addr = s.addr(sn)
-		slice = s.load(n.Type, addr)
-	} else {
-		slice = s.expr(sn)
-	}
-
-	// Allocate new blocks
-	grow := s.f.NewBlock(ssa.BlockPlain)
-	assign := s.f.NewBlock(ssa.BlockPlain)
-
-	// Decide if we need to grow
-	nargs := int64(n.List.Len() - 1)
-	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
-	l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
-	c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
-	nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
-
-	cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl)
-	s.vars[&ptrVar] = p
-
-	if !inplace {
-		s.vars[&newlenVar] = nl
-		s.vars[&capVar] = c
-	} else {
-		s.vars[&lenVar] = l
-	}
-
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.Likely = ssa.BranchUnlikely
-	b.SetControl(cmp)
-	b.AddEdgeTo(grow)
-	b.AddEdgeTo(assign)
-
-	// Call growslice
-	s.startBlock(grow)
-	taddr := s.expr(n.Left)
-	r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
-
-	if inplace {
-		if sn.Op == ONAME && sn.Class() != PEXTERN {
-			// Tell liveness we're about to build a new slice
-			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
-		}
-		capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
-		s.store(types.Types[TINT], capaddr, r[2])
-		s.store(pt, addr, r[0])
-		// load the value we just stored to avoid having to spill it
-		s.vars[&ptrVar] = s.load(pt, addr)
-		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
-	} else {
-		s.vars[&ptrVar] = r[0]
-		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
-		s.vars[&capVar] = r[2]
-	}
-
-	b = s.endBlock()
-	b.AddEdgeTo(assign)
-
-	// assign new elements to slots
-	s.startBlock(assign)
-
-	if inplace {
-		l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
-		nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
-		lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
-		s.store(types.Types[TINT], lenaddr, nl)
-	}
-
-	// Evaluate args
-	type argRec struct {
-		// If store is true, we're appending the value v.
-		// If false, we're appending the value at *v.
-		v     *ssa.Value
-		store bool
-	}
-	args := make([]argRec, 0, nargs)
-	for _, n := range n.List.Slice()[1:] {
-		if canSSAType(n.Type) {
-			args = append(args, argRec{v: s.expr(n), store: true})
-		} else {
-			v := s.addr(n)
-			args = append(args, argRec{v: v})
-		}
-	}
-
-	p = s.variable(&ptrVar, pt) // generates phi for ptr
-	if !inplace {
-		nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
-		c = s.variable(&capVar, types.Types[TINT])     // generates phi for cap
-	}
-	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
-	for i, arg := range args {
-		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
-		if arg.store {
-			s.storeType(et, addr, arg.v, 0, true)
-		} else {
-			s.move(et, addr, arg.v)
-		}
-	}
-
-	delete(s.vars, &ptrVar)
-	if inplace {
-		delete(s.vars, &lenVar)
-		return nil
-	}
-	delete(s.vars, &newlenVar)
-	delete(s.vars, &capVar)
-	// make result
-	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
-}
-
-// condBranch evaluates the boolean expression cond and branches to yes
-// if cond is true and no if cond is false.
-// This function is intended to handle && and || better than just calling
-// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
-	switch cond.Op {
-	case OANDAND:
-		mid := s.f.NewBlock(ssa.BlockPlain)
-		s.stmtList(cond.Ninit)
-		s.condBranch(cond.Left, mid, no, max8(likely, 0))
-		s.startBlock(mid)
-		s.condBranch(cond.Right, yes, no, likely)
-		return
-		// Note: if likely==1, then both recursive calls pass 1.
-		// If likely==-1, then we don't have enough information to decide
-		// whether the first branch is likely or not. So we pass 0 for
-		// the likeliness of the first branch.
-		// TODO: have the frontend give us branch prediction hints for
-		// OANDAND and OOROR nodes (if it ever has such info).
-	case OOROR:
-		mid := s.f.NewBlock(ssa.BlockPlain)
-		s.stmtList(cond.Ninit)
-		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
-		s.startBlock(mid)
-		s.condBranch(cond.Right, yes, no, likely)
-		return
-		// Note: if likely==-1, then both recursive calls pass -1.
-		// If likely==1, then we don't have enough info to decide
-		// the likelihood of the first branch.
-	case ONOT:
-		s.stmtList(cond.Ninit)
-		s.condBranch(cond.Left, no, yes, -likely)
-		return
-	}
-	c := s.expr(cond)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(c)
-	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
-	b.AddEdgeTo(yes)
-	b.AddEdgeTo(no)
-}
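-
-// For example, condBranch(a && b, yes, no, likely) first branches on a to a
-// fresh mid block (or to no), then branches on b from mid, so each operand
-// gets its own conditional block.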
-
-type skipMask uint8
-
-const (
-	skipPtr skipMask = 1 << iota
-	skipLen
-	skipCap
-)
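-
-// For example, passing skipLen|skipCap tells assign that the length and
-// capacity fields of the destination slice have already been stored and
-// need not be written again.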
-
-// assign does left = right.
-// Right has already been evaluated to ssa, left has not.
-// If deref is true, then we do left = *right instead (and right has already been nil-checked).
-// If deref is true and right == nil, just do left = 0.
-// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
-	if left.Op == ONAME && left.isBlank() {
-		return
-	}
-	t := left.Type
-	dowidth(t)
-	if s.canSSA(left) {
-		if deref {
-			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
-		}
-		if left.Op == ODOT {
-			// We're assigning to a field of an ssa-able value.
-			// We need to build a new structure with the new value for the
-			// field we're assigning and the old values for the other fields.
-			// For instance:
-			//   type T struct {a, b, c int}
-			//   var x T
-			//   x.b = 5
-			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
-
-			// Grab information about the structure type.
-			t := left.Left.Type
-			nf := t.NumFields()
-			idx := fieldIdx(left)
-
-			// Grab old value of structure.
-			old := s.expr(left.Left)
-
-			// Make new structure.
-			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
-
-			// Add fields as args.
-			for i := 0; i < nf; i++ {
-				if i == idx {
-					new.AddArg(right)
-				} else {
-					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
-				}
-			}
-
-			// Recursively assign the new value we've made to the base of the dot op.
-			s.assign(left.Left, new, false, 0)
-			// TODO: do we need to update named values here?
-			return
-		}
-		if left.Op == OINDEX && left.Left.Type.IsArray() {
-			s.pushLine(left.Pos)
-			defer s.popLine()
-			// We're assigning to an element of an ssa-able array.
-			// a[i] = v
-			t := left.Left.Type
-			n := t.NumElem()
-
-			i := s.expr(left.Right) // index
-			if n == 0 {
-				// The bounds check must fail.  Might as well
-				// ignore the actual index and just use zeros.
-				z := s.constInt(types.Types[TINT], 0)
-				s.boundsCheck(z, z, ssa.BoundsIndex, false)
-				return
-			}
-			if n != 1 {
-				s.Fatalf("assigning to non-1-length array")
-			}
-			// Rewrite to a = [1]{v}
-			len := s.constInt(types.Types[TINT], 1)
-			s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
-			v := s.newValue1(ssa.OpArrayMake1, t, right)
-			s.assign(left.Left, v, false, 0)
-			return
-		}
-		// Update variable assignment.
-		s.vars[left] = right
-		s.addNamedValue(left, right)
-		return
-	}
-
-	// If this assignment clobbers an entire local variable, then emit
-	// OpVarDef so liveness analysis knows the variable is redefined.
-	if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 {
-		s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp())
-	}
-
-	// Left is not ssa-able. Compute its address.
-	addr := s.addr(left)
-	if isReflectHeaderDataField(left) {
-		// Package unsafe's documentation says storing pointers into
-		// reflect.SliceHeader and reflect.StringHeader's Data fields
-		// is valid, even though they have type uintptr (#19168).
-		// Mark it pointer type to signal the writebarrier pass to
-		// insert a write barrier.
-		t = types.Types[TUNSAFEPTR]
-	}
-	if deref {
-		// Treat as a mem->mem move.
-		if right == nil {
-			s.zero(t, addr)
-		} else {
-			s.move(t, addr, right)
-		}
-		return
-	}
-	// Treat as a store.
-	s.storeType(t, addr, right, skip, !left.IsAutoTmp())
-}
-
-// zeroVal returns the zero value for type t.
-func (s *state) zeroVal(t *types.Type) *ssa.Value {
-	switch {
-	case t.IsInteger():
-		switch t.Size() {
-		case 1:
-			return s.constInt8(t, 0)
-		case 2:
-			return s.constInt16(t, 0)
-		case 4:
-			return s.constInt32(t, 0)
-		case 8:
-			return s.constInt64(t, 0)
-		default:
-			s.Fatalf("bad sized integer type %v", t)
-		}
-	case t.IsFloat():
-		switch t.Size() {
-		case 4:
-			return s.constFloat32(t, 0)
-		case 8:
-			return s.constFloat64(t, 0)
-		default:
-			s.Fatalf("bad sized float type %v", t)
-		}
-	case t.IsComplex():
-		switch t.Size() {
-		case 8:
-			z := s.constFloat32(types.Types[TFLOAT32], 0)
-			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
-		case 16:
-			z := s.constFloat64(types.Types[TFLOAT64], 0)
-			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
-		default:
-			s.Fatalf("bad sized complex type %v", t)
-		}
-
-	case t.IsString():
-		return s.constEmptyString(t)
-	case t.IsPtrShaped():
-		return s.constNil(t)
-	case t.IsBoolean():
-		return s.constBool(false)
-	case t.IsInterface():
-		return s.constInterface(t)
-	case t.IsSlice():
-		return s.constSlice(t)
-	case t.IsStruct():
-		n := t.NumFields()
-		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
-		for i := 0; i < n; i++ {
-			v.AddArg(s.zeroVal(t.FieldType(i)))
-		}
-		return v
-	case t.IsArray():
-		switch t.NumElem() {
-		case 0:
-			return s.entryNewValue0(ssa.OpArrayMake0, t)
-		case 1:
-			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
-		}
-	}
-	s.Fatalf("zero for type %v not implemented", t)
-	return nil
-}
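-
-// For example, zeroVal for a complex64 builds an OpComplexMake of two
-// float32 zero constants, and a struct zero is built field by field from
-// the zero values of its field types.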
-
-type callKind int8
-
-const (
-	callNormal callKind = iota
-	callDefer
-	callDeferStack
-	callGo
-)
-
-type sfRtCallDef struct {
-	rtfn  *obj.LSym
-	rtype types.EType
-}
-
-var softFloatOps map[ssa.Op]sfRtCallDef
-
-func softfloatInit() {
-	// Some of these operations get transformed by sfcall: subtraction is
-	// lowered to an fadd with a negated second argument, Less/Leq swap
-	// their operands to reuse fgt/fge, and Neq negates the result of feq.
-	softFloatOps = map[ssa.Op]sfRtCallDef{
-		ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
-		ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
-		ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
-		ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
-		ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
-		ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
-		ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
-		ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
-
-		ssa.OpEq64F:   sfRtCallDef{sysfunc("feq64"), TBOOL},
-		ssa.OpEq32F:   sfRtCallDef{sysfunc("feq32"), TBOOL},
-		ssa.OpNeq64F:  sfRtCallDef{sysfunc("feq64"), TBOOL},
-		ssa.OpNeq32F:  sfRtCallDef{sysfunc("feq32"), TBOOL},
-		ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
-		ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
-		ssa.OpLeq64F:  sfRtCallDef{sysfunc("fge64"), TBOOL},
-		ssa.OpLeq32F:  sfRtCallDef{sysfunc("fge32"), TBOOL},
-
-		ssa.OpCvt32to32F:  sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
-		ssa.OpCvt32Fto32:  sfRtCallDef{sysfunc("f32toint32"), TINT32},
-		ssa.OpCvt64to32F:  sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
-		ssa.OpCvt32Fto64:  sfRtCallDef{sysfunc("f32toint64"), TINT64},
-		ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
-		ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
-		ssa.OpCvt32to64F:  sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
-		ssa.OpCvt64Fto32:  sfRtCallDef{sysfunc("f64toint32"), TINT32},
-		ssa.OpCvt64to64F:  sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
-		ssa.OpCvt64Fto64:  sfRtCallDef{sysfunc("f64toint64"), TINT64},
-		ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
-		ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
-		ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
-		ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
-	}
-}
-
-// TODO: do not emit sfcall if operation can be optimized to constant in later
-// opt phase
-func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
-	if callDef, ok := softFloatOps[op]; ok {
-		switch op {
-		case ssa.OpLess32F,
-			ssa.OpLess64F,
-			ssa.OpLeq32F,
-			ssa.OpLeq64F:
-			args[0], args[1] = args[1], args[0]
-		case ssa.OpSub32F,
-			ssa.OpSub64F:
-			args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
-		}
-
-		result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
-		if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
-			result = s.newValue1(ssa.OpNot, result.Type, result)
-		}
-		return result, true
-	}
-	return nil, false
-}
-
-var intrinsics map[intrinsicKey]intrinsicBuilder
-
-// An intrinsicBuilder converts a call node n into an ssa value that
-// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
-
-type intrinsicKey struct {
-	arch *sys.Arch
-	pkg  string
-	fn   string
-}
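-
-// An intrinsic is looked up by (architecture, package path, function name);
-// the add, addF, and alias helpers in init below populate the table.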
-
-func init() {
-	intrinsics = map[intrinsicKey]intrinsicBuilder{}
-
-	var all []*sys.Arch
-	var p4 []*sys.Arch
-	var p8 []*sys.Arch
-	var lwatomics []*sys.Arch
-	for _, a := range &sys.Archs {
-		all = append(all, a)
-		if a.PtrSize == 4 {
-			p4 = append(p4, a)
-		} else {
-			p8 = append(p8, a)
-		}
-		if a.Family != sys.PPC64 {
-			lwatomics = append(lwatomics, a)
-		}
-	}
-
-	// add adds the intrinsic b for pkg.fn for the given list of architectures.
-	add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
-		for _, a := range archs {
-			intrinsics[intrinsicKey{a, pkg, fn}] = b
-		}
-	}
-	// addF does the same as add but operates on architecture families.
-	addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
-		m := 0
-		for _, f := range archFamilies {
-			if f >= 32 {
-				panic("too many architecture families")
-			}
-			m |= 1 << uint(f)
-		}
-		for _, a := range all {
-			if m>>uint(a.Family)&1 != 0 {
-				intrinsics[intrinsicKey{a, pkg, fn}] = b
-			}
-		}
-	}
-	// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
-	alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
-		aliased := false
-		for _, a := range archs {
-			if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
-				intrinsics[intrinsicKey{a, pkg, fn}] = b
-				aliased = true
-			}
-		}
-		if !aliased {
-			panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
-		}
-	}
-
-	/******** runtime ********/
-	if !instrumenting {
-		add("runtime", "slicebytetostringtmp",
-			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-				// Compiler frontend optimizations emit OBYTES2STRTMP nodes
-				// for the backend instead of slicebytetostringtmp calls
-				// when not instrumenting.
-				return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1])
-			},
-			all...)
-	}
-	addF("runtime/internal/math", "MulUintptr",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if s.config.PtrSize == 4 {
-				return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
-			}
-			return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
-		},
-		sys.AMD64, sys.I386, sys.MIPS64)
-	add("runtime", "KeepAlive",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
-			s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
-			return nil
-		},
-		all...)
-	add("runtime", "getclosureptr",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
-		},
-		all...)
-
-	add("runtime", "getcallerpc",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
-		},
-		all...)
-
-	add("runtime", "getcallersp",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
-		},
-		all...)
-
-	/******** runtime/internal/sys ********/
-	addF("runtime/internal/sys", "Ctz32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
-	addF("runtime/internal/sys", "Ctz64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
-	addF("runtime/internal/sys", "Bswap32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
-	addF("runtime/internal/sys", "Bswap64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
-
-	/******** runtime/internal/atomic ********/
-	addF("runtime/internal/atomic", "Load",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Load8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Load64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "LoadAcq",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
-		},
-		sys.PPC64, sys.S390X)
-	addF("runtime/internal/atomic", "LoadAcq64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
-		},
-		sys.PPC64)
-	addF("runtime/internal/atomic", "Loadp",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-
-	addF("runtime/internal/atomic", "Store",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Store8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Store64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "StorepNoWB",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "StoreRel",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.PPC64, sys.S390X)
-	addF("runtime/internal/atomic", "StoreRel64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.PPC64)
-
-	addF("runtime/internal/atomic", "Xchg",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
-		},
-		sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Xchg64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
-		},
-		sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-
-	type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType)
-
-	makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
-
-		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			// The target's atomics feature is detected dynamically.
-			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
-			v := s.load(types.Types[TBOOL], addr)
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(v)
-			bTrue := s.f.NewBlock(ssa.BlockPlain)
-			bFalse := s.f.NewBlock(ssa.BlockPlain)
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bTrue)
-			b.AddEdgeTo(bFalse)
-			b.Likely = ssa.BranchLikely
-
-			// We have atomic instructions - use them directly.
-			s.startBlock(bTrue)
-			emit(s, n, args, op1, typ)
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Use the original instruction sequence.
-			s.startBlock(bFalse)
-			emit(s, n, args, op0, typ)
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Merge results.
-			s.startBlock(bEnd)
-			if rtyp == TNIL {
-				return nil
-			}
-			return s.variable(n, types.Types[rtyp])
-		}
-	}
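-	// A rough Go-level sketch of the code the guarded builder above emits
-	// (arm64HasATOMICS is the runtime's dynamically detected feature flag):
-	//
-	//	if arm64HasATOMICS {
-	//		result = op1(args) // Variant op, e.g. an LSE atomic instruction
-	//	} else {
-	//		result = op0(args) // original LL/SC sequence
-	//	}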
-
-	atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
-		v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-		s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
-	}
-	addF("runtime/internal/atomic", "Xchg",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
-		sys.ARM64)
-	addF("runtime/internal/atomic", "Xchg64",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
-		sys.ARM64)
-
-	addF("runtime/internal/atomic", "Xadd",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
-		},
-		sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Xadd64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
-		},
-		sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-
-	addF("runtime/internal/atomic", "Xadd",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
-		sys.ARM64)
-	addF("runtime/internal/atomic", "Xadd64",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
-		sys.ARM64)
-
-	addF("runtime/internal/atomic", "Cas",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
-		},
-		sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "Cas64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
-		},
-		sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
-	addF("runtime/internal/atomic", "CasRel",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
-		},
-		sys.PPC64)
-
-	atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
-		v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
-		s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
-	}
-
-	addF("runtime/internal/atomic", "Cas",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64),
-		sys.ARM64)
-	addF("runtime/internal/atomic", "Cas64",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64),
-		sys.ARM64)
-
-	addF("runtime/internal/atomic", "And8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
-	addF("runtime/internal/atomic", "And",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
-	addF("runtime/internal/atomic", "Or8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
-	addF("runtime/internal/atomic", "Or",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
-			return nil
-		},
-		sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
-
-	atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
-		s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
-	}
-
-	addF("runtime/internal/atomic", "And8",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
-		sys.ARM64)
-	addF("runtime/internal/atomic", "And",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
-		sys.ARM64)
-	addF("runtime/internal/atomic", "Or8",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
-		sys.ARM64)
-	addF("runtime/internal/atomic", "Or",
-		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
-		sys.ARM64)
-
-	alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
-	alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
-	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
-	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
-	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
-	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
-	alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
-	alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
-	alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
-	alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
-	alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
-	alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
-	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
-	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
-	alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
-	alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
-	alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
-	alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
-	alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
-	alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
-	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
-	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
-	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
-	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
-	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
-	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
-	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
-	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
-	alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
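-	// For example, Loaduintptr resolves per pointer size: to the 32-bit
-	// Load on the p4 architectures and to Load64 on the p8 ones, so a
-	// single runtime function name maps to the right-width atomic op.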
-
-	/******** math ********/
-	addF("math", "Sqrt",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
-		},
-		sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
-	addF("math", "Trunc",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
-		},
-		sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
-	addF("math", "Ceil",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
-		},
-		sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
-	addF("math", "Floor",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
-		},
-		sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
-	addF("math", "Round",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
-		},
-		sys.ARM64, sys.PPC64, sys.S390X)
-	addF("math", "RoundToEven",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
-		},
-		sys.ARM64, sys.S390X, sys.Wasm)
-	addF("math", "Abs",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
-		},
-		sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
-	addF("math", "Copysign",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
-		},
-		sys.PPC64, sys.Wasm)
-	addF("math", "FMA",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
-		},
-		sys.ARM64, sys.PPC64, sys.S390X)
-	addF("math", "FMA",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if !s.config.UseFMA {
-				s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
-				return s.variable(n, types.Types[TFLOAT64])
-			}
-			v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA)
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(v)
-			bTrue := s.f.NewBlock(ssa.BlockPlain)
-			bFalse := s.f.NewBlock(ssa.BlockPlain)
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bTrue)
-			b.AddEdgeTo(bFalse)
-			b.Likely = ssa.BranchLikely // >= haswell cpus are common
-
-			// We have the intrinsic - use it directly.
-			s.startBlock(bTrue)
-			s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Call the pure Go version.
-			s.startBlock(bFalse)
-			s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Merge results.
-			s.startBlock(bEnd)
-			return s.variable(n, types.Types[TFLOAT64])
-		},
-		sys.AMD64)
-	addF("math", "FMA",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if !s.config.UseFMA {
-				s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
-				return s.variable(n, types.Types[TFLOAT64])
-			}
-			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb)
-			v := s.load(types.Types[TBOOL], addr)
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(v)
-			bTrue := s.f.NewBlock(ssa.BlockPlain)
-			bFalse := s.f.NewBlock(ssa.BlockPlain)
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bTrue)
-			b.AddEdgeTo(bFalse)
-			b.Likely = ssa.BranchLikely
-
-			// We have the intrinsic - use it directly.
-			s.startBlock(bTrue)
-			s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Call the pure Go version.
-			s.startBlock(bFalse)
-			s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Merge results.
-			s.startBlock(bEnd)
-			return s.variable(n, types.Types[TFLOAT64])
-		},
-		sys.ARM)
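-	// Both guarded FMA builders above emit the same shape of code; a rough
-	// Go-level sketch (hasFMA stands for the relevant CPU-feature check):
-	//
-	//	if hasFMA {
-	//		return fma(x, y, z) // single fused instruction, one rounding
-	//	}
-	//	return math.FMA(x, y, z) // pure Go fallback via callNormal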
-
-	makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasSSE41)
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(v)
-			bTrue := s.f.NewBlock(ssa.BlockPlain)
-			bFalse := s.f.NewBlock(ssa.BlockPlain)
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bTrue)
-			b.AddEdgeTo(bFalse)
-			b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
-
-			// We have the intrinsic - use it directly.
-			s.startBlock(bTrue)
-			s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Call the pure Go version.
-			s.startBlock(bFalse)
-			s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Merge results.
-			s.startBlock(bEnd)
-			return s.variable(n, types.Types[TFLOAT64])
-		}
-	}
-	addF("math", "RoundToEven",
-		makeRoundAMD64(ssa.OpRoundToEven),
-		sys.AMD64)
-	addF("math", "Floor",
-		makeRoundAMD64(ssa.OpFloor),
-		sys.AMD64)
-	addF("math", "Ceil",
-		makeRoundAMD64(ssa.OpCeil),
-		sys.AMD64)
-	addF("math", "Trunc",
-		makeRoundAMD64(ssa.OpTrunc),
-		sys.AMD64)
-
-	/******** math/bits ********/
-	addF("math/bits", "TrailingZeros64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	addF("math/bits", "TrailingZeros32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	addF("math/bits", "TrailingZeros16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
-			c := s.constInt32(types.Types[TUINT32], 1<<16)
-			y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
-			return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
-		},
-		sys.MIPS)
-	addF("math/bits", "TrailingZeros16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
-	addF("math/bits", "TrailingZeros16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
-			c := s.constInt64(types.Types[TUINT64], 1<<16)
-			y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
-			return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
-		},
-		sys.S390X, sys.PPC64)
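-	// The OR with a constant above makes the result well defined for a
-	// zero input: Ctz(x | 1<<16) is at most 16, and for x == 0 it is
-	// exactly 16, matching bits.TrailingZeros16(0) == 16. For example:
-	//
-	//	x := uint16(0)
-	//	y := uint64(x) | 1<<16 // 0x10000
-	//	// Ctz64(0x10000) == 16
-	//
-	// The same trick with 1<<8 appears below for TrailingZeros8.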
-	addF("math/bits", "TrailingZeros8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
-			c := s.constInt32(types.Types[TUINT32], 1<<8)
-			y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
-			return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
-		},
-		sys.MIPS)
-	addF("math/bits", "TrailingZeros8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
-	addF("math/bits", "TrailingZeros8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
-			c := s.constInt64(types.Types[TUINT64], 1<<8)
-			y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
-			return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
-		},
-		sys.S390X)
-	alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
-	alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
-	// ReverseBytes inlines correctly, no need to intrinsify it.
-	// ReverseBytes16 lowers to a rotate, no need for anything special here.
-	addF("math/bits", "Len64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	addF("math/bits", "Len32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64)
-	addF("math/bits", "Len32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if s.config.PtrSize == 4 {
-				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
-			}
-			x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
-			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
-		},
-		sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	addF("math/bits", "Len16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if s.config.PtrSize == 4 {
-				x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
-				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
-			}
-			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
-			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
-		},
-		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	addF("math/bits", "Len16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0])
-		},
-		sys.AMD64)
-	addF("math/bits", "Len8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if s.config.PtrSize == 4 {
-				x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
-				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
-			}
-			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
-			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
-		},
-		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	addF("math/bits", "Len8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0])
-		},
-		sys.AMD64)
-	addF("math/bits", "Len",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if s.config.PtrSize == 4 {
-				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
-			}
-			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
-		},
-		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
-	// LeadingZeros is handled because it trivially calls Len.
-	addF("math/bits", "Reverse64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
-		},
-		sys.ARM64)
-	addF("math/bits", "Reverse32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
-		},
-		sys.ARM64)
-	addF("math/bits", "Reverse16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
-		},
-		sys.ARM64)
-	addF("math/bits", "Reverse8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
-		},
-		sys.ARM64)
-	addF("math/bits", "Reverse",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			if s.config.PtrSize == 4 {
-				return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
-			}
-			return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
-		},
-		sys.ARM64)
-	addF("math/bits", "RotateLeft8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1])
-		},
-		sys.AMD64)
-	addF("math/bits", "RotateLeft16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1])
-		},
-		sys.AMD64)
-	addF("math/bits", "RotateLeft32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
-		},
-		sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
-	addF("math/bits", "RotateLeft64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
-		},
-		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
-	alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
-
-	makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasPOPCNT)
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(v)
-			bTrue := s.f.NewBlock(ssa.BlockPlain)
-			bFalse := s.f.NewBlock(ssa.BlockPlain)
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bTrue)
-			b.AddEdgeTo(bFalse)
-			b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
-
-			// We have the intrinsic - use it directly.
-			s.startBlock(bTrue)
-			op := op64
-			if s.config.PtrSize == 4 {
-				op = op32
-			}
-			s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Call the pure Go version.
-			s.startBlock(bFalse)
-			s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
-			s.endBlock().AddEdgeTo(bEnd)
-
-			// Merge results.
-			s.startBlock(bEnd)
-			return s.variable(n, types.Types[TINT])
-		}
-	}
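-	// Sketch of the guard emitted above: use POPCNT when the CPU has it,
-	// otherwise fall back to an ordinary call of the pure Go function:
-	//
-	//	if x86HasPOPCNT {
-	//		result = POPCNT(arg) // hardware instruction
-	//	} else {
-	//		result = bits.OnesCountN(arg) // normal call (N = 16, 32, or 64)
-	//	}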
-	addF("math/bits", "OnesCount64",
-		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
-		sys.AMD64)
-	addF("math/bits", "OnesCount64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
-		},
-		sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
-	addF("math/bits", "OnesCount32",
-		makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
-		sys.AMD64)
-	addF("math/bits", "OnesCount32",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
-		},
-		sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
-	addF("math/bits", "OnesCount16",
-		makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
-		sys.AMD64)
-	addF("math/bits", "OnesCount16",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
-		},
-		sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
-	addF("math/bits", "OnesCount8",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
-		},
-		sys.S390X, sys.PPC64, sys.Wasm)
-	addF("math/bits", "OnesCount",
-		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
-		sys.AMD64)
-	addF("math/bits", "Mul64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
-		},
-		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
-	alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
-	addF("math/bits", "Add64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
-		},
-		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
-	alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
-	addF("math/bits", "Sub64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
-		},
-		sys.AMD64, sys.ARM64, sys.S390X)
-	alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
-	addF("math/bits", "Div64",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			// Check for divide-by-zero and overflow, and panic with the appropriate message.
-			cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64]))
-			s.check(cmpZero, panicdivide)
-			cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2])
-			s.check(cmpOverflow, panicoverflow)
-			return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
-		},
-		sys.AMD64)
-	alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
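-	// The two checks above encode the precondition of bits.Div64(hi, lo, y):
-	// y must be nonzero (else panicdivide), and hi must be less than y or
-	// the 128-by-64-bit quotient would not fit in 64 bits (panicoverflow).
-	// For example, Div64(1, 0, 1) has hi == y and panics with overflow.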
-
-	alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
-	alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
-	alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
-	alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
-	alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
-	alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
-
-	/******** sync/atomic ********/
-
-	// Note: these are disabled by flag_race in findIntrinsic below.
-	alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
-	alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
-	alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
-	alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
-	alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
-	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
-	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
-
-	alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
-	alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
-	// Note: not StorePointer, that needs a write barrier.  Same below for {CompareAnd}Swap.
-	alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
-	alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
-	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
-	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
-
-	alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
-	alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
-	alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
-	alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
-	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
-	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
-
-	alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
-	alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
-	alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
-	alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
-	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
-	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
-
-	alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
-	alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
-	alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
-	alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
-	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
-	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
-
-	/******** math/big ********/
-	add("math/big", "mulWW",
-		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
-		},
-		sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
-}
-
-// findIntrinsic returns a function that builds the SSA equivalent of the
-// function identified by the symbol sym. If sym is not an intrinsic call, it returns nil.
-func findIntrinsic(sym *types.Sym) intrinsicBuilder {
-	if sym == nil || sym.Pkg == nil {
-		return nil
-	}
-	pkg := sym.Pkg.Path
-	if sym.Pkg == localpkg {
-		pkg = myimportpath
-	}
-	if flag_race && pkg == "sync/atomic" {
-		// The race detector needs to be able to intercept these calls.
-		// We can't intrinsify them.
-		return nil
-	}
-	// Skip intrinsifying math functions (which may contain hard-float
-	// instructions) when building for soft-float targets.
-	if thearch.SoftFloat && pkg == "math" {
-		return nil
-	}
-
-	fn := sym.Name
-	if ssa.IntrinsicsDisable {
-		if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
-			// These runtime functions don't have definitions; they must be intrinsics.
-		} else {
-			return nil
-		}
-	}
-	return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
-}
-
-func isIntrinsicCall(n *Node) bool {
-	if n == nil || n.Left == nil {
-		return false
-	}
-	return findIntrinsic(n.Left.Sym) != nil
-}
-
-// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *Node) *ssa.Value {
-	v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
-	if ssa.IntrinsicsDebug > 0 {
-		x := v
-		if x == nil {
-			x = s.mem()
-		}
-		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
-			x = x.Args[0]
-		}
-		Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
-	}
-	return v
-}
-
-// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
-	// Construct map of temps; see comments in s.call about the structure of n.
-	temps := map[*Node]*ssa.Value{}
-	for _, a := range n.List.Slice() {
-		if a.Op != OAS {
-			s.Fatalf("non-assignment as a temp function argument %v", a.Op)
-		}
-		l, r := a.Left, a.Right
-		if l.Op != ONAME {
-			s.Fatalf("non-ONAME temp function argument %v", a.Op)
-		}
-		// Evaluate and store to "temporary".
-		// Walk ensures these temporaries are dead outside of n.
-		temps[l] = s.expr(r)
-	}
-	args := make([]*ssa.Value, n.Rlist.Len())
-	for i, n := range n.Rlist.Slice() {
-		// Store a value to an argument slot.
-		if x, ok := temps[n]; ok {
-			// This is a previously computed temporary.
-			args[i] = x
-			continue
-		}
-		// This is an explicit value; evaluate it.
-		args[i] = s.expr(n)
-	}
-	return args
-}
-
-// openDeferRecord adds code to evaluate and store the args for an open-coded defer
-// call, and records info about the defer, so we can generate proper code on the
-// exit paths. n is the sub-node of the defer node that is the actual function
-// call. We will also record funcdata information on where the args are stored
-// (as well as the deferBits variable), and this will enable us to run the proper
-// defer calls during panics.
-func (s *state) openDeferRecord(n *Node) {
-	// Do any needed expression evaluation for the args (including the
-	// receiver, if any). This may be evaluating something like 'autotmp_3 =
-	// once.mutex'. Such a statement will create a mapping in s.vars[] from
-	// the autotmp name to the evaluated SSA arg value, but won't do any
-	// stores to the stack.
-	s.stmtList(n.List)
-
-	var args []*ssa.Value
-	var argNodes []*Node
-
-	opendefer := &openDeferInfo{
-		n: n,
-	}
-	fn := n.Left
-	if n.Op == OCALLFUNC {
-		// We must always store the function value in a stack slot for the
-		// runtime panic code to use. But in the defer exit code, we will
-		// call the function directly if it is a static function.
-		closureVal := s.expr(fn)
-		closure := s.openDeferSave(nil, fn.Type, closureVal)
-		opendefer.closureNode = closure.Aux.(*Node)
-		if !(fn.Op == ONAME && fn.Class() == PFUNC) {
-			opendefer.closure = closure
-		}
-	} else if n.Op == OCALLMETH {
-		if fn.Op != ODOTMETH {
-			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
-		}
-		closureVal := s.getMethodClosure(fn)
-		// We must always store the function value in a stack slot for the
-		// runtime panic code to use. But in the defer exit code, we will
-		// call the method directly.
-		closure := s.openDeferSave(nil, fn.Type, closureVal)
-		opendefer.closureNode = closure.Aux.(*Node)
-	} else {
-		if fn.Op != ODOTINTER {
-			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
-		}
-		closure, rcvr := s.getClosureAndRcvr(fn)
-		opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
-		// Important to get the receiver type correct, so it is recognized
-		// as a pointer for GC purposes.
-		opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr)
-		opendefer.closureNode = opendefer.closure.Aux.(*Node)
-		opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node)
-	}
-	for _, argn := range n.Rlist.Slice() {
-		var v *ssa.Value
-		if canSSAType(argn.Type) {
-			v = s.openDeferSave(nil, argn.Type, s.expr(argn))
-		} else {
-			v = s.openDeferSave(argn, argn.Type, nil)
-		}
-		args = append(args, v)
-		argNodes = append(argNodes, v.Aux.(*Node))
-	}
-	opendefer.argVals = args
-	opendefer.argNodes = argNodes
-	index := len(s.openDefers)
-	s.openDefers = append(s.openDefers, opendefer)
-
-	// Update deferBits only after evaluation and storage to stack of
-	// args/receiver/interface is successful.
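-	// The i'th recorded defer sets bit i; for example, after
-	//
-	//	defer f() // index 0, deferBits |= 1
-	//	defer g() // index 1, deferBits |= 2
-	//
-	// deferBits is 0b11, and openDeferExit tests the bits in reverse order.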
-	bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index))
-	newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(&deferBitsVar, types.Types[TUINT8]), bitvalue)
-	s.vars[&deferBitsVar] = newDeferBits
-	s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits)
-}
-
-// openDeferSave generates SSA nodes to store a value (with type t) for an
-// open-coded defer at an explicit autotmp location on the stack, so it can be
-// reloaded and used for the appropriate call on exit. If type t is SSAable, then
-// val must be non-nil (and n should be nil) and val is the value to be stored. If
-// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
-// evaluated (via s.addr() below) to get the value that is to be stored. The
-// function returns an SSA value representing a pointer to the autotmp location.
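-// For example, an int argument is stored directly into its autotmp slot,
-// while a non-SSAable struct argument is copied there with s.move from its address.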
-func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value {
-	canSSA := canSSAType(t)
-	var pos src.XPos
-	if canSSA {
-		pos = val.Pos
-	} else {
-		pos = n.Pos
-	}
-	argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
-	argTemp.Name.SetOpenDeferSlot(true)
-	var addrArgTemp *ssa.Value
-	// Use OpVarLive to make sure stack slots for the args, etc. are not
-	// removed by dead-store elimination.
-	if s.curBlock.ID != s.f.Entry.ID {
-		// Force the argtmp storing this defer function/receiver/arg to be
-		// declared in the entry block, so that it will be live for the
-		// defer exit code (which will actually access it only if the
-		// associated defer call has been activated).
-		s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
-		s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
-		addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][&memVar])
-	} else {
-		// Special case if we're still in the entry block. We can't use
-		// the above code, since s.defvars[s.f.Entry.ID] isn't defined
-		// until we end the entry block with s.endBlock().
-		s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
-		s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
-		addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
-	}
-	if t.HasPointers() {
-		// Since we may use this argTemp during exit depending on the
-		// deferBits, we must define it unconditionally on entry.
-		// Therefore, we must make sure it is zeroed out in the entry
-		// block if it contains pointers, else GC may wrongly follow an
-		// uninitialized pointer value.
-		argTemp.Name.SetNeedzero(true)
-	}
-	if !canSSA {
-		a := s.addr(n)
-		s.move(t, addrArgTemp, a)
-		return addrArgTemp
-	}
-	// We are storing to the stack, hence we can avoid the full checks in
-	// storeType() (no write barrier) and do a simple store().
-	s.store(t, addrArgTemp, val)
-	return addrArgTemp
-}
-
-// openDeferExit generates SSA for processing all the open-coded defers at exit.
-// The code involves loading deferBits, and checking each of the bits to see if
-// the corresponding defer statement was executed. For each bit that is turned
-// on, the associated defer call is made.
-func (s *state) openDeferExit() {
-	deferExit := s.f.NewBlock(ssa.BlockPlain)
-	s.endBlock().AddEdgeTo(deferExit)
-	s.startBlock(deferExit)
-	s.lastDeferExit = deferExit
-	s.lastDeferCount = len(s.openDefers)
-	zeroval := s.constInt8(types.Types[TUINT8], 0)
-	testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
-	// Test for and run defers in reverse order
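-	// In effect, for each defer i from last to first:
-	//
-	//	if deferBits&(1<<i) != 0 {
-	//		deferBits &^= 1 << i // clear first, in case the call panics
-	//		// invoke the saved closure/receiver/args for defer i
-	//	}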
-	for i := len(s.openDefers) - 1; i >= 0; i-- {
-		r := s.openDefers[i]
-		bCond := s.f.NewBlock(ssa.BlockPlain)
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-
-		deferBits := s.variable(&deferBitsVar, types.Types[TUINT8])
-		// Generate code to check if the bit associated with the current
-		// defer is set.
-		bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i))
-		andval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, bitval)
-		eqVal := s.newValue2(ssa.OpEq8, types.Types[TBOOL], andval, zeroval)
-		b := s.endBlock()
-		b.Kind = ssa.BlockIf
-		b.SetControl(eqVal)
-		b.AddEdgeTo(bEnd)
-		b.AddEdgeTo(bCond)
-		bCond.AddEdgeTo(bEnd)
-		s.startBlock(bCond)
-
-		// Clear this bit in deferBits and force a store back to the stack, so
-		// we will not try to re-run this defer call if it panics.
-		nbitval := s.newValue1(ssa.OpCom8, types.Types[TUINT8], bitval)
-		maskedval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, nbitval)
-		s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval)
-		// Use this value for following tests, so we keep previous
-		// bits cleared.
-		s.vars[&deferBitsVar] = maskedval
-
-		// Generate code to call the function call of the defer, using the
-		// closure/receiver/args that were stored in argtmps at the point
-		// of the defer statement.
-		argStart := Ctxt.FixedFrameSize()
-		fn := r.n.Left
-		stksize := fn.Type.ArgWidth()
-		var ACArgs []ssa.Param
-		var ACResults []ssa.Param
-		var callArgs []*ssa.Value
-		if r.rcvr != nil {
-			// rcvr in case of OCALLINTER
-			v := s.load(r.rcvr.Type.Elem(), r.rcvr)
-			addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
-			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
-			if testLateExpansion {
-				callArgs = append(callArgs, v)
-			} else {
-				s.store(types.Types[TUINTPTR], addr, v)
-			}
-		}
-		for j, argAddrVal := range r.argVals {
-			f := getParam(r.n, j)
-			pt := types.NewPtr(f.Type)
-			ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
-			if testLateExpansion {
-				var a *ssa.Value
-				if !canSSAType(f.Type) {
-					a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
-				} else {
-					a = s.load(f.Type, argAddrVal)
-				}
-				callArgs = append(callArgs, a)
-			} else {
-				addr := s.constOffPtrSP(pt, argStart+f.Offset)
-				if !canSSAType(f.Type) {
-					s.move(f.Type, addr, argAddrVal)
-				} else {
-					argVal := s.load(f.Type, argAddrVal)
-					s.storeType(f.Type, addr, argVal, 0, false)
-				}
-			}
-		}
-		var call *ssa.Value
-		if r.closure != nil {
-			v := s.load(r.closure.Type.Elem(), r.closure)
-			s.maybeNilCheckClosure(v, callDefer)
-			codeptr := s.rawLoad(types.Types[TUINTPTR], v)
-			aux := ssa.ClosureAuxCall(ACArgs, ACResults)
-			if testLateExpansion {
-				callArgs = append(callArgs, s.mem())
-				call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
-				call.AddArgs(callArgs...)
-			} else {
-				call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
-			}
-		} else {
-			aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
-			if testLateExpansion {
-				callArgs = append(callArgs, s.mem())
-				call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
-				call.AddArgs(callArgs...)
-			} else {
-				// Do a static call if the original call was a static function or method
-				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
-			}
-		}
-		call.AuxInt = stksize
-		if testLateExpansion {
-			s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
-		} else {
-			s.vars[&memVar] = call
-		}
-		// Make sure that the stack slots with pointers are kept live
-		// through the call (which is a preemption point). Also, we will
-		// use the first call of the last defer exit to compute liveness
-		// for the deferreturn, so we want all stack slots to be live.
-		if r.closureNode != nil {
-			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
-		}
-		if r.rcvrNode != nil {
-			if r.rcvrNode.Type.HasPointers() {
-				s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
-			}
-		}
-		for _, argNode := range r.argNodes {
-			if argNode.Type.HasPointers() {
-				s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
-			}
-		}
-
-		s.endBlock()
-		s.startBlock(bEnd)
-	}
-}
-
-func (s *state) callResult(n *Node, k callKind) *ssa.Value {
-	return s.call(n, k, false)
-}
-
-func (s *state) callAddr(n *Node, k callKind) *ssa.Value {
-	return s.call(n, k, true)
-}
-
-// call calls the function n using the specified call kind.
-// It returns the address of the return value (or nil if none).
-func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
-	s.prevCall = nil
-	var sym *types.Sym     // target symbol (if static)
-	var closure *ssa.Value // ptr to closure to run (if dynamic)
-	var codeptr *ssa.Value // ptr to target code (if dynamic)
-	var rcvr *ssa.Value    // receiver to set
-	fn := n.Left
-	var ACArgs []ssa.Param
-	var ACResults []ssa.Param
-	var callArgs []*ssa.Value
-	res := n.Left.Type.Results()
-	if k == callNormal {
-		nf := res.NumFields()
-		for i := 0; i < nf; i++ {
-			fp := res.Field(i)
-			ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())})
-		}
-	}
-
-	testLateExpansion := false
-
-	switch n.Op {
-	case OCALLFUNC:
-		testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
-		if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
-			sym = fn.Sym
-			break
-		}
-		closure = s.expr(fn)
-		if k != callDefer && k != callDeferStack {
-			// A deferred nil function needs to panic when the function is invoked,
-			// not at the point of the defer statement.
-			s.maybeNilCheckClosure(closure, k)
-		}
-	case OCALLMETH:
-		if fn.Op != ODOTMETH {
-			s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
-		}
-		testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
-		if k == callNormal {
-			sym = fn.Sym
-			break
-		}
-		closure = s.getMethodClosure(fn)
-		// Note: receiver is already present in n.Rlist, so we don't
-		// want to set it here.
-	case OCALLINTER:
-		if fn.Op != ODOTINTER {
-			s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
-		}
-		testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
-		var iclosure *ssa.Value
-		iclosure, rcvr = s.getClosureAndRcvr(fn)
-		if k == callNormal {
-			codeptr = s.load(types.Types[TUINTPTR], iclosure)
-		} else {
-			closure = iclosure
-		}
-	}
-	dowidth(fn.Type)
-	stksize := fn.Type.ArgWidth() // includes receiver, args, and results
-
-	// Run all assignments of temps.
-	// The temps are introduced to avoid overwriting argument
-	// slots when arguments themselves require function calls.
-	s.stmtList(n.List)
-
-	var call *ssa.Value
-	if k == callDeferStack {
-		testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
-		// Make a defer struct d on the stack.
-		t := deferstruct(stksize)
-		d := tempAt(n.Pos, s.curfn, t)
-
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
-		addr := s.addr(d)
-
-		// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
-		// 0: siz
-		s.store(types.Types[TUINT32],
-			s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
-			s.constInt32(types.Types[TUINT32], int32(stksize)))
-		// 1: started, set in deferprocStack
-		// 2: heap, set in deferprocStack
-		// 3: openDefer
-		// 4: sp, set in deferprocStack
-		// 5: pc, set in deferprocStack
-		// 6: fn
-		s.store(closure.Type,
-			s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
-			closure)
-		// 7: panic, set in deferprocStack
-		// 8: link, set in deferprocStack
-		// 9: framepc
-		// 10: varp
-		// 11: fd
-
-		// Then, store all the arguments of the defer call.
-		ft := fn.Type
-		off := t.FieldOff(12)
-		args := n.Rlist.Slice()
-
-		// Set receiver (for interface calls). Always a pointer.
-		if rcvr != nil {
-			p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
-			s.store(types.Types[TUINTPTR], p, rcvr)
-		}
-		// Set receiver (for method calls).
-		if n.Op == OCALLMETH {
-			f := ft.Recv()
-			s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
-			args = args[1:]
-		}
-		// Set other args.
-		for _, f := range ft.Params().Fields().Slice() {
-			s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
-			args = args[1:]
-		}
-
-		// Call runtime.deferprocStack with pointer to _defer record.
-		ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
-		aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
-		if testLateExpansion {
-			callArgs = append(callArgs, addr, s.mem())
-			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
-			call.AddArgs(callArgs...)
-		} else {
-			arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
-			s.store(types.Types[TUINTPTR], arg0, addr)
-			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
-		}
-		if stksize < int64(Widthptr) {
-			// We need room for both the call to deferprocStack and the call to
-			// the deferred function.
-			// TODO Revisit this if/when we pass args in registers.
-			stksize = int64(Widthptr)
-		}
-		call.AuxInt = stksize
-	} else {
-		// Store arguments to stack, including defer/go arguments and receiver for method calls.
-		// These are written in SP-offset order.
-		argStart := Ctxt.FixedFrameSize()
-		// Defer/go args.
-		if k != callNormal {
-			// Write argsize and closure (args to newproc/deferproc).
-			argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
-			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)})
-			if testLateExpansion {
-				callArgs = append(callArgs, argsize)
-			} else {
-				addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
-				s.store(types.Types[TUINT32], addr, argsize)
-			}
-			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
-			if testLateExpansion {
-				callArgs = append(callArgs, closure)
-			} else {
-				addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
-				s.store(types.Types[TUINTPTR], addr, closure)
-			}
-			stksize += 2 * int64(Widthptr)
-			argStart += 2 * int64(Widthptr)
-		}
-
-		// Set receiver (for interface calls).
-		if rcvr != nil {
-			addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
-			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
-			if testLateExpansion {
-				callArgs = append(callArgs, rcvr)
-			} else {
-				s.store(types.Types[TUINTPTR], addr, rcvr)
-			}
-		}
-
-		// Write args.
-		t := n.Left.Type
-		args := n.Rlist.Slice()
-		if n.Op == OCALLMETH {
-			f := t.Recv()
-			ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
-			ACArgs = append(ACArgs, ACArg)
-			callArgs = append(callArgs, arg)
-			args = args[1:]
-		}
-		for i, n := range args {
-			f := t.Params().Field(i)
-			ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset, testLateExpansion)
-			ACArgs = append(ACArgs, ACArg)
-			callArgs = append(callArgs, arg)
-		}
-
-		callArgs = append(callArgs, s.mem())
-
-		// call target
-		switch {
-		case k == callDefer:
-			aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults)
-			if testLateExpansion {
-				call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
-				call.AddArgs(callArgs...)
-			} else {
-				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
-			}
-		case k == callGo:
-			aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults)
-			if testLateExpansion {
-				call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
-				call.AddArgs(callArgs...)
-			} else {
-				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
-			}
-		case closure != nil:
-			// rawLoad because loading the code pointer from a
-			// closure is always safe, but IsSanitizerSafeAddr
-			// can't always figure that out currently, and it's
-			// critical that we not clobber any arguments already
-			// stored onto the stack.
-			codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
-			if testLateExpansion {
-				aux := ssa.ClosureAuxCall(ACArgs, ACResults)
-				call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
-				call.AddArgs(callArgs...)
-			} else {
-				call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
-			}
-		case codeptr != nil:
-			if testLateExpansion {
-				aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
-				call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
-				call.AddArgs(callArgs...)
-			} else {
-				call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
-			}
-		case sym != nil:
-			if testLateExpansion {
-				aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
-				call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
-				call.AddArgs(callArgs...)
-			} else {
-				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
-			}
-		default:
-			s.Fatalf("bad call type %v %v", n.Op, n)
-		}
-		call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
-	}
-	if testLateExpansion {
-		s.prevCall = call
-		s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
-	} else {
-		s.vars[&memVar] = call
-	}
-	// Insert OVARLIVE nodes
-	s.stmtList(n.Nbody)
-
-	// Finish block for defers
-	if k == callDefer || k == callDeferStack {
-		b := s.endBlock()
-		b.Kind = ssa.BlockDefer
-		b.SetControl(call)
-		bNext := s.f.NewBlock(ssa.BlockPlain)
-		b.AddEdgeTo(bNext)
-		// Add recover edge to exit code.
-		r := s.f.NewBlock(ssa.BlockPlain)
-		s.startBlock(r)
-		s.exit()
-		b.AddEdgeTo(r)
-		b.Likely = ssa.BranchLikely
-		s.startBlock(bNext)
-	}
-
-	if res.NumFields() == 0 || k != callNormal {
-		// call has no return value. Continue with the next statement.
-		return nil
-	}
-	fp := res.Field(0)
-	if returnResultAddr {
-		pt := types.NewPtr(fp.Type)
-		if testLateExpansion {
-			return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
-		}
-		return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
-	}
-
-	if testLateExpansion {
-		return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
-	}
-	return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()))
-}
-
-// maybeNilCheckClosure checks if a nil check of a closure is needed in some
-// architecture-dependent situations and, if so, emits the nil check.
-func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
-	if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
-		// On AIX, the closure needs to be verified as fn can be nil, except for go
-		// statements; that case must be handled by the runtime itself so it can
-		// emit the "go of nil func value" error.
-		// TODO(neelance): On other architectures this should be eliminated by the optimization steps.
-		s.nilCheck(closure)
-	}
-}
-
-// getMethodClosure returns a value representing the closure for a method call
-func (s *state) getMethodClosure(fn *Node) *ssa.Value {
-	// Make a name n2 for the function.
-	// fn.Sym might be sync.(*Mutex).Unlock.
-	// Make a PFUNC node out of that, then evaluate it.
-	// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
-	// We can then pass that to defer or go.
-	n2 := newnamel(fn.Pos, fn.Sym)
-	n2.Name.Curfn = s.curfn
-	n2.SetClass(PFUNC)
-	// n2.Sym already existed, so it's already marked as a function.
-	n2.Pos = fn.Pos
-	n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
-	return s.expr(n2)
-}
-
-// getClosureAndRcvr returns values for the appropriate closure and receiver of an
-// interface call
-func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
-	i := s.expr(fn.Left)
-	itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
-	s.nilCheck(itab)
-	itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
-	closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
-	rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
-	return closure, rcvr
-}
-
-// etypesign returns the signed-ness of e, for integer/pointer etypes.
-// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
-func etypesign(e types.EType) int8 {
-	switch e {
-	case TINT8, TINT16, TINT32, TINT64, TINT:
-		return -1
-	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
-		return +1
-	}
-	return 0
-}
-
-// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
-// The value that the returned Value represents is guaranteed to be non-nil.
-func (s *state) addr(n *Node) *ssa.Value {
-	if n.Op != ONAME {
-		s.pushLine(n.Pos)
-		defer s.popLine()
-	}
-
-	t := types.NewPtr(n.Type)
-	switch n.Op {
-	case ONAME:
-		switch n.Class() {
-		case PEXTERN:
-			// global variable
-			v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
-			// TODO: Make OpAddr use AuxInt as well as Aux.
-			if n.Xoffset != 0 {
-				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
-			}
-			return v
-		case PPARAM:
-			// parameter slot
-			v := s.decladdrs[n]
-			if v != nil {
-				return v
-			}
-			if n == nodfp {
-				// Special arg that points to the frame pointer (Used by ORECOVER).
-				return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem)
-			}
-			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
-			return nil
-		case PAUTO:
-			return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp())
-
-		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
-			// ensure that we reuse symbols for out parameters so
-			// that cse works on their addresses
-			return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
-		default:
-			s.Fatalf("variable address class %v not implemented", n.Class())
-			return nil
-		}
-	case ORESULT:
-		// load return from callee
-		if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
-			return s.constOffPtrSP(t, n.Xoffset)
-		}
-		which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
-		if which == -1 {
-			// Do the old thing // TODO: Panic instead.
-			return s.constOffPtrSP(t, n.Xoffset)
-		}
-		x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
-		return x
-
-	case OINDEX:
-		if n.Left.Type.IsSlice() {
-			a := s.expr(n.Left)
-			i := s.expr(n.Right)
-			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
-			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
-			p := s.newValue1(ssa.OpSlicePtr, t, a)
-			return s.newValue2(ssa.OpPtrIndex, t, p, i)
-		} else { // array
-			a := s.addr(n.Left)
-			i := s.expr(n.Right)
-			len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
-			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
-			return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
-		}
-	case ODEREF:
-		return s.exprPtr(n.Left, n.Bounded(), n.Pos)
-	case ODOT:
-		p := s.addr(n.Left)
-		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
-	case ODOTPTR:
-		p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
-		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
-	case OCLOSUREVAR:
-		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
-			s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
-	case OCONVNOP:
-		addr := s.addr(n.Left)
-		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
-	case OCALLFUNC, OCALLINTER, OCALLMETH:
-		return s.callAddr(n, callNormal)
-	case ODOTTYPE:
-		v, _ := s.dottype(n, false)
-		if v.Op != ssa.OpLoad {
-			s.Fatalf("dottype of non-load")
-		}
-		if v.Args[1] != s.mem() {
-			s.Fatalf("memory no longer live from dottype load")
-		}
-		return v.Args[0]
-	default:
-		s.Fatalf("unhandled addr %v", n.Op)
-		return nil
-	}
-}
-
-// canSSA reports whether n is SSA-able.
-// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *Node) bool {
-	if Debug.N != 0 {
-		return false
-	}
-	for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
-		n = n.Left
-	}
-	if n.Op != ONAME {
-		return false
-	}
-	if n.Name.Addrtaken() {
-		return false
-	}
-	if n.isParamHeapCopy() {
-		return false
-	}
-	if n.Class() == PAUTOHEAP {
-		s.Fatalf("canSSA of PAUTOHEAP %v", n)
-	}
-	switch n.Class() {
-	case PEXTERN:
-		return false
-	case PPARAMOUT:
-		if s.hasdefer {
-			// TODO: handle this case? Named return values must be
-			// in memory so that the deferred function can see them.
-			// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
-			// Or maybe not, see issue 18860.  Even unnamed return values
-			// must be written back so if a defer recovers, the caller can see them.
-			return false
-		}
-		if s.cgoUnsafeArgs {
-			// Cgo effectively takes the address of all result args,
-			// but the compiler can't see that.
-			return false
-		}
-	}
-	if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
-		// wrappers generated by genwrapper need to update
-		// the .this pointer in place.
-		// TODO: treat as a PPARAMOUT?
-		return false
-	}
-	return canSSAType(n.Type)
-	// TODO: try to make more variables SSAable?
-}
-
-// canSSAType reports whether variables of type t are SSA-able.
-func canSSAType(t *types.Type) bool {
-	dowidth(t)
-	if t.Width > int64(4*Widthptr) {
-		// 4*Widthptr is an arbitrary constant. We want it
-		// to be at least 3*Widthptr so slices can be registerized.
-		// Too big and we'll introduce too much register pressure.
-		return false
-	}
-	switch t.Etype {
-	case TARRAY:
-		// We can't do larger arrays because dynamic indexing is
-		// not supported on SSA variables.
-		// TODO: allow if all indexes are constant.
-		if t.NumElem() <= 1 {
-			return canSSAType(t.Elem())
-		}
-		return false
-	case TSTRUCT:
-		if t.NumFields() > ssa.MaxStruct {
-			return false
-		}
-		for _, t1 := range t.Fields().Slice() {
-			if !canSSAType(t1.Type) {
-				return false
-			}
-		}
-		return true
-	default:
-		return true
-	}
-}
-
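-// For illustration, on a 64-bit target (Widthptr == 8) these rules work out
-// roughly as follows (a hand-checked sketch, not an exhaustive list):
-//
-//	string                        // SSA-able: two words
-//	[]byte                        // SSA-able: three words, <= 4*Widthptr
-//	[1]float64                    // SSA-able: one-element array of SSA-able type
-//	[2]int                        // not SSA-able: multi-element array
-//	struct{ a, b, c, d, e int8 }  // not SSA-able: more than ssa.MaxStruct fields
-//	[64]byte                      // not SSA-able: wider than 4*Widthptr bytes
-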
-// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
-	p := s.expr(n)
-	if bounded || n.NonNil() {
-		if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
-			s.f.Warnl(lineno, "removed nil check")
-		}
-		return p
-	}
-	s.nilCheck(p)
-	return p
-}
-
-// nilCheck generates nil pointer checking code.
-// Used only for automatically inserted nil checks,
-// not for user code like 'x != nil'.
-func (s *state) nilCheck(ptr *ssa.Value) {
-	if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
-		return
-	}
-	s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
-}
-
-// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
-// Starts a new block on return.
-// On input, len must be converted to full int width and be nonnegative.
-// Returns idx converted to full int width.
-// If bounded is true then the caller guarantees the index is not out of bounds
-// (but boundsCheck will still extend the index to full int width).
-func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
-	idx = s.extendIndex(idx, len, kind, bounded)
-
-	if bounded || Debug.B != 0 {
-		// If bounded or bounds checking is flag-disabled, then no check necessary,
-		// just return the extended index.
-		//
-		// Here, bounded == true if the compiler generated the index itself,
-		// such as in the expansion of a slice initializer. These indexes are
-		// compiler-generated, not Go program variables, so they cannot be
-		// attacker-controlled, so we can omit Spectre masking as well.
-		//
-		// Note that we do not want to omit Spectre masking in code like:
-		//
-		//	if 0 <= i && i < len(x) {
-		//		use(x[i])
-		//	}
-		//
-		// Lucky for us, bounded==false for that code.
-		// In that case (handled below), we emit a bound check (and Spectre mask)
-		// and then the prove pass will remove the bounds check.
-		// In theory the prove pass could potentially remove certain
-		// Spectre masks, but it's very delicate and probably better
-		// to be conservative and leave them all in.
-		return idx
-	}
-
-	bNext := s.f.NewBlock(ssa.BlockPlain)
-	bPanic := s.f.NewBlock(ssa.BlockExit)
-
-	if !idx.Type.IsSigned() {
-		switch kind {
-		case ssa.BoundsIndex:
-			kind = ssa.BoundsIndexU
-		case ssa.BoundsSliceAlen:
-			kind = ssa.BoundsSliceAlenU
-		case ssa.BoundsSliceAcap:
-			kind = ssa.BoundsSliceAcapU
-		case ssa.BoundsSliceB:
-			kind = ssa.BoundsSliceBU
-		case ssa.BoundsSlice3Alen:
-			kind = ssa.BoundsSlice3AlenU
-		case ssa.BoundsSlice3Acap:
-			kind = ssa.BoundsSlice3AcapU
-		case ssa.BoundsSlice3B:
-			kind = ssa.BoundsSlice3BU
-		case ssa.BoundsSlice3C:
-			kind = ssa.BoundsSlice3CU
-		}
-	}
-
-	var cmp *ssa.Value
-	if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
-		cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
-	} else {
-		cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
-	}
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-	b.AddEdgeTo(bNext)
-	b.AddEdgeTo(bPanic)
-
-	s.startBlock(bPanic)
-	if thearch.LinkArch.Family == sys.Wasm {
-		// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
-		// Should be similar to gcWriteBarrier, but I can't make it work.
-		s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
-	} else {
-		mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
-		s.endBlock().SetControl(mem)
-	}
-	s.startBlock(bNext)
-
-	// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
-	if spectreIndex {
-		op := ssa.OpSpectreIndex
-		if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
-			op = ssa.OpSpectreSliceIndex
-		}
-		idx = s.newValue2(op, types.Types[TINT], idx, len)
-	}
-
-	return idx
-}
-
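-// The masking above can be pictured as the following sketch (spectreMaskIndex
-// is a hypothetical helper; the real lowering of ssa.OpSpectreIndex and
-// ssa.OpSpectreSliceIndex is branch-free, e.g. a CMOV on amd64):
-//
-//	func spectreMaskIndex(i, l int64) int64 {
-//		var mask int64
-//		if uint64(i) < uint64(l) { // unsigned compare also rejects negative i
-//			mask = -1
-//		}
-//		return i & mask // a misspeculated out-of-bounds index collapses to 0
-//	}
-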
-// If cmp (a bool) is false, panic using the given function.
-func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-	bNext := s.f.NewBlock(ssa.BlockPlain)
-	line := s.peekPos()
-	pos := Ctxt.PosTable.Pos(line)
-	fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
-	bPanic := s.panics[fl]
-	if bPanic == nil {
-		bPanic = s.f.NewBlock(ssa.BlockPlain)
-		s.panics[fl] = bPanic
-		s.startBlock(bPanic)
-		// The panic call takes/returns memory to ensure that the right
-		// memory state is observed if the panic happens.
-		s.rtcall(fn, false, nil)
-	}
-	b.AddEdgeTo(bNext)
-	b.AddEdgeTo(bPanic)
-	s.startBlock(bNext)
-}
-
-func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
-	needcheck := true
-	switch b.Op {
-	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
-		if b.AuxInt != 0 {
-			needcheck = false
-		}
-	}
-	if needcheck {
-		// do a size-appropriate check for zero
-		cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
-		s.check(cmp, panicdivide)
-	}
-	return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-}
-
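-// In source terms the guard corresponds to this sketch (illustrative only;
-// Go's / and % already compile to exactly such a check via panicdivide):
-//
-//	func checkedDiv(a, b int64) int64 {
-//		if b == 0 {
-//			panic("runtime error: integer divide by zero")
-//		}
-//		return a / b
-//	}
-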
-// rtcall issues a call to the given runtime function fn with the listed args.
-// Returns a slice of results of the given result types.
-// The call is added to the end of the current block.
-// If returns is false, the block is marked as an exit block.
-func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
-	s.prevCall = nil
-	// Write args to the stack
-	off := Ctxt.FixedFrameSize()
-	testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
-	var ACArgs []ssa.Param
-	var ACResults []ssa.Param
-	var callArgs []*ssa.Value
-
-	for _, arg := range args {
-		t := arg.Type
-		off = Rnd(off, t.Alignment())
-		size := t.Size()
-		ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
-		if testLateExpansion {
-			callArgs = append(callArgs, arg)
-		} else {
-			ptr := s.constOffPtrSP(t.PtrTo(), off)
-			s.store(t, ptr, arg)
-		}
-		off += size
-	}
-	off = Rnd(off, int64(Widthreg))
-
-	// Accumulate result types and offsets
-	offR := off
-	for _, t := range results {
-		offR = Rnd(offR, t.Alignment())
-		ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)})
-		offR += t.Size()
-	}
-
-	// Issue call
-	var call *ssa.Value
-	aux := ssa.StaticAuxCall(fn, ACArgs, ACResults)
-	if testLateExpansion {
-		callArgs = append(callArgs, s.mem())
-		call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
-		call.AddArgs(callArgs...)
-		s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
-	} else {
-		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
-		s.vars[&memVar] = call
-	}
-
-	if !returns {
-		// Finish block
-		b := s.endBlock()
-		b.Kind = ssa.BlockExit
-		b.SetControl(call)
-		call.AuxInt = off - Ctxt.FixedFrameSize()
-		if len(results) > 0 {
-			s.Fatalf("panic call can't have results")
-		}
-		return nil
-	}
-
-	// Load results
-	res := make([]*ssa.Value, len(results))
-	if testLateExpansion {
-		for i, t := range results {
-			off = Rnd(off, t.Alignment())
-			if canSSAType(t) {
-				res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
-			} else {
-				addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
-				res[i] = s.rawLoad(t, addr)
-			}
-			off += t.Size()
-		}
-	} else {
-		for i, t := range results {
-			off = Rnd(off, t.Alignment())
-			ptr := s.constOffPtrSP(types.NewPtr(t), off)
-			res[i] = s.load(t, ptr)
-			off += t.Size()
-		}
-	}
-	off = Rnd(off, int64(Widthptr))
-
-	// Remember how much callee stack space we needed.
-	call.AuxInt = off
-
-	return res
-}
-
-// do *left = right for type t.
-func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
-	s.instrument(t, left, instrumentWrite)
-
-	if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
-		// Known to not have write barrier. Store the whole type.
-		s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
-		return
-	}
-
-	// store scalar fields first, so write barrier stores for
-	// pointer fields can be grouped together, and scalar values
-	// don't need to be live across the write barrier call.
-	// TODO: if the writebarrier pass knows how to reorder stores,
-	// we can do a single store here as long as skip==0.
-	s.storeTypeScalars(t, left, right, skip)
-	if skip&skipPtr == 0 && t.HasPointers() {
-		s.storeTypePtrs(t, left, right)
-	}
-}
-
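-// For a concrete picture, storing a string header proceeds in two phases, as
-// in this sketch (dst and src are hypothetical struct{ ptr unsafe.Pointer; len int } headers):
-//
-//	dst.len = src.len // scalar half first: no write barrier, and the scalar
-//	                  // need not stay live across the write-barrier call
-//	dst.ptr = src.ptr // pointer half second, possibly behind a write barrier
-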
-// do *left = right for all scalar (non-pointer) parts of t.
-func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
-	switch {
-	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
-		s.store(t, left, right)
-	case t.IsPtrShaped():
-		if t.IsPtr() && t.Elem().NotInHeap() {
-			s.store(t, left, right) // see issue 42032
-		}
-		// otherwise, no scalar fields.
-	case t.IsString():
-		if skip&skipLen != 0 {
-			return
-		}
-		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
-		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
-		s.store(types.Types[TINT], lenAddr, len)
-	case t.IsSlice():
-		if skip&skipLen == 0 {
-			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
-			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
-			s.store(types.Types[TINT], lenAddr, len)
-		}
-		if skip&skipCap == 0 {
-			cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
-			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
-			s.store(types.Types[TINT], capAddr, cap)
-		}
-	case t.IsInterface():
-		// itab field doesn't need a write barrier (even though it is a pointer).
-		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
-		s.store(types.Types[TUINTPTR], left, itab)
-	case t.IsStruct():
-		n := t.NumFields()
-		for i := 0; i < n; i++ {
-			ft := t.FieldType(i)
-			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
-			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
-			s.storeTypeScalars(ft, addr, val, 0)
-		}
-	case t.IsArray() && t.NumElem() == 0:
-		// nothing
-	case t.IsArray() && t.NumElem() == 1:
-		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
-	default:
-		s.Fatalf("bad write barrier type %v", t)
-	}
-}
-
-// do *left = right for all pointer parts of t.
-func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
-	switch {
-	case t.IsPtrShaped():
-		if t.IsPtr() && t.Elem().NotInHeap() {
-			break // see issue 42032
-		}
-		s.store(t, left, right)
-	case t.IsString():
-		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
-		s.store(s.f.Config.Types.BytePtr, left, ptr)
-	case t.IsSlice():
-		elType := types.NewPtr(t.Elem())
-		ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
-		s.store(elType, left, ptr)
-	case t.IsInterface():
-		// itab field is treated as a scalar.
-		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
-		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
-		s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
-	case t.IsStruct():
-		n := t.NumFields()
-		for i := 0; i < n; i++ {
-			ft := t.FieldType(i)
-			if !ft.HasPointers() {
-				continue
-			}
-			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
-			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
-			s.storeTypePtrs(ft, addr, val)
-		}
-	case t.IsArray() && t.NumElem() == 0:
-		// nothing
-	case t.IsArray() && t.NumElem() == 1:
-		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
-	default:
-		s.Fatalf("bad write barrier type %v", t)
-	}
-}
-
-// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
-// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
-// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
-	var a *ssa.Value
-	if forLateExpandedCall {
-		if !canSSAType(t) {
-			a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
-		} else {
-			a = s.expr(n)
-		}
-	} else {
-		s.storeArgWithBase(n, t, s.sp, off)
-	}
-	return ssa.Param{Type: t, Offset: int32(off)}, a
-}
-
-func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
-	pt := types.NewPtr(t)
-	var addr *ssa.Value
-	if base == s.sp {
-		// Use special routine that avoids allocation on duplicate offsets.
-		addr = s.constOffPtrSP(pt, off)
-	} else {
-		addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
-	}
-
-	if !canSSAType(t) {
-		a := s.addr(n)
-		s.move(t, addr, a)
-		return
-	}
-
-	a := s.expr(n)
-	s.storeType(t, addr, a, 0, false)
-}
-
-// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
-// i,j,k may be nil, in which case they are set to their default value.
-// v may be a slice, string or pointer to an array.
-func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
-	t := v.Type
-	var ptr, len, cap *ssa.Value
-	switch {
-	case t.IsSlice():
-		ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
-		len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
-		cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
-	case t.IsString():
-		ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v)
-		len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
-		cap = len
-	case t.IsPtr():
-		if !t.Elem().IsArray() {
-			s.Fatalf("bad ptr to array in slice %v\n", t)
-		}
-		s.nilCheck(v)
-		ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
-		len = s.constInt(types.Types[TINT], t.Elem().NumElem())
-		cap = len
-	default:
-		s.Fatalf("bad type in slice %v\n", t)
-	}
-
-	// Set default values
-	if i == nil {
-		i = s.constInt(types.Types[TINT], 0)
-	}
-	if j == nil {
-		j = len
-	}
-	three := true
-	if k == nil {
-		three = false
-		k = cap
-	}
-
-	// Panic if slice indices are not in bounds.
-	// Make sure we check these in reverse order so that we're always
-	// comparing against a value known to be nonnegative. See issue 28797.
-	if three {
-		if k != cap {
-			kind := ssa.BoundsSlice3Alen
-			if t.IsSlice() {
-				kind = ssa.BoundsSlice3Acap
-			}
-			k = s.boundsCheck(k, cap, kind, bounded)
-		}
-		if j != k {
-			j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
-		}
-		i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
-	} else {
-		if j != k {
-			kind := ssa.BoundsSliceAlen
-			if t.IsSlice() {
-				kind = ssa.BoundsSliceAcap
-			}
-			j = s.boundsCheck(j, k, kind, bounded)
-		}
-		i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
-	}
-
-	// Word-sized integer operations.
-	subOp := s.ssaOp(OSUB, types.Types[TINT])
-	mulOp := s.ssaOp(OMUL, types.Types[TINT])
-	andOp := s.ssaOp(OAND, types.Types[TINT])
-
-	// Calculate the length (rlen) and capacity (rcap) of the new slice.
-	// For strings the capacity of the result is unimportant. However,
-	// we use rcap to test if we've generated a zero-length slice.
-	// For strings we use the length for that test.
-	rlen := s.newValue2(subOp, types.Types[TINT], j, i)
-	rcap := rlen
-	if j != k && !t.IsString() {
-		rcap = s.newValue2(subOp, types.Types[TINT], k, i)
-	}
-
-	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
-		// No pointer arithmetic necessary.
-		return ptr, rlen, rcap
-	}
-
-	// Calculate the base pointer (rptr) for the new slice.
-	//
-	// Generate the following code assuming that indexes are in bounds.
-	// The masking is to make sure that we don't generate a slice
-	// that points to the next object in memory. We cannot just set
-	// the pointer to nil because then we would create a nil slice or
-	// string.
-	//
-	//     rcap = k - i
-	//     rlen = j - i
-	//     rptr = ptr + (mask(rcap) & (i * stride))
-	//
-	// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
-	// of the element type.
-	stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width)
-
-	// The delta is the number of bytes to offset ptr by.
-	delta := s.newValue2(mulOp, types.Types[TINT], i, stride)
-
-	// If we're slicing to the point where the capacity is zero,
-	// zero out the delta.
-	mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
-	delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
-
-	// Compute rptr = ptr + delta.
-	rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
-
-	return rptr, rlen, rcap
-}
-
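-// At the uintptr level the base-pointer computation is equivalent to this
-// sketch (sliceBase is a hypothetical helper; the real code materializes the
-// mask branch-free via ssa.OpSlicemask):
-//
-//	func sliceBase(ptr, i, rcap, stride uintptr) uintptr {
-//		var mask uintptr
-//		if rcap != 0 {
-//			mask = ^uintptr(0) // all ones
-//		}
-//		return ptr + mask&(i*stride) // zero-cap results keep the original pointer
-//	}
-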
-type u642fcvtTab struct {
-	leq, cvt2F, and, rsh, or, add ssa.Op
-	one                           func(*state, *types.Type, int64) *ssa.Value
-}
-
-var u64_f64 = u642fcvtTab{
-	leq:   ssa.OpLeq64,
-	cvt2F: ssa.OpCvt64to64F,
-	and:   ssa.OpAnd64,
-	rsh:   ssa.OpRsh64Ux64,
-	or:    ssa.OpOr64,
-	add:   ssa.OpAdd64F,
-	one:   (*state).constInt64,
-}
-
-var u64_f32 = u642fcvtTab{
-	leq:   ssa.OpLeq64,
-	cvt2F: ssa.OpCvt64to32F,
-	and:   ssa.OpAnd64,
-	rsh:   ssa.OpRsh64Ux64,
-	or:    ssa.OpOr64,
-	add:   ssa.OpAdd32F,
-	one:   (*state).constInt64,
-}
-
-func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
-}
-
-func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
-}
-
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	// if x >= 0 {
-	//    result = (floatY) x
-	// } else {
-	// 	  y = uintX(x) ; y = x & 1
-	// 	  z = uintX(x) ; z = z >> 1
-	// 	  z = z | y
-	// 	  result = floatY(z)
-	// 	  result = result + result
-	// }
-	//
-	// Code borrowed from old code generator.
-	// What's going on: a large 64-bit "unsigned" looks like a
-	// negative number to the hardware's integer-to-float
-	// conversion. However, because the mantissa is only
-	// 63 bits, we don't need the LSB, so instead we do an
-	// unsigned right shift (divide by two), convert, and
-	// double. However, before we do that, we need to be
-	// sure that we do not lose a "1" if that made the
-	// difference in the resulting rounding. Therefore, we
-	// preserve it, and OR (not ADD) it back in. The case
-	// that matters is when the eleven discarded bits are
-	// equal to 10000000001; that rounds up, and the 1 cannot
-	// be lost else it would round down if the LSB of the
-	// candidate mantissa is 0.
-	cmp := s.newValue2(cvttab.leq, types.Types[TBOOL], s.zeroVal(ft), x)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	a0 := s.newValue1(cvttab.cvt2F, tt, x)
-	s.vars[n] = a0
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	one := cvttab.one(s, ft, 1)
-	y := s.newValue2(cvttab.and, ft, x, one)
-	z := s.newValue2(cvttab.rsh, ft, x, one)
-	z = s.newValue2(cvttab.or, ft, z, y)
-	a := s.newValue1(cvttab.cvt2F, tt, z)
-	a1 := s.newValue2(cvttab.add, tt, a, a)
-	s.vars[n] = a1
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, n.Type)
-}
-
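-// A pure-Go rendering of the expansion above (illustrative only; the
-// compiler emits it because the hardware conversion is signed-only):
-//
-//	func uint64ToFloat64(x uint64) float64 {
-//		if int64(x) >= 0 {
-//			return float64(int64(x)) // high bit clear: signed conversion suffices
-//		}
-//		y := x & 1    // save the LSB so rounding still sees it
-//		z := x>>1 | y // halve; OR (not ADD) the LSB back in
-//		r := float64(int64(z))
-//		return r + r // double to undo the halving
-//	}
-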
-type u322fcvtTab struct {
-	cvtI2F, cvtF2F ssa.Op
-}
-
-var u32_f64 = u322fcvtTab{
-	cvtI2F: ssa.OpCvt32to64F,
-	cvtF2F: ssa.OpCopy,
-}
-
-var u32_f32 = u322fcvtTab{
-	cvtI2F: ssa.OpCvt32to32F,
-	cvtF2F: ssa.OpCvt64Fto32F,
-}
-
-func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
-}
-
-func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
-}
-
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	// if x >= 0 {
-	// 	result = floatY(x)
-	// } else {
-	// 	result = floatY(float64(x) + (1<<32))
-	// }
-	cmp := s.newValue2(ssa.OpLeq32, types.Types[TBOOL], s.zeroVal(ft), x)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
-	s.vars[n] = a0
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
-	twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
-	a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
-	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
-
-	s.vars[n] = a3
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, n.Type)
-}
-
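-// Equivalent Go for the 32-bit case (illustrative sketch; for a float32
-// result the sum is additionally rounded via cvtF2F):
-//
-//	func uint32ToFloat64(x uint32) float64 {
-//		if int32(x) >= 0 {
-//			return float64(int32(x))
-//		}
-//		return float64(int32(x)) + (1 << 32) // bias negative values back up
-//	}
-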
-// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
-	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
-		s.Fatalf("node must be a map or a channel")
-	}
-	// if n == nil {
-	//   return 0
-	// } else {
-	//   // len
-	//   return *((*int)n)
-	//   // cap
-	//   return *(((*int)n)+1)
-	// }
-	lenType := n.Type
-	nilValue := s.constNil(types.Types[TUINTPTR])
-	cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchUnlikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	// length/capacity of a nil map/chan is zero
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	s.vars[n] = s.zeroVal(lenType)
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	switch n.Op {
-	case OLEN:
-		// length is stored in the first word for map/chan
-		s.vars[n] = s.load(lenType, x)
-	case OCAP:
-		// capacity is stored in the second word for chan
-		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
-		s.vars[n] = s.load(lenType, sw)
-	default:
-		s.Fatalf("op must be OLEN or OCAP")
-	}
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, lenType)
-}
-
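-// In unsafe terms the emitted code behaves like this sketch. It leans on the
-// runtime detail that a map's count and a channel's qcount/dataqsiz are the
-// leading words of their headers (layout assumed here for illustration):
-//
-//	func lenOrCap(p unsafe.Pointer, wantCap bool) int {
-//		if p == nil {
-//			return 0 // len/cap of a nil map or chan
-//		}
-//		words := (*[2]int)(p)
-//		if wantCap {
-//			return words[1] // second word: chan capacity
-//		}
-//		return words[0] // first word: map/chan length
-//	}
-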
-type f2uCvtTab struct {
-	ltf, cvt2U, subf, or ssa.Op
-	floatValue           func(*state, *types.Type, float64) *ssa.Value
-	intValue             func(*state, *types.Type, int64) *ssa.Value
-	cutoff               uint64
-}
-
-var f32_u64 = f2uCvtTab{
-	ltf:        ssa.OpLess32F,
-	cvt2U:      ssa.OpCvt32Fto64,
-	subf:       ssa.OpSub32F,
-	or:         ssa.OpOr64,
-	floatValue: (*state).constFloat32,
-	intValue:   (*state).constInt64,
-	cutoff:     1 << 63,
-}
-
-var f64_u64 = f2uCvtTab{
-	ltf:        ssa.OpLess64F,
-	cvt2U:      ssa.OpCvt64Fto64,
-	subf:       ssa.OpSub64F,
-	or:         ssa.OpOr64,
-	floatValue: (*state).constFloat64,
-	intValue:   (*state).constInt64,
-	cutoff:     1 << 63,
-}
-
-var f32_u32 = f2uCvtTab{
-	ltf:        ssa.OpLess32F,
-	cvt2U:      ssa.OpCvt32Fto32,
-	subf:       ssa.OpSub32F,
-	or:         ssa.OpOr32,
-	floatValue: (*state).constFloat32,
-	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
-	cutoff:     1 << 31,
-}
-
-var f64_u32 = f2uCvtTab{
-	ltf:        ssa.OpLess64F,
-	cvt2U:      ssa.OpCvt64Fto32,
-	subf:       ssa.OpSub64F,
-	or:         ssa.OpOr32,
-	floatValue: (*state).constFloat64,
-	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
-	cutoff:     1 << 31,
-}
-
-func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.floatToUint(&f32_u64, n, x, ft, tt)
-}
-func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.floatToUint(&f64_u64, n, x, ft, tt)
-}
-
-func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.floatToUint(&f32_u32, n, x, ft, tt)
-}
-
-func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	return s.floatToUint(&f64_u32, n, x, ft, tt)
-}
-
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
-	// cutoff:=1<<(intY_Size-1)
-	// if x < floatX(cutoff) {
-	// 	result = uintY(x)
-	// } else {
-	// 	y = x - floatX(cutoff)
-	// 	z = uintY(y)
-	// 	result = z | -(cutoff)
-	// }
-	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
-	cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	a0 := s.newValue1(cvttab.cvt2U, tt, x)
-	s.vars[n] = a0
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	y := s.newValue2(cvttab.subf, ft, x, cutoff)
-	y = s.newValue1(cvttab.cvt2U, tt, y)
-	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
-	a1 := s.newValue2(cvttab.or, tt, y, z)
-	s.vars[n] = a1
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, n.Type)
-}
-
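-// The cutoff trick in Go form (illustrative; user-level float64-to-uint64
-// conversions lower to this on targets without an unsigned conversion):
-//
-//	func float64ToUint64(x float64) uint64 {
-//		const cutoff = float64(1 << 63)
-//		if x < cutoff {
-//			return uint64(int64(x)) // fits in the signed range
-//		}
-//		y := x - cutoff                 // shift into the signed range
-//		return uint64(int64(y)) | 1<<63 // OR the high bit back in
-//	}
-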
-// dottype generates SSA for a type assertion node.
-// commaok indicates whether to panic or return a bool.
-// If commaok is false, resok will be nil.
-func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
-	iface := s.expr(n.Left)   // input interface
-	target := s.expr(n.Right) // target type
-	byteptr := s.f.Config.Types.BytePtr
-
-	if n.Type.IsInterface() {
-		if n.Type.IsEmptyInterface() {
-			// Converting to an empty interface.
-			// Input could be an empty or nonempty interface.
-			if Debug_typeassert > 0 {
-				Warnl(n.Pos, "type assertion inlined")
-			}
-
-			// Get itab/type field from input.
-			itab := s.newValue1(ssa.OpITab, byteptr, iface)
-			// Conversion succeeds iff that field is not nil.
-			cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
-
-			if n.Left.Type.IsEmptyInterface() && commaok {
-				// Converting empty interface to empty interface with ,ok is just a nil check.
-				return iface, cond
-			}
-
-			// Branch on nilness.
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(cond)
-			b.Likely = ssa.BranchLikely
-			bOk := s.f.NewBlock(ssa.BlockPlain)
-			bFail := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bOk)
-			b.AddEdgeTo(bFail)
-
-			if !commaok {
-				// On failure, panic by calling panicnildottype.
-				s.startBlock(bFail)
-				s.rtcall(panicnildottype, false, nil, target)
-
-				// On success, return (perhaps modified) input interface.
-				s.startBlock(bOk)
-				if n.Left.Type.IsEmptyInterface() {
-					res = iface // Use input interface unchanged.
-					return
-				}
-				// Load type out of itab, build interface with existing idata.
-				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
-				typ := s.load(byteptr, off)
-				idata := s.newValue1(ssa.OpIData, byteptr, iface)
-				res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
-				return
-			}
-
-			s.startBlock(bOk)
-			// nonempty -> empty
-			// Need to load type from itab
-			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
-			s.vars[&typVar] = s.load(byteptr, off)
-			s.endBlock()
-
-			// itab is nil, might as well use that as the nil result.
-			s.startBlock(bFail)
-			s.vars[&typVar] = itab
-			s.endBlock()
-
-			// Merge point.
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			bOk.AddEdgeTo(bEnd)
-			bFail.AddEdgeTo(bEnd)
-			s.startBlock(bEnd)
-			idata := s.newValue1(ssa.OpIData, byteptr, iface)
-			res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
-			resok = cond
-			delete(s.vars, &typVar)
-			return
-		}
-		// converting to a nonempty interface needs a runtime call.
-		if Debug_typeassert > 0 {
-			Warnl(n.Pos, "type assertion not inlined")
-		}
-		if n.Left.Type.IsEmptyInterface() {
-			if commaok {
-				call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
-				return call[0], call[1]
-			}
-			return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
-		}
-		if commaok {
-			call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
-			return call[0], call[1]
-		}
-		return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
-	}
-
-	if Debug_typeassert > 0 {
-		Warnl(n.Pos, "type assertion inlined")
-	}
-
-	// Converting to a concrete type.
-	direct := isdirectiface(n.Type)
-	itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
-	var targetITab *ssa.Value
-	if n.Left.Type.IsEmptyInterface() {
-		// Looking for pointer to target type.
-		targetITab = target
-	} else {
-		// Looking for pointer to itab for target type and source interface.
-		targetITab = s.expr(n.List.First())
-	}
-
-	var tmp *Node       // temporary for use with large types
-	var addr *ssa.Value // address of tmp
-	if commaok && !canSSAType(n.Type) {
-		// unSSAable type, use temporary.
-		// TODO: get rid of some of these temporaries.
-		tmp = tempAt(n.Pos, s.curfn, n.Type)
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
-		addr = s.addr(tmp)
-	}
-
-	cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cond)
-	b.Likely = ssa.BranchLikely
-
-	bOk := s.f.NewBlock(ssa.BlockPlain)
-	bFail := s.f.NewBlock(ssa.BlockPlain)
-	b.AddEdgeTo(bOk)
-	b.AddEdgeTo(bFail)
-
-	if !commaok {
-		// on failure, panic by calling panicdottype
-		s.startBlock(bFail)
-		taddr := s.expr(n.Right.Right)
-		if n.Left.Type.IsEmptyInterface() {
-			s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
-		} else {
-			s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
-		}
-
-		// on success, return data from interface
-		s.startBlock(bOk)
-		if direct {
-			return s.newValue1(ssa.OpIData, n.Type, iface), nil
-		}
-		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
-		return s.load(n.Type, p), nil
-	}
-
-	// commaok is the more complicated case because we have
-	// a control flow merge point.
-	bEnd := s.f.NewBlock(ssa.BlockPlain)
-	// Note that we need a new valVar each time (unlike okVar where we can
-	// reuse the variable) because it might have a different type every time.
-	valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
-
-	// type assertion succeeded
-	s.startBlock(bOk)
-	if tmp == nil {
-		if direct {
-			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
-		} else {
-			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
-			s.vars[valVar] = s.load(n.Type, p)
-		}
-	} else {
-		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
-		s.move(n.Type, addr, p)
-	}
-	s.vars[&okVar] = s.constBool(true)
-	s.endBlock()
-	bOk.AddEdgeTo(bEnd)
-
-	// type assertion failed
-	s.startBlock(bFail)
-	if tmp == nil {
-		s.vars[valVar] = s.zeroVal(n.Type)
-	} else {
-		s.zero(n.Type, addr)
-	}
-	s.vars[&okVar] = s.constBool(false)
-	s.endBlock()
-	bFail.AddEdgeTo(bEnd)
-
-	// merge point
-	s.startBlock(bEnd)
-	if tmp == nil {
-		res = s.variable(valVar, n.Type)
-		delete(s.vars, valVar)
-	} else {
-		res = s.load(n.Type, addr)
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
-	}
-	resok = s.variable(&okVar, types.Types[TBOOL])
-	delete(s.vars, &okVar)
-	return res, resok
-}
-
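-// For the empty-interface fast path, the inlined comparison is equivalent to
-// this sketch (the eface struct mirrors the runtime's layout; assumed here
-// for illustration, not a stable API):
-//
-//	func assertEface(i interface{}, targetTyp unsafe.Pointer) (data unsafe.Pointer, ok bool) {
-//		type eface struct{ typ, data unsafe.Pointer }
-//		e := (*eface)(unsafe.Pointer(&i))
-//		if e.typ != targetTyp { // pointer-compare the type word (ssa.OpITab)
-//			return nil, false
-//		}
-//		return e.data, true
-//	}
-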
-// variable returns the value of a variable at the current location.
-func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
-	v := s.vars[name]
-	if v != nil {
-		return v
-	}
-	v = s.fwdVars[name]
-	if v != nil {
-		return v
-	}
-
-	if s.curBlock == s.f.Entry {
-		// No variable should be live at entry.
-		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
-	}
-	// Make a FwdRef, which records a value that's live on block input.
-	// We'll find the matching definition as part of insertPhis.
-	v = s.newValue0A(ssa.OpFwdRef, t, name)
-	s.fwdVars[name] = v
-	s.addNamedValue(name, v)
-	return v
-}
-
-func (s *state) mem() *ssa.Value {
-	return s.variable(&memVar, types.TypeMem)
-}
-
-func (s *state) addNamedValue(n *Node, v *ssa.Value) {
-	if n.Class() == Pxxx {
-		// Don't track our dummy nodes (&memVar etc.).
-		return
-	}
-	if n.IsAutoTmp() {
-		// Don't track temporary variables.
-		return
-	}
-	if n.Class() == PPARAMOUT {
-		// Don't track named output values.  This prevents return values
-		// from being assigned too early. See #14591 and #14762. TODO: allow this.
-		return
-	}
-	if n.Class() == PAUTO && n.Xoffset != 0 {
-		s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
-	}
-	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
-	values, ok := s.f.NamedValues[loc]
-	if !ok {
-		s.f.Names = append(s.f.Names, loc)
-	}
-	s.f.NamedValues[loc] = append(values, v)
-}
-
-// Generate a disconnected call to a runtime routine and a return.
-func gencallret(pp *Progs, sym *obj.LSym) *obj.Prog {
-	p := pp.Prog(obj.ACALL)
-	p.To.Type = obj.TYPE_MEM
-	p.To.Name = obj.NAME_EXTERN
-	p.To.Sym = sym
-	p = pp.Prog(obj.ARET)
-	return p
-}
-
-// Branch is an unresolved branch.
-type Branch struct {
-	P *obj.Prog  // branch instruction
-	B *ssa.Block // target
-}
-
-// SSAGenState contains state needed during Prog generation.
-type SSAGenState struct {
-	pp *Progs
-
-	// Branches remembers all the branch instructions we've seen
-	// and where they would like to go.
-	Branches []Branch
-
-	// bstart remembers where each block starts (indexed by block ID)
-	bstart []*obj.Prog
-
-	// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
-	ScratchFpMem *Node
-
-	maxarg int64 // largest frame size for arguments to calls made by the function
-
-	// Map from GC safe points to liveness index, generated by
-	// liveness analysis.
-	livenessMap LivenessMap
-
-	// lineRunStart records the beginning of the current run of instructions
-	// within a single block sharing the same line number.
-	// Used to move statement marks to the beginning of such runs.
-	lineRunStart *obj.Prog
-
-	// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
-	OnWasmStackSkipped int
-}
-
-// Prog appends a new Prog.
-func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
-	p := s.pp.Prog(as)
-	if ssa.LosesStmtMark(as) {
-		return p
-	}
-	// Float a statement start to the beginning of any same-line run.
-	// lineRunStart is reset at block boundaries, which appears to work well.
-	if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
-		s.lineRunStart = p
-	} else if p.Pos.IsStmt() == src.PosIsStmt {
-		s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
-		p.Pos = p.Pos.WithNotStmt()
-	}
-	return p
-}
-
-// Pc returns the current Prog.
-func (s *SSAGenState) Pc() *obj.Prog {
-	return s.pp.next
-}
-
-// SetPos sets the current source position.
-func (s *SSAGenState) SetPos(pos src.XPos) {
-	s.pp.pos = pos
-}
-
-// Br emits a single branch instruction and returns the instruction.
-// Not all architectures need the returned instruction, but otherwise
-// the boilerplate is common to all.
-func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
-	p := s.Prog(op)
-	p.To.Type = obj.TYPE_BRANCH
-	s.Branches = append(s.Branches, Branch{P: p, B: target})
-	return p
-}
-
-// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
-// that reduce "jumpy" line number churn when debugging.
-// Spill/fill/copy instructions from the register allocator,
-// phi functions, and instructions with a no-pos position
-// are examples of instructions that can cause churn.
-func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
-	switch v.Op {
-	case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
-		// These are not statements
-		s.SetPos(v.Pos.WithNotStmt())
-	default:
-		p := v.Pos
-		if p != src.NoXPos {
-			// If the position is defined, update the position.
-			// Also convert default IsStmt to NotStmt; only
-			// explicit statement boundaries should appear
-			// in the generated code.
-			if p.IsStmt() != src.PosIsStmt {
-				p = p.WithNotStmt()
-				// Calls use the pos attached to v, but copy the statement mark from SSAGenState
-			}
-			s.SetPos(p)
-		} else {
-			s.SetPos(s.pp.pos.WithNotStmt())
-		}
-	}
-}
-
-// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
-type byXoffset []*Node
-
-func (s byXoffset) Len() int           { return len(s) }
-func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
-func (s byXoffset) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-func emitStackObjects(e *ssafn, pp *Progs) {
-	var vars []*Node
-	for _, n := range e.curfn.Func.Dcl {
-		if livenessShouldTrack(n) && n.Name.Addrtaken() {
-			vars = append(vars, n)
-		}
-	}
-	if len(vars) == 0 {
-		return
-	}
-
-	// Sort variables from lowest to highest address.
-	sort.Sort(byXoffset(vars))
-
-	// Populate the stack object data.
-	// Format must match runtime/stack.go:stackObjectRecord.
-	x := e.curfn.Func.lsym.Func().StackObjects
-	off := 0
-	off = duintptr(x, off, uint64(len(vars)))
-	for _, v := range vars {
-		// Note: arguments and return values have non-negative Xoffset,
-		// in which case the offset is relative to argp.
-		// Locals have a negative Xoffset, in which case the offset is relative to varp.
-		off = duintptr(x, off, uint64(v.Xoffset))
-		if !typesym(v.Type).Siggen() {
-			e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type)
-		}
-		off = dsymptr(x, off, dtypesym(v.Type), 0)
-	}
-
-	// Emit a funcdata pointing at the stack object data.
-	p := pp.Prog(obj.AFUNCDATA)
-	Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
-	p.To.Type = obj.TYPE_MEM
-	p.To.Name = obj.NAME_EXTERN
-	p.To.Sym = x
-
-	if debuglive != 0 {
-		for _, v := range vars {
-			Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
-		}
-	}
-}
-
-// genssa appends entries to pp for each instruction in f.
-func genssa(f *ssa.Func, pp *Progs) {
-	var s SSAGenState
-
-	e := f.Frontend().(*ssafn)
-
-	s.livenessMap = liveness(e, f, pp)
-	emitStackObjects(e, pp)
-
-	openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
-	if openDeferInfo != nil {
-		// This function uses open-coded defers -- write out the funcdata
-		// info that we computed at the end of genssa.
-		p := pp.Prog(obj.AFUNCDATA)
-		Addrconst(&p.From, objabi.FUNCDATA_OpenCodedDeferInfo)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = openDeferInfo
-	}
-
-	// Remember where each block starts.
-	s.bstart = make([]*obj.Prog, f.NumBlocks())
-	s.pp = pp
-	var progToValue map[*obj.Prog]*ssa.Value
-	var progToBlock map[*obj.Prog]*ssa.Block
-	var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
-	if f.PrintOrHtmlSSA {
-		progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
-		progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
-		f.Logf("genssa %s\n", f.Name)
-		progToBlock[s.pp.next] = f.Blocks[0]
-	}
-
-	s.ScratchFpMem = e.scratchFpMem
-
-	if Ctxt.Flag_locationlists {
-		if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
-			f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
-		}
-		valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
-		for i := range valueToProgAfter {
-			valueToProgAfter[i] = nil
-		}
-	}
-
-	// If the very first instruction is not tagged as a statement,
-	// debuggers may attribute it to the previous function in the program.
-	firstPos := src.NoXPos
-	for _, v := range f.Entry.Values {
-		if v.Pos.IsStmt() == src.PosIsStmt {
-			firstPos = v.Pos
-			v.Pos = firstPos.WithDefaultStmt()
-			break
-		}
-	}
-
-	// inlMarks has an entry for each Prog that implements an inline mark.
-	// It maps from that Prog to the global inlining id of the inlined body
-	// which should unwind to this Prog's location.
-	var inlMarks map[*obj.Prog]int32
-	var inlMarkList []*obj.Prog
-
-	// inlMarksByPos maps from a (column 1) source position to the set of
-	// Progs that are in the set above and have that source position.
-	var inlMarksByPos map[src.XPos][]*obj.Prog
-
-	// Emit basic blocks
-	for i, b := range f.Blocks {
-		s.bstart[b.ID] = s.pp.next
-		s.lineRunStart = nil
-
-		// Attach a "default" liveness info. Normally this will be
-		// overwritten in the Values loop below for each Value. But
-		// for an empty block this will be used for its control
-		// instruction. We won't use the actual liveness map on a
-		// control instruction. Just mark it something that is
-		// preemptible, unless this function is "all unsafe".
-		s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)}
-
-		// Emit values in block
-		thearch.SSAMarkMoves(&s, b)
-		for _, v := range b.Values {
-			x := s.pp.next
-			s.DebugFriendlySetPosFrom(v)
-
-			switch v.Op {
-			case ssa.OpInitMem:
-				// memory arg needs no code
-			case ssa.OpArg:
-				// input args need no code
-			case ssa.OpSP, ssa.OpSB:
-				// nothing to do
-			case ssa.OpSelect0, ssa.OpSelect1:
-				// nothing to do
-			case ssa.OpGetG:
-				// nothing to do when there's a g register,
-				// and checkLower complains if there's not
-			case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
-				// nothing to do; already used by liveness
-			case ssa.OpPhi:
-				CheckLoweredPhi(v)
-			case ssa.OpConvert:
-				// nothing to do; no-op conversion for liveness
-				if v.Args[0].Reg() != v.Reg() {
-					v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
-				}
-			case ssa.OpInlMark:
-				p := thearch.Ginsnop(s.pp)
-				if inlMarks == nil {
-					inlMarks = map[*obj.Prog]int32{}
-					inlMarksByPos = map[src.XPos][]*obj.Prog{}
-				}
-				inlMarks[p] = v.AuxInt32()
-				inlMarkList = append(inlMarkList, p)
-				pos := v.Pos.AtColumn1()
-				inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
-
-			default:
-				// Attach this safe point to the next
-				// instruction.
-				s.pp.nextLive = s.livenessMap.Get(v)
-
-				// Special case for first line in function; move it to the start.
-				if firstPos != src.NoXPos {
-					s.SetPos(firstPos)
-					firstPos = src.NoXPos
-				}
-				// let the backend handle it
-				thearch.SSAGenValue(&s, v)
-			}
-
-			if Ctxt.Flag_locationlists {
-				valueToProgAfter[v.ID] = s.pp.next
-			}
-
-			if f.PrintOrHtmlSSA {
-				for ; x != s.pp.next; x = x.Link {
-					progToValue[x] = v
-				}
-			}
-		}
-		// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
-		if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
-			p := thearch.Ginsnop(s.pp)
-			p.Pos = p.Pos.WithIsStmt()
-			if b.Pos == src.NoXPos {
-				b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion.  See #35652.
-				if b.Pos == src.NoXPos {
-					b.Pos = pp.Text.Pos // Sometimes p.Pos is empty.  See #35695.
-				}
-			}
-			b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
-		}
-		// Emit control flow instructions for block
-		var next *ssa.Block
-		if i < len(f.Blocks)-1 && Debug.N == 0 {
-			// If -N, leave next==nil so every block with successors
-			// ends in a JMP (except call blocks - plive doesn't like
-			// select{send,recv} followed by a JMP call).  Helps keep
-			// line numbers for otherwise empty blocks.
-			next = f.Blocks[i+1]
-		}
-		x := s.pp.next
-		s.SetPos(b.Pos)
-		thearch.SSAGenBlock(&s, b, next)
-		if f.PrintOrHtmlSSA {
-			for ; x != s.pp.next; x = x.Link {
-				progToBlock[x] = b
-			}
-		}
-	}
-	if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
-		// We need the return address of a panic call to
-		// still be inside the function in question. So if
-		// it ends in a call which doesn't return, add a
-		// nop (which will never execute) after the call.
-		thearch.Ginsnop(pp)
-	}
-	if openDeferInfo != nil {
-		// When doing open-coded defers, generate a disconnected call to
-		// deferreturn and a return. This will be used during panic
-		// recovery to unwind the stack and return to the runtime.
-		s.pp.nextLive = s.livenessMap.deferreturn
-		gencallret(pp, Deferreturn)
-	}
-
-	if inlMarks != nil {
-		// We have some inline marks. Try to find other instructions we're
-		// going to emit anyway, and use those instructions instead of the
-		// inline marks.
-		for p := pp.Text; p != nil; p = p.Link {
-			if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || thearch.LinkArch.Family == sys.Wasm {
-				// Don't use 0-sized instructions as inline marks, because we need
-				// to identify inline mark instructions by pc offset.
-				// (Some of these instructions are sometimes zero-sized, sometimes not.
-				// We must not use anything that even might be zero-sized.)
-				// TODO: are there others?
-				continue
-			}
-			if _, ok := inlMarks[p]; ok {
-				// Don't use inline marks themselves. We don't know
-				// whether they will be zero-sized or not yet.
-				continue
-			}
-			pos := p.Pos.AtColumn1()
-			s := inlMarksByPos[pos]
-			if len(s) == 0 {
-				continue
-			}
-			for _, m := range s {
-				// We found an instruction with the same source position as
-				// some of the inline marks.
-				// Use this instruction instead.
-				p.Pos = p.Pos.WithIsStmt() // promote position to a statement
-				pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
-				// Make the inline mark a real nop, so it doesn't generate any code.
-				m.As = obj.ANOP
-				m.Pos = src.NoXPos
-				m.From = obj.Addr{}
-				m.To = obj.Addr{}
-			}
-			delete(inlMarksByPos, pos)
-		}
-		// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
-		for _, p := range inlMarkList {
-			if p.As != obj.ANOP {
-				pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
-			}
-		}
-	}
-
-	if Ctxt.Flag_locationlists {
-		e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
-		bstart := s.bstart
-		// Note that at this moment, Prog.Pc is a sequence number; it's
-		// not a real PC until after assembly, so this mapping has to
-		// be done later.
-		e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
-			switch v {
-			case ssa.BlockStart.ID:
-				if b == f.Entry.ID {
-					return 0 // Start at the very beginning, at the assembler-generated prologue.
-					// this should only happen for function args (ssa.OpArg)
-				}
-				return bstart[b].Pc
-			case ssa.BlockEnd.ID:
-				return e.curfn.Func.lsym.Size
-			default:
-				return valueToProgAfter[v].Pc
-			}
-		}
-	}
-
-	// Resolve branches, and relax DefaultStmt into NotStmt
-	for _, br := range s.Branches {
-		br.P.To.SetTarget(s.bstart[br.B.ID])
-		if br.P.Pos.IsStmt() != src.PosIsStmt {
-			br.P.Pos = br.P.Pos.WithNotStmt()
-		} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
-			br.P.Pos = br.P.Pos.WithNotStmt()
-		}
-
-	}
-
-	if e.log { // spew to stdout
-		filename := ""
-		for p := pp.Text; p != nil; p = p.Link {
-			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
-				filename = p.InnermostFilename()
-				f.Logf("# %s\n", filename)
-			}
-
-			var s string
-			if v, ok := progToValue[p]; ok {
-				s = v.String()
-			} else if b, ok := progToBlock[p]; ok {
-				s = b.String()
-			} else {
-				s = "   " // most value and branch strings are 2-3 characters long
-			}
-			f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
-		}
-	}
-	if f.HTMLWriter != nil { // spew to ssa.html
-		var buf bytes.Buffer
-		buf.WriteString("<code>")
-		buf.WriteString("<dl class=\"ssa-gen\">")
-		filename := ""
-		for p := pp.Text; p != nil; p = p.Link {
-			// Don't spam every line with the file name, which is often huge.
-			// Only print changes, and "unknown" is not a change.
-			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
-				filename = p.InnermostFilename()
-				buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
-				buf.WriteString(html.EscapeString("# " + filename))
-				buf.WriteString("</dd>")
-			}
-
-			buf.WriteString("<dt class=\"ssa-prog-src\">")
-			if v, ok := progToValue[p]; ok {
-				buf.WriteString(v.HTML())
-			} else if b, ok := progToBlock[p]; ok {
-				buf.WriteString("<b>" + b.HTML() + "</b>")
-			}
-			buf.WriteString("</dt>")
-			buf.WriteString("<dd class=\"ssa-prog\">")
-			buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
-			buf.WriteString("</dd>")
-		}
-		buf.WriteString("</dl>")
-		buf.WriteString("</code>")
-		f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
-	}
-
-	defframe(&s, e)
-
-	f.HTMLWriter.Close()
-	f.HTMLWriter = nil
-}
-
-func defframe(s *SSAGenState, e *ssafn) {
-	pp := s.pp
-
-	frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
-	if thearch.PadFrame != nil {
-		frame = thearch.PadFrame(frame)
-	}
-
-	// Fill in argument and frame size.
-	pp.Text.To.Type = obj.TYPE_TEXTSIZE
-	pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
-	pp.Text.To.Offset = frame
-
-	// Insert code to zero ambiguously live variables so that the
-	// garbage collector only sees initialized values when it
-	// looks for pointers.
-	p := pp.Text
-	var lo, hi int64
-
-	// Opaque state for backend to use. Current backends use it to
-	// keep track of which helper registers have been zeroed.
-	var state uint32
-
-	// Iterate through declarations. They are sorted in decreasing Xoffset order.
-	for _, n := range e.curfn.Func.Dcl {
-		if !n.Name.Needzero() {
-			continue
-		}
-		if n.Class() != PAUTO {
-			e.Fatalf(n.Pos, "needzero class %d", n.Class())
-		}
-		if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
-			e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
-		}
-
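-		// Ranges separated by at most 2*Widthreg bytes are merged rather
-		// than zeroed separately: writing through a small gap is cheaper
-		// than restarting the zeroing.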
-		if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
-			// Merge with range we already have.
-			lo = n.Xoffset
-			continue
-		}
-
-		// Zero old range
-		p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
-
-		// Set new range.
-		lo = n.Xoffset
-		hi = lo + n.Type.Size()
-	}
-
-	// Zero final range.
-	thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
-}
-
-// IndexJump describes one jump in the sequence of consecutive jump
-// instructions generated to model a specific branch.
-type IndexJump struct {
-	Jump  obj.As
-	Index int
-}
-
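-// oneJump emits a single conditional branch from b to the successor
-// selected by jump.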
-func (s *SSAGenState) oneJump(b *ssa.Block, jump *IndexJump) {
-	p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
-	p.Pos = b.Pos
-}
-
-// CombJump generates a combination of jump instructions (2 at present) for a
-// block jump, so that the behavior of non-standard condition codes can be
-// simulated.
-func (s *SSAGenState) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
-	switch next {
-	case b.Succs[0].Block():
-		s.oneJump(b, &jumps[0][0])
-		s.oneJump(b, &jumps[0][1])
-	case b.Succs[1].Block():
-		s.oneJump(b, &jumps[1][0])
-		s.oneJump(b, &jumps[1][1])
-	default:
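-		// Neither successor is the fallthrough block: branch conditionally
-		// toward the likelier successor, then JMP unconditionally to the other.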
-		var q *obj.Prog
-		if b.Likely != ssa.BranchUnlikely {
-			s.oneJump(b, &jumps[1][0])
-			s.oneJump(b, &jumps[1][1])
-			q = s.Br(obj.AJMP, b.Succs[1].Block())
-		} else {
-			s.oneJump(b, &jumps[0][0])
-			s.oneJump(b, &jumps[0][1])
-			q = s.Br(obj.AJMP, b.Succs[0].Block())
-		}
-		q.Pos = b.Pos
-	}
-}
-
-// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
-func AddAux(a *obj.Addr, v *ssa.Value) {
-	AddAux2(a, v, v.AuxInt)
-}
-func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
-	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
-		v.Fatalf("bad AddAux addr %v", a)
-	}
-	// add integer offset
-	a.Offset += offset
-
-	// If no additional symbol offset, we're done.
-	if v.Aux == nil {
-		return
-	}
-	// Add symbol's offset from its base register.
-	switch n := v.Aux.(type) {
-	case *ssa.AuxCall:
-		a.Name = obj.NAME_EXTERN
-		a.Sym = n.Fn
-	case *obj.LSym:
-		a.Name = obj.NAME_EXTERN
-		a.Sym = n
-	case *Node:
-		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
-			a.Name = obj.NAME_PARAM
-			a.Sym = n.Orig.Sym.Linksym()
-			a.Offset += n.Xoffset
-			break
-		}
-		a.Name = obj.NAME_AUTO
-		a.Sym = n.Sym.Linksym()
-		a.Offset += n.Xoffset
-	default:
-		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
-	}
-}
-
-// extendIndex extends idx to a full int width.
-// It emits code that panics with the given kind if idx does not fit in an int
-// (possible only on 32-bit archs).
-func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
-	size := idx.Type.Size()
-	if size == s.config.PtrSize {
-		return idx
-	}
-	if size > s.config.PtrSize {
-		// truncate 64-bit indexes on 32-bit pointer archs. Test the
-		// high word and branch to out-of-bounds failure if it is not 0.
-		var lo *ssa.Value
-		if idx.Type.IsSigned() {
-			lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx)
-		} else {
-			lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
-		}
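-		// The high-word check can be skipped when the index is already
-		// known to be in bounds or bounds checking is disabled (-B).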
-		if bounded || Debug.B != 0 {
-			return lo
-		}
-		bNext := s.f.NewBlock(ssa.BlockPlain)
-		bPanic := s.f.NewBlock(ssa.BlockExit)
-		hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx)
-		cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
-		if !idx.Type.IsSigned() {
-			switch kind {
-			case ssa.BoundsIndex:
-				kind = ssa.BoundsIndexU
-			case ssa.BoundsSliceAlen:
-				kind = ssa.BoundsSliceAlenU
-			case ssa.BoundsSliceAcap:
-				kind = ssa.BoundsSliceAcapU
-			case ssa.BoundsSliceB:
-				kind = ssa.BoundsSliceBU
-			case ssa.BoundsSlice3Alen:
-				kind = ssa.BoundsSlice3AlenU
-			case ssa.BoundsSlice3Acap:
-				kind = ssa.BoundsSlice3AcapU
-			case ssa.BoundsSlice3B:
-				kind = ssa.BoundsSlice3BU
-			case ssa.BoundsSlice3C:
-				kind = ssa.BoundsSlice3CU
-			}
-		}
-		b := s.endBlock()
-		b.Kind = ssa.BlockIf
-		b.SetControl(cmp)
-		b.Likely = ssa.BranchLikely
-		b.AddEdgeTo(bNext)
-		b.AddEdgeTo(bPanic)
-
-		s.startBlock(bPanic)
-		mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
-		s.endBlock().SetControl(mem)
-		s.startBlock(bNext)
-
-		return lo
-	}
-
-	// Extend value to the required size
-	var op ssa.Op
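-	// The switch key below encodes both sizes in bytes as 10*size + PtrSize;
-	// for example, 14 selects the 1-byte-to-32-bit extension on a
-	// 4-byte-pointer architecture.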
-	if idx.Type.IsSigned() {
-		switch 10*size + s.config.PtrSize {
-		case 14:
-			op = ssa.OpSignExt8to32
-		case 18:
-			op = ssa.OpSignExt8to64
-		case 24:
-			op = ssa.OpSignExt16to32
-		case 28:
-			op = ssa.OpSignExt16to64
-		case 48:
-			op = ssa.OpSignExt32to64
-		default:
-			s.Fatalf("bad signed index extension %s", idx.Type)
-		}
-	} else {
-		switch 10*size + s.config.PtrSize {
-		case 14:
-			op = ssa.OpZeroExt8to32
-		case 18:
-			op = ssa.OpZeroExt8to64
-		case 24:
-			op = ssa.OpZeroExt16to32
-		case 28:
-			op = ssa.OpZeroExt16to64
-		case 48:
-			op = ssa.OpZeroExt32to64
-		default:
-			s.Fatalf("bad unsigned index extension %s", idx.Type)
-		}
-	}
-	return s.newValue1(op, types.Types[TINT], idx)
-}
-
-// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
-// Called during ssaGenValue.
-func CheckLoweredPhi(v *ssa.Value) {
-	if v.Op != ssa.OpPhi {
-		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
-	}
-	if v.Type.IsMemory() {
-		return
-	}
-	f := v.Block.Func
-	loc := f.RegAlloc[v.ID]
-	for _, a := range v.Args {
-		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
-			v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
-		}
-	}
-}
-
-// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
-// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
-// That register contains the closure pointer on closure entry.
-func CheckLoweredGetClosurePtr(v *ssa.Value) {
-	entry := v.Block.Func.Entry
-	if entry != v.Block || entry.Values[0] != v {
-		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
-	}
-}
-
-// AutoVar returns a *Node and int64 representing the auto variable and offset within it
-// where v should be spilled.
-func AutoVar(v *ssa.Value) (*Node, int64) {
-	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
-	if v.Type.Size() > loc.Type.Size() {
-		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
-	}
-	return loc.N.(*Node), loc.Off
-}
-
-func AddrAuto(a *obj.Addr, v *ssa.Value) {
-	n, off := AutoVar(v)
-	a.Type = obj.TYPE_MEM
-	a.Sym = n.Sym.Linksym()
-	a.Reg = int16(thearch.REGSP)
-	a.Offset = n.Xoffset + off
-	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
-		a.Name = obj.NAME_PARAM
-	} else {
-		a.Name = obj.NAME_AUTO
-	}
-}
-
-func (s *SSAGenState) AddrScratch(a *obj.Addr) {
-	if s.ScratchFpMem == nil {
-		panic("no scratch memory available; forgot to declare usesScratch for Op?")
-	}
-	a.Type = obj.TYPE_MEM
-	a.Name = obj.NAME_AUTO
-	a.Sym = s.ScratchFpMem.Sym.Linksym()
-	a.Reg = int16(thearch.REGSP)
-	a.Offset = s.ScratchFpMem.Xoffset
-}
-
-// Call returns a new CALL instruction for the SSA value v.
-// It uses PrepareCall to prepare the call.
-func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
-	pPosIsStmt := s.pp.pos.IsStmt() // The statement-ness of the call comes from ssaGenState
-	s.PrepareCall(v)
-
-	p := s.Prog(obj.ACALL)
-	if pPosIsStmt == src.PosIsStmt {
-		p.Pos = v.Pos.WithIsStmt()
-	} else {
-		p.Pos = v.Pos.WithNotStmt()
-	}
-	if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = sym.Fn
-	} else {
-		// TODO(mdempsky): Can these differences be eliminated?
-		switch thearch.LinkArch.Family {
-		case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
-			p.To.Type = obj.TYPE_REG
-		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
-			p.To.Type = obj.TYPE_MEM
-		default:
-			Fatalf("unknown indirect call family")
-		}
-		p.To.Reg = v.Args[0].Reg()
-	}
-	return p
-}
-
-// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
-// It must be called immediately before emitting the actual CALL instruction,
-// since it emits PCDATA for the stack map at the call (calls are safe points).
-func (s *SSAGenState) PrepareCall(v *ssa.Value) {
-	idx := s.livenessMap.Get(v)
-	if !idx.StackMapValid() {
-		// See Liveness.hasStackMap.
-		if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
-			Fatalf("missing stack map index for %v", v.LongString())
-		}
-	}
-
-	call, ok := v.Aux.(*ssa.AuxCall)
-
-	if ok && call.Fn == Deferreturn {
-		// Deferred calls will appear to be returning to
-		// the CALL deferreturn(SB) that we are about to emit.
-		// However, the stack trace code will show the line
-		// of the instruction byte before the return PC.
-		// To avoid that being an unrelated instruction,
-		// insert an actual hardware NOP that will have the right line number.
-		// This is different from obj.ANOP, which is a virtual no-op
-		// that doesn't make it into the instruction stream.
-		thearch.Ginsnopdefer(s.pp)
-	}
-
-	if ok {
-		// Record call graph information for nowritebarrierrec
-		// analysis.
-		if nowritebarrierrecCheck != nil {
-			nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos)
-		}
-	}
-
-	if s.maxarg < v.AuxInt {
-		s.maxarg = v.AuxInt
-	}
-}
-
-// UseArgs records the fact that an instruction needs a certain amount of
-// callee args space for its use.
-func (s *SSAGenState) UseArgs(n int64) {
-	if s.maxarg < n {
-		s.maxarg = n
-	}
-}
-
-// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *Node) int {
-	t := n.Left.Type
-	f := n.Sym
-	if !t.IsStruct() {
-		panic("ODOT's LHS is not a struct")
-	}
-
-	var i int
-	for _, t1 := range t.Fields().Slice() {
-		if t1.Sym != f {
-			i++
-			continue
-		}
-		if t1.Offset != n.Xoffset {
-			panic("field offset doesn't match")
-		}
-		return i
-	}
-	panic(fmt.Sprintf("can't find field in expr %v\n", n))
-
-	// TODO: keep the result of this function somewhere in the ODOT Node
-	// so we don't have to recompute it each time we need it.
-}
-
-// ssafn holds frontend information about a function that the backend is processing.
-// It also exports a bunch of compiler services for the ssa backend.
-type ssafn struct {
-	curfn        *Node
-	strings      map[string]*obj.LSym // map from constant string to data symbols
-	scratchFpMem *Node                // temp for floating point register / memory moves on some architectures
-	stksize      int64                // stack size for current frame
-	stkptrsize   int64                // prefix of stack containing pointers
-	log          bool                 // print ssa debug to the stdout
-}
-
-// StringData returns a symbol which
-// is the data component of a global string constant containing s.
-func (e *ssafn) StringData(s string) *obj.LSym {
-	if aux, ok := e.strings[s]; ok {
-		return aux
-	}
-	if e.strings == nil {
-		e.strings = make(map[string]*obj.LSym)
-	}
-	data := stringsym(e.curfn.Pos, s)
-	e.strings[s] = data
-	return data
-}
-
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
-	n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
-	return n
-}
-
-func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	ptrType := types.NewPtr(types.Types[TUINT8])
-	lenType := types.Types[TINT]
-	// Split this string up into two separate variables.
-	p := e.SplitSlot(&name, ".ptr", 0, ptrType)
-	l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
-	return p, l
-}
-
-func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	n := name.N.(*Node)
-	u := types.Types[TUINTPTR]
-	t := types.NewPtr(types.Types[TUINT8])
-	// Split this interface up into two separate variables.
-	f := ".itab"
-	if n.Type.IsEmptyInterface() {
-		f = ".type"
-	}
-	c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
-	d := e.SplitSlot(&name, ".data", u.Size(), t)
-	return c, d
-}
-
-func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
-	ptrType := types.NewPtr(name.Type.Elem())
-	lenType := types.Types[TINT]
-	p := e.SplitSlot(&name, ".ptr", 0, ptrType)
-	l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
-	c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
-	return p, l, c
-}
-
-func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	s := name.Type.Size() / 2
-	var t *types.Type
-	if s == 8 {
-		t = types.Types[TFLOAT64]
-	} else {
-		t = types.Types[TFLOAT32]
-	}
-	r := e.SplitSlot(&name, ".real", 0, t)
-	i := e.SplitSlot(&name, ".imag", t.Size(), t)
-	return r, i
-}
-
-func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	var t *types.Type
-	if name.Type.IsSigned() {
-		t = types.Types[TINT32]
-	} else {
-		t = types.Types[TUINT32]
-	}
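-	// On big-endian machines the high 32 bits come first in memory;
-	// on little-endian machines the low 32 bits do.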
-	if thearch.LinkArch.ByteOrder == binary.BigEndian {
-		return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
-	}
-	return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
-}
-
-func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
-	st := name.Type
-	// Note: the _ field may appear several times.  But
-	// have no fear, identically-named but distinct Autos are
-	// ok, albeit maybe confusing for a debugger.
-	return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
-}
-
-func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
-	n := name.N.(*Node)
-	at := name.Type
-	if at.NumElem() != 1 {
-		e.Fatalf(n.Pos, "bad array size")
-	}
-	et := at.Elem()
-	return e.SplitSlot(&name, "[0]", 0, et)
-}
-
-func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
-	return itabsym(it, offset)
-}
-
-// SplitSlot returns a slot representing the data of parent starting at offset.
-func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
-	node := parent.N.(*Node)
-
-	if node.Class() != PAUTO || node.Name.Addrtaken() {
-		// addressed things and non-autos retain their parents (i.e., cannot truly be split)
-		return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
-	}
-
-	s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
-
-	n := &Node{
-		Name: new(Name),
-		Op:   ONAME,
-		Pos:  parent.N.(*Node).Pos,
-	}
-	n.Orig = n
-
-	s.Def = asTypesNode(n)
-	asNode(s.Def).Name.SetUsed(true)
-	n.Sym = s
-	n.Type = t
-	n.SetClass(PAUTO)
-	n.Esc = EscNever
-	n.Name.Curfn = e.curfn
-	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
-	dowidth(t)
-	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
-}
-
-func (e *ssafn) CanSSA(t *types.Type) bool {
-	return canSSAType(t)
-}
-
-func (e *ssafn) Line(pos src.XPos) string {
-	return linestr(pos)
-}
-
-// Logf logs a message from the compiler.
-func (e *ssafn) Logf(msg string, args ...interface{}) {
-	if e.log {
-		fmt.Printf(msg, args...)
-	}
-}
-
-func (e *ssafn) Log() bool {
-	return e.log
-}
-
-// Fatalf reports a compiler error and exits.
-func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
-	lineno = pos
-	nargs := append([]interface{}{e.curfn.funcname()}, args...)
-	Fatalf("'%s': "+msg, nargs...)
-}
-
-// Warnl reports a "warning", which is usually flag-triggered
-// logging output for the benefit of tests.
-func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
-	Warnl(pos, fmt_, args...)
-}
-
-func (e *ssafn) Debug_checknil() bool {
-	return Debug_checknil != 0
-}
-
-func (e *ssafn) UseWriteBarrier() bool {
-	return use_writebarrier
-}
-
-func (e *ssafn) Syslook(name string) *obj.LSym {
-	switch name {
-	case "goschedguarded":
-		return goschedguarded
-	case "writeBarrier":
-		return writeBarrier
-	case "gcWriteBarrier":
-		return gcWriteBarrier
-	case "typedmemmove":
-		return typedmemmove
-	case "typedmemclr":
-		return typedmemclr
-	}
-	e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
-	return nil
-}
-
-func (e *ssafn) SetWBPos(pos src.XPos) {
-	e.curfn.Func.setWBPos(pos)
-}
-
-func (e *ssafn) MyImportPath() string {
-	return myimportpath
-}
-
-func (n *Node) Typ() *types.Type {
-	return n.Type
-}
-func (n *Node) StorageClass() ssa.StorageClass {
-	switch n.Class() {
-	case PPARAM:
-		return ssa.ClassParam
-	case PPARAMOUT:
-		return ssa.ClassParamOut
-	case PAUTO:
-		return ssa.ClassAuto
-	default:
-		Fatalf("untranslatable storage class for %v: %s", n, n.Class())
-		return 0
-	}
-}
-
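-// clobberBase returns the innermost expression that occupies the same
-// storage as n: a field of a single-field struct, or the element of a
-// single-element array, covers its entire enclosing object.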
-func clobberBase(n *Node) *Node {
-	if n.Op == ODOT && n.Left.Type.NumFields() == 1 {
-		return clobberBase(n.Left)
-	}
-	if n.Op == OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 {
-		return clobberBase(n.Left)
-	}
-	return n
-}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
deleted file mode 100644
index defefd76..0000000
--- a/src/cmd/compile/internal/gc/subr.go
+++ /dev/null
@@ -1,1918 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-	"crypto/md5"
-	"encoding/binary"
-	"fmt"
-	"os"
-	"runtime/debug"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-type Error struct {
-	pos src.XPos
-	msg string
-}
-
-var errors []Error
-
-// largeStack is info about a function whose stack frame is too large (rare).
-type largeStack struct {
-	locals int64
-	args   int64
-	callee int64
-	pos    src.XPos
-}
-
-var (
-	largeStackFramesMu sync.Mutex // protects largeStackFrames
-	largeStackFrames   []largeStack
-)
-
-func errorexit() {
-	flusherrors()
-	if outfile != "" {
-		os.Remove(outfile)
-	}
-	os.Exit(2)
-}
-
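-// adderrorname rewrites the most recent "undefined: x" error for the base
-// of selector n into "undefined: x in x.f", naming the full expression.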
-func adderrorname(n *Node) {
-	if n.Op != ODOT {
-		return
-	}
-	old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
-	if len(errors) > 0 && errors[len(errors)-1].pos.Line() == n.Pos.Line() && errors[len(errors)-1].msg == old {
-		errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
-	}
-}
-
-func adderr(pos src.XPos, format string, args ...interface{}) {
-	msg := fmt.Sprintf(format, args...)
-	// Only add the position if we know the position.
-	// See issue golang.org/issue/11361.
-	if pos.IsKnown() {
-		msg = fmt.Sprintf("%v: %s", linestr(pos), msg)
-	}
-	errors = append(errors, Error{
-		pos: pos,
-		msg: msg + "\n",
-	})
-}
-
-// byPos sorts errors by source position.
-type byPos []Error
-
-func (x byPos) Len() int           { return len(x) }
-func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
-func (x byPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-
-// flusherrors sorts errors seen so far by line number, prints them to stdout,
-// and empties the errors array.
-func flusherrors() {
-	Ctxt.Bso.Flush()
-	if len(errors) == 0 {
-		return
-	}
-	sort.Stable(byPos(errors))
-	for i, err := range errors {
-		if i == 0 || err.msg != errors[i-1].msg {
-			fmt.Printf("%s", err.msg)
-		}
-	}
-	errors = errors[:0]
-}
-
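-// hcrash crashes the compiler with a nil dereference when the -h flag is
-// set, so that the first error produces a stack trace for debugging.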
-func hcrash() {
-	if Debug.h != 0 {
-		flusherrors()
-		if outfile != "" {
-			os.Remove(outfile)
-		}
-		var x *int
-		*x = 0
-	}
-}
-
-func linestr(pos src.XPos) string {
-	return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
-}
-
-// lasterror keeps track of the most recently issued error.
-// It is used to avoid multiple error messages on the same
-// line.
-var lasterror struct {
-	syntax src.XPos // source position of last syntax error
-	other  src.XPos // source position of last non-syntax error
-	msg    string   // error message of last non-syntax error
-}
-
-// sameline reports whether two positions a, b are on the same line.
-func sameline(a, b src.XPos) bool {
-	p := Ctxt.PosTable.Pos(a)
-	q := Ctxt.PosTable.Pos(b)
-	return p.Base() == q.Base() && p.Line() == q.Line()
-}
-
-func yyerrorl(pos src.XPos, format string, args ...interface{}) {
-	msg := fmt.Sprintf(format, args...)
-
-	if strings.HasPrefix(msg, "syntax error") {
-		nsyntaxerrors++
-		// only one syntax error per line, no matter what error
-		if sameline(lasterror.syntax, pos) {
-			return
-		}
-		lasterror.syntax = pos
-	} else {
-		// only one of multiple equal non-syntax errors per line
-		// (flusherrors shows only one of them, so we filter
-		// duplicates here as best we can; they may not appear in
-		// order, and we must not count them toward the error limit,
-		// exit early, and then have nothing to show.)
-		if sameline(lasterror.other, pos) && lasterror.msg == msg {
-			return
-		}
-		lasterror.other = pos
-		lasterror.msg = msg
-	}
-
-	adderr(pos, "%s", msg)
-
-	hcrash()
-	nerrors++
-	if nsavederrors+nerrors >= 10 && Debug.e == 0 {
-		flusherrors()
-		fmt.Printf("%v: too many errors\n", linestr(pos))
-		errorexit()
-	}
-}
-
-func yyerrorv(lang string, format string, args ...interface{}) {
-	what := fmt.Sprintf(format, args...)
-	yyerrorl(lineno, "%s requires %s or later (-lang was set to %s; check go.mod)", what, lang, flag_lang)
-}
-
-func yyerror(format string, args ...interface{}) {
-	yyerrorl(lineno, format, args...)
-}
-
-func Warn(fmt_ string, args ...interface{}) {
-	Warnl(lineno, fmt_, args...)
-}
-
-func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
-	adderr(line, fmt_, args...)
-	if Debug.m != 0 {
-		flusherrors()
-	}
-}
-
-func Fatalf(fmt_ string, args ...interface{}) {
-	flusherrors()
-
-	if Debug_panic != 0 || nsavederrors+nerrors == 0 {
-		fmt.Printf("%v: internal compiler error: ", linestr(lineno))
-		fmt.Printf(fmt_, args...)
-		fmt.Printf("\n")
-
-		// If this is a released compiler version, ask for a bug report.
-		if strings.HasPrefix(objabi.Version, "go") {
-			fmt.Printf("\n")
-			fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
-			fmt.Printf("https://golang.org/issue/new\n")
-		} else {
-			// Not a release; dump a stack trace, too.
-			fmt.Println()
-			os.Stdout.Write(debug.Stack())
-			fmt.Println()
-		}
-	}
-
-	hcrash()
-	errorexit()
-}
-
-// hasUniquePos reports whether n has a unique position that can be
-// used for reporting error messages.
-//
-// It's primarily used to distinguish references to named objects,
-// whose Pos will point back to their declaration position rather than
-// their usage position.
-func hasUniquePos(n *Node) bool {
-	switch n.Op {
-	case ONAME, OPACK:
-		return false
-	case OLITERAL, OTYPE:
-		if n.Sym != nil {
-			return false
-		}
-	}
-
-	if !n.Pos.IsKnown() {
-		if Debug.K != 0 {
-			Warn("setlineno: unknown position (line 0)")
-		}
-		return false
-	}
-
-	return true
-}
-
-func setlineno(n *Node) src.XPos {
-	lno := lineno
-	if n != nil && hasUniquePos(n) {
-		lineno = n.Pos
-	}
-	return lno
-}
-
-func lookup(name string) *types.Sym {
-	return localpkg.Lookup(name)
-}
-
-// lookupN looks up the symbol starting with prefix and ending with
-// the decimal n. If prefix is too long, lookupN panics.
-func lookupN(prefix string, n int) *types.Sym {
-	var buf [20]byte // plenty long enough for all current users
-	copy(buf[:], prefix)
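-	// If prefix does not fit in buf, the reslice below is out of range
-	// and panics, as documented above.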
-	b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
-	return localpkg.LookupBytes(b)
-}
-
-// autolabel generates a new Name node for use with
-// an automatically generated label.
-// prefix is a short mnemonic (e.g. ".s" for switch)
-// to help with debugging.
-// It should begin with "." to avoid conflicts with
-// user labels.
-func autolabel(prefix string) *types.Sym {
-	if prefix[0] != '.' {
-		Fatalf("autolabel prefix must start with '.', have %q", prefix)
-	}
-	fn := Curfn
-	if Curfn == nil {
-		Fatalf("autolabel outside function")
-	}
-	n := fn.Func.Label
-	fn.Func.Label++
-	return lookupN(prefix, int(n))
-}
-
-// find all the exported symbols in package opkg
-// and make them available in the current package
-func importdot(opkg *types.Pkg, pack *Node) {
-	n := 0
-	for _, s := range opkg.Syms {
-		if s.Def == nil {
-			continue
-		}
-		if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
-			continue
-		}
-		s1 := lookup(s.Name)
-		if s1.Def != nil {
-			pkgerror := fmt.Sprintf("during import %q", opkg.Path)
-			redeclare(lineno, s1, pkgerror)
-			continue
-		}
-
-		s1.Def = s.Def
-		s1.Block = s.Block
-		if asNode(s1.Def).Name == nil {
-			Dump("s1def", asNode(s1.Def))
-			Fatalf("missing Name")
-		}
-		asNode(s1.Def).Name.Pack = pack
-		s1.Origpkg = opkg
-		n++
-	}
-
-	if n == 0 {
-		// can't possibly be used - there were no symbols
-		yyerrorl(pack.Pos, "imported and not used: %q", opkg.Path)
-	}
-}
-
-func nod(op Op, nleft, nright *Node) *Node {
-	return nodl(lineno, op, nleft, nright)
-}
-
-func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node {
-	var n *Node
-	switch op {
-	case OCLOSURE, ODCLFUNC:
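-		// Allocate the Node and its Func field in a single allocation.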
-		var x struct {
-			n Node
-			f Func
-		}
-		n = &x.n
-		n.Func = &x.f
-	case ONAME:
-		Fatalf("use newname instead")
-	case OLABEL, OPACK:
-		var x struct {
-			n Node
-			m Name
-		}
-		n = &x.n
-		n.Name = &x.m
-	default:
-		n = new(Node)
-	}
-	n.Op = op
-	n.Left = nleft
-	n.Right = nright
-	n.Pos = pos
-	n.Xoffset = BADWIDTH
-	n.Orig = n
-	return n
-}
-
-// newname returns a new ONAME Node associated with symbol s.
-func newname(s *types.Sym) *Node {
-	n := newnamel(lineno, s)
-	n.Name.Curfn = Curfn
-	return n
-}
-
-// newnamel returns a new ONAME Node associated with symbol s at position pos.
-// The caller is responsible for setting n.Name.Curfn.
-func newnamel(pos src.XPos, s *types.Sym) *Node {
-	if s == nil {
-		Fatalf("newnamel nil")
-	}
-
-	var x struct {
-		n Node
-		m Name
-		p Param
-	}
-	n := &x.n
-	n.Name = &x.m
-	n.Name.Param = &x.p
-
-	n.Op = ONAME
-	n.Pos = pos
-	n.Orig = n
-
-	n.Sym = s
-	return n
-}
-
-// nodSym makes a Node with Op op and with the Left field set to left
-// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op Op, left *Node, sym *types.Sym) *Node {
-	return nodlSym(lineno, op, left, sym)
-}
-
-// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
-// and the Sym field set to sym. This is for ODOT and friends.
-func nodlSym(pos src.XPos, op Op, left *Node, sym *types.Sym) *Node {
-	n := nodl(pos, op, left, nil)
-	n.Sym = sym
-	return n
-}
-
-// rawcopy returns a shallow copy of n.
-// Note: copy or sepcopy (rather than rawcopy) is usually the
-//       correct choice (see comment with Node.copy, below).
-func (n *Node) rawcopy() *Node {
-	copy := *n
-	return &copy
-}
-
-// sepcopy returns a separate shallow copy of n, with the copy's
-// Orig pointing to itself.
-func (n *Node) sepcopy() *Node {
-	copy := *n
-	copy.Orig = &copy
-	return &copy
-}
-
-// copy returns a shallow copy of n and adjusts the copy's Orig if
-// necessary: In general, if n.Orig points to itself, the copy's
-// Orig should point to itself as well. Otherwise, if n is modified,
-// the copy's Orig node appears modified, too, and then doesn't
-// represent the original node anymore.
-// (This caused the wrong complit Op to be used when printing error
-// messages; see issues #26855, #27765).
-func (n *Node) copy() *Node {
-	copy := *n
-	if n.Orig == n {
-		copy.Orig = &copy
-	}
-	return &copy
-}
-
-// methcmp sorts methods by symbol.
-type methcmp []*types.Field
-
-func (x methcmp) Len() int           { return len(x) }
-func (x methcmp) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-
-func nodintconst(v int64) *Node {
-	u := new(Mpint)
-	u.SetInt64(v)
-	return nodlit(Val{u})
-}
-
-func nodnil() *Node {
-	return nodlit(Val{new(NilVal)})
-}
-
-func nodbool(b bool) *Node {
-	return nodlit(Val{b})
-}
-
-func nodstr(s string) *Node {
-	return nodlit(Val{s})
-}
-
-// treecopy recursively copies n, with the exception of
-// ONAME, OLITERAL, OTYPE, and ONONAME leaves.
-// If pos.IsKnown(), it sets the source position of newly
-// allocated nodes to pos.
-func treecopy(n *Node, pos src.XPos) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	default:
-		m := n.sepcopy()
-		m.Left = treecopy(n.Left, pos)
-		m.Right = treecopy(n.Right, pos)
-		m.List.Set(listtreecopy(n.List.Slice(), pos))
-		if pos.IsKnown() {
-			m.Pos = pos
-		}
-		if m.Name != nil && n.Op != ODCLFIELD {
-			Dump("treecopy", n)
-			Fatalf("treecopy Name")
-		}
-		return m
-
-	case OPACK:
-		// OPACK nodes are never valid in const value declarations,
-		// but allow them like any other declared symbol to avoid
-		// crashing (golang.org/issue/11361).
-		fallthrough
-
-	case ONAME, ONONAME, OLITERAL, OTYPE:
-		return n
-
-	}
-}
-
-// isNil reports whether n represents the universal untyped zero value "nil".
-func (n *Node) isNil() bool {
-	// Check n.Orig because constant propagation may produce typed nil constants,
-	// which don't exist in the Go spec.
-	return Isconst(n.Orig, CTNIL)
-}
-
-func isptrto(t *types.Type, et types.EType) bool {
-	if t == nil {
-		return false
-	}
-	if !t.IsPtr() {
-		return false
-	}
-	t = t.Elem()
-	if t == nil {
-		return false
-	}
-	if t.Etype != et {
-		return false
-	}
-	return true
-}
-
-func (n *Node) isBlank() bool {
-	if n == nil {
-		return false
-	}
-	return n.Sym.IsBlank()
-}
-
-// methtype returns the underlying type, if any,
-// that owns methods with receiver parameter t.
-// The result is either a named type or an anonymous struct.
-func methtype(t *types.Type) *types.Type {
-	if t == nil {
-		return nil
-	}
-
-	// Strip away pointer if it's there.
-	if t.IsPtr() {
-		if t.Sym != nil {
-			return nil
-		}
-		t = t.Elem()
-		if t == nil {
-			return nil
-		}
-	}
-
-	// Must be a named type or anonymous struct.
-	if t.Sym == nil && !t.IsStruct() {
-		return nil
-	}
-
-	// Check types.
-	if issimple[t.Etype] {
-		return t
-	}
-	switch t.Etype {
-	case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
-		return t
-	}
-	return nil
-}
-
-// Is a value of type src assignable to type dst?
-// If so, return op code to use in conversion.
-// If not, return OXXX. In this case, the string return parameter may
-// hold a reason why. In all other cases, it'll be the empty string.
-func assignop(src, dst *types.Type) (Op, string) {
-	if src == dst {
-		return OCONVNOP, ""
-	}
-	if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
-		return OXXX, ""
-	}
-
-	// 1. src type is identical to dst.
-	if types.Identical(src, dst) {
-		return OCONVNOP, ""
-	}
-
-	// 2. src and dst have identical underlying types
-	// and either src or dst is not a named type or
-	// both are empty interface types.
-	// For assignable but different non-empty interface types,
-	// we want to recompute the itab. Recomputing the itab ensures
-	// that itabs are unique (thus an interface with a compile-time
-	// type I has an itab with interface type I).
-	if types.Identical(src.Orig, dst.Orig) {
-		if src.IsEmptyInterface() {
-			// Conversion between two empty interfaces
-			// requires no code.
-			return OCONVNOP, ""
-		}
-		if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
-			// Conversion between two types, at least one unnamed,
-			// needs no conversion. The exception is nonempty interfaces
-			// which need to have their itab updated.
-			return OCONVNOP, ""
-		}
-	}
-
-	// 3. dst is an interface type and src implements dst.
-	if dst.IsInterface() && src.Etype != TNIL {
-		var missing, have *types.Field
-		var ptr int
-		if implements(src, dst, &missing, &have, &ptr) {
-			return OCONVIFACE, ""
-		}
-
-		// we'll have complained about this method anyway, suppress spurious messages.
-		if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
-			return OCONVIFACE, ""
-		}
-
-		var why string
-		if isptrto(src, TINTER) {
-			why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
-		} else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
-			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
-		} else if have != nil && have.Sym == missing.Sym {
-			why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
-				"\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-		} else if ptr != 0 {
-			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
-		} else if have != nil {
-			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
-				"\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-		} else {
-			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
-		}
-
-		return OXXX, why
-	}
-
-	if isptrto(dst, TINTER) {
-		why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
-		return OXXX, why
-	}
-
-	if src.IsInterface() && dst.Etype != TBLANK {
-		var missing, have *types.Field
-		var ptr int
-		var why string
-		if implements(dst, src, &missing, &have, &ptr) {
-			why = ": need type assertion"
-		}
-		return OXXX, why
-	}
-
-	// 4. src is a bidirectional channel value, dst is a channel type,
-	// src and dst have identical element types, and
-	// either src or dst is not a named type.
-	if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
-		if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
-			return OCONVNOP, ""
-		}
-	}
-
-	// 5. src is the predeclared identifier nil and dst is a nillable type.
-	if src.Etype == TNIL {
-		switch dst.Etype {
-		case TPTR,
-			TFUNC,
-			TMAP,
-			TCHAN,
-			TINTER,
-			TSLICE:
-			return OCONVNOP, ""
-		}
-	}
-
-	// 6. rule about untyped constants - already converted by defaultlit.
-
-	// 7. Any typed value can be assigned to the blank identifier.
-	if dst.Etype == TBLANK {
-		return OCONVNOP, ""
-	}
-
-	return OXXX, ""
-}
-
-// Can we convert a value of type src to a value of type dst?
-// If so, return op code to use in conversion (maybe OCONVNOP).
-// If not, return OXXX. In this case, the string return parameter may
-// hold a reason why. In all other cases, it'll be the empty string.
-// srcConstant indicates whether the value of type src is a constant.
-func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
-	if src == dst {
-		return OCONVNOP, ""
-	}
-	if src == nil || dst == nil {
-		return OXXX, ""
-	}
-
-	// Conversions from regular to go:notinheap are not allowed
-	// (unless it's unsafe.Pointer). These are runtime-specific
-	// rules.
-	// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
-	if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
-		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
-		return OXXX, why
-	}
-	// (b) Disallow string to []T where T is go:notinheap.
-	if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
-		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
-		return OXXX, why
-	}
-
-	// 1. src can be assigned to dst.
-	op, why := assignop(src, dst)
-	if op != OXXX {
-		return op, why
-	}
-
-	// The rules for interfaces are no different in conversions
-	// than assignments. If interfaces are involved, stop now
-	// with the good message from assignop.
-	// Otherwise clear the error.
-	if src.IsInterface() || dst.IsInterface() {
-		return OXXX, why
-	}
-
-	// 2. Ignoring struct tags, src and dst have identical underlying types.
-	if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
-		return OCONVNOP, ""
-	}
-
-	// 3. src and dst are unnamed pointer types and, ignoring struct tags,
-	// their base types have identical underlying types.
-	if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
-		if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
-			return OCONVNOP, ""
-		}
-	}
-
-	// 4. src and dst are both integer or floating point types.
-	if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
-		if simtype[src.Etype] == simtype[dst.Etype] {
-			return OCONVNOP, ""
-		}
-		return OCONV, ""
-	}
-
-	// 5. src and dst are both complex types.
-	if src.IsComplex() && dst.IsComplex() {
-		if simtype[src.Etype] == simtype[dst.Etype] {
-			return OCONVNOP, ""
-		}
-		return OCONV, ""
-	}
-
-	// Special case for constant conversions: any numeric
-	// conversion is potentially okay. We'll validate further
-	// within evconst. See #38117.
-	if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
-		return OCONV, ""
-	}
-
-	// 6. src is an integer or has type []byte or []rune
-	// and dst is a string type.
-	if src.IsInteger() && dst.IsString() {
-		return ORUNESTR, ""
-	}
-
-	if src.IsSlice() && dst.IsString() {
-		if src.Elem().Etype == types.Bytetype.Etype {
-			return OBYTES2STR, ""
-		}
-		if src.Elem().Etype == types.Runetype.Etype {
-			return ORUNES2STR, ""
-		}
-	}
-
-	// 7. src is a string and dst is []byte or []rune.
-	// String to slice.
-	if src.IsString() && dst.IsSlice() {
-		if dst.Elem().Etype == types.Bytetype.Etype {
-			return OSTR2BYTES, ""
-		}
-		if dst.Elem().Etype == types.Runetype.Etype {
-			return OSTR2RUNES, ""
-		}
-	}
-
-	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
-	if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
-		return OCONVNOP, ""
-	}
-
-	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
-	if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
-		return OCONVNOP, ""
-	}
-
-	// src is map and dst is a pointer to corresponding hmap.
-	// This rule is needed for the implementation detail that
-	// go gc maps are implemented as a pointer to a hmap struct.
-	if src.Etype == TMAP && dst.IsPtr() &&
-		src.MapType().Hmap == dst.Elem() {
-		return OCONVNOP, ""
-	}
-
-	return OXXX, ""
-}
-
-func assignconv(n *Node, t *types.Type, context string) *Node {
-	return assignconvfn(n, t, func() string { return context })
-}
-
-// Convert node n for assignment to type t.
-func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
-	if n == nil || n.Type == nil || n.Type.Broke() {
-		return n
-	}
-
-	if t.Etype == TBLANK && n.Type.Etype == TNIL {
-		yyerror("use of untyped nil")
-	}
-
-	n = convlit1(n, t, false, context)
-	if n.Type == nil {
-		return n
-	}
-	if t.Etype == TBLANK {
-		return n
-	}
-
-	// Convert ideal bool from comparison to plain bool
-	// if the next step is non-bool (like interface{}).
-	if n.Type == types.UntypedBool && !t.IsBoolean() {
-		if n.Op == ONAME || n.Op == OLITERAL {
-			r := nod(OCONVNOP, n, nil)
-			r.Type = types.Types[TBOOL]
-			r.SetTypecheck(1)
-			r.SetImplicit(true)
-			n = r
-		}
-	}
-
-	if types.Identical(n.Type, t) {
-		return n
-	}
-
-	op, why := assignop(n.Type, t)
-	if op == OXXX {
-		yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
-		op = OCONV
-	}
-
-	r := nod(op, n, nil)
-	r.Type = t
-	r.SetTypecheck(1)
-	r.SetImplicit(true)
-	r.Orig = n.Orig
-	return r
-}
-
-// IsMethod reports whether n is a method.
-// n must be a function or a method.
-func (n *Node) IsMethod() bool {
-	return n.Type.Recv() != nil
-}
-
-// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
-// n must be a slice expression. max is nil if n is a simple slice expression.
-func (n *Node) SliceBounds() (low, high, max *Node) {
-	if n.List.Len() == 0 {
-		return nil, nil, nil
-	}
-
-	switch n.Op {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		s := n.List.Slice()
-		return s[0], s[1], nil
-	case OSLICE3, OSLICE3ARR:
-		s := n.List.Slice()
-		return s[0], s[1], s[2]
-	}
-	Fatalf("SliceBounds op %v: %v", n.Op, n)
-	return nil, nil, nil
-}
-
-// SetSliceBounds sets n's slice bounds, where n is a slice expression.
-// n must be a slice expression. If max is non-nil, n must be a full slice expression.
-func (n *Node) SetSliceBounds(low, high, max *Node) {
-	switch n.Op {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		if max != nil {
-			Fatalf("SetSliceBounds %v given three bounds", n.Op)
-		}
-		s := n.List.Slice()
-		if s == nil {
-			if low == nil && high == nil {
-				return
-			}
-			n.List.Set2(low, high)
-			return
-		}
-		s[0] = low
-		s[1] = high
-		return
-	case OSLICE3, OSLICE3ARR:
-		s := n.List.Slice()
-		if s == nil {
-			if low == nil && high == nil && max == nil {
-				return
-			}
-			n.List.Set3(low, high, max)
-			return
-		}
-		s[0] = low
-		s[1] = high
-		s[2] = max
-		return
-	}
-	Fatalf("SetSliceBounds op %v: %v", n.Op, n)
-}
-
-// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
-// o must be a slicing op.
-func (o Op) IsSlice3() bool {
-	switch o {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		return false
-	case OSLICE3, OSLICE3ARR:
-		return true
-	}
-	Fatalf("IsSlice3 op %v", o)
-	return false
-}
-
-// backingArrayPtrLen extracts the pointer and length from a slice or string.
-// This constructs two nodes referring to n, so n must be a cheapexpr.
-func (n *Node) backingArrayPtrLen() (ptr, len *Node) {
-	var init Nodes
-	c := cheapexpr(n, &init)
-	if c != n || init.Len() != 0 {
-		Fatalf("backingArrayPtrLen not cheap: %v", n)
-	}
-	ptr = nod(OSPTR, n, nil)
-	if n.Type.IsString() {
-		ptr.Type = types.Types[TUINT8].PtrTo()
-	} else {
-		ptr.Type = n.Type.Elem().PtrTo()
-	}
-	len = nod(OLEN, n, nil)
-	len.Type = types.Types[TINT]
-	return ptr, len
-}
-
-// labeledControl returns the control flow Node (for, switch, select)
-// associated with the label n, if any.
-func (n *Node) labeledControl() *Node {
-	if n.Op != OLABEL {
-		Fatalf("labeledControl %v", n.Op)
-	}
-	ctl := n.Name.Defn
-	if ctl == nil {
-		return nil
-	}
-	switch ctl.Op {
-	case OFOR, OFORUNTIL, OSWITCH, OSELECT:
-		return ctl
-	}
-	return nil
-}
-
-func syslook(name string) *Node {
-	s := Runtimepkg.Lookup(name)
-	if s == nil || s.Def == nil {
-		Fatalf("syslook: can't find runtime.%s", name)
-	}
-	return asNode(s.Def)
-}
-
-// typehash computes a hash value for type t to use in type switch statements.
-func typehash(t *types.Type) uint32 {
-	p := t.LongString()
-
-	// Using MD5 is overkill, but reduces accidental collisions.
-	h := md5.Sum([]byte(p))
-	return binary.LittleEndian.Uint32(h[:4])
-}
-
-// updateHasCall checks whether expression n contains any function
-// calls and sets the n.HasCall flag if so.
-func updateHasCall(n *Node) {
-	if n == nil {
-		return
-	}
-	n.SetHasCall(calcHasCall(n))
-}
-
-func calcHasCall(n *Node) bool {
-	if n.Ninit.Len() != 0 {
-		// TODO(mdempsky): This seems overly conservative.
-		return true
-	}
-
-	switch n.Op {
-	case OLITERAL, ONAME, OTYPE:
-		if n.HasCall() {
-			Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
-		}
-		return false
-	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
-		return true
-	case OANDAND, OOROR:
-		// hard with instrumented code
-		if instrumenting {
-			return true
-		}
-	case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
-		ODEREF, ODOTPTR, ODOTTYPE, ODIV, OMOD:
-		// These ops might panic; make sure they are done
-		// before we start marshaling args for a call. See issue 16760.
-		return true
-
-	// When using soft-float, these ops might be rewritten to function calls
-	// so we ensure they are evaluated first.
-	case OADD, OSUB, ONEG, OMUL:
-		if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) {
-			return true
-		}
-	case OLT, OEQ, ONE, OLE, OGE, OGT:
-		if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) {
-			return true
-		}
-	case OCONV:
-		if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) {
-			return true
-		}
-	}
-
-	if n.Left != nil && n.Left.HasCall() {
-		return true
-	}
-	if n.Right != nil && n.Right.HasCall() {
-		return true
-	}
-	return false
-}
-
-func badtype(op Op, tl, tr *types.Type) {
-	var s string
-	if tl != nil {
-		s += fmt.Sprintf("\n\t%v", tl)
-	}
-	if tr != nil {
-		s += fmt.Sprintf("\n\t%v", tr)
-	}
-
-	// common mistake: *struct and *interface.
-	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
-		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
-			s += "\n\t(*struct vs *interface)"
-		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
-			s += "\n\t(*interface vs *struct)"
-		}
-	}
-
-	yyerror("illegal types for operand: %v%s", op, s)
-}
-
-// brcom returns !(op).
-// For example, brcom(==) is !=.
-func brcom(op Op) Op {
-	switch op {
-	case OEQ:
-		return ONE
-	case ONE:
-		return OEQ
-	case OLT:
-		return OGE
-	case OGT:
-		return OLE
-	case OLE:
-		return OGT
-	case OGE:
-		return OLT
-	}
-	Fatalf("brcom: no com for %v\n", op)
-	return op
-}
-
-// brrev returns reverse(op).
-// For example, brrev(<) is >.
-func brrev(op Op) Op {
-	switch op {
-	case OEQ:
-		return OEQ
-	case ONE:
-		return ONE
-	case OLT:
-		return OGT
-	case OGT:
-		return OLT
-	case OLE:
-		return OGE
-	case OGE:
-		return OLE
-	}
-	Fatalf("brrev: no rev for %v\n", op)
-	return op
-}
-
-// return a side-effect-free n, appending side effects to init.
-// result is assignable if n is.
-func safeexpr(n *Node, init *Nodes) *Node {
-	if n == nil {
-		return nil
-	}
-
-	if n.Ninit.Len() != 0 {
-		walkstmtlist(n.Ninit.Slice())
-		init.AppendNodes(&n.Ninit)
-	}
-
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-
-	case ODOT, OLEN, OCAP:
-		l := safeexpr(n.Left, init)
-		if l == n.Left {
-			return n
-		}
-		r := n.copy()
-		r.Left = l
-		r = typecheck(r, ctxExpr)
-		r = walkexpr(r, init)
-		return r
-
-	case ODOTPTR, ODEREF:
-		l := safeexpr(n.Left, init)
-		if l == n.Left {
-			return n
-		}
-		a := n.copy()
-		a.Left = l
-		a = walkexpr(a, init)
-		return a
-
-	case OINDEX, OINDEXMAP:
-		l := safeexpr(n.Left, init)
-		r := safeexpr(n.Right, init)
-		if l == n.Left && r == n.Right {
-			return n
-		}
-		a := n.copy()
-		a.Left = l
-		a.Right = r
-		a = walkexpr(a, init)
-		return a
-
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
-		if isStaticCompositeLiteral(n) {
-			return n
-		}
-	}
-
-	// make a copy; must not be used as an lvalue
-	if islvalue(n) {
-		Fatalf("missing lvalue case in safeexpr: %v", n)
-	}
-	return cheapexpr(n, init)
-}
-
-func copyexpr(n *Node, t *types.Type, init *Nodes) *Node {
-	l := temp(t)
-	a := nod(OAS, l, n)
-	a = typecheck(a, ctxStmt)
-	a = walkexpr(a, init)
-	init.Append(a)
-	return l
-}
-
-// return a side-effect-free and cheap n, appending side effects to init.
-// result may not be assignable.
-func cheapexpr(n *Node, init *Nodes) *Node {
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-	}
-
-	return copyexpr(n, n.Type, init)
-}
-
-// Code to resolve elided DOTs in embedded types.
-
-// A Dlist stores a pointer to a TFIELD Type embedded within
-// a TSTRUCT or TINTER Type.
-type Dlist struct {
-	field *types.Field
-}
-
-// dotlist is used by adddot1 to record the path of embedded fields
-// used to access a target field or method.
-// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
-var dotlist = make([]Dlist, 10)
-
-// lookdot0 returns the number of fields or methods named s associated
-// with Type t. If exactly one exists, it will be returned in *save
-// (if save is not nil).
-func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
-	u := t
-	if u.IsPtr() {
-		u = u.Elem()
-	}
-
-	c := 0
-	if u.IsStruct() || u.IsInterface() {
-		for _, f := range u.Fields().Slice() {
-			if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
-				if save != nil {
-					*save = f
-				}
-				c++
-			}
-		}
-	}
-
-	u = t
-	if t.Sym != nil && t.IsPtr() && !t.Elem().IsPtr() {
-		// If t is a defined pointer type, then x.m is shorthand for (*x).m.
-		u = t.Elem()
-	}
-	u = methtype(u)
-	if u != nil {
-		for _, f := range u.Methods().Slice() {
-			if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
-				if save != nil {
-					*save = f
-				}
-				c++
-			}
-		}
-	}
-
-	return c
-}
-
-// adddot1 returns the number of fields or methods named s at depth d in Type t.
-// If exactly one exists, it will be returned in *save (if save is not nil),
-// and dotlist will contain the path of embedded fields traversed to find it,
-// in reverse order. If none exist, more will indicate whether t contains any
-// embedded fields at depth d, so callers can decide whether to retry at
-// a greater depth.
-func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
-	if t.Recur() {
-		return
-	}
-	t.SetRecur(true)
-	defer t.SetRecur(false)
-
-	var u *types.Type
-	d--
-	if d < 0 {
-		// We've reached our target depth. If t has any fields/methods
-		// named s, then we're done. Otherwise, we still need to check
-		// below for embedded fields.
-		c = lookdot0(s, t, save, ignorecase)
-		if c != 0 {
-			return c, false
-		}
-	}
-
-	u = t
-	if u.IsPtr() {
-		u = u.Elem()
-	}
-	if !u.IsStruct() && !u.IsInterface() {
-		return c, false
-	}
-
-	for _, f := range u.Fields().Slice() {
-		if f.Embedded == 0 || f.Sym == nil {
-			continue
-		}
-		if d < 0 {
-			// Found an embedded field at target depth.
-			return c, true
-		}
-		a, more1 := adddot1(s, f.Type, d, save, ignorecase)
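-		// Remember the first embedded field through which a match was
-		// found; dotlist accumulates the path in reverse order.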
-		if a != 0 && c == 0 {
-			dotlist[d].field = f
-		}
-		c += a
-		if more1 {
-			more = true
-		}
-	}
-
-	return c, more
-}
-
-// dotpath computes the unique shortest explicit selector path to fully qualify
-// a selection expression x.f, where x is of type t and f is the symbol s.
-// If no such path exists, dotpath returns nil.
-// If there are multiple shortest paths to the same depth, ambig is true.
-func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []Dlist, ambig bool) {
-	// The embedding of types within structs imposes a tree structure onto
-	// types: structs parent the types they embed, and types parent their
-	// fields or methods. Our goal here is to find the shortest path to
-	// a field or method named s in the subtree rooted at t. To accomplish
-	// that, we iteratively perform depth-first searches of increasing depth
-	// until we either find the named field/method or exhaust the tree.
-	for d := 0; ; d++ {
-		if d > len(dotlist) {
-			dotlist = append(dotlist, Dlist{})
-		}
-		if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
-			return dotlist[:d], false
-		} else if c > 1 {
-			return nil, true
-		} else if !more {
-			return nil, false
-		}
-	}
-}
-
-// adddot rewrites the selector expression n (T.field), inserting the
-// elided embedded fields that give the shortest unique addressing and
-// adding the missing type names to the tree.
-func adddot(n *Node) *Node {
-	n.Left = typecheck(n.Left, ctxType|ctxExpr)
-	if n.Left.Diag() {
-		n.SetDiag(true)
-	}
-	t := n.Left.Type
-	if t == nil {
-		return n
-	}
-
-	if n.Left.Op == OTYPE {
-		return n
-	}
-
-	s := n.Sym
-	if s == nil {
-		return n
-	}
-
-	switch path, ambig := dotpath(s, t, nil, false); {
-	case path != nil:
-		// rebuild elided dots
-		for c := len(path) - 1; c >= 0; c-- {
-			n.Left = nodSym(ODOT, n.Left, path[c].field.Sym)
-			n.Left.SetImplicit(true)
-		}
-	case ambig:
-		yyerror("ambiguous selector %v", n)
-		n.Left = nil
-	}
-
-	return n
-}
-
-// Code to help generate trampoline functions for methods on embedded
-// types. These are approximately the same as the corresponding adddot
-// routines except that they expect to be called with unique tasks and
-// they return the actual methods.
-
-type Symlink struct {
-	field *types.Field
-}
-
-var slist []Symlink
-
-func expand0(t *types.Type) {
-	u := t
-	if u.IsPtr() {
-		u = u.Elem()
-	}
-
-	if u.IsInterface() {
-		for _, f := range u.Fields().Slice() {
-			if f.Sym.Uniq() {
-				continue
-			}
-			f.Sym.SetUniq(true)
-			slist = append(slist, Symlink{field: f})
-		}
-
-		return
-	}
-
-	u = methtype(t)
-	if u != nil {
-		for _, f := range u.Methods().Slice() {
-			if f.Sym.Uniq() {
-				continue
-			}
-			f.Sym.SetUniq(true)
-			slist = append(slist, Symlink{field: f})
-		}
-	}
-}
-
-func expand1(t *types.Type, top bool) {
-	if t.Recur() {
-		return
-	}
-	t.SetRecur(true)
-
-	if !top {
-		expand0(t)
-	}
-
-	u := t
-	if u.IsPtr() {
-		u = u.Elem()
-	}
-
-	if u.IsStruct() || u.IsInterface() {
-		for _, f := range u.Fields().Slice() {
-			if f.Embedded == 0 {
-				continue
-			}
-			if f.Sym == nil {
-				continue
-			}
-			expand1(f.Type, false)
-		}
-	}
-
-	t.SetRecur(false)
-}
-
-func expandmeth(t *types.Type) {
-	if t == nil || t.AllMethods().Len() != 0 {
-		return
-	}
-
-	// mark top-level method symbols
-	// so that expand1 doesn't consider them.
-	for _, f := range t.Methods().Slice() {
-		f.Sym.SetUniq(true)
-	}
-
-	// generate all reachable methods
-	slist = slist[:0]
-	expand1(t, true)
-
-	// check each method to be uniquely reachable
-	var ms []*types.Field
-	for i, sl := range slist {
-		slist[i].field = nil
-		sl.field.Sym.SetUniq(false)
-
-		var f *types.Field
-		path, _ := dotpath(sl.field.Sym, t, &f, false)
-		if path == nil {
-			continue
-		}
-
-		// dotpath may have dug out arbitrary fields; we only want methods.
-		if !f.IsMethod() {
-			continue
-		}
-
-		// add it to the base type method list
-		f = f.Copy()
-		f.Embedded = 1 // needs a trampoline
-		for _, d := range path {
-			if d.field.Type.IsPtr() {
-				f.Embedded = 2
-				break
-			}
-		}
-		ms = append(ms, f)
-	}
-
-	for _, f := range t.Methods().Slice() {
-		f.Sym.SetUniq(false)
-	}
-
-	ms = append(ms, t.Methods().Slice()...)
-	sort.Sort(methcmp(ms))
-	t.AllMethods().Set(ms)
-}
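At the language level, the method set expandmeth computes is the familiar promotion of embedded types' methods, with Embedded = 2 marking paths that cross a pointer. A small runnable example of such a promoted method (the types here are illustrative):

	package main

	import "fmt"

	type Base struct{}

	func (Base) Hello() string { return "hi" }

	type Wrap struct{ *Base } // embedding through a pointer

	func main() {
		w := Wrap{&Base{}}
		fmt.Println(w.Hello()) // promoted via the embedded *Base field
	}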
-
-// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl *types.Type, mustname bool) []*Node {
-	var args []*Node
-	gen := 0
-	for _, t := range tl.Fields().Slice() {
-		s := t.Sym
-		if mustname && (s == nil || s.Name == "_") {
-			// invent a name so that we can refer to it in the trampoline
-			s = lookupN(".anon", gen)
-			gen++
-		}
-		a := symfield(s, t.Type)
-		a.Pos = t.Pos
-		a.SetIsDDD(t.IsDDD())
-		args = append(args, a)
-	}
-
-	return args
-}
-
-// Generate a wrapper function to convert from
-// a receiver of type T to a receiver of type U.
-// That is,
-//
-//	func (t T) M() {
-//		...
-//	}
-//
-// already exists; this function generates
-//
-//	func (u U) M() {
-//		u.M()
-//	}
-//
-// where the types T and U are such that u.M() is valid
-// and calls the T.M method.
-// The resulting function is for use in method tables.
-//
-//	rcvr - U
-//	method - M func (t T)(), a TFIELD type struct
-//	newnam - the eventual mangled name of this function
-func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
-	if false && Debug.r != 0 {
-		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
-	}
-
-	// Only generate (*T).M wrappers for T.M in T's own package.
-	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
-		rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != localpkg {
-		return
-	}
-
-	// Only generate I.M wrappers for I in I's own package
-	// but keep doing it for error.Error (was issue #29304).
-	if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != localpkg && rcvr != types.Errortype {
-		return
-	}
-
-	lineno = autogeneratedPos
-	dclcontext = PEXTERN
-
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.Left = namedfield(".this", rcvr)
-	tfn.List.Set(structargs(method.Type.Params(), true))
-	tfn.Rlist.Set(structargs(method.Type.Results(), false))
-
-	fn := dclfunc(newnam, tfn)
-	fn.Func.SetDupok(true)
-
-	nthis := asNode(tfn.Type.Recv().Nname)
-
-	methodrcvr := method.Type.Recv().Type
-
-	// generate nil pointer check for better error
-	if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
-		// generating wrapper from *T to T.
-		n := nod(OIF, nil, nil)
-		n.Left = nod(OEQ, nthis, nodnil())
-		call := nod(OCALL, syslook("panicwrap"), nil)
-		n.Nbody.Set1(call)
-		fn.Nbody.Append(n)
-	}
-
-	dot := adddot(nodSym(OXDOT, nthis, method.Sym))
-
-	// generate call
-	// It's not possible to use a tail call when dynamic linking on ppc64le. The
-	// bad scenario is when a local call is made to the wrapper: the wrapper will
-	// call the implementation, which might be in a different module and so set
-	// the TOC to the appropriate value for that module. But if it returns
-	// directly to the wrapper's caller, nothing will reset it to the correct
-	// value for that function.
-	if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && Ctxt.Flag_dynlink) {
-		// generate tail call: adjust pointer receiver and jump to embedded method.
-		dot = dot.Left // skip final .M
-		// TODO(mdempsky): Remove dependency on dotlist.
-		if !dotlist[0].field.Type.IsPtr() {
-			dot = nod(OADDR, dot, nil)
-		}
-		as := nod(OAS, nthis, convnop(dot, rcvr))
-		fn.Nbody.Append(as)
-		fn.Nbody.Append(nodSym(ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
-	} else {
-		fn.Func.SetWrapper(true) // ignore frame for panic+recover matching
-		call := nod(OCALL, dot, nil)
-		call.List.Set(paramNnames(tfn.Type))
-		call.SetIsDDD(tfn.Type.IsVariadic())
-		if method.Type.NumResults() > 0 {
-			n := nod(ORETURN, nil, nil)
-			n.List.Set1(call)
-			call = n
-		}
-		fn.Nbody.Append(call)
-	}
-
-	if false && Debug.r != 0 {
-		dumplist("genwrapper body", fn.Nbody)
-	}
-
-	funcbody()
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	fn = typecheck(fn, ctxStmt)
-
-	Curfn = fn
-	typecheckslice(fn.Nbody.Slice(), ctxStmt)
-
-	// Inline calls within (*T).M wrappers. This is safe because we only
-	// generate those wrappers within the same compilation unit as (T).M.
-	// TODO(mdempsky): Investigate why we can't enable this more generally.
-	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
-		inlcalls(fn)
-	}
-	escapeFuncs([]*Node{fn}, false)
-
-	Curfn = nil
-	xtop = append(xtop, fn)
-}
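In source terms, the *T-to-T wrapper described above behaves like this hand-written analogue; wrapperM and the panic message are illustrative stand-ins for the generated function and the runtime's panicwrap:

	package main

	import "fmt"

	type T struct{ x int }

	func (t T) M() int { return t.x }

	// wrapperM sketches what the compiler generates for (*T).M:
	// nil-check the receiver, then forward to T.M.
	func wrapperM(t *T) int {
		if t == nil {
			panic("value method called using nil pointer") // stands in for panicwrap
		}
		return (*t).M()
	}

	func main() {
		t := &T{x: 3}
		fmt.Println(wrapperM(t)) // 3
	}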
-
-func paramNnames(ft *types.Type) []*Node {
-	args := make([]*Node, ft.NumParams())
-	for i, f := range ft.Params().FieldSlice() {
-		args[i] = asNode(f.Nname)
-	}
-	return args
-}
-
-func hashmem(t *types.Type) *Node {
-	sym := Runtimepkg.Lookup("memhash")
-
-	n := newname(sym)
-	setNodeNameFunc(n)
-	n.Type = functype(nil, []*Node{
-		anonfield(types.NewPtr(t)),
-		anonfield(types.Types[TUINTPTR]),
-		anonfield(types.Types[TUINTPTR]),
-	}, []*Node{
-		anonfield(types.Types[TUINTPTR]),
-	})
-	return n
-}
-
-func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
-	if t == nil {
-		return nil, false
-	}
-
-	path, ambig := dotpath(s, t, &m, ignorecase)
-	if path == nil {
-		if ambig {
-			yyerror("%v.%v is ambiguous", t, s)
-		}
-		return nil, false
-	}
-
-	for _, d := range path {
-		if d.field.Type.IsPtr() {
-			followptr = true
-			break
-		}
-	}
-
-	if !m.IsMethod() {
-		yyerror("%v.%v is a field, not a method", t, s)
-		return nil, followptr
-	}
-
-	return m, followptr
-}
-
-func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
-	t0 := t
-	if t == nil {
-		return false
-	}
-
-	if t.IsInterface() {
-		i := 0
-		tms := t.Fields().Slice()
-		for _, im := range iface.Fields().Slice() {
-			for i < len(tms) && tms[i].Sym != im.Sym {
-				i++
-			}
-			if i == len(tms) {
-				*m = im
-				*samename = nil
-				*ptr = 0
-				return false
-			}
-			tm := tms[i]
-			if !types.Identical(tm.Type, im.Type) {
-				*m = im
-				*samename = tm
-				*ptr = 0
-				return false
-			}
-		}
-
-		return true
-	}
-
-	t = methtype(t)
-	var tms []*types.Field
-	if t != nil {
-		expandmeth(t)
-		tms = t.AllMethods().Slice()
-	}
-	i := 0
-	for _, im := range iface.Fields().Slice() {
-		if im.Broke() {
-			continue
-		}
-		for i < len(tms) && tms[i].Sym != im.Sym {
-			i++
-		}
-		if i == len(tms) {
-			*m = im
-			*samename, _ = ifacelookdot(im.Sym, t, true)
-			*ptr = 0
-			return false
-		}
-		tm := tms[i]
-		if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
-			*m = im
-			*samename = tm
-			*ptr = 0
-			return false
-		}
-		followptr := tm.Embedded == 2
-
-		// if pointer receiver in method,
-		// the method does not exist for value types.
-		rcvr := tm.Type.Recv().Type
-		if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
-			if false && Debug.r != 0 {
-				yyerror("interface pointer mismatch")
-			}
-
-			*m = im
-			*samename = nil
-			*ptr = 1
-			return false
-		}
-	}
-
-	// We're going to emit an OCONVIFACE.
-	// Call itabname so that (t, iface)
-	// gets added to itabs early, which allows
-	// us to de-virtualize calls through this
-	// type/interface pair later. See peekitabs in reflect.go
-	if isdirectiface(t0) && !iface.IsEmptyInterface() {
-		itabname(t0, iface)
-	}
-	return true
-}
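The *ptr = 1 failure mode corresponds to a familiar language rule: a pointer-receiver method is in *T's method set but not in T's. A minimal demonstration:

	package main

	type I interface{ M() }

	type T struct{}

	func (*T) M() {}

	func main() {
		var _ I = &T{} // ok: *T has M
		// var _ I = T{} // compile error: T does not implement I (M has pointer receiver)
	}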
-
-func listtreecopy(l []*Node, pos src.XPos) []*Node {
-	var out []*Node
-	for _, n := range l {
-		out = append(out, treecopy(n, pos))
-	}
-	return out
-}
-
-func liststmt(l []*Node) *Node {
-	n := nod(OBLOCK, nil, nil)
-	n.List.Set(l)
-	if len(l) != 0 {
-		n.Pos = l[0].Pos
-	}
-	return n
-}
-
-func (l Nodes) asblock() *Node {
-	n := nod(OBLOCK, nil, nil)
-	n.List = l
-	if l.Len() != 0 {
-		n.Pos = l.First().Pos
-	}
-	return n
-}
-
-func ngotype(n *Node) *types.Sym {
-	if n.Type != nil {
-		return typenamesym(n.Type)
-	}
-	return nil
-}
-
-// The result of addinit MUST be assigned back to n, e.g.
-// 	n.Left = addinit(n.Left, init)
-func addinit(n *Node, init []*Node) *Node {
-	if len(init) == 0 {
-		return n
-	}
-	if n.mayBeShared() {
-		// Introduce OCONVNOP to hold init list.
-		n = nod(OCONVNOP, n, nil)
-		n.Type = n.Left.Type
-		n.SetTypecheck(1)
-	}
-
-	n.Ninit.Prepend(init...)
-	n.SetHasCall(true)
-	return n
-}
-
-// The linker uses the magic symbol prefixes "go." and "type."
-// Avoid potential confusion between import paths and symbols
-// by rejecting these reserved imports for now. Also, people
-// "can do weird things in GOPATH and we'd prefer they didn't
-// do _that_ weird thing" (per rsc). See also #4257.
-var reservedimports = []string{
-	"go",
-	"type",
-}
-
-func isbadimport(path string, allowSpace bool) bool {
-	if strings.Contains(path, "\x00") {
-		yyerror("import path contains NUL")
-		return true
-	}
-
-	for _, ri := range reservedimports {
-		if path == ri {
-			yyerror("import path %q is reserved and cannot be used", path)
-			return true
-		}
-	}
-
-	for _, r := range path {
-		if r == utf8.RuneError {
-			yyerror("import path contains invalid UTF-8 sequence: %q", path)
-			return true
-		}
-
-		if r < 0x20 || r == 0x7f {
-			yyerror("import path contains control character: %q", path)
-			return true
-		}
-
-		if r == '\\' {
-			yyerror("import path contains backslash; use slash: %q", path)
-			return true
-		}
-
-		if !allowSpace && unicode.IsSpace(r) {
-			yyerror("import path contains space character: %q", path)
-			return true
-		}
-
-		if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
-			yyerror("import path contains invalid character '%c': %q", r, path)
-			return true
-		}
-	}
-
-	return false
-}
-
-// Can this type be stored directly in an interface word?
-// Yes, if the representation is a single pointer.
-func isdirectiface(t *types.Type) bool {
-	if t.Broke() {
-		return false
-	}
-
-	switch t.Etype {
-	case TPTR:
-		// Pointers to notinheap types must be stored indirectly. See issue 42076.
-		return !t.Elem().NotInHeap()
-	case TCHAN,
-		TMAP,
-		TFUNC,
-		TUNSAFEPTR:
-		return true
-
-	case TARRAY:
-		// Array of 1 direct iface type can be direct.
-		return t.NumElem() == 1 && isdirectiface(t.Elem())
-
-	case TSTRUCT:
-		// Struct with 1 field of direct iface type can be direct.
-		return t.NumFields() == 1 && isdirectiface(t.Field(0).Type)
-	}
-
-	return false
-}
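The same classification, restated over a toy type descriptor for clarity; this sketch omits the notinheap refinement and uses simplified kinds rather than the compiler's types package:

	package main

	import "fmt"

	type kind int

	const (
		kPtr kind = iota
		kChan
		kMap
		kFunc
		kUnsafePtr
		kArray
		kStruct
		kInt
	)

	type typ struct {
		kind   kind
		elem   *typ   // array element
		fields []*typ // struct fields
		n      int    // array length
	}

	// direct mirrors isdirectiface: single-pointer representations only.
	func direct(t *typ) bool {
		switch t.kind {
		case kPtr, kChan, kMap, kFunc, kUnsafePtr:
			return true
		case kArray:
			return t.n == 1 && direct(t.elem)
		case kStruct:
			return len(t.fields) == 1 && direct(t.fields[0])
		}
		return false
	}

	func main() {
		one := &typ{kind: kArray, n: 1, elem: &typ{kind: kPtr}}
		fmt.Println(direct(one))              // true: [1]*T fits in the interface word
		fmt.Println(direct(&typ{kind: kInt})) // false
	}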
-
-// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab *Node) *Node {
-	typ := nodSym(ODOTPTR, itab, nil)
-	typ.Type = types.NewPtr(types.Types[TUINT8])
-	typ.SetTypecheck(1)
-	typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
-	typ.SetBounded(true)          // guaranteed not to fault
-	return typ
-}
-
-// ifaceData loads the data field from an interface.
-// The concrete type must be known to have type t.
-// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node {
-	if t.IsInterface() {
-		Fatalf("ifaceData interface: %v", t)
-	}
-	ptr := nodlSym(pos, OIDATA, n, nil)
-	if isdirectiface(t) {
-		ptr.Type = t
-		ptr.SetTypecheck(1)
-		return ptr
-	}
-	ptr.Type = types.NewPtr(t)
-	ptr.SetTypecheck(1)
-	ind := nodl(pos, ODEREF, ptr, nil)
-	ind.Type = t
-	ind.SetTypecheck(1)
-	ind.SetBounded(true)
-	return ind
-}
-
-// typePos returns the position associated with t.
-// This is where t was declared or where it appeared as a type expression.
-func typePos(t *types.Type) src.XPos {
-	n := asNode(t.Nod)
-	if n == nil || !n.Pos.IsKnown() {
-		Fatalf("bad type: %v", t)
-	}
-	return n.Pos
-}
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
deleted file mode 100644
index 8d9fbe3..0000000
--- a/src/cmd/compile/internal/gc/swt.go
+++ /dev/null
@@ -1,756 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"sort"
-)
-
-// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *Node) {
-	typecheckslice(n.Ninit.Slice(), ctxStmt)
-	if n.Left != nil && n.Left.Op == OTYPESW {
-		typecheckTypeSwitch(n)
-	} else {
-		typecheckExprSwitch(n)
-	}
-}
-
-func typecheckTypeSwitch(n *Node) {
-	n.Left.Right = typecheck(n.Left.Right, ctxExpr)
-	t := n.Left.Right.Type
-	if t != nil && !t.IsInterface() {
-		yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
-		t = nil
-	}
-
-	// We don't actually declare the type switch's guarded
-	// declaration itself. So if there are no cases, we won't
-	// notice that it went unused.
-	if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 {
-		yyerrorl(v.Pos, "%v declared but not used", v.Sym)
-	}
-
-	var defCase, nilCase *Node
-	var ts typeSet
-	for _, ncase := range n.List.Slice() {
-		ls := ncase.List.Slice()
-		if len(ls) == 0 { // default:
-			if defCase != nil {
-				yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
-			} else {
-				defCase = ncase
-			}
-		}
-
-		for i := range ls {
-			ls[i] = typecheck(ls[i], ctxExpr|ctxType)
-			n1 := ls[i]
-			if t == nil || n1.Type == nil {
-				continue
-			}
-
-			var missing, have *types.Field
-			var ptr int
-			switch {
-			case n1.isNil(): // case nil:
-				if nilCase != nil {
-					yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line())
-				} else {
-					nilCase = ncase
-				}
-			case n1.Op != OTYPE:
-				yyerrorl(ncase.Pos, "%L is not a type", n1)
-			case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
-				if have != nil && !have.Broke() {
-					yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
-						" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-				} else if ptr != 0 {
-					yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
-						" (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
-				} else {
-					yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
-						" (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
-				}
-			}
-
-			if n1.Op == OTYPE {
-				ts.add(ncase.Pos, n1.Type)
-			}
-		}
-
-		if ncase.Rlist.Len() != 0 {
-			// Assign the clause variable's type.
-			vt := t
-			if len(ls) == 1 {
-				if ls[0].Op == OTYPE {
-					vt = ls[0].Type
-				} else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil()
-					// Invalid single-type case;
-					// mark variable as broken.
-					vt = nil
-				}
-			}
-
-			// TODO(mdempsky): It should be possible to
-			// still typecheck the case body.
-			if vt == nil {
-				continue
-			}
-
-			nvar := ncase.Rlist.First()
-			nvar.Type = vt
-			nvar = typecheck(nvar, ctxExpr|ctxAssign)
-			ncase.Rlist.SetFirst(nvar)
-		}
-
-		typecheckslice(ncase.Nbody.Slice(), ctxStmt)
-	}
-}
-
-type typeSet struct {
-	m map[string][]typeSetEntry
-}
-
-type typeSetEntry struct {
-	pos src.XPos
-	typ *types.Type
-}
-
-func (s *typeSet) add(pos src.XPos, typ *types.Type) {
-	if s.m == nil {
-		s.m = make(map[string][]typeSetEntry)
-	}
-
-	// LongString does not uniquely identify types, so we need to
-	// disambiguate collisions with types.Identical.
-	// TODO(mdempsky): Add a method that *is* unique.
-	ls := typ.LongString()
-	prevs := s.m[ls]
-	for _, prev := range prevs {
-		if types.Identical(typ, prev.typ) {
-			yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos))
-			return
-		}
-	}
-	s.m[ls] = append(prevs, typeSetEntry{pos, typ})
-}
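The same collision-tolerant pattern in isolation: key by a lossy string, then confirm duplicates with a real equality check. In this sketch reflect.DeepEqual stands in for types.Identical:

	package main

	import (
		"fmt"
		"reflect"
	)

	type set struct{ m map[string][]interface{} }

	// add reports whether v was newly inserted under key.
	func (s *set) add(key string, v interface{}) bool {
		if s.m == nil {
			s.m = make(map[string][]interface{})
		}
		for _, prev := range s.m[key] {
			if reflect.DeepEqual(prev, v) {
				return false // duplicate despite the non-unique key
			}
		}
		s.m[key] = append(s.m[key], v)
		return true
	}

	func main() {
		var s set
		fmt.Println(s.add("k", []int{1})) // true
		fmt.Println(s.add("k", []int{1})) // false: same key, identical value
		fmt.Println(s.add("k", []int{2})) // true: same key, different value
	}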
-
-func typecheckExprSwitch(n *Node) {
-	t := types.Types[TBOOL]
-	if n.Left != nil {
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		t = n.Left.Type
-	}
-
-	var nilonly string
-	if t != nil {
-		switch {
-		case t.IsMap():
-			nilonly = "map"
-		case t.Etype == TFUNC:
-			nilonly = "func"
-		case t.IsSlice():
-			nilonly = "slice"
-
-		case !IsComparable(t):
-			if t.IsStruct() {
-				yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
-			} else {
-				yyerrorl(n.Pos, "cannot switch on %L", n.Left)
-			}
-			t = nil
-		}
-	}
-
-	var defCase *Node
-	var cs constSet
-	for _, ncase := range n.List.Slice() {
-		ls := ncase.List.Slice()
-		if len(ls) == 0 { // default:
-			if defCase != nil {
-				yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
-			} else {
-				defCase = ncase
-			}
-		}
-
-		for i := range ls {
-			setlineno(ncase)
-			ls[i] = typecheck(ls[i], ctxExpr)
-			ls[i] = defaultlit(ls[i], t)
-			n1 := ls[i]
-			if t == nil || n1.Type == nil {
-				continue
-			}
-
-			if nilonly != "" && !n1.isNil() {
-				yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
-			} else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
-				yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
-			} else {
-				op1, _ := assignop(n1.Type, t)
-				op2, _ := assignop(t, n1.Type)
-				if op1 == OXXX && op2 == OXXX {
-					if n.Left != nil {
-						yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
-					} else {
-						yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
-					}
-				}
-			}
-
-			// Don't check for duplicate bools. Although the spec allows it,
-			// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
-			// (2) it would disallow useful things like
-			//       case GOARCH == "arm" && GOARM == "5":
-			//       case GOARCH == "arm":
-			//     which would both evaluate to false for non-ARM compiles.
-			if !n1.Type.IsBoolean() {
-				cs.add(ncase.Pos, n1, "case", "switch")
-			}
-		}
-
-		typecheckslice(ncase.Nbody.Slice(), ctxStmt)
-	}
-}
-
-// walkswitch walks a switch statement.
-func walkswitch(sw *Node) {
-	// Guard against double walk, see #25776.
-	if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
-		return // Was fatal, but eliminating every possible source of double-walking is hard
-	}
-
-	if sw.Left != nil && sw.Left.Op == OTYPESW {
-		walkTypeSwitch(sw)
-	} else {
-		walkExprSwitch(sw)
-	}
-}
-
-// walkExprSwitch generates an AST implementing sw.  sw is an
-// expression switch.
-func walkExprSwitch(sw *Node) {
-	lno := setlineno(sw)
-
-	cond := sw.Left
-	sw.Left = nil
-
-	// convert switch {...} to switch true {...}
-	if cond == nil {
-		cond = nodbool(true)
-		cond = typecheck(cond, ctxExpr)
-		cond = defaultlit(cond, nil)
-	}
-
-	// Given "switch string(byteslice)",
-	// with all cases being side-effect free,
-	// use a zero-cost alias of the byte slice.
-	// Do this before calling walkexpr on cond,
-	// because walkexpr will lower the string
-	// conversion into a runtime call.
-	// See issue 24937 for more discussion.
-	if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
-		cond.Op = OBYTES2STRTMP
-	}
-
-	cond = walkexpr(cond, &sw.Ninit)
-	if cond.Op != OLITERAL {
-		cond = copyexpr(cond, cond.Type, &sw.Nbody)
-	}
-
-	lineno = lno
-
-	s := exprSwitch{
-		exprname: cond,
-	}
-
-	var defaultGoto *Node
-	var body Nodes
-	for _, ncase := range sw.List.Slice() {
-		label := autolabel(".s")
-		jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
-
-		// Process case dispatch.
-		if ncase.List.Len() == 0 {
-			if defaultGoto != nil {
-				Fatalf("duplicate default case not detected during typechecking")
-			}
-			defaultGoto = jmp
-		}
-
-		for _, n1 := range ncase.List.Slice() {
-			s.Add(ncase.Pos, n1, jmp)
-		}
-
-		// Process body.
-		body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
-		body.Append(ncase.Nbody.Slice()...)
-		if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
-			br := nod(OBREAK, nil, nil)
-			br.Pos = pos
-			body.Append(br)
-		}
-	}
-	sw.List.Set(nil)
-
-	if defaultGoto == nil {
-		br := nod(OBREAK, nil, nil)
-		br.Pos = br.Pos.WithNotStmt()
-		defaultGoto = br
-	}
-
-	s.Emit(&sw.Nbody)
-	sw.Nbody.Append(defaultGoto)
-	sw.Nbody.AppendNodes(&body)
-	walkstmtlist(sw.Nbody.Slice())
-}
-
-// An exprSwitch walks an expression switch.
-type exprSwitch struct {
-	exprname *Node // value being switched on
-
-	done    Nodes
-	clauses []exprClause
-}
-
-type exprClause struct {
-	pos    src.XPos
-	lo, hi *Node
-	jmp    *Node
-}
-
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
-	c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
-	if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL {
-		s.clauses = append(s.clauses, c)
-		return
-	}
-
-	s.flush()
-	s.clauses = append(s.clauses, c)
-	s.flush()
-}
-
-func (s *exprSwitch) Emit(out *Nodes) {
-	s.flush()
-	out.AppendNodes(&s.done)
-}
-
-func (s *exprSwitch) flush() {
-	cc := s.clauses
-	s.clauses = nil
-	if len(cc) == 0 {
-		return
-	}
-
-	// Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
-	// The code below is structured to implicitly handle this case
-	// (e.g., sort.Slice doesn't need to invoke the less function
-	// when there's only a single slice element).
-
-	if s.exprname.Type.IsString() && len(cc) >= 2 {
-		// Sort strings by length and then by value. It is
-		// much cheaper to compare lengths than values, and
-		// all we need here is consistency. We respect this
-		// sorting below.
-		sort.Slice(cc, func(i, j int) bool {
-			si := cc[i].lo.StringVal()
-			sj := cc[j].lo.StringVal()
-			if len(si) != len(sj) {
-				return len(si) < len(sj)
-			}
-			return si < sj
-		})
-
-		// runLen returns the string length associated with a
-		// particular run of exprClauses.
-		runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) }
-
-		// Collapse runs of consecutive strings with the same length.
-		var runs [][]exprClause
-		start := 0
-		for i := 1; i < len(cc); i++ {
-			if runLen(cc[start:]) != runLen(cc[i:]) {
-				runs = append(runs, cc[start:i])
-				start = i
-			}
-		}
-		runs = append(runs, cc[start:])
-
-		// Perform two-level binary search.
-		nlen := nod(OLEN, s.exprname, nil)
-		binarySearch(len(runs), &s.done,
-			func(i int) *Node {
-				return nod(OLE, nlen, nodintconst(runLen(runs[i-1])))
-			},
-			func(i int, nif *Node) {
-				run := runs[i]
-				nif.Left = nod(OEQ, nlen, nodintconst(runLen(run)))
-				s.search(run, &nif.Nbody)
-			},
-		)
-		return
-	}
-
-	sort.Slice(cc, func(i, j int) bool {
-		return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val())
-	})
-
-	// Merge consecutive integer cases.
-	if s.exprname.Type.IsInteger() {
-		merged := cc[:1]
-		for _, c := range cc[1:] {
-			last := &merged[len(merged)-1]
-			if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() {
-				last.hi = c.lo
-			} else {
-				merged = append(merged, c)
-			}
-		}
-		cc = merged
-	}
-
-	s.search(cc, &s.done)
-}
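A reduced model of the string strategy: sort cases by (length, value) and collapse equal-length runs, so dispatch can test the cheap length comparison first. The grouping below mirrors the runs loop; the generated two-level search itself is elided:

	package main

	import (
		"fmt"
		"sort"
	)

	// groupByLen sorts case strings by length then value, and groups
	// consecutive cases of equal length, as exprSwitch.flush does.
	func groupByLen(cases []string) [][]string {
		sort.Slice(cases, func(i, j int) bool {
			if len(cases[i]) != len(cases[j]) {
				return len(cases[i]) < len(cases[j])
			}
			return cases[i] < cases[j]
		})
		var runs [][]string
		start := 0
		for i := 1; i < len(cases); i++ {
			if len(cases[i]) != len(cases[start]) {
				runs = append(runs, cases[start:i])
				start = i
			}
		}
		if len(cases) > 0 {
			runs = append(runs, cases[start:])
		}
		return runs
	}

	func main() {
		fmt.Println(groupByLen([]string{"go", "c", "rust", "java", "d"}))
		// [[c d] [go] [java rust]]
	}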
-
-func (s *exprSwitch) search(cc []exprClause, out *Nodes) {
-	binarySearch(len(cc), out,
-		func(i int) *Node {
-			return nod(OLE, s.exprname, cc[i-1].hi)
-		},
-		func(i int, nif *Node) {
-			c := &cc[i]
-			nif.Left = c.test(s.exprname)
-			nif.Nbody.Set1(c.jmp)
-		},
-	)
-}
-
-func (c *exprClause) test(exprname *Node) *Node {
-	// Integer range.
-	if c.hi != c.lo {
-		low := nodl(c.pos, OGE, exprname, c.lo)
-		high := nodl(c.pos, OLE, exprname, c.hi)
-		return nodl(c.pos, OANDAND, low, high)
-	}
-
-	// Optimize "switch true { ...}" and "switch false { ... }".
-	if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
-		if exprname.BoolVal() {
-			return c.lo
-		} else {
-			return nodl(c.pos, ONOT, c.lo, nil)
-		}
-	}
-
-	return nodl(c.pos, OEQ, exprname, c.lo)
-}
-
-func allCaseExprsAreSideEffectFree(sw *Node) bool {
-	// In theory, we could be more aggressive, allowing any
-	// side-effect-free expressions in cases, but it's a bit
-	// tricky because some of that information is unavailable due
-	// to the introduction of temporaries during order.
-	// Restricting to constants is simple and probably powerful
-	// enough.
-
-	for _, ncase := range sw.List.Slice() {
-		if ncase.Op != OCASE {
-			Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
-		}
-		for _, v := range ncase.List.Slice() {
-			if v.Op != OLITERAL {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// hasFall reports whether stmts ends with a "fallthrough" statement.
-func hasFall(stmts []*Node) (bool, src.XPos) {
-	// Search backwards for the index of the fallthrough
-	// statement. Do not assume it'll be in the last
-	// position, since in some cases (e.g. when the statement
-	// list contains autotmp_ variables), one or more OVARKILL
-	// nodes will be at the end of the list.
-
-	i := len(stmts) - 1
-	for i >= 0 && stmts[i].Op == OVARKILL {
-		i--
-	}
-	if i < 0 {
-		return false, src.NoXPos
-	}
-	return stmts[i].Op == OFALL, stmts[i].Pos
-}
-
-// walkTypeSwitch generates an AST that implements sw, where sw is a
-// type switch.
-func walkTypeSwitch(sw *Node) {
-	var s typeSwitch
-	s.facename = sw.Left.Right
-	sw.Left = nil
-
-	s.facename = walkexpr(s.facename, &sw.Ninit)
-	s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
-	s.okname = temp(types.Types[TBOOL])
-
-	// Get interface descriptor word.
-	// For empty interfaces this will be the type.
-	// For non-empty interfaces this will be the itab.
-	itab := nod(OITAB, s.facename, nil)
-
-	// For empty interfaces, do:
-	//     if e._type == nil {
-	//         do nil case if it exists, otherwise default
-	//     }
-	//     h := e._type.hash
-	// Use a similar strategy for non-empty interfaces.
-	ifNil := nod(OIF, nil, nil)
-	ifNil.Left = nod(OEQ, itab, nodnil())
-	lineno = lineno.WithNotStmt() // disable statement marks after the first check.
-	ifNil.Left = typecheck(ifNil.Left, ctxExpr)
-	ifNil.Left = defaultlit(ifNil.Left, nil)
-	// ifNil.Nbody assigned at end.
-	sw.Nbody.Append(ifNil)
-
-	// Load hash from type or itab.
-	dotHash := nodSym(ODOTPTR, itab, nil)
-	dotHash.Type = types.Types[TUINT32]
-	dotHash.SetTypecheck(1)
-	if s.facename.Type.IsEmptyInterface() {
-		dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
-	} else {
-		dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
-	}
-	dotHash.SetBounded(true) // guaranteed not to fault
-	s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
-
-	br := nod(OBREAK, nil, nil)
-	var defaultGoto, nilGoto *Node
-	var body Nodes
-	for _, ncase := range sw.List.Slice() {
-		var caseVar *Node
-		if ncase.Rlist.Len() != 0 {
-			caseVar = ncase.Rlist.First()
-		}
-
-		// For single-type cases with an interface type,
-		// we initialize the case variable as part of the type assertion.
-		// In other cases, we initialize it in the body.
-		var singleType *types.Type
-		if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE {
-			singleType = ncase.List.First().Type
-		}
-		caseVarInitialized := false
-
-		label := autolabel(".s")
-		jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
-
-		if ncase.List.Len() == 0 { // default:
-			if defaultGoto != nil {
-				Fatalf("duplicate default case not detected during typechecking")
-			}
-			defaultGoto = jmp
-		}
-
-		for _, n1 := range ncase.List.Slice() {
-			if n1.isNil() { // case nil:
-				if nilGoto != nil {
-					Fatalf("duplicate nil case not detected during typechecking")
-				}
-				nilGoto = jmp
-				continue
-			}
-
-			if singleType != nil && singleType.IsInterface() {
-				s.Add(ncase.Pos, n1.Type, caseVar, jmp)
-				caseVarInitialized = true
-			} else {
-				s.Add(ncase.Pos, n1.Type, nil, jmp)
-			}
-		}
-
-		body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
-		if caseVar != nil && !caseVarInitialized {
-			val := s.facename
-			if singleType != nil {
-				// We have a single concrete type. Extract the data.
-				if singleType.IsInterface() {
-					Fatalf("singleType interface should have been handled in Add")
-				}
-				val = ifaceData(ncase.Pos, s.facename, singleType)
-			}
-			l := []*Node{
-				nodl(ncase.Pos, ODCL, caseVar, nil),
-				nodl(ncase.Pos, OAS, caseVar, val),
-			}
-			typecheckslice(l, ctxStmt)
-			body.Append(l...)
-		}
-		body.Append(ncase.Nbody.Slice()...)
-		body.Append(br)
-	}
-	sw.List.Set(nil)
-
-	if defaultGoto == nil {
-		defaultGoto = br
-	}
-	if nilGoto == nil {
-		nilGoto = defaultGoto
-	}
-	ifNil.Nbody.Set1(nilGoto)
-
-	s.Emit(&sw.Nbody)
-	sw.Nbody.Append(defaultGoto)
-	sw.Nbody.AppendNodes(&body)
-
-	walkstmtlist(sw.Nbody.Slice())
-}
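Ignoring the hash-based search, the dispatch walkTypeSwitch builds is equivalent to a nil check followed by a chain of comma-ok assertions, as in this hand-lowered sketch:

	package main

	import "fmt"

	// describe is a hand-lowered form of:
	//   switch v := x.(type) { case nil: ...; case int: ...; default: ... }
	func describe(x interface{}) string {
		if x == nil {
			return "nil"
		}
		if v, ok := x.(int); ok {
			return fmt.Sprintf("int %d", v)
		}
		return "other"
	}

	func main() {
		fmt.Println(describe(nil), describe(7), describe("s"))
	}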
-
-// A typeSwitch walks a type switch.
-type typeSwitch struct {
-	// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
-	facename *Node // value being type-switched on
-	hashname *Node // type hash of the value being type-switched on
-	okname   *Node // boolean used for comma-ok type assertions
-
-	done    Nodes
-	clauses []typeClause
-}
-
-type typeClause struct {
-	hash uint32
-	body Nodes
-}
-
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
-	var body Nodes
-	if caseVar != nil {
-		l := []*Node{
-			nodl(pos, ODCL, caseVar, nil),
-			nodl(pos, OAS, caseVar, nil),
-		}
-		typecheckslice(l, ctxStmt)
-		body.Append(l...)
-	} else {
-		caseVar = nblank
-	}
-
-	// cv, ok = iface.(type)
-	as := nodl(pos, OAS2, nil, nil)
-	as.List.Set2(caseVar, s.okname) // cv, ok =
-	dot := nodl(pos, ODOTTYPE, s.facename, nil)
-	dot.Type = typ // iface.(type)
-	as.Rlist.Set1(dot)
-	as = typecheck(as, ctxStmt)
-	as = walkexpr(as, &body)
-	body.Append(as)
-
-	// if ok { goto label }
-	nif := nodl(pos, OIF, nil, nil)
-	nif.Left = s.okname
-	nif.Nbody.Set1(jmp)
-	body.Append(nif)
-
-	if !typ.IsInterface() {
-		s.clauses = append(s.clauses, typeClause{
-			hash: typehash(typ),
-			body: body,
-		})
-		return
-	}
-
-	s.flush()
-	s.done.AppendNodes(&body)
-}
-
-func (s *typeSwitch) Emit(out *Nodes) {
-	s.flush()
-	out.AppendNodes(&s.done)
-}
-
-func (s *typeSwitch) flush() {
-	cc := s.clauses
-	s.clauses = nil
-	if len(cc) == 0 {
-		return
-	}
-
-	sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
-
-	// Combine adjacent cases with the same hash.
-	merged := cc[:1]
-	for _, c := range cc[1:] {
-		last := &merged[len(merged)-1]
-		if last.hash == c.hash {
-			last.body.AppendNodes(&c.body)
-		} else {
-			merged = append(merged, c)
-		}
-	}
-	cc = merged
-
-	binarySearch(len(cc), &s.done,
-		func(i int) *Node {
-			return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
-		},
-		func(i int, nif *Node) {
-			// TODO(mdempsky): Omit hash equality check if
-			// there's only one type.
-			c := cc[i]
-			nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
-			nif.Nbody.AppendNodes(&c.body)
-		},
-	)
-}
-
-// binarySearch constructs a binary search tree for handling n cases,
-// and appends it to out. It's used for efficiently implementing
-// switch statements.
-//
-// less(i) should return a boolean expression. If it evaluates true,
-// then cases before i will be tested; otherwise, cases i and later.
-//
-// base(i, nif) should set up nif (an OIF node) to test case i. In
-// particular, it should set nif.Left and nif.Nbody.
-func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) {
-	const binarySearchMin = 4 // minimum number of cases for binary search
-
-	var do func(lo, hi int, out *Nodes)
-	do = func(lo, hi int, out *Nodes) {
-		n := hi - lo
-		if n < binarySearchMin {
-			for i := lo; i < hi; i++ {
-				nif := nod(OIF, nil, nil)
-				base(i, nif)
-				lineno = lineno.WithNotStmt()
-				nif.Left = typecheck(nif.Left, ctxExpr)
-				nif.Left = defaultlit(nif.Left, nil)
-				out.Append(nif)
-				out = &nif.Rlist
-			}
-			return
-		}
-
-		half := lo + n/2
-		nif := nod(OIF, nil, nil)
-		nif.Left = less(half)
-		lineno = lineno.WithNotStmt()
-		nif.Left = typecheck(nif.Left, ctxExpr)
-		nif.Left = defaultlit(nif.Left, nil)
-		do(lo, half, &nif.Nbody)
-		do(half, hi, &nif.Rlist)
-		out.Append(nif)
-	}
-
-	do(0, n, out)
-}
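The less/base contract can be exercised with plain strings instead of AST nodes. This sketch runs the same split-at-half recursion and prints the shape of the decision tree it would emit (sequential ifs stand in for the Rlist else chain):

	package main

	import "fmt"

	// binarySearch mirrors the AST builder's recursion: below the
	// threshold it emits a linear chain of tests, otherwise it splits
	// at the midpoint guarded by less(half).
	func binarySearch(n int, less func(i int) string, base func(i int) string) {
		const min = 4
		var do func(lo, hi int, indent string)
		do = func(lo, hi int, indent string) {
			if hi-lo < min {
				for i := lo; i < hi; i++ {
					fmt.Println(indent + "if " + base(i))
				}
				return
			}
			half := lo + (hi-lo)/2
			fmt.Println(indent + "if " + less(half) + " {")
			do(lo, half, indent+"  ")
			fmt.Println(indent + "} else {")
			do(half, hi, indent+"  ")
			fmt.Println(indent + "}")
		}
		do(0, n, "")
	}

	func main() {
		cases := []int{1, 3, 5, 7, 9, 11, 13, 15}
		binarySearch(len(cases),
			func(i int) string { return fmt.Sprintf("x <= %d", cases[i-1]) },
			func(i int) string { return fmt.Sprintf("x == %d", cases[i]) })
	}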
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
deleted file mode 100644
index 7b4a315..0000000
--- a/src/cmd/compile/internal/gc/syntax.go
+++ /dev/null
@@ -1,1196 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// “Abstract” syntax representation.
-
-package gc
-
-import (
-	"cmd/compile/internal/ssa"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
-	"sort"
-)
-
-// A Node is a single node in the syntax tree.
-// Actually the syntax tree is a syntax DAG, because there is only one
-// node with Op=ONAME for a given instance of a variable x.
-// The same is true for Op=OTYPE and Op=OLITERAL. See Node.mayBeShared.
-type Node struct {
-	// Tree structure.
-	// Generic recursive walks should follow these fields.
-	Left  *Node
-	Right *Node
-	Ninit Nodes
-	Nbody Nodes
-	List  Nodes
-	Rlist Nodes
-
-	// most nodes
-	Type *types.Type
-	Orig *Node // original form, for printing, and tracking copies of ONAMEs
-
-	// func
-	Func *Func
-
-	// ONAME, OTYPE, OPACK, OLABEL, some OLITERAL
-	Name *Name
-
-	Sym *types.Sym  // various
-	E   interface{} // Opt or Val, see methods below
-
-	// Various. Usually an offset into a struct. For example:
-	// - ONAME nodes that refer to local variables use it to identify their stack frame position.
-	// - ODOT, ODOTPTR, and ORESULT use it to indicate offset relative to their base address.
-	// - OSTRUCTKEY uses it to store the named field's offset.
-	// - Named OLITERALs use it to store their ambient iota value.
-	// - OINLMARK stores an index into the inlTree data structure.
-	// - OCLOSURE uses it to store ambient iota value, if any.
-	// Possibly still more uses. If you find any, document them.
-	Xoffset int64
-
-	Pos src.XPos
-
-	flags bitset32
-
-	Esc uint16 // EscXXX
-
-	Op  Op
-	aux uint8
-}
-
-func (n *Node) ResetAux() {
-	n.aux = 0
-}
-
-func (n *Node) SubOp() Op {
-	switch n.Op {
-	case OASOP, ONAME:
-	default:
-		Fatalf("unexpected op: %v", n.Op)
-	}
-	return Op(n.aux)
-}
-
-func (n *Node) SetSubOp(op Op) {
-	switch n.Op {
-	case OASOP, ONAME:
-	default:
-		Fatalf("unexpected op: %v", n.Op)
-	}
-	n.aux = uint8(op)
-}
-
-func (n *Node) IndexMapLValue() bool {
-	if n.Op != OINDEXMAP {
-		Fatalf("unexpected op: %v", n.Op)
-	}
-	return n.aux != 0
-}
-
-func (n *Node) SetIndexMapLValue(b bool) {
-	if n.Op != OINDEXMAP {
-		Fatalf("unexpected op: %v", n.Op)
-	}
-	if b {
-		n.aux = 1
-	} else {
-		n.aux = 0
-	}
-}
-
-func (n *Node) TChanDir() types.ChanDir {
-	if n.Op != OTCHAN {
-		Fatalf("unexpected op: %v", n.Op)
-	}
-	return types.ChanDir(n.aux)
-}
-
-func (n *Node) SetTChanDir(dir types.ChanDir) {
-	if n.Op != OTCHAN {
-		Fatalf("unexpected op: %v", n.Op)
-	}
-	n.aux = uint8(dir)
-}
-
-func (n *Node) IsSynthetic() bool {
-	name := n.Sym.Name
-	return name[0] == '.' || name[0] == '~'
-}
-
-// IsAutoTmp indicates if n was created by the compiler as a temporary,
-// based on the setting of the .AutoTemp flag in n's Name.
-func (n *Node) IsAutoTmp() bool {
-	if n == nil || n.Op != ONAME {
-		return false
-	}
-	return n.Name.AutoTemp()
-}
-
-const (
-	nodeClass, _     = iota, 1 << iota // PPARAM, PAUTO, PEXTERN, etc; three bits; first in the list because frequently accessed
-	_, _                               // second nodeClass bit
-	_, _                               // third nodeClass bit
-	nodeWalkdef, _                     // tracks state during typecheckdef; 2 == loop detected; two bits
-	_, _                               // second nodeWalkdef bit
-	nodeTypecheck, _                   // tracks state during typechecking; 2 == loop detected; two bits
-	_, _                               // second nodeTypecheck bit
-	nodeInitorder, _                   // tracks state during init1; two bits
-	_, _                               // second nodeInitorder bit
-	_, nodeHasBreak
-	_, nodeNoInline  // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
-	_, nodeImplicit  // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
-	_, nodeIsDDD     // is the argument variadic
-	_, nodeDiag      // already printed error about this
-	_, nodeColas     // OAS resulting from :=
-	_, nodeNonNil    // guaranteed to be non-nil
-	_, nodeTransient // storage can be reused immediately after this statement
-	_, nodeBounded   // bounds check unnecessary
-	_, nodeHasCall   // expression contains a function call
-	_, nodeLikely    // if statement condition likely
-	_, nodeHasVal    // node.E contains a Val
-	_, nodeHasOpt    // node.E contains an Opt
-	_, nodeEmbedded  // ODCLFIELD embedded type
-)
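The paired-iota idiom above gives each name both a shift (iota) and a single-bit mask (1 << iota), with blank identifiers reserving the extra positions of multi-bit fields. A self-contained illustration, using simplified set2/get2 helpers in the spirit of bitset32's:

	package main

	import "fmt"

	type bitset32 uint32

	// set2 stores a two-bit value b at the given shift.
	func (f *bitset32) set2(shift uint8, b uint8) {
		*f = *f&^(3<<shift) | bitset32(b&3)<<shift
	}

	// get2 reads the two-bit value at the given shift.
	func (f bitset32) get2(shift uint8) uint8 {
		return uint8(f>>shift) & 3
	}

	const (
		fieldA, _ = iota, 1 << iota // two-bit field at shift 0
		_, _                        // second fieldA bit
		fieldB, flagB               // fieldB is a shift, flagB a single-bit mask
	)

	func main() {
		var f bitset32
		f.set2(fieldA, 2)
		fmt.Println(f.get2(fieldA), f&flagB != 0) // 2 false
	}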
-
-func (n *Node) Class() Class     { return Class(n.flags.get3(nodeClass)) }
-func (n *Node) Walkdef() uint8   { return n.flags.get2(nodeWalkdef) }
-func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
-func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
-
-func (n *Node) HasBreak() bool  { return n.flags&nodeHasBreak != 0 }
-func (n *Node) NoInline() bool  { return n.flags&nodeNoInline != 0 }
-func (n *Node) Implicit() bool  { return n.flags&nodeImplicit != 0 }
-func (n *Node) IsDDD() bool     { return n.flags&nodeIsDDD != 0 }
-func (n *Node) Diag() bool      { return n.flags&nodeDiag != 0 }
-func (n *Node) Colas() bool     { return n.flags&nodeColas != 0 }
-func (n *Node) NonNil() bool    { return n.flags&nodeNonNil != 0 }
-func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
-func (n *Node) Bounded() bool   { return n.flags&nodeBounded != 0 }
-func (n *Node) HasCall() bool   { return n.flags&nodeHasCall != 0 }
-func (n *Node) Likely() bool    { return n.flags&nodeLikely != 0 }
-func (n *Node) HasVal() bool    { return n.flags&nodeHasVal != 0 }
-func (n *Node) HasOpt() bool    { return n.flags&nodeHasOpt != 0 }
-func (n *Node) Embedded() bool  { return n.flags&nodeEmbedded != 0 }
-
-func (n *Node) SetClass(b Class)     { n.flags.set3(nodeClass, uint8(b)) }
-func (n *Node) SetWalkdef(b uint8)   { n.flags.set2(nodeWalkdef, b) }
-func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
-func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
-
-func (n *Node) SetHasBreak(b bool)  { n.flags.set(nodeHasBreak, b) }
-func (n *Node) SetNoInline(b bool)  { n.flags.set(nodeNoInline, b) }
-func (n *Node) SetImplicit(b bool)  { n.flags.set(nodeImplicit, b) }
-func (n *Node) SetIsDDD(b bool)     { n.flags.set(nodeIsDDD, b) }
-func (n *Node) SetDiag(b bool)      { n.flags.set(nodeDiag, b) }
-func (n *Node) SetColas(b bool)     { n.flags.set(nodeColas, b) }
-func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
-func (n *Node) SetHasCall(b bool)   { n.flags.set(nodeHasCall, b) }
-func (n *Node) SetLikely(b bool)    { n.flags.set(nodeLikely, b) }
-func (n *Node) SetHasVal(b bool)    { n.flags.set(nodeHasVal, b) }
-func (n *Node) SetHasOpt(b bool)    { n.flags.set(nodeHasOpt, b) }
-func (n *Node) SetEmbedded(b bool)  { n.flags.set(nodeEmbedded, b) }
-
-// MarkNonNil marks a pointer n as being guaranteed non-nil,
-// on all code paths, at all times.
-// During conversion to SSA, non-nil pointers won't have nil checks
-// inserted before dereferencing. See state.exprPtr.
-func (n *Node) MarkNonNil() {
-	if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() {
-		Fatalf("MarkNonNil(%v), type %v", n, n.Type)
-	}
-	n.flags.set(nodeNonNil, true)
-}
-
-// SetBounded indicates whether operation n does not need safety checks.
-// When n is an index or slice operation, n does not need bounds checks.
-// When n is a dereferencing operation, n does not need nil checks.
-// When n is a makeslice+copy operation, n does not need length and cap checks.
-func (n *Node) SetBounded(b bool) {
-	switch n.Op {
-	case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
-		// No bounds checks needed.
-	case ODOTPTR, ODEREF:
-		// No nil check needed.
-	case OMAKESLICECOPY:
-		// No length and cap checks needed
-		// since new slice and copied over slice data have same length.
-	default:
-		Fatalf("SetBounded(%v)", n)
-	}
-	n.flags.set(nodeBounded, b)
-}
-
-// MarkReadonly indicates that n is an ONAME with readonly contents.
-func (n *Node) MarkReadonly() {
-	if n.Op != ONAME {
-		Fatalf("Node.MarkReadonly %v", n.Op)
-	}
-	n.Name.SetReadonly(true)
-	// Mark the linksym as readonly immediately
-	// so that the SSA backend can use this information.
-	// It will be overridden later during dumpglobls.
-	n.Sym.Linksym().Type = objabi.SRODATA
-}
-
-// Val returns the Val for the node.
-func (n *Node) Val() Val {
-	if !n.HasVal() {
-		return Val{}
-	}
-	return Val{n.E}
-}
-
-// SetVal sets the Val for the node, which must not have been used with SetOpt.
-func (n *Node) SetVal(v Val) {
-	if n.HasOpt() {
-		Debug.h = 1
-		Dump("have Opt", n)
-		Fatalf("have Opt")
-	}
-	n.SetHasVal(true)
-	n.E = v.U
-}
-
-// Opt returns the optimizer data for the node.
-func (n *Node) Opt() interface{} {
-	if !n.HasOpt() {
-		return nil
-	}
-	return n.E
-}
-
-// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
-// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts.
-func (n *Node) SetOpt(x interface{}) {
-	if x == nil && n.HasVal() {
-		return
-	}
-	if n.HasVal() {
-		Debug.h = 1
-		Dump("have Val", n)
-		Fatalf("have Val")
-	}
-	n.SetHasOpt(true)
-	n.E = x
-}
-
-func (n *Node) Iota() int64 {
-	return n.Xoffset
-}
-
-func (n *Node) SetIota(x int64) {
-	n.Xoffset = x
-}
-
-// mayBeShared reports whether n may occur in multiple places in the AST.
-// Extra care must be taken when mutating such a node.
-func (n *Node) mayBeShared() bool {
-	switch n.Op {
-	case ONAME, OLITERAL, OTYPE:
-		return true
-	}
-	return false
-}
-
-// isMethodExpression reports whether n represents a method expression T.M.
-func (n *Node) isMethodExpression() bool {
-	return n.Op == ONAME && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME
-}
-
-// funcname returns the name (without the package) of the function n.
-func (n *Node) funcname() string {
-	if n == nil || n.Func == nil || n.Func.Nname == nil {
-		return "<nil>"
-	}
-	return n.Func.Nname.Sym.Name
-}
-
-// pkgFuncName returns the name of the function referenced by n, with package prepended.
-// This differs from the compiler's internal convention where local functions lack a package
-// because the ultimate consumer of this is a human looking at an IDE; package is only empty
-// if the compilation package is actually the empty string.
-func (n *Node) pkgFuncName() string {
-	var s *types.Sym
-	if n == nil {
-		return "<nil>"
-	}
-	if n.Op == ONAME {
-		s = n.Sym
-	} else {
-		if n.Func == nil || n.Func.Nname == nil {
-			return "<nil>"
-		}
-		s = n.Func.Nname.Sym
-	}
-	pkg := s.Pkg
-
-	p := myimportpath
-	if pkg != nil && pkg.Path != "" {
-		p = pkg.Path
-	}
-	if p == "" {
-		return s.Name
-	}
-	return p + "." + s.Name
-}
-
-// The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym.
-func (n *Node) CanBeAnSSASym() {
-}
-
-// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
-type Name struct {
-	Pack *Node      // real package for import . names
-	Pkg  *types.Pkg // pkg for OPACK nodes
-	// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
-	// For a closure var, the ONAME node of the outer captured variable
-	Defn *Node
-	// The ODCLFUNC node (for a static function/method or a closure) in which
-	// the local variable or param is declared.
-	Curfn     *Node
-	Param     *Param // additional fields for ONAME, OTYPE
-	Decldepth int32  // declaration loop depth, increased for every loop or label
-	// Unique number for ONAME nodes within a function. Function outputs
-	// (results) are numbered starting at one, followed by function inputs
-	// (parameters), and then local variables. Vargen is used to distinguish
-	// local variables/params with the same name.
-	Vargen int32
-	flags  bitset16
-}
-
-const (
-	nameCaptured = 1 << iota // is the variable captured by a closure
-	nameReadonly
-	nameByval                 // is the variable captured by value or by reference
-	nameNeedzero              // if it contains pointers, needs to be zeroed on function entry
-	nameAutoTemp              // is the variable a temporary (implies no DWARF info; reset if it escapes to the heap)
-	nameUsed                  // for variable declared and not used error
-	nameIsClosureVar          // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
-	nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
-	nameAssigned              // is the variable ever assigned to
-	nameAddrtaken             // address taken, even if not moved to heap
-	nameInlFormal             // PAUTO created by inliner, derived from callee formal
-	nameInlLocal              // PAUTO created by inliner, derived from callee local
-	nameOpenDeferSlot         // if temporary var storing info for open-coded defers
-	nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
-)
-
-func (n *Name) Captured() bool              { return n.flags&nameCaptured != 0 }
-func (n *Name) Readonly() bool              { return n.flags&nameReadonly != 0 }
-func (n *Name) Byval() bool                 { return n.flags&nameByval != 0 }
-func (n *Name) Needzero() bool              { return n.flags&nameNeedzero != 0 }
-func (n *Name) AutoTemp() bool              { return n.flags&nameAutoTemp != 0 }
-func (n *Name) Used() bool                  { return n.flags&nameUsed != 0 }
-func (n *Name) IsClosureVar() bool          { return n.flags&nameIsClosureVar != 0 }
-func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
-func (n *Name) Assigned() bool              { return n.flags&nameAssigned != 0 }
-func (n *Name) Addrtaken() bool             { return n.flags&nameAddrtaken != 0 }
-func (n *Name) InlFormal() bool             { return n.flags&nameInlFormal != 0 }
-func (n *Name) InlLocal() bool              { return n.flags&nameInlLocal != 0 }
-func (n *Name) OpenDeferSlot() bool         { return n.flags&nameOpenDeferSlot != 0 }
-func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
-
-func (n *Name) SetCaptured(b bool)              { n.flags.set(nameCaptured, b) }
-func (n *Name) SetReadonly(b bool)              { n.flags.set(nameReadonly, b) }
-func (n *Name) SetByval(b bool)                 { n.flags.set(nameByval, b) }
-func (n *Name) SetNeedzero(b bool)              { n.flags.set(nameNeedzero, b) }
-func (n *Name) SetAutoTemp(b bool)              { n.flags.set(nameAutoTemp, b) }
-func (n *Name) SetUsed(b bool)                  { n.flags.set(nameUsed, b) }
-func (n *Name) SetIsClosureVar(b bool)          { n.flags.set(nameIsClosureVar, b) }
-func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
-func (n *Name) SetAssigned(b bool)              { n.flags.set(nameAssigned, b) }
-func (n *Name) SetAddrtaken(b bool)             { n.flags.set(nameAddrtaken, b) }
-func (n *Name) SetInlFormal(b bool)             { n.flags.set(nameInlFormal, b) }
-func (n *Name) SetInlLocal(b bool)              { n.flags.set(nameInlLocal, b) }
-func (n *Name) SetOpenDeferSlot(b bool)         { n.flags.set(nameOpenDeferSlot, b) }
-func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
-
-type Param struct {
-	Ntype    *Node
-	Heapaddr *Node // temp holding heap address of param
-
-	// ONAME PAUTOHEAP
-	Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
-
-	// ONAME closure linkage
-	// Consider:
-	//
-	//	func f() {
-	//		x := 1 // x1
-	//		func() {
-	//			use(x) // x2
-	//			func() {
-	//				use(x) // x3
-	//				--- parser is here ---
-	//			}()
-	//		}()
-	//	}
-	//
-	// There is an original declaration of x and then a chain of mentions of x
-	// leading into the current function. Each time x is mentioned in a new closure,
-	// we create a variable representing x for use in that specific closure,
-	// since the way you get to x is different in each closure.
-	//
-	// Let's number the specific variables as shown in the code:
-	// x1 is the original x, x2 is when mentioned in the closure,
-	// and x3 is when mentioned in the closure in the closure.
-	//
-	// We keep these linked (assume N > 1):
-	//
-	//   - x1.Defn = original declaration statement for x (like most variables)
-	//   - x1.Innermost = current innermost closure x (in this case x3), or nil for none
-	//   - x1.IsClosureVar() = false
-	//
-	//   - xN.Defn = x1, N > 1
-	//   - xN.IsClosureVar() = true, N > 1
-	//   - x2.Outer = nil
-	//   - xN.Outer = x(N-1), N > 2
-	//
-	//
-	// When we look up x in the symbol table, we always get x1.
-	// Then we can use x1.Innermost (if not nil) to get the x
-	// for the innermost known closure function,
-	// but the first reference in a closure will find either no x1.Innermost
-	// or an x1.Innermost with .Funcdepth < Funcdepth.
-	// In that case, a new xN must be created, linked in with:
-	//
-	//     xN.Defn = x1
-	//     xN.Outer = x1.Innermost
-	//     x1.Innermost = xN
-	//
-	// When we finish the function, we'll process its closure variables
-	// and find xN and pop it off the list using:
-	//
-	//     x1 := xN.Defn
-	//     x1.Innermost = xN.Outer
-	//
-	// We leave x1.Innermost set so that we can still get to the original
-	// variable quickly. Not shown here, but once we're
-	// done parsing a function and no longer need xN.Outer for the
-	// lexical x reference links as described above, funcLit
-	// recomputes xN.Outer as the semantic x reference link tree,
-	// even filling in x in intermediate closures that might not
-	// have mentioned it along the way to inner closures that did.
-	// See funcLit for details.
-	//
-	// During the eventual compilation, then, for closure variables we have:
-	//
-	//     xN.Defn = original variable
-	//     xN.Outer = variable captured in next outward scope
-	//                to make closure where xN appears
-	//
-	// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
-	// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
-	Innermost *Node
-	Outer     *Node
-
-	// OTYPE & ONAME //go:embed info,
-	// sharing storage to reduce gc.Param size.
-	// Extra is nil, or else *Extra is a *paramType or an *embedList.
-	Extra *interface{}
-}
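The source pattern the x1/x2/x3 chain describes, runnable: each closure that mentions x gets its own capture record in the compiler, but all of them alias the one declared variable:

	package main

	import "fmt"

	func main() {
		x := 1 // x1: the original declaration
		func() {
			x++ // x2: x as captured by the outer closure
			func() {
				x++ // x3: x as captured by the inner closure
			}()
		}()
		fmt.Println(x) // 3: every xN refers to the same variable
	}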
-
-type paramType struct {
-	flag  PragmaFlag
-	alias bool
-}
-
-type irEmbed struct {
-	Pos      src.XPos
-	Patterns []string
-}
-
-type embedList []irEmbed
-
-// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
-func (p *Param) Pragma() PragmaFlag {
-	if p.Extra == nil {
-		return 0
-	}
-	return (*p.Extra).(*paramType).flag
-}
-
-// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
-func (p *Param) SetPragma(flag PragmaFlag) {
-	if p.Extra == nil {
-		if flag == 0 {
-			return
-		}
-		p.Extra = new(interface{})
-		*p.Extra = &paramType{flag: flag}
-		return
-	}
-	(*p.Extra).(*paramType).flag = flag
-}
-
-// Alias reports whether p, which must be for an OTYPE, is a type alias.
-func (p *Param) Alias() bool {
-	if p.Extra == nil {
-		return false
-	}
-	t, ok := (*p.Extra).(*paramType)
-	if !ok {
-		return false
-	}
-	return t.alias
-}
-
-// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
-func (p *Param) SetAlias(alias bool) {
-	if p.Extra == nil {
-		if !alias {
-			return
-		}
-		p.Extra = new(interface{})
-		*p.Extra = &paramType{alias: alias}
-		return
-	}
-	(*p.Extra).(*paramType).alias = alias
-}
-
-// EmbedList returns the list of embedded files for p,
-// which must be for an ONAME var.
-func (p *Param) EmbedList() []irEmbed {
-	if p.Extra == nil {
-		return nil
-	}
-	return *(*p.Extra).(*embedList)
-}
-
-// SetEmbedList sets the list of embedded files for p,
-// which must be for an ONAME var.
-func (p *Param) SetEmbedList(list []irEmbed) {
-	if p.Extra == nil {
-		if len(list) == 0 {
-			return
-		}
-		f := embedList(list)
-		p.Extra = new(interface{})
-		*p.Extra = &f
-		return
-	}
-	*(*p.Extra).(*embedList) = list
-}
-
-// Functions
-//
-// A simple function declaration is represented as an ODCLFUNC node f
-// and an ONAME node n. They're linked to one another through
-// f.Func.Nname == n and n.Name.Defn == f. When functions are
-// referenced by name in an expression, the function's ONAME node is
-// used directly.
-//
-// Function names have n.Class() == PFUNC. This distinguishes them
-// from variables of function type.
-//
-// Confusingly, n.Func and f.Func both exist, but commonly point to
-// different Funcs. (Exception: an OCALLPART's Func does point to its
-// ODCLFUNC's Func.)
-//
-// A method declaration is represented like functions, except n.Sym
-// will be the qualified method name (e.g., "T.m") and
-// f.Func.Shortname is the bare method name (e.g., "m").
-//
-// Method expressions are represented as ONAME/PFUNC nodes like
-// function names, but their Left and Right fields still point to the
-// type and method, respectively. They can be distinguished from
-// normal functions with isMethodExpression. Also, unlike function
-// name nodes, method expression nodes exist for each method
-// expression. The declaration ONAME can be accessed with
-// x.Type.Nname(), where x is the method expression ONAME node.
-//
-// Method values are represented by ODOTMETH/ODOTINTER when called
-// immediately, and OCALLPART otherwise. They are like method
-// expressions, except that for ODOTMETH/ODOTINTER the method name is
-// stored in Sym instead of Right.
-//
-// Closures are represented by OCLOSURE node c. They link back and
-// forth with the ODCLFUNC via Func.Closure; that is, c.Func.Closure
-// == f and f.Func.Closure == c.
-//
-// Function bodies are stored in f.Nbody, and inline function bodies
-// are stored in n.Func.Inl. Pragmas are stored in f.Func.Pragma.
-//
-// Imported functions skip the ODCLFUNC, so n.Name.Defn is nil. They
-// also use Dcl instead of Inldcl.
-
-// Func holds Node fields used only with function-like nodes.
-type Func struct {
-	Shortname *types.Sym
-	// Extra entry code for the function. For example, allocate and initialize
-	// memory for escaping parameters. However, just for OCLOSURE, Enter is a
-	// list of ONAME nodes of captured variables
-	Enter Nodes
-	Exit  Nodes
-	// ONAME nodes for closure params, each should have closurevar set
-	Cvars Nodes
-	// ONAME nodes for all params/locals for this func/closure, does NOT
-	// include closurevars until transformclosure runs.
-	Dcl []*Node
-
-	// Parents records the parent scope of each scope within a
-	// function. The root scope (0) has no parent, so the i'th
-	// scope's parent is stored at Parents[i-1].
-	Parents []ScopeID
-
-	// Marks records scope boundary changes.
-	Marks []Mark
-
-	// Closgen tracks how many closures have been generated within
-	// this function. Used by closurename for creating unique
-	// function names.
-	Closgen int
-
-	FieldTrack map[*types.Sym]struct{}
-	DebugInfo  *ssa.FuncDebug
-	Ntype      *Node // signature
-	Top        int   // top context (ctxCallee, etc)
-	Closure    *Node // OCLOSURE <-> ODCLFUNC (see header comment above)
-	Nname      *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
-	lsym       *obj.LSym
-
-	Inl *Inline
-
-	Label int32 // largest auto-generated label in this function
-
-	Endlineno src.XPos
-	WBPos     src.XPos // position of first write barrier; see SetWBPos
-
-	Pragma PragmaFlag // go:xxx function annotations
-
-	flags      bitset16
-	numDefers  int // number of defer calls in the function
-	numReturns int // number of explicit returns in the function
-
-	// nwbrCalls records the LSyms of functions called by this
-	// function for go:nowritebarrierrec analysis. Only filled in
-	// if nowritebarrierrecCheck != nil.
-	nwbrCalls *[]nowritebarrierrecCallSym
-}
-
-// An Inline holds fields used for function bodies that can be inlined.
-type Inline struct {
-	Cost int32 // heuristic cost of inlining this function
-
-	// Copies of Func.Dcl and Nbody for use during inlining.
-	Dcl  []*Node
-	Body []*Node
-}
-
-// A Mark represents a scope boundary.
-type Mark struct {
-	// Pos is the position of the token that marks the scope
-	// change.
-	Pos src.XPos
-
-	// Scope identifies the innermost scope to the right of Pos.
-	Scope ScopeID
-}
-
-// A ScopeID represents a lexical scope within a function.
-type ScopeID int32
-
-const (
-	funcDupok         = 1 << iota // duplicate definitions ok
-	funcWrapper                   // is method wrapper
-	funcNeedctxt                  // function uses context register (has closure variables)
-	funcReflectMethod             // function calls reflect.Type.Method or MethodByName
-	// true if closure inside a function; false if a simple function or a
-	// closure in a global variable initialization
-	funcIsHiddenClosure
-	funcHasDefer                 // contains a defer statement
-	funcNilCheckDisabled         // disable nil checks when compiling this function
-	funcInlinabilityChecked      // inliner has already determined whether the function is inlinable
-	funcExportInline             // include inline body in export data
-	funcInstrumentBody           // add race/msan instrumentation during SSA construction
-	funcOpenCodedDeferDisallowed // can't do open-coded defers
-)
-
-func (f *Func) Dupok() bool                    { return f.flags&funcDupok != 0 }
-func (f *Func) Wrapper() bool                  { return f.flags&funcWrapper != 0 }
-func (f *Func) Needctxt() bool                 { return f.flags&funcNeedctxt != 0 }
-func (f *Func) ReflectMethod() bool            { return f.flags&funcReflectMethod != 0 }
-func (f *Func) IsHiddenClosure() bool          { return f.flags&funcIsHiddenClosure != 0 }
-func (f *Func) HasDefer() bool                 { return f.flags&funcHasDefer != 0 }
-func (f *Func) NilCheckDisabled() bool         { return f.flags&funcNilCheckDisabled != 0 }
-func (f *Func) InlinabilityChecked() bool      { return f.flags&funcInlinabilityChecked != 0 }
-func (f *Func) ExportInline() bool             { return f.flags&funcExportInline != 0 }
-func (f *Func) InstrumentBody() bool           { return f.flags&funcInstrumentBody != 0 }
-func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
-
-func (f *Func) SetDupok(b bool)                    { f.flags.set(funcDupok, b) }
-func (f *Func) SetWrapper(b bool)                  { f.flags.set(funcWrapper, b) }
-func (f *Func) SetNeedctxt(b bool)                 { f.flags.set(funcNeedctxt, b) }
-func (f *Func) SetReflectMethod(b bool)            { f.flags.set(funcReflectMethod, b) }
-func (f *Func) SetIsHiddenClosure(b bool)          { f.flags.set(funcIsHiddenClosure, b) }
-func (f *Func) SetHasDefer(b bool)                 { f.flags.set(funcHasDefer, b) }
-func (f *Func) SetNilCheckDisabled(b bool)         { f.flags.set(funcNilCheckDisabled, b) }
-func (f *Func) SetInlinabilityChecked(b bool)      { f.flags.set(funcInlinabilityChecked, b) }
-func (f *Func) SetExportInline(b bool)             { f.flags.set(funcExportInline, b) }
-func (f *Func) SetInstrumentBody(b bool)           { f.flags.set(funcInstrumentBody, b) }
-func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
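-
-// Editor's sketch (not original source): each flag pairs a getter with
-// a setter over the shared bitset, e.g.
-//
-//	f.SetHasDefer(true)
-//	if f.HasDefer() {
-//		// emit defer-related epilogue code, etc.
-//	}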
-
-func (f *Func) setWBPos(pos src.XPos) {
-	if Debug_wb != 0 {
-		Warnl(pos, "write barrier")
-	}
-	if !f.WBPos.IsKnown() {
-		f.WBPos = pos
-	}
-}
-
-//go:generate stringer -type=Op -trimprefix=O
-
-type Op uint8
-
-// Node ops.
-const (
-	OXXX Op = iota
-
-	// names
-	ONAME // var or func name
-	// Unnamed arg or return value: f(int, string) (int, error) { etc }
-	// Also used for a qualified package identifier that hasn't been resolved yet.
-	ONONAME
-	OTYPE    // type name
-	OPACK    // import
-	OLITERAL // literal
-
-	// expressions
-	OADD          // Left + Right
-	OSUB          // Left - Right
-	OOR           // Left | Right
-	OXOR          // Left ^ Right
-	OADDSTR       // +{List} (string addition, list elements are strings)
-	OADDR         // &Left
-	OANDAND       // Left && Right
-	OAPPEND       // append(List); after walk, Left may contain elem type descriptor
-	OBYTES2STR    // Type(Left) (Type is string, Left is a []byte)
-	OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
-	ORUNES2STR    // Type(Left) (Type is string, Left is a []rune)
-	OSTR2BYTES    // Type(Left) (Type is []byte, Left is a string)
-	OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
-	OSTR2RUNES    // Type(Left) (Type is []rune, Left is a string)
-	// Left = Right or (if Colas=true) Left := Right
-	// If Colas, then Ninit includes a DCL node for Left.
-	OAS
-	// List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist
-	// If Colas, then Ninit includes DCL nodes for List
-	OAS2
-	OAS2DOTTYPE // List = Right (x, ok = I.(int))
-	OAS2FUNC    // List = Right (x, y = f())
-	OAS2MAPR    // List = Right (x, ok = m["foo"])
-	OAS2RECV    // List = Right (x, ok = <-c)
-	OASOP       // Left Etype= Right (x += y)
-	OCALL       // Left(List) (function call, method call or type conversion)
-
-	// OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
-	// Prior to walk, they are: Left(List), where List is all regular arguments.
-	// After walk, List is a series of assignments to temporaries,
-	// and Rlist is an updated set of arguments.
-	// Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
-	// TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
-	OCALLFUNC  // Left(List/Rlist) (function call f(args))
-	OCALLMETH  // Left(List/Rlist) (direct method call x.Method(args))
-	OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
-	OCALLPART  // Left.Right (method expression x.Method, not called)
-	OCAP       // cap(Left)
-	OCLOSE     // close(Left)
-	OCLOSURE   // func Type { Func.Closure.Nbody } (func literal)
-	OCOMPLIT   // Right{List} (composite literal, not yet lowered to specific form)
-	OMAPLIT    // Type{List} (composite literal, Type is map)
-	OSTRUCTLIT // Type{List} (composite literal, Type is struct)
-	OARRAYLIT  // Type{List} (composite literal, Type is array)
-	OSLICELIT  // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
-	OPTRLIT    // &Left (left is composite literal)
-	OCONV      // Type(Left) (type conversion)
-	OCONVIFACE // Type(Left) (type conversion, to interface)
-	OCONVNOP   // Type(Left) (type conversion, no effect)
-	OCOPY      // copy(Left, Right)
-	ODCL       // var Left (declares Left of type Left.Type)
-
-	// Used during parsing but don't last.
-	ODCLFUNC  // func f() or func (r) f()
-	ODCLFIELD // struct field, interface field, or func/method argument/return value.
-	ODCLCONST // const pi = 3.14
-	ODCLTYPE  // type Int int or type Int = int
-
-	ODELETE        // delete(List)
-	ODOT           // Left.Sym (Left is of struct type)
-	ODOTPTR        // Left.Sym (Left is of pointer to struct type)
-	ODOTMETH       // Left.Sym (Left is non-interface, Right is method name)
-	ODOTINTER      // Left.Sym (Left is interface, Right is method name)
-	OXDOT          // Left.Sym (before rewrite to one of the preceding)
-	ODOTTYPE       // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor
-	ODOTTYPE2      // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor
-	OEQ            // Left == Right
-	ONE            // Left != Right
-	OLT            // Left < Right
-	OLE            // Left <= Right
-	OGE            // Left >= Right
-	OGT            // Left > Right
-	ODEREF         // *Left
-	OINDEX         // Left[Right] (index of array or slice)
-	OINDEXMAP      // Left[Right] (index of map)
-	OKEY           // Left:Right (key:value in struct/array/map literal)
-	OSTRUCTKEY     // Sym:Left (key:value in struct literal, after type checking)
-	OLEN           // len(Left)
-	OMAKE          // make(List) (before type checking converts to one of the following)
-	OMAKECHAN      // make(Type, Left) (type is chan)
-	OMAKEMAP       // make(Type, Left) (type is map)
-	OMAKESLICE     // make(Type, Left, Right) (type is slice)
-	OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is the length and Right is the slice to copy from)
-	// OMAKESLICECOPY is created by the order pass and corresponds to:
-	//  s = make(Type, Left); copy(s, Right)
-	//
-	// Bounded can be set on the node when Left == len(Right) is known at compile time.
-	//
-	// This node is created so the walk pass can optimize this pattern which would
-	// otherwise be hard to detect after the order pass.
-	OMUL         // Left * Right
-	ODIV         // Left / Right
-	OMOD         // Left % Right
-	OLSH         // Left << Right
-	ORSH         // Left >> Right
-	OAND         // Left & Right
-	OANDNOT      // Left &^ Right
-	ONEW         // new(Left); corresponds to calls to new in source code
-	ONEWOBJ      // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
-	ONOT         // !Left
-	OBITNOT      // ^Left
-	OPLUS        // +Left
-	ONEG         // -Left
-	OOROR        // Left || Right
-	OPANIC       // panic(Left)
-	OPRINT       // print(List)
-	OPRINTN      // println(List)
-	OPAREN       // (Left)
-	OSEND        // Left <- Right
-	OSLICE       // Left[List[0] : List[1]] (Left is untypechecked or slice)
-	OSLICEARR    // Left[List[0] : List[1]] (Left is array)
-	OSLICESTR    // Left[List[0] : List[1]] (Left is string)
-	OSLICE3      // Left[List[0] : List[1] : List[2]] (Left is untypechecked or slice)
-	OSLICE3ARR   // Left[List[0] : List[1] : List[2]] (Left is array)
-	OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
-	ORECOVER     // recover()
-	ORECV        // <-Left
-	ORUNESTR     // Type(Left) (Type is string, Left is rune)
-	OSELRECV     // Left = <-Right.Left: (appears as .Left of OCASE; Right.Op == ORECV)
-	OSELRECV2    // List = <-Right.Left: (appears as .Left of OCASE; count(List) == 2, Right.Op == ORECV)
-	OIOTA        // iota
-	OREAL        // real(Left)
-	OIMAG        // imag(Left)
-	OCOMPLEX     // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
-	OALIGNOF     // unsafe.Alignof(Left)
-	OOFFSETOF    // unsafe.Offsetof(Left)
-	OSIZEOF      // unsafe.Sizeof(Left)
-
-	// statements
-	OBLOCK // { List } (block of code)
-	OBREAK // break [Sym]
-	// OCASE:  case List: Nbody (List==nil means default)
-	//   For OTYPESW, List is an OTYPE node for the specified type (or OLITERAL
-	//   for nil), and, if a type-switch variable is specified, Rlist is an
-	//   ONAME for the version of the type-switch variable with the specified
-	//   type.
-	OCASE
-	OCONTINUE // continue [Sym]
-	ODEFER    // defer Left (Left must be call)
-	OEMPTY    // no-op (empty statement)
-	OFALL     // fallthrough
-	OFOR      // for Ninit; Left; Right { Nbody }
-	// OFORUNTIL is like OFOR, but the test (Left) is applied after the body:
-	// 	Ninit
-	// 	top: { Nbody }   // Execute the body at least once
-	// 	cont: Right
-	// 	if Left {        // And then test the loop condition
-	// 		List     // Before looping to top, execute List
-	// 		goto top
-	// 	}
-	// OFORUNTIL is created by walk. There's no way to write this in Go code.
-	OFORUNTIL
-	OGOTO   // goto Sym
-	OIF     // if Ninit; Left { Nbody } else { Rlist }
-	OLABEL  // Sym:
-	OGO     // go Left (Left must be call)
-	ORANGE  // for List = range Right { Nbody }
-	ORETURN // return List
-	OSELECT // select { List } (List is list of OCASE)
-	OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
-	// OTYPESW:  Left := Right.(type) (appears as .Left of OSWITCH)
-	//   Left is nil if there is no type-switch variable
-	OTYPESW
-
-	// types
-	OTCHAN   // chan int
-	OTMAP    // map[string]int
-	OTSTRUCT // struct{}
-	OTINTER  // interface{}
-	// OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
-	// list of result fields.
-	OTFUNC
-	OTARRAY // []int, [8]int, [N]int or [...]int
-
-	// misc
-	ODDD        // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}.
-	OINLCALL    // intermediary representation of an inlined call.
-	OEFACE      // itable and data words of an empty-interface value.
-	OITAB       // itable word of an interface value.
-	OIDATA      // data word of an interface value in Left
-	OSPTR       // base pointer of a slice or string.
-	OCLOSUREVAR // variable reference at beginning of closure function
-	OCFUNC      // reference to c function pointer (not go func value)
-	OCHECKNIL   // emit code to ensure pointer/interface not nil
-	OVARDEF     // variable is about to be fully initialized
-	OVARKILL    // variable is dead
-	OVARLIVE    // variable is alive
-	ORESULT     // result of a function call; Xoffset is stack offset
-	OINLMARK    // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
-
-	// arch-specific opcodes
-	ORETJMP // return to other function
-	OGETG   // runtime.getg() (read g pointer)
-
-	OEND
-)
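-
-// Editor's note (sketch, not original source): the stringer directive
-// above generates Op.String with the O prefix trimmed, so ops print by
-// name in diagnostics:
-//
-//	fmt.Printf("unexpected op %v", OADD) // prints "unexpected op ADD"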
-
-// Nodes is a pointer to a slice of *Node.
-// For fields that are not used in most nodes, this is used instead of
-// a slice to save space.
-type Nodes struct{ slice *[]*Node }
-
-// asNodes returns a slice of *Node as a Nodes value.
-func asNodes(s []*Node) Nodes {
-	return Nodes{&s}
-}
-
-// Slice returns the entries in Nodes as a slice.
-// Changes to the slice entries (as in s[i] = n) will be reflected in
-// the Nodes.
-func (n Nodes) Slice() []*Node {
-	if n.slice == nil {
-		return nil
-	}
-	return *n.slice
-}
-
-// Len returns the number of entries in Nodes.
-func (n Nodes) Len() int {
-	if n.slice == nil {
-		return 0
-	}
-	return len(*n.slice)
-}
-
-// Index returns the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) *Node {
-	return (*n.slice)[i]
-}
-
-// First returns the first element of Nodes (same as n.Index(0)).
-// It panics if n has no elements.
-func (n Nodes) First() *Node {
-	return (*n.slice)[0]
-}
-
-// Second returns the second element of Nodes (same as n.Index(1)).
-// It panics if n has fewer than two elements.
-func (n Nodes) Second() *Node {
-	return (*n.slice)[1]
-}
-
-// Set sets n to a slice.
-// This takes ownership of the slice.
-func (n *Nodes) Set(s []*Node) {
-	if len(s) == 0 {
-		n.slice = nil
-	} else {
-		// Copy s and take address of t rather than s to avoid
-		// allocation in the case where len(s) == 0 (which is
-		// over 3x more common, dynamically, for make.bash).
-		t := s
-		n.slice = &t
-	}
-}
-
-// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(n1 *Node) {
-	n.slice = &[]*Node{n1}
-}
-
-// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 *Node) {
-	n.slice = &[]*Node{n1, n2}
-}
-
-// Set3 sets n to a slice containing three nodes.
-func (n *Nodes) Set3(n1, n2, n3 *Node) {
-	n.slice = &[]*Node{n1, n2, n3}
-}
-
-// MoveNodes sets n to the contents of n2, then clears n2.
-func (n *Nodes) MoveNodes(n2 *Nodes) {
-	n.slice = n2.slice
-	n2.slice = nil
-}
-
-// SetIndex sets the i'th element of Nodes to node.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node *Node) {
-	(*n.slice)[i] = node
-}
-
-// SetFirst sets the first element of Nodes to node.
-// It panics if n does not have at least one element.
-func (n Nodes) SetFirst(node *Node) {
-	(*n.slice)[0] = node
-}
-
-// SetSecond sets the second element of Nodes to node.
-// It panics if n does not have at least two elements.
-func (n Nodes) SetSecond(node *Node) {
-	(*n.slice)[1] = node
-}
-
-// Addr returns the address of the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) **Node {
-	return &(*n.slice)[i]
-}
-
-// Append appends entries to Nodes.
-func (n *Nodes) Append(a ...*Node) {
-	if len(a) == 0 {
-		return
-	}
-	if n.slice == nil {
-		s := make([]*Node, len(a))
-		copy(s, a)
-		n.slice = &s
-		return
-	}
-	*n.slice = append(*n.slice, a...)
-}
-
-// Prepend prepends entries to Nodes.
-// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Prepend(a ...*Node) {
-	if len(a) == 0 {
-		return
-	}
-	if n.slice == nil {
-		n.slice = &a
-	} else {
-		*n.slice = append(a, *n.slice...)
-	}
-}
-
-// AppendNodes appends the contents of *n2 to n, then clears n2.
-func (n *Nodes) AppendNodes(n2 *Nodes) {
-	switch {
-	case n2.slice == nil:
-	case n.slice == nil:
-		n.slice = n2.slice
-	default:
-		*n.slice = append(*n.slice, *n2.slice...)
-	}
-	n2.slice = nil
-}
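-
-// exampleNodes is an editor's sketch (not original source) showing
-// typical use of the Nodes helpers above: build a small list, then
-// read and update it through the slice-backed accessors.
-func exampleNodes(a, b *Node) {
-	var ns Nodes
-	ns.Set1(a)   // ns = [a]
-	ns.Append(b) // ns = [a, b]
-	for _, n := range ns.Slice() {
-		_ = n // visit each entry
-	}
-	if ns.Len() == 2 && ns.Second() == b {
-		ns.SetFirst(b) // in-place update, visible through ns
-	}
-}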
-
-// inspect invokes f on each node in an AST in depth-first order.
-// If f(n) returns false, inspect skips visiting n's children.
-func inspect(n *Node, f func(*Node) bool) {
-	if n == nil || !f(n) {
-		return
-	}
-	inspectList(n.Ninit, f)
-	inspect(n.Left, f)
-	inspect(n.Right, f)
-	inspectList(n.List, f)
-	inspectList(n.Nbody, f)
-	inspectList(n.Rlist, f)
-}
-
-func inspectList(l Nodes, f func(*Node) bool) {
-	for _, n := range l.Slice() {
-		inspect(n, f)
-	}
-}
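-
-// exampleHasDefer is an editor's sketch (not original source): using
-// inspect/inspectList to answer a simple query over a function body,
-// returning false from the callback to prune the walk once done.
-func exampleHasDefer(fn *Node) bool {
-	found := false
-	inspectList(fn.Nbody, func(n *Node) bool {
-		if n.Op == ODEFER {
-			found = true
-		}
-		return !found // stop descending once a defer is found
-	})
-	return found
-}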
-
-// nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is
-// a ready-to-use empty queue.
-type nodeQueue struct {
-	ring       []*Node
-	head, tail int
-}
-
-// empty reports whether q contains no Nodes.
-func (q *nodeQueue) empty() bool {
-	return q.head == q.tail
-}
-
-// pushRight appends n to the right of the queue.
-func (q *nodeQueue) pushRight(n *Node) {
-	if len(q.ring) == 0 {
-		q.ring = make([]*Node, 16)
-	} else if q.head+len(q.ring) == q.tail {
-		// Grow the ring.
-		nring := make([]*Node, len(q.ring)*2)
-		// Copy the old elements.
-		part := q.ring[q.head%len(q.ring):]
-		if q.tail-q.head <= len(part) {
-			part = part[:q.tail-q.head]
-			copy(nring, part)
-		} else {
-			pos := copy(nring, part)
-			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
-		}
-		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
-	}
-
-	q.ring[q.tail%len(q.ring)] = n
-	q.tail++
-}
-
-// popLeft pops a node from the left of the queue. It panics if q is
-// empty.
-func (q *nodeQueue) popLeft() *Node {
-	if q.empty() {
-		panic("dequeue empty")
-	}
-	n := q.ring[q.head%len(q.ring)]
-	q.head++
-	return n
-}
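-
-// exampleBFS is an editor's sketch (not original source): a
-// breadth-first walk over Left/Right children, relying on the fact
-// that nodeQueue's zero value is a ready-to-use empty queue.
-func exampleBFS(root *Node, visit func(*Node)) {
-	if root == nil {
-		return
-	}
-	var q nodeQueue
-	q.pushRight(root)
-	for !q.empty() {
-		n := q.popLeft()
-		visit(n)
-		if n.Left != nil {
-			q.pushRight(n.Left)
-		}
-		if n.Right != nil {
-			q.pushRight(n.Right)
-		}
-	}
-}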
-
-// NodeSet is a set of Nodes.
-type NodeSet map[*Node]struct{}
-
-// Has reports whether s contains n.
-func (s NodeSet) Has(n *Node) bool {
-	_, isPresent := s[n]
-	return isPresent
-}
-
-// Add adds n to s.
-func (s *NodeSet) Add(n *Node) {
-	if *s == nil {
-		*s = make(map[*Node]struct{})
-	}
-	(*s)[n] = struct{}{}
-}
-
-// Sorted returns s sorted according to less.
-func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
-	var res []*Node
-	for n := range s {
-		res = append(res, n)
-	}
-	sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
-	return res
-}
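-
-// exampleDedup is an editor's sketch (not original source): NodeSet's
-// zero value works with Add, which allocates the map lazily; Sorted
-// restores a deterministic order, here by source position.
-func exampleDedup(nodes []*Node) []*Node {
-	var s NodeSet
-	for _, n := range nodes {
-		s.Add(n)
-	}
-	return s.Sorted(func(a, b *Node) bool { return a.Pos.Before(b.Pos) })
-}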
diff --git a/src/cmd/compile/internal/gc/trace.go b/src/cmd/compile/internal/gc/trace.go
index ed4b5a2..c6eb23a 100644
--- a/src/cmd/compile/internal/gc/trace.go
+++ b/src/cmd/compile/internal/gc/trace.go
@@ -9,6 +9,8 @@
 import (
 	"os"
 	tracepkg "runtime/trace"
+
+	"cmd/compile/internal/base"
 )
 
 func init() {
@@ -18,10 +20,10 @@
 func traceHandlerGo17(traceprofile string) {
 	f, err := os.Create(traceprofile)
 	if err != nil {
-		Fatalf("%v", err)
+		base.Fatalf("%v", err)
 	}
 	if err := tracepkg.Start(f); err != nil {
-		Fatalf("%v", err)
+		base.Fatalf("%v", err)
 	}
-	atExit(tracepkg.Stop)
+	base.AtExit(tracepkg.Stop)
 }
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
deleted file mode 100644
index c0b0503..0000000
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ /dev/null
@@ -1,4019 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"fmt"
-	"strings"
-)
-
-// To enable tracing support (-t flag), set enableTrace to true.
-const enableTrace = false
-
-var trace bool
-var traceIndent []byte
-var skipDowidthForTracing bool
-
-func tracePrint(title string, n *Node) func(np **Node) {
-	indent := traceIndent
-
-	// guard against nil
-	var pos, op string
-	var tc uint8
-	if n != nil {
-		pos = linestr(n.Pos)
-		op = n.Op.String()
-		tc = n.Typecheck()
-	}
-
-	skipDowidthForTracing = true
-	defer func() { skipDowidthForTracing = false }()
-	fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
-	traceIndent = append(traceIndent, ". "...)
-
-	return func(np **Node) {
-		traceIndent = traceIndent[:len(traceIndent)-2]
-
-		// if we have a result, use that
-		if np != nil {
-			n = *np
-		}
-
-		// guard against nil
-		// use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
-		var tc uint8
-		var typ *types.Type
-		if n != nil {
-			pos = linestr(n.Pos)
-			op = n.Op.String()
-			tc = n.Typecheck()
-			typ = n.Type
-		}
-
-		skipDowidthForTracing = true
-		defer func() { skipDowidthForTracing = false }()
-		fmt.Printf("%s: %s=> %p %s %v tc=%d type=%#L\n", pos, indent, n, op, n, tc, typ)
-	}
-}
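-
-// Editor's note (sketch, not original source): tracePrint is meant to
-// be used with defer so that the returned closure prints the closing
-// trace line for the (possibly rewritten) result, as in:
-//
-//	func typecheck(n *Node, top int) (res *Node) {
-//		if enableTrace && trace {
-//			defer tracePrint("typecheck", n)(&res)
-//		}
-//		...
-//	}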
-
-const (
-	ctxStmt    = 1 << iota // evaluated at statement level
-	ctxExpr                // evaluated in value context
-	ctxType                // evaluated in type context
-	ctxCallee              // call-only expressions are ok
-	ctxMultiOK             // multivalue function returns are ok
-	ctxAssign              // assigning to expression
-)
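-
-// Editor's note (sketch, not original source): callers pass a bitwise
-// OR of these flags as top and test them with masks, e.g.
-//
-//	top&(ctxExpr|ctxType) == ctxType // a type is expected here
-//	top&ctxCallee != 0               // n appears as the callee in a call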
-
-// typecheck type checks the whole tree of an expression:
-// it calculates expression types, evaluates compile-time constants,
-// marks variables that escape the local frame, and rewrites n.Op
-// to be more specific in some cases.
-
-var typecheckdefstack []*Node
-
-// resolve ONONAME to definition, if any.
-func resolve(n *Node) (res *Node) {
-	if n == nil || n.Op != ONONAME {
-		return n
-	}
-
-	// only trace if there's work to do
-	if enableTrace && trace {
-		defer tracePrint("resolve", n)(&res)
-	}
-
-	if n.Sym.Pkg != localpkg {
-		if inimport {
-			Fatalf("recursive inimport")
-		}
-		inimport = true
-		expandDecl(n)
-		inimport = false
-		return n
-	}
-
-	r := asNode(n.Sym.Def)
-	if r == nil {
-		return n
-	}
-
-	if r.Op == OIOTA {
-		if x := getIotaValue(); x >= 0 {
-			return nodintconst(x)
-		}
-		return n
-	}
-
-	return r
-}
-
-func typecheckslice(l []*Node, top int) {
-	for i := range l {
-		l[i] = typecheck(l[i], top)
-	}
-}
-
-var _typekind = []string{
-	TINT:        "int",
-	TUINT:       "uint",
-	TINT8:       "int8",
-	TUINT8:      "uint8",
-	TINT16:      "int16",
-	TUINT16:     "uint16",
-	TINT32:      "int32",
-	TUINT32:     "uint32",
-	TINT64:      "int64",
-	TUINT64:     "uint64",
-	TUINTPTR:    "uintptr",
-	TCOMPLEX64:  "complex64",
-	TCOMPLEX128: "complex128",
-	TFLOAT32:    "float32",
-	TFLOAT64:    "float64",
-	TBOOL:       "bool",
-	TSTRING:     "string",
-	TPTR:        "pointer",
-	TUNSAFEPTR:  "unsafe.Pointer",
-	TSTRUCT:     "struct",
-	TINTER:      "interface",
-	TCHAN:       "chan",
-	TMAP:        "map",
-	TARRAY:      "array",
-	TSLICE:      "slice",
-	TFUNC:       "func",
-	TNIL:        "nil",
-	TIDEAL:      "untyped number",
-}
-
-func typekind(t *types.Type) string {
-	if t.IsUntyped() {
-		return fmt.Sprintf("%v", t)
-	}
-	et := t.Etype
-	if int(et) < len(_typekind) {
-		s := _typekind[et]
-		if s != "" {
-			return s
-		}
-	}
-	return fmt.Sprintf("etype=%d", et)
-}
-
-func cycleFor(start *Node) []*Node {
-	// Find the start node in typecheck_tcstack.
-	// We know that it must exist because each time we mark
-	// a node with n.SetTypecheck(2) we push it on the stack,
-	// and each time we mark a node with n.SetTypecheck(1) we
-	// pop it from the stack. We hit a cycle when we encounter
-	// a node marked 2, in which case it must be on the stack.
-	i := len(typecheck_tcstack) - 1
-	for i > 0 && typecheck_tcstack[i] != start {
-		i--
-	}
-
-	// collect all nodes with same Op
-	var cycle []*Node
-	for _, n := range typecheck_tcstack[i:] {
-		if n.Op == start.Op {
-			cycle = append(cycle, n)
-		}
-	}
-
-	return cycle
-}
-
-func cycleTrace(cycle []*Node) string {
-	var s string
-	for i, n := range cycle {
-		s += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cycle[(i+1)%len(cycle)])
-	}
-	return s
-}
-
-var typecheck_tcstack []*Node
-
-// typecheck type checks node n.
-// The result of typecheck MUST be assigned back to n, e.g.
-// 	n.Left = typecheck(n.Left, top)
-func typecheck(n *Node, top int) (res *Node) {
-	// cannot type check until all the source has been parsed
-	if !typecheckok {
-		Fatalf("early typecheck")
-	}
-
-	if n == nil {
-		return nil
-	}
-
-	// only trace if there's work to do
-	if enableTrace && trace {
-		defer tracePrint("typecheck", n)(&res)
-	}
-
-	lno := setlineno(n)
-
-	// Skip over parens.
-	for n.Op == OPAREN {
-		n = n.Left
-	}
-
-	// Resolve definition of name and value of iota lazily.
-	n = resolve(n)
-
-	// Skip typecheck if already done.
-	// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
-	if n.Typecheck() == 1 {
-		switch n.Op {
-		case ONAME, OTYPE, OLITERAL, OPACK:
-			break
-
-		default:
-			lineno = lno
-			return n
-		}
-	}
-
-	if n.Typecheck() == 2 {
-		// Typechecking loop. Try to print a meaningful message;
-		// otherwise fall back to a stack trace of the typechecking.
-		switch n.Op {
-		// We can already diagnose variables used as types.
-		case ONAME:
-			if top&(ctxExpr|ctxType) == ctxType {
-				yyerror("%v is not a type", n)
-			}
-
-		case OTYPE:
-			// Only report a type cycle if we are expecting a type.
-			// Otherwise let other code report an error.
-			if top&ctxType == ctxType {
-				// A cycle containing only alias types is an error
-				// since it would expand indefinitely when aliases
-				// are substituted.
-				cycle := cycleFor(n)
-				for _, n1 := range cycle {
-					if n1.Name != nil && !n1.Name.Param.Alias() {
-						// Cycle is ok. But if n is an alias type and doesn't
-						// have a type yet, we have a recursive type declaration
-						// with aliases that we can't handle properly yet.
-						// Report an error rather than crashing later.
-						if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
-							lineno = n.Pos
-							Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
-						}
-						lineno = lno
-						return n
-					}
-				}
-				yyerrorl(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
-			}
-
-		case OLITERAL:
-			if top&(ctxExpr|ctxType) == ctxType {
-				yyerror("%v is not a type", n)
-				break
-			}
-			yyerrorl(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n)))
-		}
-
-		if nsavederrors+nerrors == 0 {
-			var trace string
-			for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
-				x := typecheck_tcstack[i]
-				trace += fmt.Sprintf("\n\t%v %v", x.Line(), x)
-			}
-			yyerror("typechecking loop involving %v%s", n, trace)
-		}
-
-		lineno = lno
-		return n
-	}
-
-	n.SetTypecheck(2)
-
-	typecheck_tcstack = append(typecheck_tcstack, n)
-	n = typecheck1(n, top)
-
-	n.SetTypecheck(1)
-
-	last := len(typecheck_tcstack) - 1
-	typecheck_tcstack[last] = nil
-	typecheck_tcstack = typecheck_tcstack[:last]
-
-	lineno = lno
-	return n
-}
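-
-// Editor's note (sketch, not original source): typecheck may rewrite or
-// replace its argument, so the result must always be assigned back:
-//
-//	n.Left = typecheck(n.Left, ctxExpr) // correct: keeps the rewrite
-//	typecheck(n.Left, ctxExpr)          // wrong: any rewrite is lost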
-
-// indexlit implements typechecking of untyped values as
-// array/slice indexes. It is almost equivalent to defaultlit
-// but also accepts untyped numeric values representable as
-// value of type int (see also checkmake for comparison).
-// The result of indexlit MUST be assigned back to n, e.g.
-// 	n.Left = indexlit(n.Left)
-func indexlit(n *Node) *Node {
-	if n != nil && n.Type != nil && n.Type.Etype == TIDEAL {
-		return defaultlit(n, types.Types[TINT])
-	}
-	return n
-}
-
-// The result of typecheck1 MUST be assigned back to n, e.g.
-// 	n.Left = typecheck1(n.Left, top)
-func typecheck1(n *Node, top int) (res *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheck1", n)(&res)
-	}
-
-	switch n.Op {
-	case OLITERAL, ONAME, ONONAME, OTYPE:
-		if n.Sym == nil {
-			break
-		}
-
-		if n.Op == ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
-			yyerror("use of builtin %v not in function call", n.Sym)
-			n.Type = nil
-			return n
-		}
-
-		typecheckdef(n)
-		if n.Op == ONONAME {
-			n.Type = nil
-			return n
-		}
-	}
-
-	ok := 0
-	switch n.Op {
-	// until typecheck is complete, do nothing.
-	default:
-		Dump("typecheck", n)
-
-		Fatalf("typecheck %v", n.Op)
-
-	// names
-	case OLITERAL:
-		ok |= ctxExpr
-
-		if n.Type == nil && n.Val().Ctype() == CTSTR {
-			n.Type = types.UntypedString
-		}
-
-	case ONONAME:
-		ok |= ctxExpr
-
-	case ONAME:
-		if n.Name.Decldepth == 0 {
-			n.Name.Decldepth = decldepth
-		}
-		if n.SubOp() != 0 {
-			ok |= ctxCallee
-			break
-		}
-
-		if top&ctxAssign == 0 {
-			// not a write to the variable
-			if n.isBlank() {
-				yyerror("cannot use _ as value")
-				n.Type = nil
-				return n
-			}
-
-			n.Name.SetUsed(true)
-		}
-
-		ok |= ctxExpr
-
-	case OPACK:
-		yyerror("use of package %v without selector", n.Sym)
-		n.Type = nil
-		return n
-
-	case ODDD:
-		break
-
-	// types (ODEREF is with exprs)
-	case OTYPE:
-		ok |= ctxType
-
-		if n.Type == nil {
-			return n
-		}
-
-	case OTARRAY:
-		ok |= ctxType
-		r := typecheck(n.Right, ctxType)
-		if r.Type == nil {
-			n.Type = nil
-			return n
-		}
-
-		var t *types.Type
-		if n.Left == nil {
-			t = types.NewSlice(r.Type)
-		} else if n.Left.Op == ODDD {
-			if !n.Diag() {
-				n.SetDiag(true)
-				yyerror("use of [...] array outside of array literal")
-			}
-			n.Type = nil
-			return n
-		} else {
-			n.Left = indexlit(typecheck(n.Left, ctxExpr))
-			l := n.Left
-			if consttype(l) != CTINT {
-				switch {
-				case l.Type == nil:
-					// Error already reported elsewhere.
-				case l.Type.IsInteger() && l.Op != OLITERAL:
-					yyerror("non-constant array bound %v", l)
-				default:
-					yyerror("invalid array bound %v", l)
-				}
-				n.Type = nil
-				return n
-			}
-
-			v := l.Val()
-			if doesoverflow(v, types.Types[TINT]) {
-				yyerror("array bound is too large")
-				n.Type = nil
-				return n
-			}
-
-			bound := v.U.(*Mpint).Int64()
-			if bound < 0 {
-				yyerror("array bound must be non-negative")
-				n.Type = nil
-				return n
-			}
-			t = types.NewArray(r.Type, bound)
-		}
-
-		setTypeNode(n, t)
-		n.Left = nil
-		n.Right = nil
-		checkwidth(t)
-
-	case OTMAP:
-		ok |= ctxType
-		n.Left = typecheck(n.Left, ctxType)
-		n.Right = typecheck(n.Right, ctxType)
-		l := n.Left
-		r := n.Right
-		if l.Type == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Type.NotInHeap() {
-			yyerror("incomplete (or unallocatable) map key not allowed")
-		}
-		if r.Type.NotInHeap() {
-			yyerror("incomplete (or unallocatable) map value not allowed")
-		}
-
-		setTypeNode(n, types.NewMap(l.Type, r.Type))
-		mapqueue = append(mapqueue, n) // check map keys when all types are settled
-		n.Left = nil
-		n.Right = nil
-
-	case OTCHAN:
-		ok |= ctxType
-		n.Left = typecheck(n.Left, ctxType)
-		l := n.Left
-		if l.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Type.NotInHeap() {
-			yyerror("chan of incomplete (or unallocatable) type not allowed")
-		}
-
-		setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
-		n.Left = nil
-		n.ResetAux()
-
-	case OTSTRUCT:
-		ok |= ctxType
-		setTypeNode(n, tostruct(n.List.Slice()))
-		n.List.Set(nil)
-
-	case OTINTER:
-		ok |= ctxType
-		setTypeNode(n, tointerface(n.List.Slice()))
-
-	case OTFUNC:
-		ok |= ctxType
-		setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice()))
-		n.Left = nil
-		n.List.Set(nil)
-		n.Rlist.Set(nil)
-
-	// type or expr
-	case ODEREF:
-		n.Left = typecheck(n.Left, ctxExpr|ctxType)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Op == OTYPE {
-			ok |= ctxType
-			setTypeNode(n, types.NewPtr(l.Type))
-			n.Left = nil
-			// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
-			checkwidth(l.Type)
-			break
-		}
-
-		if !t.IsPtr() {
-			if top&(ctxExpr|ctxStmt) != 0 {
-				yyerror("invalid indirect of %L", n.Left)
-				n.Type = nil
-				return n
-			}
-
-			break
-		}
-
-		ok |= ctxExpr
-		n.Type = t.Elem()
-
-	// arithmetic exprs
-	case OASOP,
-		OADD,
-		OAND,
-		OANDAND,
-		OANDNOT,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLT,
-		OLSH,
-		ORSH,
-		OMOD,
-		OMUL,
-		ONE,
-		OOR,
-		OOROR,
-		OSUB,
-		OXOR:
-		var l *Node
-		var op Op
-		var r *Node
-		if n.Op == OASOP {
-			ok |= ctxStmt
-			n.Left = typecheck(n.Left, ctxExpr)
-			n.Right = typecheck(n.Right, ctxExpr)
-			l = n.Left
-			r = n.Right
-			checkassign(n, n.Left)
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			if n.Implicit() && !okforarith[l.Type.Etype] {
-				yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
-				n.Type = nil
-				return n
-			}
-			// TODO(marvin): Fix Node.EType type union.
-			op = n.SubOp()
-		} else {
-			ok |= ctxExpr
-			n.Left = typecheck(n.Left, ctxExpr)
-			n.Right = typecheck(n.Right, ctxExpr)
-			l = n.Left
-			r = n.Right
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			op = n.Op
-		}
-		if op == OLSH || op == ORSH {
-			r = defaultlit(r, types.Types[TUINT])
-			n.Right = r
-			t := r.Type
-			if !t.IsInteger() {
-				yyerror("invalid operation: %v (shift count type %v, must be integer)", n, r.Type)
-				n.Type = nil
-				return n
-			}
-			if t.IsSigned() && !langSupported(1, 13, curpkg()) {
-				yyerrorv("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type)
-				n.Type = nil
-				return n
-			}
-			t = l.Type
-			if t != nil && t.Etype != TIDEAL && !t.IsInteger() {
-				yyerror("invalid operation: %v (shift of type %v)", n, t)
-				n.Type = nil
-				return n
-			}
-
-			// no defaultlit for left
-			// the outer context gives the type
-			n.Type = l.Type
-			if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL {
-				n.Type = types.UntypedInt
-			}
-
-			break
-		}
-
-		// For "x == x && len(s)", it's better to report that "len(s)" (type int)
-		// can't be used with "&&" than to report that "x == x" (type untyped bool)
-		// can't be converted to int (see issue #41500).
-		if n.Op == OANDAND || n.Op == OOROR {
-			if !n.Left.Type.IsBoolean() {
-				yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type))
-				n.Type = nil
-				return n
-			}
-			if !n.Right.Type.IsBoolean() {
-				yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type))
-				n.Type = nil
-				return n
-			}
-		}
-
-		// ideal mixed with non-ideal
-		l, r = defaultlit2(l, r, false)
-
-		n.Left = l
-		n.Right = r
-		if l.Type == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		t := l.Type
-		if t.Etype == TIDEAL {
-			t = r.Type
-		}
-		et := t.Etype
-		if et == TIDEAL {
-			et = TINT
-		}
-		aop := OXXX
-		if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
-			// comparison is okay as long as one side is
-			// assignable to the other.  convert so they have
-			// the same type.
-			//
-			// the only conversion that isn't a no-op is concrete == interface.
-			// in that case, check comparability of the concrete type.
-			// The conversion allocates, so only do it if the concrete type is huge.
-			converted := false
-			if r.Type.Etype != TBLANK {
-				aop, _ = assignop(l.Type, r.Type)
-				if aop != OXXX {
-					if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
-						yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
-						n.Type = nil
-						return n
-					}
-
-					dowidth(l.Type)
-					if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 {
-						l = nod(aop, l, nil)
-						l.Type = r.Type
-						l.SetTypecheck(1)
-						n.Left = l
-					}
-
-					t = r.Type
-					converted = true
-				}
-			}
-
-			if !converted && l.Type.Etype != TBLANK {
-				aop, _ = assignop(r.Type, l.Type)
-				if aop != OXXX {
-					if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
-						yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
-						n.Type = nil
-						return n
-					}
-
-					dowidth(r.Type)
-					if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 {
-						r = nod(aop, r, nil)
-						r.Type = l.Type
-						r.SetTypecheck(1)
-						n.Right = r
-					}
-
-					t = l.Type
-				}
-			}
-
-			et = t.Etype
-		}
-
-		if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
-			l, r = defaultlit2(l, r, true)
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 {
-				yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
-				n.Type = nil
-				return n
-			}
-		}
-
-		if t.Etype == TIDEAL {
-			t = mixUntyped(l.Type, r.Type)
-		}
-		if dt := defaultType(t); !okfor[op][dt.Etype] {
-			yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
-			n.Type = nil
-			return n
-		}
-
-		// okfor allows any array == array, map == map, func == func.
-		// restrict to slice/map/func == nil and nil == slice/map/func.
-		if l.Type.IsArray() && !IsComparable(l.Type) {
-			yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.IsSlice() && !l.isNil() && !r.isNil() {
-			yyerror("invalid operation: %v (slice can only be compared to nil)", n)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.IsMap() && !l.isNil() && !r.isNil() {
-			yyerror("invalid operation: %v (map can only be compared to nil)", n)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.Etype == TFUNC && !l.isNil() && !r.isNil() {
-			yyerror("invalid operation: %v (func can only be compared to nil)", n)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.IsStruct() {
-			if f := IncomparableField(l.Type); f != nil {
-				yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
-				n.Type = nil
-				return n
-			}
-		}
-
-		if iscmp[n.Op] {
-			evconst(n)
-			t = types.UntypedBool
-			if n.Op != OLITERAL {
-				l, r = defaultlit2(l, r, true)
-				n.Left = l
-				n.Right = r
-			}
-		}
-
-		if et == TSTRING && n.Op == OADD {
-			// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
-			n.Op = OADDSTR
-
-			if l.Op == OADDSTR {
-				n.List.Set(l.List.Slice())
-			} else {
-				n.List.Set1(l)
-			}
-			if r.Op == OADDSTR {
-				n.List.AppendNodes(&r.List)
-			} else {
-				n.List.Append(r)
-			}
-			n.Left = nil
-			n.Right = nil
-		}
-
-		if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
-			if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
-				yyerror("division by zero")
-				n.Type = nil
-				return n
-			}
-		}
-
-		n.Type = t
-
-	case OBITNOT, ONEG, ONOT, OPLUS:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !okfor[n.Op][defaultType(t).Etype] {
-			yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t))
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-
-	// exprs
-	case OADDR:
-		ok |= ctxExpr
-
-		n.Left = typecheck(n.Left, ctxExpr)
-		if n.Left.Type == nil {
-			n.Type = nil
-			return n
-		}
-
-		switch n.Left.Op {
-		case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
-			n.Op = OPTRLIT
-
-		default:
-			checklvalue(n.Left, "take the address of")
-			r := outervalue(n.Left)
-			if r.Op == ONAME {
-				if r.Orig != r {
-					Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
-				}
-				r.Name.SetAddrtaken(true)
-				if r.Name.IsClosureVar() && !capturevarscomplete {
-					// Mark the original variable as Addrtaken so that capturevars
-					// knows not to pass it by value.
-					// But if the capturevars phase is complete, don't touch it,
-					// in case l.Name's containing function has not yet been compiled.
-					r.Name.Defn.Name.SetAddrtaken(true)
-				}
-			}
-			n.Left = defaultlit(n.Left, nil)
-			if n.Left.Type == nil {
-				n.Type = nil
-				return n
-			}
-		}
-
-		n.Type = types.NewPtr(n.Left.Type)
-
-	case OCOMPLIT:
-		ok |= ctxExpr
-		n = typecheckcomplit(n)
-		if n.Type == nil {
-			return n
-		}
-
-	case OXDOT, ODOT:
-		if n.Op == OXDOT {
-			n = adddot(n)
-			n.Op = ODOT
-			if n.Left == nil {
-				n.Type = nil
-				return n
-			}
-		}
-
-		n.Left = typecheck(n.Left, ctxExpr|ctxType)
-
-		n.Left = defaultlit(n.Left, nil)
-
-		t := n.Left.Type
-		if t == nil {
-			adderrorname(n)
-			n.Type = nil
-			return n
-		}
-
-		s := n.Sym
-
-		if n.Left.Op == OTYPE {
-			n = typecheckMethodExpr(n)
-			if n.Type == nil {
-				return n
-			}
-			ok = ctxExpr
-			break
-		}
-
-		if t.IsPtr() && !t.Elem().IsInterface() {
-			t = t.Elem()
-			if t == nil {
-				n.Type = nil
-				return n
-			}
-			n.Op = ODOTPTR
-			checkwidth(t)
-		}
-
-		if n.Sym.IsBlank() {
-			yyerror("cannot refer to blank field or method")
-			n.Type = nil
-			return n
-		}
-
-		if lookdot(n, t, 0) == nil {
-			// Legitimate field or method lookup failed, try to explain the error
-			switch {
-			case t.IsEmptyInterface():
-				yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
-
-			case t.IsPtr() && t.Elem().IsInterface():
-				// Pointer to interface is almost always a mistake.
-				yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
-
-			case lookdot(n, t, 1) != nil:
-				// Field or method matches by name, but it is not exported.
-				yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym)
-
-			default:
-				if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
-					yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym)
-				} else {
-					yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym)
-				}
-			}
-			n.Type = nil
-			return n
-		}
-
-		switch n.Op {
-		case ODOTINTER, ODOTMETH:
-			if top&ctxCallee != 0 {
-				ok |= ctxCallee
-			} else {
-				typecheckpartialcall(n, s)
-				ok |= ctxExpr
-			}
-
-		default:
-			ok |= ctxExpr
-		}
-
-	case ODOTTYPE:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsInterface() {
-			yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if n.Right != nil {
-			n.Right = typecheck(n.Right, ctxType)
-			n.Type = n.Right.Type
-			n.Right = nil
-			if n.Type == nil {
-				return n
-			}
-		}
-
-		if n.Type != nil && !n.Type.IsInterface() {
-			var missing, have *types.Field
-			var ptr int
-			if !implements(n.Type, t, &missing, &have, &ptr) {
-				if have != nil && have.Sym == missing.Sym {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
-						"\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-				} else if ptr != 0 {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
-				} else if have != nil {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
-						"\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-				} else {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
-				}
-				n.Type = nil
-				return n
-			}
-		}
-
-	case OINDEX:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		n.Left = implicitstar(n.Left)
-		l := n.Left
-		n.Right = typecheck(n.Right, ctxExpr)
-		r := n.Right
-		t := l.Type
-		if t == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		switch t.Etype {
-		default:
-			yyerror("invalid operation: %v (type %v does not support indexing)", n, t)
-			n.Type = nil
-			return n
-
-		case TSTRING, TARRAY, TSLICE:
-			n.Right = indexlit(n.Right)
-			if t.IsString() {
-				n.Type = types.Bytetype
-			} else {
-				n.Type = t.Elem()
-			}
-			why := "string"
-			if t.IsArray() {
-				why = "array"
-			} else if t.IsSlice() {
-				why = "slice"
-			}
-
-			if n.Right.Type != nil && !n.Right.Type.IsInteger() {
-				yyerror("non-integer %s index %v", why, n.Right)
-				break
-			}
-
-			if !n.Bounded() && Isconst(n.Right, CTINT) {
-				x := n.Right.Int64Val()
-				if x < 0 {
-					yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
-				} else if t.IsArray() && x >= t.NumElem() {
-					yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
-				} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) {
-					yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
-				} else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-					yyerror("invalid %s index %v (index too large)", why, n.Right)
-				}
-			}
-
-		case TMAP:
-			n.Right = assignconv(n.Right, t.Key(), "map index")
-			n.Type = t.Elem()
-			n.Op = OINDEXMAP
-			n.ResetAux()
-		}
-
-	case ORECV:
-		ok |= ctxStmt | ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsChan() {
-			yyerror("invalid operation: %v (receive from non-chan type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if !t.ChanDir().CanRecv() {
-			yyerror("invalid operation: %v (receive from send-only type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t.Elem()
-
-	case OSEND:
-		ok |= ctxStmt
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Right = typecheck(n.Right, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		t := n.Left.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsChan() {
-			yyerror("invalid operation: %v (send to non-chan type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if !t.ChanDir().CanSend() {
-			yyerror("invalid operation: %v (send to receive-only type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		n.Right = assignconv(n.Right, t.Elem(), "send")
-		if n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Type = nil
-
-	case OSLICEHEADER:
-		// Errors here are Fatalf instead of yyerror because only the compiler
-		// can construct an OSLICEHEADER node.
-		// Components used in OSLICEHEADER that are supplied by parsed source code
-		// have already been typechecked in e.g. OMAKESLICE earlier.
-		ok |= ctxExpr
-
-		t := n.Type
-		if t == nil {
-			Fatalf("no type specified for OSLICEHEADER")
-		}
-
-		if !t.IsSlice() {
-			Fatalf("invalid type %v for OSLICEHEADER", n.Type)
-		}
-
-		if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() {
-			Fatalf("need unsafe.Pointer for OSLICEHEADER")
-		}
-
-		if x := n.List.Len(); x != 2 {
-			Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
-		}
-
-		n.Left = typecheck(n.Left, ctxExpr)
-		l := typecheck(n.List.First(), ctxExpr)
-		c := typecheck(n.List.Second(), ctxExpr)
-		l = defaultlit(l, types.Types[TINT])
-		c = defaultlit(c, types.Types[TINT])
-
-		if Isconst(l, CTINT) && l.Int64Val() < 0 {
-			Fatalf("len for OSLICEHEADER must be non-negative")
-		}
-
-		if Isconst(c, CTINT) && c.Int64Val() < 0 {
-			Fatalf("cap for OSLICEHEADER must be non-negative")
-		}
-
-		if Isconst(l, CTINT) && Isconst(c, CTINT) && l.Val().U.(*Mpint).Cmp(c.Val().U.(*Mpint)) > 0 {
-			Fatalf("len larger than cap for OSLICEHEADER")
-		}
-
-		n.List.SetFirst(l)
-		n.List.SetSecond(c)
-
-	case OMAKESLICECOPY:
-		// Errors here are Fatalf instead of yyerror because only the compiler
-		// can construct an OMAKESLICECOPY node.
-		// Components used in OMAKESLICECOPY that are supplied by parsed source code
-		// have already been typechecked in OMAKE and OCOPY earlier.
-		ok |= ctxExpr
-
-		t := n.Type
-
-		if t == nil {
-			Fatalf("no type specified for OMAKESLICECOPY")
-		}
-
-		if !t.IsSlice() {
-			Fatalf("invalid type %v for OMAKESLICECOPY", n.Type)
-		}
-
-		if n.Left == nil {
-			Fatalf("missing len argument for OMAKESLICECOPY")
-		}
-
-		if n.Right == nil {
-			Fatalf("missing slice argument to copy for OMAKESLICECOPY")
-		}
-
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Right = typecheck(n.Right, ctxExpr)
-
-		n.Left = defaultlit(n.Left, types.Types[TINT])
-
-		if !n.Left.Type.IsInteger() && n.Type.Etype != TIDEAL {
-			yyerror("non-integer len argument in OMAKESLICECOPY")
-		}
-
-		if Isconst(n.Left, CTINT) {
-			if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-				Fatalf("len for OMAKESLICECOPY too large")
-			}
-			if n.Left.Int64Val() < 0 {
-				Fatalf("len for OMAKESLICECOPY must be non-negative")
-			}
-		}
-
-	case OSLICE, OSLICE3:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		low, high, max := n.SliceBounds()
-		hasmax := n.Op.IsSlice3()
-		low = typecheck(low, ctxExpr)
-		high = typecheck(high, ctxExpr)
-		max = typecheck(max, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		low = indexlit(low)
-		high = indexlit(high)
-		max = indexlit(max)
-		n.SetSliceBounds(low, high, max)
-		l := n.Left
-		if l.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Type.IsArray() {
-			if !islvalue(n.Left) {
-				yyerror("invalid operation %v (slice of unaddressable value)", n)
-				n.Type = nil
-				return n
-			}
-
-			n.Left = nod(OADDR, n.Left, nil)
-			n.Left.SetImplicit(true)
-			n.Left = typecheck(n.Left, ctxExpr)
-			l = n.Left
-		}
-		t := l.Type
-		var tp *types.Type
-		if t.IsString() {
-			if hasmax {
-				yyerror("invalid operation %v (3-index slice of string)", n)
-				n.Type = nil
-				return n
-			}
-			n.Type = t
-			n.Op = OSLICESTR
-		} else if t.IsPtr() && t.Elem().IsArray() {
-			tp = t.Elem()
-			n.Type = types.NewSlice(tp.Elem())
-			dowidth(n.Type)
-			if hasmax {
-				n.Op = OSLICE3ARR
-			} else {
-				n.Op = OSLICEARR
-			}
-		} else if t.IsSlice() {
-			n.Type = t
-		} else {
-			yyerror("cannot slice %v (type %v)", l, t)
-			n.Type = nil
-			return n
-		}
-
-		if low != nil && !checksliceindex(l, low, tp) {
-			n.Type = nil
-			return n
-		}
-		if high != nil && !checksliceindex(l, high, tp) {
-			n.Type = nil
-			return n
-		}
-		if max != nil && !checksliceindex(l, max, tp) {
-			n.Type = nil
-			return n
-		}
-		if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
-			n.Type = nil
-			return n
-		}
-
-	// call and call-like
-	case OCALL:
-		typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
-		n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee)
-		if n.Left.Diag() {
-			n.SetDiag(true)
-		}
-
-		l := n.Left
-
-		if l.Op == ONAME && l.SubOp() != 0 {
-			if n.IsDDD() && l.SubOp() != OAPPEND {
-				yyerror("invalid use of ... with builtin %v", l)
-			}
-
-			// builtin: OLEN, OCAP, etc.
-			n.Op = l.SubOp()
-			n.Left = n.Right
-			n.Right = nil
-			n = typecheck1(n, top)
-			return n
-		}
-
-		n.Left = defaultlit(n.Left, nil)
-		l = n.Left
-		if l.Op == OTYPE {
-			if n.IsDDD() {
-				if !l.Type.Broke() {
-					yyerror("invalid use of ... in type conversion to %v", l.Type)
-				}
-				n.SetDiag(true)
-			}
-
-			// pick off before type-checking arguments
-			ok |= ctxExpr
-
-			// turn CALL(type, arg) into CONV(arg) w/ type
-			n.Left = nil
-
-			n.Op = OCONV
-			n.Type = l.Type
-			if !onearg(n, "conversion to %v", l.Type) {
-				n.Type = nil
-				return n
-			}
-			n = typecheck1(n, top)
-			return n
-		}
-
-		typecheckargs(n)
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		checkwidth(t)
-
-		switch l.Op {
-		case ODOTINTER:
-			n.Op = OCALLINTER
-
-		case ODOTMETH:
-			n.Op = OCALLMETH
-
-			// typecheckaste was used here but there wasn't enough
-			// information further down the call chain to know if we
-			// were testing a method receiver for unexported fields.
-			// It isn't necessary, so just do a sanity check.
-			tp := t.Recv().Type
-
-			if l.Left == nil || !types.Identical(l.Left.Type, tp) {
-				Fatalf("method receiver")
-			}
-
-		default:
-			n.Op = OCALLFUNC
-			if t.Etype != TFUNC {
-				name := l.String()
-				if isBuiltinFuncName(name) && l.Name.Defn != nil {
-					// be more specific when the function
-					// name matches a predeclared function
-					yyerror("cannot call non-function %s (type %v), declared at %s",
-						name, t, linestr(l.Name.Defn.Pos))
-				} else {
-					yyerror("cannot call non-function %s (type %v)", name, t)
-				}
-				n.Type = nil
-				return n
-			}
-		}
-
-		typecheckaste(OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
-		ok |= ctxStmt
-		if t.NumResults() == 0 {
-			break
-		}
-		ok |= ctxExpr
-		if t.NumResults() == 1 {
-			n.Type = l.Type.Results().Field(0).Type
-
-			if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" {
-				// Emit code for runtime.getg() directly instead of calling function.
-				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
-				// so that the ordering pass can make sure to preserve the semantics of the original code
-				// (in particular, the exact time of the function call) by introducing temporaries.
-				// In this case, we know getg() always returns the same result within a given function
-				// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
-				n.Op = OGETG
-			}
-
-			break
-		}
-
-		// multiple return
-		if top&(ctxMultiOK|ctxStmt) == 0 {
-			yyerror("multiple-value %v() in single-value context", l)
-			break
-		}
-
-		n.Type = l.Type.Results()
-
-	case OALIGNOF, OOFFSETOF, OSIZEOF:
-		ok |= ctxExpr
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-		n.Type = types.Types[TUINTPTR]
-
-	case OCAP, OLEN:
-		ok |= ctxExpr
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		n.Left = implicitstar(n.Left)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-
-		var ok bool
-		if n.Op == OLEN {
-			ok = okforlen[t.Etype]
-		} else {
-			ok = okforcap[t.Etype]
-		}
-		if !ok {
-			yyerror("invalid argument %L for %v", l, n.Op)
-			n.Type = nil
-			return n
-		}
-
-		n.Type = types.Types[TINT]
-
-	case OREAL, OIMAG:
-		ok |= ctxExpr
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-
-		n.Left = typecheck(n.Left, ctxExpr)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-
-		// Determine result type.
-		switch t.Etype {
-		case TIDEAL:
-			n.Type = types.UntypedFloat
-		case TCOMPLEX64:
-			n.Type = types.Types[TFLOAT32]
-		case TCOMPLEX128:
-			n.Type = types.Types[TFLOAT64]
-		default:
-			yyerror("invalid argument %L for %v", l, n.Op)
-			n.Type = nil
-			return n
-		}
-
-	case OCOMPLEX:
-		ok |= ctxExpr
-		typecheckargs(n)
-		if !twoarg(n) {
-			n.Type = nil
-			return n
-		}
-		l := n.Left
-		r := n.Right
-		if l.Type == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		l, r = defaultlit2(l, r, false)
-		if l.Type == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Left = l
-		n.Right = r
-
-		if !types.Identical(l.Type, r.Type) {
-			yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
-			n.Type = nil
-			return n
-		}
-
-		var t *types.Type
-		switch l.Type.Etype {
-		default:
-			yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
-			n.Type = nil
-			return n
-
-		case TIDEAL:
-			t = types.UntypedComplex
-
-		case TFLOAT32:
-			t = types.Types[TCOMPLEX64]
-
-		case TFLOAT64:
-			t = types.Types[TCOMPLEX128]
-		}
-		n.Type = t
-
-	case OCLOSE:
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsChan() {
-			yyerror("invalid operation: %v (non-chan type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if !t.ChanDir().CanSend() {
-			yyerror("invalid operation: %v (cannot close receive-only channel)", n)
-			n.Type = nil
-			return n
-		}
-
-		ok |= ctxStmt
-
-	case ODELETE:
-		ok |= ctxStmt
-		typecheckargs(n)
-		args := n.List
-		if args.Len() == 0 {
-			yyerror("missing arguments to delete")
-			n.Type = nil
-			return n
-		}
-
-		if args.Len() == 1 {
-			yyerror("missing second (key) argument to delete")
-			n.Type = nil
-			return n
-		}
-
-		if args.Len() != 2 {
-			yyerror("too many arguments to delete")
-			n.Type = nil
-			return n
-		}
-
-		l := args.First()
-		r := args.Second()
-		if l.Type != nil && !l.Type.IsMap() {
-			yyerror("first argument to delete must be map; have %L", l.Type)
-			n.Type = nil
-			return n
-		}
-
-		args.SetSecond(assignconv(r, l.Type.Key(), "delete"))
-
-	case OAPPEND:
-		ok |= ctxExpr
-		typecheckargs(n)
-		args := n.List
-		if args.Len() == 0 {
-			yyerror("missing arguments to append")
-			n.Type = nil
-			return n
-		}
-
-		t := args.First().Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-		if !t.IsSlice() {
-			if Isconst(args.First(), CTNIL) {
-				yyerror("first argument to append must be typed slice; have untyped nil")
-				n.Type = nil
-				return n
-			}
-
-			yyerror("first argument to append must be slice; have %L", t)
-			n.Type = nil
-			return n
-		}
-
-		if n.IsDDD() {
-			if args.Len() == 1 {
-				yyerror("cannot use ... on first argument to append")
-				n.Type = nil
-				return n
-			}
-
-			if args.Len() != 2 {
-				yyerror("too many arguments to append")
-				n.Type = nil
-				return n
-			}
-
-			if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() {
-				args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING]))
-				break
-			}
-
-			args.SetSecond(assignconv(args.Second(), t.Orig, "append"))
-			break
-		}
-
-		as := args.Slice()[1:]
-		for i, n := range as {
-			if n.Type == nil {
-				continue
-			}
-			as[i] = assignconv(n, t.Elem(), "append")
-			checkwidth(as[i].Type) // ensure width is calculated for backend
-		}
-
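For reference, the []byte/string special case above in action (a hedged sketch; appendSketch is a made-up name):

	func appendSketch() {
		b := []byte("go")
		b = append(b, "pher"...) // ok: TUINT8 element type with a string ... argument
		// _ = append(nil, 1) // error: first argument to append must be typed slice; have untyped nil
		_ = b
	}
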
-	case OCOPY:
-		ok |= ctxStmt | ctxExpr
-		typecheckargs(n)
-		if !twoarg(n) {
-			n.Type = nil
-			return n
-		}
-		n.Type = types.Types[TINT]
-		if n.Left.Type == nil || n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Left = defaultlit(n.Left, nil)
-		n.Right = defaultlit(n.Right, nil)
-		if n.Left.Type == nil || n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-
-		// copy([]byte, string)
-		if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
-			if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
-				break
-			}
-			yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
-			n.Type = nil
-			return n
-		}
-
-		if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() {
-			if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() {
-				yyerror("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type)
-			} else if !n.Left.Type.IsSlice() {
-				yyerror("first argument to copy should be slice; have %L", n.Left.Type)
-			} else {
-				yyerror("second argument to copy should be slice or string; have %L", n.Right.Type)
-			}
-			n.Type = nil
-			return n
-		}
-
-		if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
-			yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
-			n.Type = nil
-			return n
-		}
-
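The OCOPY checks above, illustrated (copySketch is a made-up name):

	func copySketch() {
		dst := make([]byte, 3)
		n := copy(dst, "abc") // ok: the copy([]byte, string) special case; n has type int
		// copy(dst, []int{1}) // error: arguments to copy have different element types: []byte and []int
		_ = n
	}
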
-	case OCONV:
-		ok |= ctxExpr
-		checkwidth(n.Type) // ensure width is calculated for backend
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = convlit1(n.Left, n.Type, true, nil)
-		t := n.Left.Type
-		if t == nil || n.Type == nil {
-			n.Type = nil
-			return n
-		}
-		var why string
-		n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)
-		if n.Op == OXXX {
-			if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
-				yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
-				n.SetDiag(true)
-			}
-			n.Op = OCONV
-			n.Type = nil
-			return n
-		}
-
-		switch n.Op {
-		case OCONVNOP:
-			if t.Etype == n.Type.Etype {
-				switch t.Etype {
-				case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
-					// Floating point casts imply rounding and
-					// so the conversion must be kept.
-					n.Op = OCONV
-				}
-			}
-
-		// Do not convert to a []byte literal. See CL 125796.
-		// Generated code and compiler memory footprint are better without it.
-		case OSTR2BYTES:
-			break
-
-		case OSTR2RUNES:
-			if n.Left.Op == OLITERAL {
-				n = stringtoruneslit(n)
-			}
-		}
-
-	case OMAKE:
-		ok |= ctxExpr
-		args := n.List.Slice()
-		if len(args) == 0 {
-			yyerror("missing argument to make")
-			n.Type = nil
-			return n
-		}
-
-		n.List.Set(nil)
-		l := args[0]
-		l = typecheck(l, ctxType)
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-
-		i := 1
-		switch t.Etype {
-		default:
-			yyerror("cannot make type %v", t)
-			n.Type = nil
-			return n
-
-		case TSLICE:
-			if i >= len(args) {
-				yyerror("missing len argument to make(%v)", t)
-				n.Type = nil
-				return n
-			}
-
-			l = args[i]
-			i++
-			l = typecheck(l, ctxExpr)
-			var r *Node
-			if i < len(args) {
-				r = args[i]
-				i++
-				r = typecheck(r, ctxExpr)
-			}
-
-			if l.Type == nil || (r != nil && r.Type == nil) {
-				n.Type = nil
-				return n
-			}
-			if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
-				n.Type = nil
-				return n
-			}
-			if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 {
-				yyerror("len larger than cap in make(%v)", t)
-				n.Type = nil
-				return n
-			}
-
-			n.Left = l
-			n.Right = r
-			n.Op = OMAKESLICE
-
-		case TMAP:
-			if i < len(args) {
-				l = args[i]
-				i++
-				l = typecheck(l, ctxExpr)
-				l = defaultlit(l, types.Types[TINT])
-				if l.Type == nil {
-					n.Type = nil
-					return n
-				}
-				if !checkmake(t, "size", &l) {
-					n.Type = nil
-					return n
-				}
-				n.Left = l
-			} else {
-				n.Left = nodintconst(0)
-			}
-			n.Op = OMAKEMAP
-
-		case TCHAN:
-			l = nil
-			if i < len(args) {
-				l = args[i]
-				i++
-				l = typecheck(l, ctxExpr)
-				l = defaultlit(l, types.Types[TINT])
-				if l.Type == nil {
-					n.Type = nil
-					return n
-				}
-				if !checkmake(t, "buffer", &l) {
-					n.Type = nil
-					return n
-				}
-				n.Left = l
-			} else {
-				n.Left = nodintconst(0)
-			}
-			n.Op = OMAKECHAN
-		}
-
-		if i < len(args) {
-			yyerror("too many arguments to make(%v)", t)
-			n.Op = OMAKE
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-
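Illustrative examples of the three OMAKE shapes handled above (not from this CL; makeSketch is a made-up name):

	func makeSketch() {
		s := make([]int, 2, 4)    // becomes OMAKESLICE
		m := make(map[string]int) // becomes OMAKEMAP with an implicit size of 0
		c := make(chan int, 8)    // becomes OMAKECHAN
		// _ = make([]int, 4, 2) // error: len larger than cap in make([]int)
		_, _, _ = s, m, c
	}
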
-	case ONEW:
-		ok |= ctxExpr
-		args := n.List
-		if args.Len() == 0 {
-			yyerror("missing argument to new")
-			n.Type = nil
-			return n
-		}
-
-		l := args.First()
-		l = typecheck(l, ctxType)
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if args.Len() > 1 {
-			yyerror("too many arguments to new(%v)", t)
-			n.Type = nil
-			return n
-		}
-
-		n.Left = l
-		n.Type = types.NewPtr(t)
-
-	case OPRINT, OPRINTN:
-		ok |= ctxStmt
-		typecheckargs(n)
-		ls := n.List.Slice()
-		for i1, n1 := range ls {
-			// Special case for print: int constant is int64, not int.
-			if Isconst(n1, CTINT) {
-				ls[i1] = defaultlit(ls[i1], types.Types[TINT64])
-			} else {
-				ls[i1] = defaultlit(ls[i1], nil)
-			}
-		}
-
-	case OPANIC:
-		ok |= ctxStmt
-		if !onearg(n, "panic") {
-			n.Type = nil
-			return n
-		}
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, types.Types[TINTER])
-		if n.Left.Type == nil {
-			n.Type = nil
-			return n
-		}
-
-	case ORECOVER:
-		ok |= ctxExpr | ctxStmt
-		if n.List.Len() != 0 {
-			yyerror("too many arguments to recover")
-			n.Type = nil
-			return n
-		}
-
-		n.Type = types.Types[TINTER]
-
-	case OCLOSURE:
-		ok |= ctxExpr
-		typecheckclosure(n, top)
-		if n.Type == nil {
-			return n
-		}
-
-	case OITAB:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		t := n.Left.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsInterface() {
-			Fatalf("OITAB of %v", t)
-		}
-		n.Type = types.NewPtr(types.Types[TUINTPTR])
-
-	case OIDATA:
-		// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
-		// usually by just having checked the OITAB.
-		Fatalf("cannot typecheck interface data %v", n)
-
-	case OSPTR:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		t := n.Left.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsSlice() && !t.IsString() {
-			Fatalf("OSPTR of %v", t)
-		}
-		if t.IsString() {
-			n.Type = types.NewPtr(types.Types[TUINT8])
-		} else {
-			n.Type = types.NewPtr(t.Elem())
-		}
-
-	case OCLOSUREVAR:
-		ok |= ctxExpr
-
-	case OCFUNC:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Type = types.Types[TUINTPTR]
-
-	case OCONVNOP:
-		ok |= ctxExpr
-		n.Left = typecheck(n.Left, ctxExpr)
-
-	// statements
-	case OAS:
-		ok |= ctxStmt
-
-		typecheckas(n)
-
-		// Code that creates temps does not bother to set defn, so do it here.
-		if n.Left.Op == ONAME && n.Left.IsAutoTmp() {
-			n.Left.Name.Defn = n
-		}
-
-	case OAS2:
-		ok |= ctxStmt
-		typecheckas2(n)
-
-	case OBREAK,
-		OCONTINUE,
-		ODCL,
-		OEMPTY,
-		OGOTO,
-		OFALL,
-		OVARKILL,
-		OVARLIVE:
-		ok |= ctxStmt
-
-	case OLABEL:
-		ok |= ctxStmt
-		decldepth++
-		if n.Sym.IsBlank() {
-			// Empty identifier is valid but useless.
-			// Eliminate now to simplify life later.
-			// See issues 7538, 11589, 11593.
-			n.Op = OEMPTY
-			n.Left = nil
-		}
-
-	case ODEFER:
-		ok |= ctxStmt
-		n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
-		if !n.Left.Diag() {
-			checkdefergo(n)
-		}
-
-	case OGO:
-		ok |= ctxStmt
-		n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
-		checkdefergo(n)
-
-	case OFOR, OFORUNTIL:
-		ok |= ctxStmt
-		typecheckslice(n.Ninit.Slice(), ctxStmt)
-		decldepth++
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		if n.Left != nil {
-			t := n.Left.Type
-			if t != nil && !t.IsBoolean() {
-				yyerror("non-bool %L used as for condition", n.Left)
-			}
-		}
-		n.Right = typecheck(n.Right, ctxStmt)
-		if n.Op == OFORUNTIL {
-			typecheckslice(n.List.Slice(), ctxStmt)
-		}
-		typecheckslice(n.Nbody.Slice(), ctxStmt)
-		decldepth--
-
-	case OIF:
-		ok |= ctxStmt
-		typecheckslice(n.Ninit.Slice(), ctxStmt)
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		if n.Left != nil {
-			t := n.Left.Type
-			if t != nil && !t.IsBoolean() {
-				yyerror("non-bool %L used as if condition", n.Left)
-			}
-		}
-		typecheckslice(n.Nbody.Slice(), ctxStmt)
-		typecheckslice(n.Rlist.Slice(), ctxStmt)
-
-	case ORETURN:
-		ok |= ctxStmt
-		typecheckargs(n)
-		if Curfn == nil {
-			yyerror("return outside function")
-			n.Type = nil
-			return n
-		}
-
-		if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 {
-			break
-		}
-		typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
-
-	case ORETJMP:
-		ok |= ctxStmt
-
-	case OSELECT:
-		ok |= ctxStmt
-		typecheckselect(n)
-
-	case OSWITCH:
-		ok |= ctxStmt
-		typecheckswitch(n)
-
-	case ORANGE:
-		ok |= ctxStmt
-		typecheckrange(n)
-
-	case OTYPESW:
-		yyerror("use of .(type) outside type switch")
-		n.Type = nil
-		return n
-
-	case ODCLFUNC:
-		ok |= ctxStmt
-		typecheckfunc(n)
-
-	case ODCLCONST:
-		ok |= ctxStmt
-		n.Left = typecheck(n.Left, ctxExpr)
-
-	case ODCLTYPE:
-		ok |= ctxStmt
-		n.Left = typecheck(n.Left, ctxType)
-		checkwidth(n.Left.Type)
-	}
-
-	t := n.Type
-	if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE {
-		switch t.Etype {
-		case TFUNC, // might have TANY; wait until it's called
-			TANY, TFORW, TIDEAL, TNIL, TBLANK:
-			break
-
-		default:
-			checkwidth(t)
-		}
-	}
-
-	evconst(n)
-	if n.Op == OTYPE && top&ctxType == 0 {
-		if !n.Type.Broke() {
-			yyerror("type %v is not an expression", n.Type)
-		}
-		n.Type = nil
-		return n
-	}
-
-	if top&(ctxExpr|ctxType) == ctxType && n.Op != OTYPE {
-		yyerror("%v is not a type", n)
-		n.Type = nil
-		return n
-	}
-
-	// TODO(rsc): simplify
-	if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 {
-		yyerror("%v used as value", n)
-		n.Type = nil
-		return n
-	}
-
-	if (top&ctxStmt != 0) && top&(ctxCallee|ctxExpr|ctxType) == 0 && ok&ctxStmt == 0 {
-		if !n.Diag() {
-			yyerror("%v evaluated but not used", n)
-			n.SetDiag(true)
-		}
-
-		n.Type = nil
-		return n
-	}
-
-	return n
-}
-
-func typecheckargs(n *Node) {
-	if n.List.Len() != 1 || n.IsDDD() {
-		typecheckslice(n.List.Slice(), ctxExpr)
-		return
-	}
-
-	typecheckslice(n.List.Slice(), ctxExpr|ctxMultiOK)
-	t := n.List.First().Type
-	if t == nil || !t.IsFuncArgStruct() {
-		return
-	}
-
-	// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
-
-	// Save n as n.Orig for fmt.go.
-	if n.Orig == n {
-		n.Orig = n.sepcopy()
-	}
-
-	as := nod(OAS2, nil, nil)
-	as.Rlist.AppendNodes(&n.List)
-
-	// If we're outside of function context, then this call will
-	// be executed during the generated init function. However,
-	// init.go hasn't yet created it. Instead, associate the
-	// temporary variables with dummyInitFn for now, and init.go
-	// will reassociate them later when it's appropriate.
-	static := Curfn == nil
-	if static {
-		Curfn = dummyInitFn
-	}
-	for _, f := range t.FieldSlice() {
-		t := temp(f.Type)
-		as.Ninit.Append(nod(ODCL, t, nil))
-		as.List.Append(t)
-		n.List.Append(t)
-	}
-	if static {
-		Curfn = nil
-	}
-
-	as = typecheck(as, ctxStmt)
-	n.Ninit.Append(as)
-}
-
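The f(g()) rewrite above, spelled out on a hypothetical pair of functions (f, g, t1, and t2 are invented names):

	// Input:
	//	f(g())          // where g returns (int, string)
	// After typecheckargs, roughly:
	//	var t1 int
	//	var t2 string
	//	t1, t2 = g()
	//	f(t1, t2)
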
-func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
-	t := r.Type
-	if t == nil {
-		return false
-	}
-	if !t.IsInteger() {
-		yyerror("invalid slice index %v (type %v)", r, t)
-		return false
-	}
-
-	if r.Op == OLITERAL {
-		if r.Int64Val() < 0 {
-			yyerror("invalid slice index %v (index must be non-negative)", r)
-			return false
-		} else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() {
-			yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
-			return false
-		} else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) {
-			yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
-			return false
-		} else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-			yyerror("invalid slice index %v (index too large)", r)
-			return false
-		}
-	}
-
-	return true
-}
-
-func checksliceconst(lo *Node, hi *Node) bool {
-	if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 {
-		yyerror("invalid slice index: %v > %v", lo, hi)
-		return false
-	}
-
-	return true
-}
-
-func checkdefergo(n *Node) {
-	what := "defer"
-	if n.Op == OGO {
-		what = "go"
-	}
-
-	switch n.Left.Op {
-	// ok
-	case OCALLINTER,
-		OCALLMETH,
-		OCALLFUNC,
-		OCLOSE,
-		OCOPY,
-		ODELETE,
-		OPANIC,
-		OPRINT,
-		OPRINTN,
-		ORECOVER:
-		return
-
-	case OAPPEND,
-		OCAP,
-		OCOMPLEX,
-		OIMAG,
-		OLEN,
-		OMAKE,
-		OMAKESLICE,
-		OMAKECHAN,
-		OMAKEMAP,
-		ONEW,
-		OREAL,
-		OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
-		if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
-			break
-		}
-		yyerrorl(n.Pos, "%s discards result of %v", what, n.Left)
-		return
-	}
-
-	// Type is broken or missing, most likely a method call on a broken type.
-	// We will warn about the broken type elsewhere; no need to emit a
-	// potentially confusing error here.
-	if n.Left.Type == nil || n.Left.Type.Broke() {
-		return
-	}
-
-	if !n.Diag() {
-		// The syntax made sure it was a call, so this must be
-		// a conversion.
-		n.SetDiag(true)
-		yyerrorl(n.Pos, "%s requires function call, not conversion", what)
-	}
-}
-
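Examples of the checkdefergo diagnostics above (a sketch; deferSketch is a made-up name):

	func deferSketch() {
		var s []int
		// defer len(s) // error: defer discards result of len(s)
		// go int(0)    // error: go requires function call, not conversion
		defer copy(s, s) // ok: OCOPY is in the allowed list
	}
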
-// The result of implicitstar MUST be assigned back to n, e.g.
-// 	n.Left = implicitstar(n.Left)
-func implicitstar(n *Node) *Node {
-	// insert implicit * if needed for fixed array
-	t := n.Type
-	if t == nil || !t.IsPtr() {
-		return n
-	}
-	t = t.Elem()
-	if t == nil {
-		return n
-	}
-	if !t.IsArray() {
-		return n
-	}
-	n = nod(ODEREF, n, nil)
-	n.SetImplicit(true)
-	n = typecheck(n, ctxExpr)
-	return n
-}
-
-func onearg(n *Node, f string, args ...interface{}) bool {
-	if n.Left != nil {
-		return true
-	}
-	if n.List.Len() == 0 {
-		p := fmt.Sprintf(f, args...)
-		yyerror("missing argument to %s: %v", p, n)
-		return false
-	}
-
-	if n.List.Len() > 1 {
-		p := fmt.Sprintf(f, args...)
-		yyerror("too many arguments to %s: %v", p, n)
-		n.Left = n.List.First()
-		n.List.Set(nil)
-		return false
-	}
-
-	n.Left = n.List.First()
-	n.List.Set(nil)
-	return true
-}
-
-func twoarg(n *Node) bool {
-	if n.Left != nil {
-		return true
-	}
-	if n.List.Len() != 2 {
-		if n.List.Len() < 2 {
-			yyerror("not enough arguments in call to %v", n)
-		} else {
-			yyerror("too many arguments in call to %v", n)
-		}
-		return false
-	}
-	n.Left = n.List.First()
-	n.Right = n.List.Second()
-	n.List.Set(nil)
-	return true
-}
-
-func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
-	var r *types.Field
-	for _, f := range fs.Slice() {
-		if dostrcmp != 0 && f.Sym.Name == s.Name {
-			return f
-		}
-		if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
-			return f
-		}
-		if f.Sym != s {
-			continue
-		}
-		if r != nil {
-			if errnode != nil {
-				yyerror("ambiguous selector %v", errnode)
-			} else if t.IsPtr() {
-				yyerror("ambiguous selector (%v).%v", t, s)
-			} else {
-				yyerror("ambiguous selector %v.%v", t, s)
-			}
-			break
-		}
-
-		r = f
-	}
-
-	return r
-}
-
-// typecheckMethodExpr checks selector expressions (ODOT) where the
-// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *Node) (res *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckMethodExpr", n)(&res)
-	}
-
-	t := n.Left.Type
-
-	// Compute the method set for t.
-	var ms *types.Fields
-	if t.IsInterface() {
-		ms = t.Fields()
-	} else {
-		mt := methtype(t)
-		if mt == nil {
-			yyerror("%v undefined (type %v has no method %v)", n, t, n.Sym)
-			n.Type = nil
-			return n
-		}
-		expandmeth(mt)
-		ms = mt.AllMethods()
-
-		// The method expression T.m requires a wrapper when T
-		// is different from m's declared receiver type. We
-		// normally generate these wrappers while writing out
-		// runtime type descriptors, which is always done for
-		// types declared at package scope. However, we need
-		// to make sure to generate wrappers for anonymous
-		// receiver types too.
-		if mt.Sym == nil {
-			addsignat(t)
-		}
-	}
-
-	s := n.Sym
-	m := lookdot1(n, s, t, ms, 0)
-	if m == nil {
-		if lookdot1(n, s, t, ms, 1) != nil {
-			yyerror("%v undefined (cannot refer to unexported method %v)", n, s)
-		} else if _, ambig := dotpath(s, t, nil, false); ambig {
-			yyerror("%v undefined (ambiguous selector)", n) // method or field
-		} else {
-			yyerror("%v undefined (type %v has no method %v)", n, t, s)
-		}
-		n.Type = nil
-		return n
-	}
-
-	if !isMethodApplicable(t, m) {
-		yyerror("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
-		n.Type = nil
-		return n
-	}
-
-	n.Op = ONAME
-	if n.Name == nil {
-		n.Name = new(Name)
-	}
-	n.Right = newname(n.Sym)
-	n.Sym = methodSym(t, n.Sym)
-	n.Type = methodfunc(m.Type, n.Left.Type)
-	n.Xoffset = 0
-	n.SetClass(PFUNC)
-	// methodSym already marked n.Sym as a function.
-
-	// Issue 25065. Make sure that we emit the symbol for a local method.
-	if Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) {
-		makefuncsym(n.Sym)
-	}
-
-	return n
-}
-
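A sketch of the method expressions typecheckMethodExpr handles (T, Value, and Pointer are invented names):

	type T struct{}

	func (T) Value()    {}
	func (*T) Pointer() {}

	var f func(T) = T.Value // rewritten to an ONAME whose type takes the receiver as its first parameter
	// var _ = T.Pointer // error: invalid method expression T.Pointer (needs pointer receiver: (*T).Pointer)
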
-// isMethodApplicable reports whether method m can be called on a
-// value of type t. This is necessary because we compute a single
-// method set for both T and *T, but some *T methods are not
-// applicable to T receivers.
-func isMethodApplicable(t *types.Type, m *types.Field) bool {
-	return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || isifacemethod(m.Type) || m.Embedded == 2
-}
-
-func derefall(t *types.Type) *types.Type {
-	for t != nil && t.IsPtr() {
-		t = t.Elem()
-	}
-	return t
-}
-
-func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
-	s := n.Sym
-
-	dowidth(t)
-	var f1 *types.Field
-	if t.IsStruct() || t.IsInterface() {
-		f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
-	}
-
-	var f2 *types.Field
-	if n.Left.Type == t || n.Left.Type.Sym == nil {
-		mt := methtype(t)
-		if mt != nil {
-			f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
-		}
-	}
-
-	if f1 != nil {
-		if dostrcmp > 1 || f1.Broke() {
-			// Already in the process of diagnosing an error.
-			return f1
-		}
-		if f2 != nil {
-			yyerror("%v is both field and method", n.Sym)
-		}
-		if f1.Offset == BADWIDTH {
-			Fatalf("lookdot badwidth %v %p", f1, f1)
-		}
-		n.Xoffset = f1.Offset
-		n.Type = f1.Type
-		if t.IsInterface() {
-			if n.Left.Type.IsPtr() {
-				n.Left = nod(ODEREF, n.Left, nil) // implicitstar
-				n.Left.SetImplicit(true)
-				n.Left = typecheck(n.Left, ctxExpr)
-			}
-
-			n.Op = ODOTINTER
-		} else {
-			n.SetOpt(f1)
-		}
-
-		return f1
-	}
-
-	if f2 != nil {
-		if dostrcmp > 1 {
-			// Already in the process of diagnosing an error.
-			return f2
-		}
-		tt := n.Left.Type
-		dowidth(tt)
-		rcvr := f2.Type.Recv().Type
-		if !types.Identical(rcvr, tt) {
-			if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
-				checklvalue(n.Left, "call pointer method on")
-				n.Left = nod(OADDR, n.Left, nil)
-				n.Left.SetImplicit(true)
-				n.Left = typecheck(n.Left, ctxType|ctxExpr)
-			} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
-				n.Left = nod(ODEREF, n.Left, nil)
-				n.Left.SetImplicit(true)
-				n.Left = typecheck(n.Left, ctxType|ctxExpr)
-			} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
-				yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
-				for tt.IsPtr() {
-					// Stop one level early for method with pointer receiver.
-					if rcvr.IsPtr() && !tt.Elem().IsPtr() {
-						break
-					}
-					n.Left = nod(ODEREF, n.Left, nil)
-					n.Left.SetImplicit(true)
-					n.Left = typecheck(n.Left, ctxType|ctxExpr)
-					tt = tt.Elem()
-				}
-			} else {
-				Fatalf("method mismatch: %v for %v", rcvr, tt)
-			}
-		}
-
-		pll := n
-		ll := n.Left
-		for ll.Left != nil && (ll.Op == ODOT || ll.Op == ODOTPTR || ll.Op == ODEREF) {
-			pll = ll
-			ll = ll.Left
-		}
-		if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && asNode(ll.Type.Sym.Def) != nil && asNode(ll.Type.Sym.Def).Op == OTYPE {
-			// It is invalid to automatically dereference a named pointer type when selecting a method.
-			// Make n.Left == ll to clarify error message.
-			n.Left = ll
-			return nil
-		}
-
-		n.Sym = methodSym(n.Left.Type, f2.Sym)
-		n.Xoffset = f2.Offset
-		n.Type = f2.Type
-		n.Op = ODOTMETH
-
-		return f2
-	}
-
-	return nil
-}
-
-func nokeys(l Nodes) bool {
-	for _, n := range l.Slice() {
-		if n.Op == OKEY || n.Op == OSTRUCTKEY {
-			return false
-		}
-	}
-	return true
-}
-
-func hasddd(t *types.Type) bool {
-	for _, tl := range t.Fields().Slice() {
-		if tl.IsDDD() {
-			return true
-		}
-	}
-
-	return false
-}
-
-// typecheck assignment: type list = expression list
-func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) {
-	var t *types.Type
-	var i int
-
-	lno := lineno
-	defer func() { lineno = lno }()
-
-	if tstruct.Broke() {
-		return
-	}
-
-	var n *Node
-	if nl.Len() == 1 {
-		n = nl.First()
-	}
-
-	n1 := tstruct.NumFields()
-	n2 := nl.Len()
-	if !hasddd(tstruct) {
-		if n2 > n1 {
-			goto toomany
-		}
-		if n2 < n1 {
-			goto notenough
-		}
-	} else {
-		if !isddd {
-			if n2 < n1-1 {
-				goto notenough
-			}
-		} else {
-			if n2 > n1 {
-				goto toomany
-			}
-			if n2 < n1 {
-				goto notenough
-			}
-		}
-	}
-
-	i = 0
-	for _, tl := range tstruct.Fields().Slice() {
-		t = tl.Type
-		if tl.IsDDD() {
-			if isddd {
-				if i >= nl.Len() {
-					goto notenough
-				}
-				if nl.Len()-i > 1 {
-					goto toomany
-				}
-				n = nl.Index(i)
-				setlineno(n)
-				if n.Type != nil {
-					nl.SetIndex(i, assignconvfn(n, t, desc))
-				}
-				return
-			}
-
-			// TODO(mdempsky): Make into ... call with implicit slice.
-			for ; i < nl.Len(); i++ {
-				n = nl.Index(i)
-				setlineno(n)
-				if n.Type != nil {
-					nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
-				}
-			}
-			return
-		}
-
-		if i >= nl.Len() {
-			goto notenough
-		}
-		n = nl.Index(i)
-		setlineno(n)
-		if n.Type != nil {
-			nl.SetIndex(i, assignconvfn(n, t, desc))
-		}
-		i++
-	}
-
-	if i < nl.Len() {
-		goto toomany
-	}
-	if isddd {
-		if call != nil {
-			yyerror("invalid use of ... in call to %v", call)
-		} else {
-			yyerror("invalid use of ... in %v", op)
-		}
-	}
-	return
-
-notenough:
-	if n == nil || (!n.Diag() && n.Type != nil) {
-		details := errorDetails(nl, tstruct, isddd)
-		if call != nil {
-			// call is the expression being called, not the overall call.
-			// Method expressions have the form T.M, and the compiler has
-			// rewritten those to ONAME nodes but left T in Left.
-			if call.isMethodExpression() {
-				yyerror("not enough arguments in call to method expression %v%s", call, details)
-			} else {
-				yyerror("not enough arguments in call to %v%s", call, details)
-			}
-		} else {
-			yyerror("not enough arguments to %v%s", op, details)
-		}
-		if n != nil {
-			n.SetDiag(true)
-		}
-	}
-	return
-
-toomany:
-	details := errorDetails(nl, tstruct, isddd)
-	if call != nil {
-		yyerror("too many arguments in call to %v%s", call, details)
-	} else {
-		yyerror("too many arguments to %v%s", op, details)
-	}
-}
-
-func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
-	// If we don't know any type at a call site, suppress the
-	// have/want signature details in the message. See issue
-	// https://golang.org/issues/19012.
-	if tstruct == nil {
-		return ""
-	}
-	// If any node has an unknown type, suppress it as well
-	for _, n := range nl.Slice() {
-		if n.Type == nil {
-			return ""
-		}
-	}
-	return fmt.Sprintf("\n\thave %s\n\twant %v", nl.sigerr(isddd), tstruct)
-}
-
-// sigrepr returns a type's representation to the outside world,
-// for use in the string representations of call and return signatures,
-// e.g. in error messages about wrong arguments to return.
-func sigrepr(t *types.Type, isddd bool) string {
-	switch t {
-	case types.UntypedString:
-		return "string"
-	case types.UntypedBool:
-		return "bool"
-	}
-
-	if t.Etype == TIDEAL {
-		// "untyped number" is not commonly used
-		// outside of the compiler, so let's use "number".
-		// TODO(mdempsky): Revisit this.
-		return "number"
-	}
-
-	// Turn []T... argument to ...T for clearer error message.
-	if isddd {
-		if !t.IsSlice() {
-			Fatalf("bad type for ... argument: %v", t)
-		}
-		return "..." + t.Elem().String()
-	}
-	return t.String()
-}
-
-// sigerr returns the signature of the types at the call or return.
-func (nl Nodes) sigerr(isddd bool) string {
-	if nl.Len() < 1 {
-		return "()"
-	}
-
-	var typeStrings []string
-	for i, n := range nl.Slice() {
-		isdddArg := isddd && i == nl.Len()-1
-		typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg))
-	}
-
-	return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
-}
-
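Together, errorDetails, sigrepr, and sigerr produce the familiar have/want output. An illustrative case (f is a made-up function):

	func f(int, string) {}

	// f(1)
	// error: not enough arguments in call to f
	//	have (number)
	//	want (int, string)

The untyped constant 1 prints as "number" here, per the TIDEAL case in sigrepr.
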
-// type check composite
-func fielddup(name string, hash map[string]bool) {
-	if hash[name] {
-		yyerror("duplicate field name in struct literal: %s", name)
-		return
-	}
-	hash[name] = true
-}
-
-// iscomptype reports whether type t is a composite literal type.
-func iscomptype(t *types.Type) bool {
-	switch t.Etype {
-	case TARRAY, TSLICE, TSTRUCT, TMAP:
-		return true
-	default:
-		return false
-	}
-}
-
-// pushtype adds elided type information for composite literals if
-// appropriate, and returns the resulting expression.
-func pushtype(n *Node, t *types.Type) *Node {
-	if n == nil || n.Op != OCOMPLIT || n.Right != nil {
-		return n
-	}
-
-	switch {
-	case iscomptype(t):
-		// For T, return T{...}.
-		n.Right = typenod(t)
-
-	case t.IsPtr() && iscomptype(t.Elem()):
-		// For *T, return &T{...}.
-		n.Right = typenod(t.Elem())
-
-		n = nodl(n.Pos, OADDR, n, nil)
-		n.SetImplicit(true)
-	}
-
-	return n
-}
-
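Illustrative examples of the elided types pushtype fills in (Point is a made-up type):

	type Point struct{ X, Y int }

	var pts = []Point{{1, 2}, {3, 4}} // each {1, 2} receives the elided type Point
	var ptrs = []*Point{{1, 2}}       // for the *Point element type, {1, 2} becomes &Point{1, 2}
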
-// The result of typecheckcomplit MUST be assigned back to n, e.g.
-// 	n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *Node) (res *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckcomplit", n)(&res)
-	}
-
-	lno := lineno
-	defer func() {
-		lineno = lno
-	}()
-
-	if n.Right == nil {
-		yyerrorl(n.Pos, "missing type in composite literal")
-		n.Type = nil
-		return n
-	}
-
-	// Save original node (including n.Right)
-	n.Orig = n.copy()
-
-	setlineno(n.Right)
-
-	// Need to handle [...]T arrays specially.
-	if n.Right.Op == OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ODDD {
-		n.Right.Right = typecheck(n.Right.Right, ctxType)
-		if n.Right.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-		elemType := n.Right.Right.Type
-
-		length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal")
-
-		n.Op = OARRAYLIT
-		n.Type = types.NewArray(elemType, length)
-		n.Right = nil
-		return n
-	}
-
-	n.Right = typecheck(n.Right, ctxType)
-	t := n.Right.Type
-	if t == nil {
-		n.Type = nil
-		return n
-	}
-	n.Type = t
-
-	switch t.Etype {
-	default:
-		yyerror("invalid composite literal type %v", t)
-		n.Type = nil
-
-	case TARRAY:
-		typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal")
-		n.Op = OARRAYLIT
-		n.Right = nil
-
-	case TSLICE:
-		length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal")
-		n.Op = OSLICELIT
-		n.Right = nodintconst(length)
-
-	case TMAP:
-		var cs constSet
-		for i3, l := range n.List.Slice() {
-			setlineno(l)
-			if l.Op != OKEY {
-				n.List.SetIndex(i3, typecheck(l, ctxExpr))
-				yyerror("missing key in map literal")
-				continue
-			}
-
-			r := l.Left
-			r = pushtype(r, t.Key())
-			r = typecheck(r, ctxExpr)
-			l.Left = assignconv(r, t.Key(), "map key")
-			cs.add(lineno, l.Left, "key", "map literal")
-
-			r = l.Right
-			r = pushtype(r, t.Elem())
-			r = typecheck(r, ctxExpr)
-			l.Right = assignconv(r, t.Elem(), "map value")
-		}
-
-		n.Op = OMAPLIT
-		n.Right = nil
-
-	case TSTRUCT:
-		// Need valid field offsets for Xoffset below.
-		dowidth(t)
-
-		errored := false
-		if n.List.Len() != 0 && nokeys(n.List) {
-			// simple list of variables
-			ls := n.List.Slice()
-			for i, n1 := range ls {
-				setlineno(n1)
-				n1 = typecheck(n1, ctxExpr)
-				ls[i] = n1
-				if i >= t.NumFields() {
-					if !errored {
-						yyerror("too many values in %v", n)
-						errored = true
-					}
-					continue
-				}
-
-				f := t.Field(i)
-				s := f.Sym
-				if s != nil && !types.IsExported(s.Name) && s.Pkg != localpkg {
-					yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
-				}
-				// No pushtype allowed here. Must name fields for that.
-				n1 = assignconv(n1, f.Type, "field value")
-				n1 = nodSym(OSTRUCTKEY, n1, f.Sym)
-				n1.Xoffset = f.Offset
-				ls[i] = n1
-			}
-			if len(ls) < t.NumFields() {
-				yyerror("too few values in %v", n)
-			}
-		} else {
-			hash := make(map[string]bool)
-
-			// keyed list
-			ls := n.List.Slice()
-			for i, l := range ls {
-				setlineno(l)
-
-				if l.Op == OKEY {
-					key := l.Left
-
-					l.Op = OSTRUCTKEY
-					l.Left = l.Right
-					l.Right = nil
-
-					// An OXDOT uses the Sym field to hold
-					// the field to the right of the dot,
-					// so s will be non-nil, but an OXDOT
-					// is never a valid struct literal key.
-					if key.Sym == nil || key.Op == OXDOT || key.Sym.IsBlank() {
-						yyerror("invalid field name %v in struct initializer", key)
-						l.Left = typecheck(l.Left, ctxExpr)
-						continue
-					}
-
-					// Sym might have resolved to a name in another top-level
-					// package because of an import dot. Redirect to the
-					// correct sym before we do the lookup.
-					s := key.Sym
-					if s.Pkg != localpkg && types.IsExported(s.Name) {
-						s1 := lookup(s.Name)
-						if s1.Origpkg == s.Pkg {
-							s = s1
-						}
-					}
-					l.Sym = s
-				}
-
-				if l.Op != OSTRUCTKEY {
-					if !errored {
-						yyerror("mixture of field:value and value initializers")
-						errored = true
-					}
-					ls[i] = typecheck(ls[i], ctxExpr)
-					continue
-				}
-
-				f := lookdot1(nil, l.Sym, t, t.Fields(), 0)
-				if f == nil {
-					if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
-						if visible(ci.Sym) {
-							yyerror("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym)
-						} else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
-							yyerror("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t)
-						} else {
-							yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
-						}
-						continue
-					}
-					var f *types.Field
-					p, _ := dotpath(l.Sym, t, &f, true)
-					if p == nil || f.IsMethod() {
-						yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
-						continue
-					}
-					// dotpath returns the parent embedded types in reverse order.
-					var ep []string
-					for ei := len(p) - 1; ei >= 0; ei-- {
-						ep = append(ep, p[ei].field.Sym.Name)
-					}
-					ep = append(ep, l.Sym.Name)
-					yyerror("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
-					continue
-				}
-				fielddup(f.Sym.Name, hash)
-				l.Xoffset = f.Offset
-
-				// No pushtype allowed here. Tried and rejected.
-				l.Left = typecheck(l.Left, ctxExpr)
-				l.Left = assignconv(l.Left, f.Type, "field value")
-			}
-		}
-
-		n.Op = OSTRUCTLIT
-		n.Right = nil
-	}
-
-	return n
-}
-
-// typecheckarraylit type-checks a sequence of slice/array literal elements.
-func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx string) int64 {
-	// If there are key/value pairs, create a map to keep seen
-	// keys so we can check for duplicate indices.
-	var indices map[int64]bool
-	for _, elt := range elts {
-		if elt.Op == OKEY {
-			indices = make(map[int64]bool)
-			break
-		}
-	}
-
-	var key, length int64
-	for i, elt := range elts {
-		setlineno(elt)
-		vp := &elts[i]
-		if elt.Op == OKEY {
-			elt.Left = typecheck(elt.Left, ctxExpr)
-			key = indexconst(elt.Left)
-			if key < 0 {
-				if !elt.Left.Diag() {
-					if key == -2 {
-						yyerror("index too large")
-					} else {
-						yyerror("index must be non-negative integer constant")
-					}
-					elt.Left.SetDiag(true)
-				}
-				key = -(1 << 30) // stay negative for a while
-			}
-			vp = &elt.Right
-		}
-
-		r := *vp
-		r = pushtype(r, elemType)
-		r = typecheck(r, ctxExpr)
-		*vp = assignconv(r, elemType, ctx)
-
-		if key >= 0 {
-			if indices != nil {
-				if indices[key] {
-					yyerror("duplicate index in %s: %d", ctx, key)
-				} else {
-					indices[key] = true
-				}
-			}
-
-			if bound >= 0 && key >= bound {
-				yyerror("array index %d out of bounds [0:%d]", key, bound)
-				bound = -1
-			}
-		}
-
-		key++
-		if key > length {
-			length = key
-		}
-	}
-
-	return length
-}
-
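The index bookkeeping in typecheckarraylit, illustrated (hedged examples, not from this CL):

	var a = [...]int{5: 1, 0: 2} // length is computed as 6
	// var _ = [3]int{4: 0}      // error: array index 4 out of bounds [0:3]
	// var _ = []int{0: 1, 0: 2} // error: duplicate index in slice literal: 0
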
-// visible reports whether sym is exported or locally defined.
-func visible(sym *types.Sym) bool {
-	return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == localpkg)
-}
-
-// nonexported reports whether sym is an unexported field.
-func nonexported(sym *types.Sym) bool {
-	return sym != nil && !types.IsExported(sym.Name)
-}
-
-// lvalue etc
-func islvalue(n *Node) bool {
-	switch n.Op {
-	case OINDEX:
-		if n.Left.Type != nil && n.Left.Type.IsArray() {
-			return islvalue(n.Left)
-		}
-		if n.Left.Type != nil && n.Left.Type.IsString() {
-			return false
-		}
-		fallthrough
-	case ODEREF, ODOTPTR, OCLOSUREVAR:
-		return true
-
-	case ODOT:
-		return islvalue(n.Left)
-
-	case ONAME:
-		if n.Class() == PFUNC {
-			return false
-		}
-		return true
-	}
-
-	return false
-}
-
-func checklvalue(n *Node, verb string) {
-	if !islvalue(n) {
-		yyerror("cannot %s %v", verb, n)
-	}
-}
-
-func checkassign(stmt *Node, n *Node) {
-	// Variables declared in ORANGE are assigned on every iteration.
-	if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
-		r := outervalue(n)
-		if r.Op == ONAME {
-			r.Name.SetAssigned(true)
-			if r.Name.IsClosureVar() {
-				r.Name.Defn.Name.SetAssigned(true)
-			}
-		}
-	}
-
-	if islvalue(n) {
-		return
-	}
-	if n.Op == OINDEXMAP {
-		n.SetIndexMapLValue(true)
-		return
-	}
-
-	// have already complained about n being invalid
-	if n.Type == nil {
-		return
-	}
-
-	switch {
-	case n.Op == ODOT && n.Left.Op == OINDEXMAP:
-		yyerror("cannot assign to struct field %v in map", n)
-	case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR:
-		yyerror("cannot assign to %v (strings are immutable)", n)
-	case n.Op == OLITERAL && n.Sym != nil && n.isGoConst():
-		yyerror("cannot assign to %v (declared const)", n)
-	default:
-		yyerror("cannot assign to %v", n)
-	}
-	n.Type = nil
-}
-
-func checkassignlist(stmt *Node, l Nodes) {
-	for _, n := range l.Slice() {
-		checkassign(stmt, n)
-	}
-}
-
-// samesafeexpr checks whether it is safe to reuse one of l and r
-// instead of computing both. samesafeexpr assumes that l and r are
-// used in the same statement or expression. In order for it to be
-// safe to reuse l or r, they must:
-// * be the same expression
-// * not have side-effects (no function calls, no channel ops);
-//   however, panics are ok
-// * not cause inappropriate aliasing; e.g. two string-to-[]byte
-//   conversions must result in two distinct slices
-//
-// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
-// as an lvalue (map assignment) and an rvalue (map access). This is
-// currently OK, since the only place samesafeexpr gets used on an
-// lvalue expression is for OSLICE and OAPPEND optimizations, and it
-// is correct in those settings.
-func samesafeexpr(l *Node, r *Node) bool {
-	if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
-		return false
-	}
-
-	switch l.Op {
-	case ONAME, OCLOSUREVAR:
-		return l == r
-
-	case ODOT, ODOTPTR:
-		return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
-
-	case ODEREF, OCONVNOP,
-		ONOT, OBITNOT, OPLUS, ONEG:
-		return samesafeexpr(l.Left, r.Left)
-
-	case OCONV:
-		// Some conversions can't be reused, such as []byte(str).
-		// Allow only numeric-ish types. This is a bit conservative.
-		return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
-
-	case OINDEX, OINDEXMAP,
-		OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
-		return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
-
-	case OLITERAL:
-		return eqval(l.Val(), r.Val())
-	}
-
-	return false
-}
-
-// type check assignment.
-// if this assignment is the definition of a var on the left side,
-// fill in the var's type.
-func typecheckas(n *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckas", n)(nil)
-	}
-
-	// delicate little dance.
-	// the definition of n may refer to this assignment
-	// as its definition, in which case it will call typecheckas.
-	// in that case, do not call typecheck back, or it will cycle.
-	// if the variable has a type (ntype) then typechecking
-	// will not look at defn, so it is okay (and desirable,
-	// so that the conversion below happens).
-	n.Left = resolve(n.Left)
-
-	if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil {
-		n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
-	}
-
-	// Use ctxMultiOK so we can emit an "N variables but M values" error
-	// to be consistent with typecheckas2 (#26616).
-	n.Right = typecheck(n.Right, ctxExpr|ctxMultiOK)
-	checkassign(n, n.Left)
-	if n.Right != nil && n.Right.Type != nil {
-		if n.Right.Type.IsFuncArgStruct() {
-			yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
-			// Multi-value RHS isn't actually valid for OAS; nil out
-			// to indicate failed typechecking.
-			n.Right.Type = nil
-		} else if n.Left.Type != nil {
-			n.Right = assignconv(n.Right, n.Left.Type, "assignment")
-		}
-	}
-
-	if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil {
-		n.Right = defaultlit(n.Right, nil)
-		n.Left.Type = n.Right.Type
-	}
-
-	// second half of dance.
-	// now that right is done, typecheck the left
-	// just to get it over with.  see dance above.
-	n.SetTypecheck(1)
-
-	if n.Left.Typecheck() == 0 {
-		n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
-	}
-	if !n.Left.isBlank() {
-		checkwidth(n.Left.Type) // ensure width is calculated for backend
-	}
-}
-
-func checkassignto(src *types.Type, dst *Node) {
-	if op, why := assignop(src, dst.Type); op == OXXX {
-		yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
-		return
-	}
-}
-
-func typecheckas2(n *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckas2", n)(nil)
-	}
-
-	ls := n.List.Slice()
-	for i1, n1 := range ls {
-		// delicate little dance.
-		n1 = resolve(n1)
-		ls[i1] = n1
-
-		if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
-			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
-		}
-	}
-
-	cl := n.List.Len()
-	cr := n.Rlist.Len()
-	if cl > 1 && cr == 1 {
-		n.Rlist.SetFirst(typecheck(n.Rlist.First(), ctxExpr|ctxMultiOK))
-	} else {
-		typecheckslice(n.Rlist.Slice(), ctxExpr)
-	}
-	checkassignlist(n, n.List)
-
-	var l *Node
-	var r *Node
-	if cl == cr {
-		// easy
-		ls := n.List.Slice()
-		rs := n.Rlist.Slice()
-		for il, nl := range ls {
-			nr := rs[il]
-			if nl.Type != nil && nr.Type != nil {
-				rs[il] = assignconv(nr, nl.Type, "assignment")
-			}
-			if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
-				rs[il] = defaultlit(rs[il], nil)
-				nl.Type = rs[il].Type
-			}
-		}
-
-		goto out
-	}
-
-	l = n.List.First()
-	r = n.Rlist.First()
-
-	// x,y,z = f()
-	if cr == 1 {
-		if r.Type == nil {
-			goto out
-		}
-		switch r.Op {
-		case OCALLMETH, OCALLINTER, OCALLFUNC:
-			if !r.Type.IsFuncArgStruct() {
-				break
-			}
-			cr = r.Type.NumFields()
-			if cr != cl {
-				goto mismatch
-			}
-			n.Op = OAS2FUNC
-			n.Right = r
-			n.Rlist.Set(nil)
-			for i, l := range n.List.Slice() {
-				f := r.Type.Field(i)
-				if f.Type != nil && l.Type != nil {
-					checkassignto(f.Type, l)
-				}
-				if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
-					l.Type = f.Type
-				}
-			}
-			goto out
-		}
-	}
-
-	// x, ok = y
-	if cl == 2 && cr == 1 {
-		if r.Type == nil {
-			goto out
-		}
-		switch r.Op {
-		case OINDEXMAP, ORECV, ODOTTYPE:
-			switch r.Op {
-			case OINDEXMAP:
-				n.Op = OAS2MAPR
-			case ORECV:
-				n.Op = OAS2RECV
-			case ODOTTYPE:
-				n.Op = OAS2DOTTYPE
-				r.Op = ODOTTYPE2
-			}
-			n.Right = r
-			n.Rlist.Set(nil)
-			if l.Type != nil {
-				checkassignto(r.Type, l)
-			}
-			if l.Name != nil && l.Name.Defn == n {
-				l.Type = r.Type
-			}
-			l := n.List.Second()
-			if l.Type != nil && !l.Type.IsBoolean() {
-				checkassignto(types.Types[TBOOL], l)
-			}
-			if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
-				l.Type = types.Types[TBOOL]
-			}
-			goto out
-		}
-	}
-
-mismatch:
-	switch r.Op {
-	default:
-		yyerror("assignment mismatch: %d variables but %d values", cl, cr)
-	case OCALLFUNC, OCALLMETH, OCALLINTER:
-		yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
-	}
-
-	// second half of dance
-out:
-	n.SetTypecheck(1)
-	ls = n.List.Slice()
-	for i1, n1 := range ls {
-		if n1.Typecheck() == 0 {
-			ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
-		}
-	}
-}
-
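The special two-valued forms recognized by typecheckas2, for reference (asSketch and its variables are invented names):

	func asSketch(m map[string]int, i interface{}) {
		ch := make(chan int, 1)
		ch <- 1
		v, ok1 := m["k"]  // becomes OAS2MAPR
		x, ok2 := <-ch    // becomes OAS2RECV
		n, ok3 := i.(int) // becomes OAS2DOTTYPE; the RHS becomes ODOTTYPE2
		_, _, _, _, _, _ = v, ok1, x, ok2, n, ok3
	}
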
-// type check function definition
-func typecheckfunc(n *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckfunc", n)(nil)
-	}
-
-	for _, ln := range n.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class() == PPARAM || ln.Class() == PPARAMOUT) {
-			ln.Name.Decldepth = 1
-		}
-	}
-
-	n.Func.Nname = typecheck(n.Func.Nname, ctxExpr|ctxAssign)
-	t := n.Func.Nname.Type
-	if t == nil {
-		return
-	}
-	n.Type = t
-	t.FuncType().Nname = asTypesNode(n.Func.Nname)
-	rcvr := t.Recv()
-	if rcvr != nil && n.Func.Shortname != nil {
-		m := addmethod(n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0)
-		if m == nil {
-			return
-		}
-
-		n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname)
-		declare(n.Func.Nname, PFUNC)
-	}
-
-	if Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil {
-		makefuncsym(n.Func.Nname.Sym)
-	}
-}
-
-// The result of stringtoruneslit MUST be assigned back to n, e.g.
-// 	n.Left = stringtoruneslit(n.Left)
-func stringtoruneslit(n *Node) *Node {
-	if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
-		Fatalf("stringtoarraylit %v", n)
-	}
-
-	var l []*Node
-	i := 0
-	for _, r := range n.Left.StringVal() {
-		l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
-		i++
-	}
-
-	nn := nod(OCOMPLIT, nil, typenod(n.Type))
-	nn.List.Set(l)
-	nn = typecheck(nn, ctxExpr)
-	return nn
-}
-
-var mapqueue []*Node
-
-func checkMapKeys() {
-	for _, n := range mapqueue {
-		k := n.Type.MapType().Key
-		if !k.Broke() && !IsComparable(k) {
-			yyerrorl(n.Pos, "invalid map key type %v", k)
-		}
-	}
-	mapqueue = nil
-}
-
-func setUnderlying(t, underlying *types.Type) {
-	if underlying.Etype == TFORW {
-		// This type isn't computed yet; when it is, update t.
-		underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
-		return
-	}
-
-	n := asNode(t.Nod)
-	ft := t.ForwardType()
-	cache := t.Cache
-
-	// TODO(mdempsky): Fix Type rekinding.
-	*t = *underlying
-
-	// Restore unnecessarily clobbered attributes.
-	t.Nod = asTypesNode(n)
-	t.Sym = n.Sym
-	if n.Name != nil {
-		t.Vargen = n.Name.Vargen
-	}
-	t.Cache = cache
-	t.SetDeferwidth(false)
-
-	// spec: "The declared type does not inherit any methods bound
-	// to the existing type, but the method set of an interface
-	// type [...] remains unchanged."
-	if !t.IsInterface() {
-		*t.Methods() = types.Fields{}
-		*t.AllMethods() = types.Fields{}
-	}
-
-	// Propagate go:notinheap pragma from the Name to the Type.
-	if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
-		t.SetNotInHeap(true)
-	}
-
-	// Update types waiting on this type.
-	for _, w := range ft.Copyto {
-		setUnderlying(w, t)
-	}
-
-	// Double-check use of type as embedded type.
-	if ft.Embedlineno.IsKnown() {
-		if t.IsPtr() || t.IsUnsafePtr() {
-			yyerrorl(ft.Embedlineno, "embedded type cannot be a pointer")
-		}
-	}
-}
-
-func typecheckdeftype(n *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckdeftype", n)(nil)
-	}
-
-	n.SetTypecheck(1)
-	n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
-	t := n.Name.Param.Ntype.Type
-	if t == nil {
-		n.SetDiag(true)
-		n.Type = nil
-	} else if n.Type == nil {
-		n.SetDiag(true)
-	} else {
-		// copy new type and clear fields
-		// that don't come along.
-		setUnderlying(n.Type, t)
-	}
-}
-
-func typecheckdef(n *Node) {
-	if enableTrace && trace {
-		defer tracePrint("typecheckdef", n)(nil)
-	}
-
-	lno := setlineno(n)
-
-	if n.Op == ONONAME {
-		if !n.Diag() {
-			n.SetDiag(true)
-
-			// Note: adderrorname looks for this string and
-			// adds context about the outer expression
-			yyerrorl(lineno, "undefined: %v", n.Sym)
-		}
-		lineno = lno
-		return
-	}
-
-	if n.Walkdef() == 1 {
-		lineno = lno
-		return
-	}
-
-	typecheckdefstack = append(typecheckdefstack, n)
-	if n.Walkdef() == 2 {
-		flusherrors()
-		fmt.Printf("typecheckdef loop:")
-		for i := len(typecheckdefstack) - 1; i >= 0; i-- {
-			n := typecheckdefstack[i]
-			fmt.Printf(" %v", n.Sym)
-		}
-		fmt.Printf("\n")
-		Fatalf("typecheckdef loop")
-	}
-
-	n.SetWalkdef(2)
-
-	if n.Type != nil || n.Sym == nil { // builtin or no name
-		goto ret
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("typecheckdef %v", n.Op)
-
-	case OLITERAL:
-		if n.Name.Param.Ntype != nil {
-			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
-			n.Type = n.Name.Param.Ntype.Type
-			n.Name.Param.Ntype = nil
-			if n.Type == nil {
-				n.SetDiag(true)
-				goto ret
-			}
-		}
-
-		e := n.Name.Defn
-		n.Name.Defn = nil
-		if e == nil {
-			Dump("typecheckdef nil defn", n)
-			yyerrorl(n.Pos, "xxx")
-		}
-
-		e = typecheck(e, ctxExpr)
-		if e.Type == nil {
-			goto ret
-		}
-		if !e.isGoConst() {
-			if !e.Diag() {
-				if Isconst(e, CTNIL) {
-					yyerrorl(n.Pos, "const initializer cannot be nil")
-				} else {
-					yyerrorl(n.Pos, "const initializer %v is not a constant", e)
-				}
-				e.SetDiag(true)
-			}
-			goto ret
-		}
-
-		t := n.Type
-		if t != nil {
-			if !okforconst[t.Etype] {
-				yyerrorl(n.Pos, "invalid constant type %v", t)
-				goto ret
-			}
-
-			if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
-				yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
-				goto ret
-			}
-
-			e = convlit(e, t)
-		}
-
-		n.SetVal(e.Val())
-		n.Type = e.Type
-
-	case ONAME:
-		if n.Name.Param.Ntype != nil {
-			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
-			n.Type = n.Name.Param.Ntype.Type
-			if n.Type == nil {
-				n.SetDiag(true)
-				goto ret
-			}
-		}
-
-		if n.Type != nil {
-			break
-		}
-		if n.Name.Defn == nil {
-			if n.SubOp() != 0 { // like OPRINTN
-				break
-			}
-			if nsavederrors+nerrors > 0 {
-				// Can have undefined variables in x := foo
-				// that make x have an n.Name.Defn == nil.
-				// If there are other errors anyway, don't
-				// bother adding to the noise.
-				break
-			}
-
-			Fatalf("var without type, init: %v", n.Sym)
-		}
-
-		if n.Name.Defn.Op == ONAME {
-			n.Name.Defn = typecheck(n.Name.Defn, ctxExpr)
-			n.Type = n.Name.Defn.Type
-			break
-		}
-
-		n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
-
-	case OTYPE:
-		if p := n.Name.Param; p.Alias() {
-			// Type alias declaration: Simply use the rhs type - no need
-			// to create a new type.
-			// If we have a syntax error, p.Ntype may be nil.
-			if p.Ntype != nil {
-				p.Ntype = typecheck(p.Ntype, ctxType)
-				n.Type = p.Ntype.Type
-				if n.Type == nil {
-					n.SetDiag(true)
-					goto ret
-				}
-				// For package-level type aliases, set n.Sym.Def so we can identify
-				// it as a type alias during export. See also #31959.
-				if n.Name.Curfn == nil {
-					n.Sym.Def = asTypesNode(p.Ntype)
-				}
-			}
-			break
-		}
-
-		// regular type declaration
-		defercheckwidth()
-		n.SetWalkdef(1)
-		setTypeNode(n, types.New(TFORW))
-		n.Type.Sym = n.Sym
-		nerrors0 := nerrors
-		typecheckdeftype(n)
-		if n.Type.Etype == TFORW && nerrors > nerrors0 {
-			// Something went wrong during type-checking,
-			// but it was reported. Silence future errors.
-			n.Type.SetBroke(true)
-		}
-		resumecheckwidth()
-	}
-
-ret:
-	if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() {
-		Fatalf("got %v for %v", n.Type, n)
-	}
-	last := len(typecheckdefstack) - 1
-	if typecheckdefstack[last] != n {
-		Fatalf("typecheckdefstack mismatch")
-	}
-	typecheckdefstack[last] = nil
-	typecheckdefstack = typecheckdefstack[:last]
-
-	lineno = lno
-	n.SetWalkdef(1)
-}
-
-func checkmake(t *types.Type, arg string, np **Node) bool {
-	n := *np
-	if !n.Type.IsInteger() && n.Type.Etype != TIDEAL {
-		yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
-		return false
-	}
-
-	// Do range checks for constants before defaultlit
-	// to avoid redundant "constant NNN overflows int" errors.
-	switch consttype(n) {
-	case CTINT, CTRUNE, CTFLT, CTCPLX:
-		v := toint(n.Val()).U.(*Mpint)
-		if v.CmpInt64(0) < 0 {
-			yyerror("negative %s argument in make(%v)", arg, t)
-			return false
-		}
-		if v.Cmp(maxintval[TINT]) > 0 {
-			yyerror("%s argument too large in make(%v)", arg, t)
-			return false
-		}
-	}
-
-	// defaultlit is necessary for non-constants too: n might be 1.1<<k.
-	// TODO(gri) The length argument requirements for (array/slice) make
-	// are the same as for index expressions. Factor the code better;
-	// for instance, indexlit might be called here and incorporate some
-	// of the bounds checks done for make.
-	n = defaultlit(n, types.Types[TINT])
-	*np = n
-
-	return true
-}
-
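Examples of the checkmake range checks above (a sketch; makeArgs is a made-up name):

	func makeArgs() {
		// _ = make([]int, -1)     // error: negative len argument in make([]int)
		// _ = make([]int, 1<<100) // error: len argument too large in make([]int)
		k := uint(2)
		_ = make([]int, 1<<k) // ok: non-constant length; defaultlit gives it type int
	}
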
-func markbreak(n *Node, implicit *Node) {
-	if n == nil {
-		return
-	}
-
-	switch n.Op {
-	case OBREAK:
-		if n.Sym == nil {
-			if implicit != nil {
-				implicit.SetHasBreak(true)
-			}
-		} else {
-			lab := asNode(n.Sym.Label)
-			if lab != nil {
-				lab.SetHasBreak(true)
-			}
-		}
-	case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
-		implicit = n
-		fallthrough
-	default:
-		markbreak(n.Left, implicit)
-		markbreak(n.Right, implicit)
-		markbreaklist(n.Ninit, implicit)
-		markbreaklist(n.Nbody, implicit)
-		markbreaklist(n.List, implicit)
-		markbreaklist(n.Rlist, implicit)
-	}
-}
-
-func markbreaklist(l Nodes, implicit *Node) {
-	s := l.Slice()
-	for i := 0; i < len(s); i++ {
-		n := s[i]
-		if n == nil {
-			continue
-		}
-		if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
-			switch n.Name.Defn.Op {
-			case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
-				n.Sym.Label = asTypesNode(n.Name.Defn)
-				markbreak(n.Name.Defn, n.Name.Defn)
-				n.Sym.Label = nil
-				i++
-				continue
-			}
-		}
-
-		markbreak(n, implicit)
-	}
-}
-
-// isterminating reports whether the Nodes list ends with a terminating statement.
-func (l Nodes) isterminating() bool {
-	s := l.Slice()
-	c := len(s)
-	if c == 0 {
-		return false
-	}
-	return s[c-1].isterminating()
-}
-
-// isterminating reports whether the node n, the last one in a
-// statement list, is a terminating statement.
-func (n *Node) isterminating() bool {
-	switch n.Op {
-	// NOTE: OLABEL is treated as a separate statement,
-	// not as a prefix of the following statement, so skipping to the
-	// last statement in the block handles the labeled statement case
-	// by skipping over the label. No case OLABEL here.
-
-	case OBLOCK:
-		return n.List.isterminating()
-
-	case OGOTO, ORETURN, ORETJMP, OPANIC, OFALL:
-		return true
-
-	case OFOR, OFORUNTIL:
-		if n.Left != nil {
-			return false
-		}
-		if n.HasBreak() {
-			return false
-		}
-		return true
-
-	case OIF:
-		return n.Nbody.isterminating() && n.Rlist.isterminating()
-
-	case OSWITCH, OTYPESW, OSELECT:
-		if n.HasBreak() {
-			return false
-		}
-		def := false
-		for _, n1 := range n.List.Slice() {
-			if !n1.Nbody.isterminating() {
-				return false
-			}
-			if n1.List.Len() == 0 { // default
-				def = true
-			}
-		}
-
-		if n.Op != OSELECT && !def {
-			return false
-		}
-		return true
-	}
-
-	return false
-}
-
-// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn *Node) {
-	if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 {
-		markbreaklist(fn.Nbody, nil)
-		if !fn.Nbody.isterminating() {
-			yyerrorl(fn.Func.Endlineno, "missing return at end of function")
-		}
-	}
-}
-
-func deadcode(fn *Node) {
-	deadcodeslice(fn.Nbody)
-	deadcodefn(fn)
-}
-
-func deadcodefn(fn *Node) {
-	if fn.Nbody.Len() == 0 {
-		return
-	}
-
-	for _, n := range fn.Nbody.Slice() {
-		if n.Ninit.Len() > 0 {
-			return
-		}
-		switch n.Op {
-		case OIF:
-			if !Isconst(n.Left, CTBOOL) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 {
-				return
-			}
-		case OFOR:
-			if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() {
-				return
-			}
-		default:
-			return
-		}
-	}
-
-	fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)})
-}
-
-func deadcodeslice(nn Nodes) {
-	var lastLabel = -1
-	for i, n := range nn.Slice() {
-		if n != nil && n.Op == OLABEL {
-			lastLabel = i
-		}
-	}
-	for i, n := range nn.Slice() {
-		// cut is set to true when all nodes after the i'th position
-		// should be removed; in other words, it marks the whole
-		// slice "tail" as dead.
-		cut := false
-		if n == nil {
-			continue
-		}
-		if n.Op == OIF {
-			n.Left = deadcodeexpr(n.Left)
-			if Isconst(n.Left, CTBOOL) {
-				var body Nodes
-				if n.Left.BoolVal() {
-					n.Rlist = Nodes{}
-					body = n.Nbody
-				} else {
-					n.Nbody = Nodes{}
-					body = n.Rlist
-				}
-				// If the "then" or "else" branch ends with a panic or return
-				// statement, it is safe to remove all statements after this node.
-				// isterminating is not used to avoid goto-related complications.
-				// We must be careful not to deadcode-remove labels, as they
-				// might be the target of a goto. See issue 28616.
-				if body := body.Slice(); len(body) != 0 {
-					switch body[(len(body) - 1)].Op {
-					case ORETURN, ORETJMP, OPANIC:
-						if i > lastLabel {
-							cut = true
-						}
-					}
-				}
-			}
-		}
-
-		deadcodeslice(n.Ninit)
-		deadcodeslice(n.Nbody)
-		deadcodeslice(n.List)
-		deadcodeslice(n.Rlist)
-		if cut {
-			*nn.slice = nn.Slice()[:i+1]
-			break
-		}
-	}
-}
-
-func deadcodeexpr(n *Node) *Node {
-	// Perform dead-code elimination on short-circuited boolean
-	// expressions involving constants with the intent of
-	// producing a constant 'if' condition.
-	switch n.Op {
-	case OANDAND:
-		n.Left = deadcodeexpr(n.Left)
-		n.Right = deadcodeexpr(n.Right)
-		if Isconst(n.Left, CTBOOL) {
-			if n.Left.BoolVal() {
-				return n.Right // true && x => x
-			} else {
-				return n.Left // false && x => false
-			}
-		}
-	case OOROR:
-		n.Left = deadcodeexpr(n.Left)
-		n.Right = deadcodeexpr(n.Right)
-		if Isconst(n.Left, CTBOOL) {
-			if n.Left.BoolVal() {
-				return n.Left // true || x => true
-			} else {
-				return n.Right // false || x => x
-			}
-		}
-	}
-	return n
-}
-
-// setTypeNode sets n to an OTYPE node representing t.
-func setTypeNode(n *Node, t *types.Type) {
-	n.Op = OTYPE
-	n.Type = t
-	n.Type.Nod = asTypesNode(n)
-}
-
-// getIotaValue returns the current value for "iota",
-// or -1 if not within a ConstSpec.
-func getIotaValue() int64 {
-	if i := len(typecheckdefstack); i > 0 {
-		if x := typecheckdefstack[i-1]; x.Op == OLITERAL {
-			return x.Iota()
-		}
-	}
-
-	if Curfn != nil && Curfn.Iota() >= 0 {
-		return Curfn.Iota()
-	}
-
-	return -1
-}
-
-// curpkg returns the current package, based on Curfn.
-func curpkg() *types.Pkg {
-	fn := Curfn
-	if fn == nil {
-		// Initialization expressions for package-scope variables.
-		return localpkg
-	}
-
-	// TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
-	// Curfn, rather than mixing them.
-	if fn.Op == ODCLFUNC {
-		fn = fn.Func.Nname
-	}
-
-	return fnpkg(fn)
-}
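
deadcodeexpr above folds constants through && and || with the intent of
producing a constant if condition. A minimal standalone sketch of the same
short-circuit folding over a toy expression type (illustrative names, not
compiler types):

	package main

	import "fmt"

	// expr is a toy boolean expression: a constant, or a short-circuit
	// binary op over two subexpressions.
	type expr struct {
		op          string // "const", "&&", "||"
		val         bool   // meaningful only when op == "const"
		left, right *expr
	}

	// fold mirrors deadcodeexpr: fold constant left operands through
	// && and ||, hoping to end up with a constant condition.
	func fold(e *expr) *expr {
		switch e.op {
		case "&&":
			e.left, e.right = fold(e.left), fold(e.right)
			if e.left.op == "const" {
				if e.left.val {
					return e.right // true && x => x
				}
				return e.left // false && x => false
			}
		case "||":
			e.left, e.right = fold(e.left), fold(e.right)
			if e.left.op == "const" {
				if e.left.val {
					return e.left // true || x => true
				}
				return e.right // false || x => x
			}
		}
		return e
	}

	func main() {
		c := func(v bool) *expr { return &expr{op: "const", val: v} }
		e := &expr{op: "&&", left: c(true), right: &expr{op: "||", left: c(false), right: c(true)}}
		fmt.Println(fold(e).val) // true
	}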
diff --git a/src/cmd/compile/internal/gc/types.go b/src/cmd/compile/internal/gc/types.go
deleted file mode 100644
index 748f845..0000000
--- a/src/cmd/compile/internal/gc/types.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-)
-
-// convenience constants
-const (
-	Txxx = types.Txxx
-
-	TINT8    = types.TINT8
-	TUINT8   = types.TUINT8
-	TINT16   = types.TINT16
-	TUINT16  = types.TUINT16
-	TINT32   = types.TINT32
-	TUINT32  = types.TUINT32
-	TINT64   = types.TINT64
-	TUINT64  = types.TUINT64
-	TINT     = types.TINT
-	TUINT    = types.TUINT
-	TUINTPTR = types.TUINTPTR
-
-	TCOMPLEX64  = types.TCOMPLEX64
-	TCOMPLEX128 = types.TCOMPLEX128
-
-	TFLOAT32 = types.TFLOAT32
-	TFLOAT64 = types.TFLOAT64
-
-	TBOOL = types.TBOOL
-
-	TPTR       = types.TPTR
-	TFUNC      = types.TFUNC
-	TSLICE     = types.TSLICE
-	TARRAY     = types.TARRAY
-	TSTRUCT    = types.TSTRUCT
-	TCHAN      = types.TCHAN
-	TMAP       = types.TMAP
-	TINTER     = types.TINTER
-	TFORW      = types.TFORW
-	TANY       = types.TANY
-	TSTRING    = types.TSTRING
-	TUNSAFEPTR = types.TUNSAFEPTR
-
-	// pseudo-types for literals
-	TIDEAL = types.TIDEAL
-	TNIL   = types.TNIL
-	TBLANK = types.TBLANK
-
-	// pseudo-types for frame layout
-	TFUNCARGS = types.TFUNCARGS
-	TCHANARGS = types.TCHANARGS
-
-	NTYPE = types.NTYPE
-)
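
The file above is pure re-export: it lets gc code write TINT8 instead of
types.TINT8. The pattern itself is ordinary Go constant aliasing; a tiny
sketch with illustrative names (etype standing in for types.EType):

	package main

	import "fmt"

	type etype uint8

	const (
		txxx etype = iota
		tint8
		tuint8
	)

	// Re-export under a local name, as types.go did for types.TINT8
	// and friends; the alias keeps the same type and value.
	const TINT8 = tint8

	func main() {
		fmt.Println(TINT8 == tint8) // true
	}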
diff --git a/src/cmd/compile/internal/gc/types_acc.go b/src/cmd/compile/internal/gc/types_acc.go
deleted file mode 100644
index 7240f72..0000000
--- a/src/cmd/compile/internal/gc/types_acc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements conversions between *types.Node and *Node.
-// TODO(gri) try to eliminate these soon
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"unsafe"
-)
-
-func asNode(n *types.Node) *Node      { return (*Node)(unsafe.Pointer(n)) }
-func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }
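
asNode and asTypesNode depend on gc.Node and types.Node having identical
memory layouts, which the compiler maintained as an invariant. A minimal
standalone sketch of the same unsafe.Pointer round-trip (toy types, not the
compiler's):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// a and b must have equivalent memory layouts for the conversion
	// to be valid under the unsafe.Pointer rules.
	type a struct{ x, y int }
	type b struct{ x, y int }

	func asB(p *a) *b { return (*b)(unsafe.Pointer(p)) }
	func asA(p *b) *a { return (*a)(unsafe.Pointer(p)) }

	func main() {
		p := &a{x: 1, y: 2}
		q := asB(p)
		q.y = 42         // a write through the converted pointer...
		fmt.Println(p.y) // ...is visible via the original: 42
	}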
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go
deleted file mode 100644
index ff8cabd..0000000
--- a/src/cmd/compile/internal/gc/universe.go
+++ /dev/null
@@ -1,453 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(gri) This file should probably become part of package types.
-
-package gc
-
-import "cmd/compile/internal/types"
-
-// builtinpkg is a fake package that declares the universe block.
-var builtinpkg *types.Pkg
-
-var basicTypes = [...]struct {
-	name  string
-	etype types.EType
-}{
-	{"int8", TINT8},
-	{"int16", TINT16},
-	{"int32", TINT32},
-	{"int64", TINT64},
-	{"uint8", TUINT8},
-	{"uint16", TUINT16},
-	{"uint32", TUINT32},
-	{"uint64", TUINT64},
-	{"float32", TFLOAT32},
-	{"float64", TFLOAT64},
-	{"complex64", TCOMPLEX64},
-	{"complex128", TCOMPLEX128},
-	{"bool", TBOOL},
-	{"string", TSTRING},
-}
-
-var typedefs = [...]struct {
-	name     string
-	etype    types.EType
-	sameas32 types.EType
-	sameas64 types.EType
-}{
-	{"int", TINT, TINT32, TINT64},
-	{"uint", TUINT, TUINT32, TUINT64},
-	{"uintptr", TUINTPTR, TUINT32, TUINT64},
-}
-
-var builtinFuncs = [...]struct {
-	name string
-	op   Op
-}{
-	{"append", OAPPEND},
-	{"cap", OCAP},
-	{"close", OCLOSE},
-	{"complex", OCOMPLEX},
-	{"copy", OCOPY},
-	{"delete", ODELETE},
-	{"imag", OIMAG},
-	{"len", OLEN},
-	{"make", OMAKE},
-	{"new", ONEW},
-	{"panic", OPANIC},
-	{"print", OPRINT},
-	{"println", OPRINTN},
-	{"real", OREAL},
-	{"recover", ORECOVER},
-}
-
-// isBuiltinFuncName reports whether name matches a builtin function
-// name.
-func isBuiltinFuncName(name string) bool {
-	for _, fn := range &builtinFuncs {
-		if fn.name == name {
-			return true
-		}
-	}
-	return false
-}
-
-var unsafeFuncs = [...]struct {
-	name string
-	op   Op
-}{
-	{"Alignof", OALIGNOF},
-	{"Offsetof", OOFFSETOF},
-	{"Sizeof", OSIZEOF},
-}
-
-// initUniverse initializes the universe block.
-func initUniverse() {
-	lexinit()
-	typeinit()
-	lexinit1()
-}
-
-// lexinit initializes known symbols and the basic types.
-func lexinit() {
-	for _, s := range &basicTypes {
-		etype := s.etype
-		if int(etype) >= len(types.Types) {
-			Fatalf("lexinit: %s bad etype", s.name)
-		}
-		s2 := builtinpkg.Lookup(s.name)
-		t := types.Types[etype]
-		if t == nil {
-			t = types.New(etype)
-			t.Sym = s2
-			if etype != TANY && etype != TSTRING {
-				dowidth(t)
-			}
-			types.Types[etype] = t
-		}
-		s2.Def = asTypesNode(typenod(t))
-		asNode(s2.Def).Name = new(Name)
-	}
-
-	for _, s := range &builtinFuncs {
-		s2 := builtinpkg.Lookup(s.name)
-		s2.Def = asTypesNode(newname(s2))
-		asNode(s2.Def).SetSubOp(s.op)
-	}
-
-	for _, s := range &unsafeFuncs {
-		s2 := unsafepkg.Lookup(s.name)
-		s2.Def = asTypesNode(newname(s2))
-		asNode(s2.Def).SetSubOp(s.op)
-	}
-
-	types.UntypedString = types.New(TSTRING)
-	types.UntypedBool = types.New(TBOOL)
-	types.Types[TANY] = types.New(TANY)
-
-	s := builtinpkg.Lookup("true")
-	s.Def = asTypesNode(nodbool(true))
-	asNode(s.Def).Sym = lookup("true")
-	asNode(s.Def).Name = new(Name)
-	asNode(s.Def).Type = types.UntypedBool
-
-	s = builtinpkg.Lookup("false")
-	s.Def = asTypesNode(nodbool(false))
-	asNode(s.Def).Sym = lookup("false")
-	asNode(s.Def).Name = new(Name)
-	asNode(s.Def).Type = types.UntypedBool
-
-	s = lookup("_")
-	s.Block = -100
-	s.Def = asTypesNode(newname(s))
-	types.Types[TBLANK] = types.New(TBLANK)
-	asNode(s.Def).Type = types.Types[TBLANK]
-	nblank = asNode(s.Def)
-
-	s = builtinpkg.Lookup("_")
-	s.Block = -100
-	s.Def = asTypesNode(newname(s))
-	types.Types[TBLANK] = types.New(TBLANK)
-	asNode(s.Def).Type = types.Types[TBLANK]
-
-	types.Types[TNIL] = types.New(TNIL)
-	s = builtinpkg.Lookup("nil")
-	var v Val
-	v.U = new(NilVal)
-	s.Def = asTypesNode(nodlit(v))
-	asNode(s.Def).Sym = s
-	asNode(s.Def).Name = new(Name)
-
-	s = builtinpkg.Lookup("iota")
-	s.Def = asTypesNode(nod(OIOTA, nil, nil))
-	asNode(s.Def).Sym = s
-	asNode(s.Def).Name = new(Name)
-}
-
-func typeinit() {
-	if Widthptr == 0 {
-		Fatalf("typeinit before betypeinit")
-	}
-
-	for et := types.EType(0); et < NTYPE; et++ {
-		simtype[et] = et
-	}
-
-	types.Types[TPTR] = types.New(TPTR)
-	dowidth(types.Types[TPTR])
-
-	t := types.New(TUNSAFEPTR)
-	types.Types[TUNSAFEPTR] = t
-	t.Sym = unsafepkg.Lookup("Pointer")
-	t.Sym.Def = asTypesNode(typenod(t))
-	asNode(t.Sym.Def).Name = new(Name)
-	dowidth(types.Types[TUNSAFEPTR])
-
-	for et := TINT8; et <= TUINT64; et++ {
-		isInt[et] = true
-	}
-	isInt[TINT] = true
-	isInt[TUINT] = true
-	isInt[TUINTPTR] = true
-
-	isFloat[TFLOAT32] = true
-	isFloat[TFLOAT64] = true
-
-	isComplex[TCOMPLEX64] = true
-	isComplex[TCOMPLEX128] = true
-
-	// initialize okfor
-	for et := types.EType(0); et < NTYPE; et++ {
-		if isInt[et] || et == TIDEAL {
-			okforeq[et] = true
-			okforcmp[et] = true
-			okforarith[et] = true
-			okforadd[et] = true
-			okforand[et] = true
-			okforconst[et] = true
-			issimple[et] = true
-			minintval[et] = new(Mpint)
-			maxintval[et] = new(Mpint)
-		}
-
-		if isFloat[et] {
-			okforeq[et] = true
-			okforcmp[et] = true
-			okforadd[et] = true
-			okforarith[et] = true
-			okforconst[et] = true
-			issimple[et] = true
-			minfltval[et] = newMpflt()
-			maxfltval[et] = newMpflt()
-		}
-
-		if isComplex[et] {
-			okforeq[et] = true
-			okforadd[et] = true
-			okforarith[et] = true
-			okforconst[et] = true
-			issimple[et] = true
-		}
-	}
-
-	issimple[TBOOL] = true
-
-	okforadd[TSTRING] = true
-
-	okforbool[TBOOL] = true
-
-	okforcap[TARRAY] = true
-	okforcap[TCHAN] = true
-	okforcap[TSLICE] = true
-
-	okforconst[TBOOL] = true
-	okforconst[TSTRING] = true
-
-	okforlen[TARRAY] = true
-	okforlen[TCHAN] = true
-	okforlen[TMAP] = true
-	okforlen[TSLICE] = true
-	okforlen[TSTRING] = true
-
-	okforeq[TPTR] = true
-	okforeq[TUNSAFEPTR] = true
-	okforeq[TINTER] = true
-	okforeq[TCHAN] = true
-	okforeq[TSTRING] = true
-	okforeq[TBOOL] = true
-	okforeq[TMAP] = true    // nil only; refined in typecheck
-	okforeq[TFUNC] = true   // nil only; refined in typecheck
-	okforeq[TSLICE] = true  // nil only; refined in typecheck
-	okforeq[TARRAY] = true  // only if element type is comparable; refined in typecheck
-	okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
-
-	okforcmp[TSTRING] = true
-
-	var i int
-	for i = 0; i < len(okfor); i++ {
-		okfor[i] = okfornone[:]
-	}
-
-	// binary
-	okfor[OADD] = okforadd[:]
-	okfor[OAND] = okforand[:]
-	okfor[OANDAND] = okforbool[:]
-	okfor[OANDNOT] = okforand[:]
-	okfor[ODIV] = okforarith[:]
-	okfor[OEQ] = okforeq[:]
-	okfor[OGE] = okforcmp[:]
-	okfor[OGT] = okforcmp[:]
-	okfor[OLE] = okforcmp[:]
-	okfor[OLT] = okforcmp[:]
-	okfor[OMOD] = okforand[:]
-	okfor[OMUL] = okforarith[:]
-	okfor[ONE] = okforeq[:]
-	okfor[OOR] = okforand[:]
-	okfor[OOROR] = okforbool[:]
-	okfor[OSUB] = okforarith[:]
-	okfor[OXOR] = okforand[:]
-	okfor[OLSH] = okforand[:]
-	okfor[ORSH] = okforand[:]
-
-	// unary
-	okfor[OBITNOT] = okforand[:]
-	okfor[ONEG] = okforarith[:]
-	okfor[ONOT] = okforbool[:]
-	okfor[OPLUS] = okforarith[:]
-
-	// special
-	okfor[OCAP] = okforcap[:]
-	okfor[OLEN] = okforlen[:]
-
-	// comparison
-	iscmp[OLT] = true
-	iscmp[OGT] = true
-	iscmp[OGE] = true
-	iscmp[OLE] = true
-	iscmp[OEQ] = true
-	iscmp[ONE] = true
-
-	maxintval[TINT8].SetString("0x7f")
-	minintval[TINT8].SetString("-0x80")
-	maxintval[TINT16].SetString("0x7fff")
-	minintval[TINT16].SetString("-0x8000")
-	maxintval[TINT32].SetString("0x7fffffff")
-	minintval[TINT32].SetString("-0x80000000")
-	maxintval[TINT64].SetString("0x7fffffffffffffff")
-	minintval[TINT64].SetString("-0x8000000000000000")
-
-	maxintval[TUINT8].SetString("0xff")
-	maxintval[TUINT16].SetString("0xffff")
-	maxintval[TUINT32].SetString("0xffffffff")
-	maxintval[TUINT64].SetString("0xffffffffffffffff")
-
-	// f is a valid float if min < f < max. (min and max are not themselves valid.)
-	maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
-	minfltval[TFLOAT32].SetString("-33554431p103")
-	maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
-	minfltval[TFLOAT64].SetString("-18014398509481983p970")
-
-	maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
-	minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
-	maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
-	minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
-
-	types.Types[TINTER] = types.New(TINTER) // empty interface
-
-	// simple aliases
-	simtype[TMAP] = TPTR
-	simtype[TCHAN] = TPTR
-	simtype[TFUNC] = TPTR
-	simtype[TUNSAFEPTR] = TPTR
-
-	slicePtrOffset = 0
-	sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
-	sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
-	sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr))
-
-	// string is the same as a slice without the cap
-	sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
-
-	dowidth(types.Types[TSTRING])
-	dowidth(types.UntypedString)
-}
-
-func makeErrorInterface() *types.Type {
-	field := types.NewField()
-	field.Type = types.Types[TSTRING]
-	f := functypefield(fakeRecvField(), nil, []*types.Field{field})
-
-	field = types.NewField()
-	field.Sym = lookup("Error")
-	field.Type = f
-
-	t := types.New(TINTER)
-	t.SetInterface([]*types.Field{field})
-	return t
-}
-
-func lexinit1() {
-	// error type
-	s := builtinpkg.Lookup("error")
-	types.Errortype = makeErrorInterface()
-	types.Errortype.Sym = s
-	types.Errortype.Orig = makeErrorInterface()
-	s.Def = asTypesNode(typenod(types.Errortype))
-	dowidth(types.Errortype)
-
-	// We create separate byte and rune types for better error messages
-	// rather than just creating type alias *types.Sym's for the uint8 and
-	// int32 types. Hence, (bytetype|runetype).Sym.isAlias() is false.
-	// TODO(gri) Should we get rid of this special case (at the cost
-	// of less informative error messages involving bytes and runes)?
-	// (Alternatively, we could introduce an OTALIAS node representing
-	// type aliases, albeit at the cost of having to deal with it everywhere).
-
-	// byte alias
-	s = builtinpkg.Lookup("byte")
-	types.Bytetype = types.New(TUINT8)
-	types.Bytetype.Sym = s
-	s.Def = asTypesNode(typenod(types.Bytetype))
-	asNode(s.Def).Name = new(Name)
-	dowidth(types.Bytetype)
-
-	// rune alias
-	s = builtinpkg.Lookup("rune")
-	types.Runetype = types.New(TINT32)
-	types.Runetype.Sym = s
-	s.Def = asTypesNode(typenod(types.Runetype))
-	asNode(s.Def).Name = new(Name)
-	dowidth(types.Runetype)
-
-	// backend-dependent builtin types (e.g. int).
-	for _, s := range &typedefs {
-		s1 := builtinpkg.Lookup(s.name)
-
-		sameas := s.sameas32
-		if Widthptr == 8 {
-			sameas = s.sameas64
-		}
-
-		simtype[s.etype] = sameas
-		minfltval[s.etype] = minfltval[sameas]
-		maxfltval[s.etype] = maxfltval[sameas]
-		minintval[s.etype] = minintval[sameas]
-		maxintval[s.etype] = maxintval[sameas]
-
-		t := types.New(s.etype)
-		t.Sym = s1
-		types.Types[s.etype] = t
-		s1.Def = asTypesNode(typenod(t))
-		asNode(s1.Def).Name = new(Name)
-		s1.Origpkg = builtinpkg
-
-		dowidth(t)
-	}
-}
-
-// finishUniverse makes the universe block visible within the current package.
-func finishUniverse() {
-	// Operationally, this is similar to a dot import of builtinpkg, except
-	// that we silently skip symbols that are already declared in the
-	// package block rather than emitting a redeclared symbol error.
-
-	for _, s := range builtinpkg.Syms {
-		if s.Def == nil {
-			continue
-		}
-		s1 := lookup(s.Name)
-		if s1.Def != nil {
-			continue
-		}
-
-		s1.Def = s.Def
-		s1.Block = s.Block
-	}
-
-	nodfp = newname(lookup(".fp"))
-	nodfp.Type = types.Types[TINT32]
-	nodfp.SetClass(PPARAM)
-	nodfp.Name.SetUsed(true)
-}
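
finishUniverse's merge is essentially "copy every defined universe symbol
into the package scope unless the package already declares that name". A
minimal sketch of the same merge using plain maps (the compiler used
*types.Pkg symbol tables):

	package main

	import "fmt"

	func mergeScopes(pkg, universe map[string]string) {
		for name, def := range universe {
			if def == "" {
				continue // undeclared universe symbol
			}
			if _, declared := pkg[name]; declared {
				continue // silently skip; no redeclaration error
			}
			pkg[name] = def
		}
	}

	func main() {
		pkg := map[string]string{"true": "local"} // shadows the builtin
		universe := map[string]string{"true": "builtin", "len": "builtin"}
		mergeScopes(pkg, universe)
		fmt.Println(pkg["true"], pkg["len"]) // local builtin
	}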
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
deleted file mode 100644
index 2233961..0000000
--- a/src/cmd/compile/internal/gc/unsafe.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n *Node) int64 {
-	switch n.Op {
-	case OALIGNOF, OSIZEOF:
-		n.Left = typecheck(n.Left, ctxExpr)
-		n.Left = defaultlit(n.Left, nil)
-		tr := n.Left.Type
-		if tr == nil {
-			return 0
-		}
-		dowidth(tr)
-		if n.Op == OALIGNOF {
-			return int64(tr.Align)
-		}
-		return tr.Width
-
-	case OOFFSETOF:
-		// must be a selector.
-		if n.Left.Op != OXDOT {
-			yyerror("invalid expression %v", n)
-			return 0
-		}
-
-		// Remember the base of the selector to find it again after dot insertion.
-		// Since the selector's operand may be mutated by typechecking, typecheck
-		// it explicitly first to track it correctly.
-		n.Left.Left = typecheck(n.Left.Left, ctxExpr)
-		base := n.Left.Left
-
-		n.Left = typecheck(n.Left, ctxExpr)
-		if n.Left.Type == nil {
-			return 0
-		}
-		switch n.Left.Op {
-		case ODOT, ODOTPTR:
-			break
-		case OCALLPART:
-			yyerror("invalid expression %v: argument is a method value", n)
-			return 0
-		default:
-			yyerror("invalid expression %v", n)
-			return 0
-		}
-
-		// Sum offsets for dots until we reach base.
-		var v int64
-		for r := n.Left; r != base; r = r.Left {
-			switch r.Op {
-			case ODOTPTR:
-				// For Offsetof(s.f), s may itself be a pointer,
-				// but accessing f must not otherwise involve
-				// indirection via embedded pointer types.
-				if r.Left != base {
-					yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
-					return 0
-				}
-				fallthrough
-			case ODOT:
-				v += r.Xoffset
-			default:
-				Dump("unsafenmagic", n.Left)
-				Fatalf("impossible %#v node after dot insertion", r.Op)
-			}
-		}
-		return v
-	}
-
-	Fatalf("unexpected op %v", n.Op)
-	return 0
-}
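
evalunsafe is the constant evaluator behind unsafe.Alignof, Offsetof, and
Sizeof; for Offsetof of a field promoted through embedded value structs, the
offsets sum exactly as the loop above does. That is observable from user
code (values shown are for amd64):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type inner struct {
		a byte
		b int64
	}

	type outer struct {
		pad byte
		inner
	}

	func main() {
		var s outer
		fmt.Println(unsafe.Sizeof(s.b), unsafe.Alignof(s.b)) // 8 8
		// b is promoted through an embedded *value* field, so its
		// offset is the embedded struct's offset plus b's offset
		// within it, which is the same sum evalunsafe computes.
		fmt.Println(unsafe.Offsetof(s.b) ==
			unsafe.Offsetof(s.inner)+unsafe.Offsetof(s.inner.b)) // true
	}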
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index 58be2f8..4baddbc 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -8,59 +8,35 @@
 	"os"
 	"runtime"
 	"runtime/pprof"
+
+	"cmd/compile/internal/base"
 )
 
-// Line returns n's position as a string. If n has been inlined,
-// it uses the outermost position where n has been inlined.
-func (n *Node) Line() string {
-	return linestr(n.Pos)
-}
-
-var atExitFuncs []func()
-
-func atExit(f func()) {
-	atExitFuncs = append(atExitFuncs, f)
-}
-
-func Exit(code int) {
-	for i := len(atExitFuncs) - 1; i >= 0; i-- {
-		f := atExitFuncs[i]
-		atExitFuncs = atExitFuncs[:i]
-		f()
-	}
-	os.Exit(code)
-}
-
 var (
-	blockprofile   string
-	cpuprofile     string
-	memprofile     string
 	memprofilerate int64
-	traceprofile   string
 	traceHandler   func(string)
-	mutexprofile   string
 )
 
 func startProfile() {
-	if cpuprofile != "" {
-		f, err := os.Create(cpuprofile)
+	if base.Flag.CPUProfile != "" {
+		f, err := os.Create(base.Flag.CPUProfile)
 		if err != nil {
-			Fatalf("%v", err)
+			base.Fatalf("%v", err)
 		}
 		if err := pprof.StartCPUProfile(f); err != nil {
-			Fatalf("%v", err)
+			base.Fatalf("%v", err)
 		}
-		atExit(pprof.StopCPUProfile)
+		base.AtExit(pprof.StopCPUProfile)
 	}
-	if memprofile != "" {
+	if base.Flag.MemProfile != "" {
 		if memprofilerate != 0 {
 			runtime.MemProfileRate = int(memprofilerate)
 		}
-		f, err := os.Create(memprofile)
+		f, err := os.Create(base.Flag.MemProfile)
 		if err != nil {
-			Fatalf("%v", err)
+			base.Fatalf("%v", err)
 		}
-		atExit(func() {
+		base.AtExit(func() {
 			// Profile all outstanding allocations.
 			runtime.GC()
 			// compilebench parses the memory profile to extract memstats,
@@ -68,36 +44,36 @@
 			// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
 			const writeLegacyFormat = 1
 			if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
-				Fatalf("%v", err)
+				base.Fatalf("%v", err)
 			}
 		})
 	} else {
 		// Not doing memory profiling; disable it entirely.
 		runtime.MemProfileRate = 0
 	}
-	if blockprofile != "" {
-		f, err := os.Create(blockprofile)
+	if base.Flag.BlockProfile != "" {
+		f, err := os.Create(base.Flag.BlockProfile)
 		if err != nil {
-			Fatalf("%v", err)
+			base.Fatalf("%v", err)
 		}
 		runtime.SetBlockProfileRate(1)
-		atExit(func() {
+		base.AtExit(func() {
 			pprof.Lookup("block").WriteTo(f, 0)
 			f.Close()
 		})
 	}
-	if mutexprofile != "" {
-		f, err := os.Create(mutexprofile)
+	if base.Flag.MutexProfile != "" {
+		f, err := os.Create(base.Flag.MutexProfile)
 		if err != nil {
-			Fatalf("%v", err)
+			base.Fatalf("%v", err)
 		}
 		startMutexProfiling()
-		atExit(func() {
+		base.AtExit(func() {
 			pprof.Lookup("mutex").WriteTo(f, 0)
 			f.Close()
 		})
 	}
-	if traceprofile != "" && traceHandler != nil {
-		traceHandler(traceprofile)
+	if base.Flag.TraceProfile != "" && traceHandler != nil {
+		traceHandler(base.Flag.TraceProfile)
 	}
 }
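
The structure preserved by this change (create the file, start the profile,
register the stop via an at-exit hook) is plain runtime/pprof usage. A
minimal sketch, independent of the compiler's base package:

	package main

	import (
		"log"
		"os"
		"runtime/pprof"
	)

	func work() {
		s := 0
		for i := 0; i < 100000000; i++ {
			s += i
		}
		_ = s
	}

	func main() {
		f, err := os.Create("cpu.prof")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		// The compiler registers the stop via base.AtExit because it
		// exits through base.Exit; in an ordinary program defer works.
		defer pprof.StopCPUProfile()

		work()
	}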
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
deleted file mode 100644
index 98ebb23..0000000
--- a/src/cmd/compile/internal/gc/walk.go
+++ /dev/null
@@ -1,4112 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/objabi"
-	"cmd/internal/sys"
-	"encoding/binary"
-	"fmt"
-	"strings"
-)
-
-// The constant is known to the runtime.
-const tmpstringbufsize = 32
-const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-
-func walk(fn *Node) {
-	Curfn = fn
-
-	if Debug.W != 0 {
-		s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
-		dumplist(s, Curfn.Nbody)
-	}
-
-	lno := lineno
-
-	// Final typecheck for any unused variables.
-	for i, ln := range fn.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
-			ln = typecheck(ln, ctxExpr|ctxAssign)
-			fn.Func.Dcl[i] = ln
-		}
-	}
-
-	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
-	for _, ln := range fn.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
-			ln.Name.Defn.Left.Name.SetUsed(true)
-		}
-	}
-
-	for _, ln := range fn.Func.Dcl {
-		if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
-			continue
-		}
-		if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
-			if defn.Left.Name.Used() {
-				continue
-			}
-			yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym)
-			defn.Left.Name.SetUsed(true) // suppress repeats
-		} else {
-			yyerrorl(ln.Pos, "%v declared but not used", ln.Sym)
-		}
-	}
-
-	lineno = lno
-	if nerrors != 0 {
-		return
-	}
-	walkstmtlist(Curfn.Nbody.Slice())
-	if Debug.W != 0 {
-		s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
-		dumplist(s, Curfn.Nbody)
-	}
-
-	zeroResults()
-	heapmoves()
-	if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
-		s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
-		dumplist(s, Curfn.Func.Enter)
-	}
-}
-
-func walkstmtlist(s []*Node) {
-	for i := range s {
-		s[i] = walkstmt(s[i])
-	}
-}
-
-func paramoutheap(fn *Node) bool {
-	for _, ln := range fn.Func.Dcl {
-		switch ln.Class() {
-		case PPARAMOUT:
-			if ln.isParamStackCopy() || ln.Name.Addrtaken() {
-				return true
-			}
-
-		case PAUTO:
-			// stop early - parameters are over
-			return false
-		}
-	}
-
-	return false
-}
-
-// The result of walkstmt MUST be assigned back to n, e.g.
-// 	n.Left = walkstmt(n.Left)
-func walkstmt(n *Node) *Node {
-	if n == nil {
-		return n
-	}
-
-	setlineno(n)
-
-	walkstmtlist(n.Ninit.Slice())
-
-	switch n.Op {
-	default:
-		if n.Op == ONAME {
-			yyerror("%v is not a top level statement", n.Sym)
-		} else {
-			yyerror("%v is not a top level statement", n.Op)
-		}
-		Dump("nottop", n)
-
-	case OAS,
-		OASOP,
-		OAS2,
-		OAS2DOTTYPE,
-		OAS2RECV,
-		OAS2FUNC,
-		OAS2MAPR,
-		OCLOSE,
-		OCOPY,
-		OCALLMETH,
-		OCALLINTER,
-		OCALL,
-		OCALLFUNC,
-		ODELETE,
-		OSEND,
-		OPRINT,
-		OPRINTN,
-		OPANIC,
-		OEMPTY,
-		ORECOVER,
-		OGETG:
-		if n.Typecheck() == 0 {
-			Fatalf("missing typecheck: %+v", n)
-		}
-		wascopy := n.Op == OCOPY
-		init := n.Ninit
-		n.Ninit.Set(nil)
-		n = walkexpr(n, &init)
-		n = addinit(n, init.Slice())
-		if wascopy && n.Op == OCONVNOP {
-			n.Op = OEMPTY // don't leave plain values as statements.
-		}
-
-	// special case for a receive where we throw away
-	// the value received.
-	case ORECV:
-		if n.Typecheck() == 0 {
-			Fatalf("missing typecheck: %+v", n)
-		}
-		init := n.Ninit
-		n.Ninit.Set(nil)
-
-		n.Left = walkexpr(n.Left, &init)
-		n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
-		n = walkexpr(n, &init)
-
-		n = addinit(n, init.Slice())
-
-	case OBREAK,
-		OCONTINUE,
-		OFALL,
-		OGOTO,
-		OLABEL,
-		ODCLCONST,
-		ODCLTYPE,
-		OCHECKNIL,
-		OVARDEF,
-		OVARKILL,
-		OVARLIVE:
-		break
-
-	case ODCL:
-		v := n.Left
-		if v.Class() == PAUTOHEAP {
-			if compiling_runtime {
-				yyerror("%v escapes to heap, not allowed in runtime", v)
-			}
-			if prealloc[v] == nil {
-				prealloc[v] = callnew(v.Type)
-			}
-			nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
-			nn.SetColas(true)
-			nn = typecheck(nn, ctxStmt)
-			return walkstmt(nn)
-		}
-
-	case OBLOCK:
-		walkstmtlist(n.List.Slice())
-
-	case OCASE:
-		yyerror("case statement out of place")
-
-	case ODEFER:
-		Curfn.Func.SetHasDefer(true)
-		Curfn.Func.numDefers++
-		if Curfn.Func.numDefers > maxOpenDefers {
-			// Don't allow open-coded defers if there are more than
-			// 8 defers in the function, since we use a single
-			// byte to record active defers.
-			Curfn.Func.SetOpenCodedDeferDisallowed(true)
-		}
-		if n.Esc != EscNever {
-			// If n.Esc is not EscNever, then this defer occurs in a loop,
-			// so open-coded defers cannot be used in this function.
-			Curfn.Func.SetOpenCodedDeferDisallowed(true)
-		}
-		fallthrough
-	case OGO:
-		switch n.Left.Op {
-		case OPRINT, OPRINTN:
-			n.Left = wrapCall(n.Left, &n.Ninit)
-
-		case ODELETE:
-			if mapfast(n.Left.List.First().Type) == mapslow {
-				n.Left = wrapCall(n.Left, &n.Ninit)
-			} else {
-				n.Left = walkexpr(n.Left, &n.Ninit)
-			}
-
-		case OCOPY:
-			n.Left = copyany(n.Left, &n.Ninit, true)
-
-		case OCALLFUNC, OCALLMETH, OCALLINTER:
-			if n.Left.Nbody.Len() > 0 {
-				n.Left = wrapCall(n.Left, &n.Ninit)
-			} else {
-				n.Left = walkexpr(n.Left, &n.Ninit)
-			}
-
-		default:
-			n.Left = walkexpr(n.Left, &n.Ninit)
-		}
-
-	case OFOR, OFORUNTIL:
-		if n.Left != nil {
-			walkstmtlist(n.Left.Ninit.Slice())
-			init := n.Left.Ninit
-			n.Left.Ninit.Set(nil)
-			n.Left = walkexpr(n.Left, &init)
-			n.Left = addinit(n.Left, init.Slice())
-		}
-
-		n.Right = walkstmt(n.Right)
-		if n.Op == OFORUNTIL {
-			walkstmtlist(n.List.Slice())
-		}
-		walkstmtlist(n.Nbody.Slice())
-
-	case OIF:
-		n.Left = walkexpr(n.Left, &n.Ninit)
-		walkstmtlist(n.Nbody.Slice())
-		walkstmtlist(n.Rlist.Slice())
-
-	case ORETURN:
-		Curfn.Func.numReturns++
-		if n.List.Len() == 0 {
-			break
-		}
-		if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) || Curfn.Func.HasDefer() {
-			// assign to the function out parameters,
-			// so that reorder3 can fix up conflicts
-			var rl []*Node
-
-			for _, ln := range Curfn.Func.Dcl {
-				cl := ln.Class()
-				if cl == PAUTO || cl == PAUTOHEAP {
-					break
-				}
-				if cl == PPARAMOUT {
-					if ln.isParamStackCopy() {
-						ln = walkexpr(typecheck(nod(ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil)
-					}
-					rl = append(rl, ln)
-				}
-			}
-
-			if got, want := n.List.Len(), len(rl); got != want {
-				// order should have rewritten multi-value function calls
-				// with explicit OAS2FUNC nodes.
-				Fatalf("expected %v return arguments, have %v", want, got)
-			}
-
-			// move function calls out, to make reorder3's job easier.
-			walkexprlistsafe(n.List.Slice(), &n.Ninit)
-
-			ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
-			n.List.Set(reorder3(ll))
-			break
-		}
-		walkexprlist(n.List.Slice(), &n.Ninit)
-
-		// For each return parameter (lhs), assign the corresponding result (rhs).
-		lhs := Curfn.Type.Results()
-		rhs := n.List.Slice()
-		res := make([]*Node, lhs.NumFields())
-		for i, nl := range lhs.FieldSlice() {
-			nname := asNode(nl.Nname)
-			if nname.isParamHeapCopy() {
-				nname = nname.Name.Param.Stackcopy
-			}
-			a := nod(OAS, nname, rhs[i])
-			res[i] = convas(a, &n.Ninit)
-		}
-		n.List.Set(res)
-
-	case ORETJMP:
-		break
-
-	case OINLMARK:
-		break
-
-	case OSELECT:
-		walkselect(n)
-
-	case OSWITCH:
-		walkswitch(n)
-
-	case ORANGE:
-		n = walkrange(n)
-	}
-
-	if n.Op == ONAME {
-		Fatalf("walkstmt ended up with name: %+v", n)
-	}
-	return n
-}
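
The "MUST be assigned back" contract exists because walkstmt may return a
different node than it was given (the ODCL and ORANGE cases above replace
the statement wholesale). The shape of that convention, reduced to a toy
rewriter:

	package main

	import "fmt"

	type node struct {
		op   string
		kids []*node
	}

	// rewrite may return a replacement node, so every caller must
	// store the result back, just like n = walkstmt(n).
	func rewrite(n *node) *node {
		if n == nil {
			return n
		}
		for i := range n.kids {
			n.kids[i] = rewrite(n.kids[i]) // assign back
		}
		if n.op == "x++" {
			return &node{op: "x = x + 1"} // wholesale replacement
		}
		return n
	}

	func main() {
		root := &node{op: "block", kids: []*node{{op: "x++"}}}
		root = rewrite(root)
		fmt.Println(root.kids[0].op) // x = x + 1
	}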
-
-// walk the whole tree of the body of an
-// expression or simple statement.
-// the types of expressions are calculated.
-// compile-time constants are evaluated.
-// complex side effects like statements are appended to init.
-func walkexprlist(s []*Node, init *Nodes) {
-	for i := range s {
-		s[i] = walkexpr(s[i], init)
-	}
-}
-
-func walkexprlistsafe(s []*Node, init *Nodes) {
-	for i, n := range s {
-		s[i] = safeexpr(n, init)
-		s[i] = walkexpr(s[i], init)
-	}
-}
-
-func walkexprlistcheap(s []*Node, init *Nodes) {
-	for i, n := range s {
-		s[i] = cheapexpr(n, init)
-		s[i] = walkexpr(s[i], init)
-	}
-}
-
-// convFuncName builds the runtime function name for interface conversion.
-// It also reports whether the function expects the data by address.
-// Not all names are possible. For example, we never generate convE2E or convE2I.
-func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
-	tkind := to.Tie()
-	switch from.Tie() {
-	case 'I':
-		if tkind == 'I' {
-			return "convI2I", false
-		}
-	case 'T':
-		switch {
-		case from.Size() == 2 && from.Align == 2:
-			return "convT16", false
-		case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
-			return "convT32", false
-		case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
-			return "convT64", false
-		}
-		if sc := from.SoleComponent(); sc != nil {
-			switch {
-			case sc.IsString():
-				return "convTstring", false
-			case sc.IsSlice():
-				return "convTslice", false
-			}
-		}
-
-		switch tkind {
-		case 'E':
-			if !from.HasPointers() {
-				return "convT2Enoptr", true
-			}
-			return "convT2E", true
-		case 'I':
-			if !from.HasPointers() {
-				return "convT2Inoptr", true
-			}
-			return "convT2I", true
-		}
-	}
-	Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
-	panic("unreachable")
-}
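
convFuncName's choice comes down to the shape of the source type: small
pointer-free values get by-value helpers, everything else goes by address.
A compressed sketch of that selection over a toy type descriptor (the real
code consults *types.Type; the E/I split and the string/slice
sole-component cases are omitted, and alignments are hard-coded for a
64-bit target):

	package main

	import "fmt"

	type desc struct {
		kind        byte // 'I' interface, 'T' concrete
		size, align int64
		hasPointers bool
	}

	// convName picks a runtime helper by the source type's shape and
	// reports whether the helper takes the value by address.
	func convName(from desc) (string, bool) {
		if from.kind == 'I' {
			return "convI2I", false
		}
		switch {
		case from.size == 2 && from.align == 2:
			return "convT16", false
		case from.size == 4 && from.align == 4 && !from.hasPointers:
			return "convT32", false
		case from.size == 8 && from.align == 8 && !from.hasPointers:
			return "convT64", false
		}
		if !from.hasPointers {
			return "convT2Enoptr", true
		}
		return "convT2E", true
	}

	func main() {
		u32 := desc{kind: 'T', size: 4, align: 4}
		fmt.Println(convName(u32)) // convT32 false
	}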
-
-// The result of walkexpr MUST be assigned back to n, e.g.
-// 	n.Left = walkexpr(n.Left, init)
-func walkexpr(n *Node, init *Nodes) *Node {
-	if n == nil {
-		return n
-	}
-
-	// Eagerly checkwidth all expressions for the back end.
-	if n.Type != nil && !n.Type.WidthCalculated() {
-		switch n.Type.Etype {
-		case TBLANK, TNIL, TIDEAL:
-		default:
-			checkwidth(n.Type)
-		}
-	}
-
-	if init == &n.Ninit {
-		// not okay to use n->ninit when walking n,
-		// because we might replace n with some other node
-		// and would lose the init list.
-		Fatalf("walkexpr init == &n->ninit")
-	}
-
-	if n.Ninit.Len() != 0 {
-		walkstmtlist(n.Ninit.Slice())
-		init.AppendNodes(&n.Ninit)
-	}
-
-	lno := setlineno(n)
-
-	if Debug.w > 1 {
-		Dump("before walk expr", n)
-	}
-
-	if n.Typecheck() != 1 {
-		Fatalf("missed typecheck: %+v", n)
-	}
-
-	if n.Type.IsUntyped() {
-		Fatalf("expression has untyped type: %+v", n)
-	}
-
-	if n.Op == ONAME && n.Class() == PAUTOHEAP {
-		nn := nod(ODEREF, n.Name.Param.Heapaddr, nil)
-		nn = typecheck(nn, ctxExpr)
-		nn = walkexpr(nn, init)
-		nn.Left.MarkNonNil()
-		return nn
-	}
-
-opswitch:
-	switch n.Op {
-	default:
-		Dump("walk", n)
-		Fatalf("walkexpr: switch 1 unknown op %+S", n)
-
-	case ONONAME, OEMPTY, OGETG, ONEWOBJ:
-
-	case OTYPE, ONAME, OLITERAL:
-		// TODO(mdempsky): Just return n; see discussion on CL 38655.
-		// Perhaps refactor to use Node.mayBeShared for these instead.
-		// If these return early, make sure to still call
-		// stringsym for constant strings.
-
-	case ONOT, ONEG, OPLUS, OBITNOT, OREAL, OIMAG, ODOTMETH, ODOTINTER,
-		ODEREF, OSPTR, OITAB, OIDATA, OADDR:
-		n.Left = walkexpr(n.Left, init)
-
-	case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-	case ODOT, ODOTPTR:
-		usefield(n)
-		n.Left = walkexpr(n.Left, init)
-
-	case ODOTTYPE, ODOTTYPE2:
-		n.Left = walkexpr(n.Left, init)
-		// Set up interface type addresses for back end.
-		n.Right = typename(n.Type)
-		if n.Op == ODOTTYPE {
-			n.Right.Right = typename(n.Left.Type)
-		}
-		if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
-			n.List.Set1(itabname(n.Type, n.Left.Type))
-		}
-
-	case OLEN, OCAP:
-		if isRuneCount(n) {
-			// Replace len([]rune(string)) with runtime.countrunes(string).
-			n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING]))
-			break
-		}
-
-		n.Left = walkexpr(n.Left, init)
-
-		// replace len(*[10]int) with 10.
-		// delayed until now to preserve side effects.
-		t := n.Left.Type
-
-		if t.IsPtr() {
-			t = t.Elem()
-		}
-		if t.IsArray() {
-			safeexpr(n.Left, init)
-			setintconst(n, t.NumElem())
-			n.SetTypecheck(1)
-		}
-
-	case OCOMPLEX:
-		// Use results from call expression as arguments for complex.
-		if n.Left == nil && n.Right == nil {
-			n.Left = n.List.First()
-			n.Right = n.List.Second()
-		}
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-	case OEQ, ONE, OLT, OLE, OGT, OGE:
-		n = walkcompare(n, init)
-
-	case OANDAND, OOROR:
-		n.Left = walkexpr(n.Left, init)
-
-		// cannot put side effects from n.Right on init,
-		// because they cannot run before n.Left is checked.
-		// save elsewhere and store on the eventual n.Right.
-		var ll Nodes
-
-		n.Right = walkexpr(n.Right, &ll)
-		n.Right = addinit(n.Right, ll.Slice())
-
-	case OPRINT, OPRINTN:
-		n = walkprint(n, init)
-
-	case OPANIC:
-		n = mkcall("gopanic", nil, init, n.Left)
-
-	case ORECOVER:
-		n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
-
-	case OCLOSUREVAR, OCFUNC:
-
-	case OCALLINTER, OCALLFUNC, OCALLMETH:
-		if n.Op == OCALLINTER || n.Op == OCALLMETH {
-			// We expect both interface call reflect.Type.Method and concrete
-			// call reflect.(*rtype).Method.
-			usemethod(n)
-		}
-		if n.Op == OCALLINTER {
-			markUsedIfaceMethod(n)
-		}
-
-		if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE {
-			// Transform direct call of a closure to call of a normal function.
-			// transformclosure already did all preparation work.
-
-			// Prepend captured variables to argument list.
-			n.List.Prepend(n.Left.Func.Enter.Slice()...)
-
-			n.Left.Func.Enter.Set(nil)
-
-			// Replace OCLOSURE with ONAME/PFUNC.
-			n.Left = n.Left.Func.Closure.Func.Nname
-
-			// Update type of OCALLFUNC node.
-			// Output arguments haven't changed, but their offsets could have.
-			if n.Left.Type.NumResults() == 1 {
-				n.Type = n.Left.Type.Results().Field(0).Type
-			} else {
-				n.Type = n.Left.Type.Results()
-			}
-		}
-
-		walkCall(n, init)
-
-	case OAS, OASOP:
-		init.AppendNodes(&n.Ninit)
-
-		// Recognize m[k] = append(m[k], ...) so we can reuse
-		// the mapassign call.
-		mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND
-		if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
-			Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
-		}
-
-		n.Left = walkexpr(n.Left, init)
-		n.Left = safeexpr(n.Left, init)
-
-		if mapAppend {
-			n.Right.List.SetFirst(n.Left)
-		}
-
-		if n.Op == OASOP {
-			// Rewrite x op= y into x = x op y.
-			n.Right = nod(n.SubOp(), n.Left, n.Right)
-			n.Right = typecheck(n.Right, ctxExpr)
-
-			n.Op = OAS
-			n.ResetAux()
-		}
-
-		if oaslit(n, init) {
-			break
-		}
-
-		if n.Right == nil {
-			// TODO(austin): Check all "implicit zeroing"
-			break
-		}
-
-		if !instrumenting && isZero(n.Right) {
-			break
-		}
-
-		switch n.Right.Op {
-		default:
-			n.Right = walkexpr(n.Right, init)
-
-		case ORECV:
-			// x = <-c; n.Left is x, n.Right.Left is c.
-			// order.stmt made sure x is addressable.
-			n.Right.Left = walkexpr(n.Right.Left, init)
-
-			n1 := nod(OADDR, n.Left, nil)
-			r := n.Right.Left // the channel
-			n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
-			n = walkexpr(n, init)
-			break opswitch
-
-		case OAPPEND:
-			// x = append(...)
-			r := n.Right
-			if r.Type.Elem().NotInHeap() {
-				yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
-			}
-			switch {
-			case isAppendOfMake(r):
-				// x = append(y, make([]T, y)...)
-				r = extendslice(r, init)
-			case r.IsDDD():
-				r = appendslice(r, init) // also works for append(slice, string).
-			default:
-				r = walkappend(r, init, n)
-			}
-			n.Right = r
-			if r.Op == OAPPEND {
-				// Left in place for back end.
-				// Do not add a new write barrier.
-				// Set up address of type for back end.
-				r.Left = typename(r.Type.Elem())
-				break opswitch
-			}
-			// Otherwise, lowered for race detector.
-			// Treat as ordinary assignment.
-		}
-
-		if n.Left != nil && n.Right != nil {
-			n = convas(n, init)
-		}
-
-	case OAS2:
-		init.AppendNodes(&n.Ninit)
-		walkexprlistsafe(n.List.Slice(), init)
-		walkexprlistsafe(n.Rlist.Slice(), init)
-		ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
-		ll = reorder3(ll)
-		n = liststmt(ll)
-
-	// a,b,... = fn()
-	case OAS2FUNC:
-		init.AppendNodes(&n.Ninit)
-
-		r := n.Right
-		walkexprlistsafe(n.List.Slice(), init)
-		r = walkexpr(r, init)
-
-		if isIntrinsicCall(r) {
-			n.Right = r
-			break
-		}
-		init.Append(r)
-
-		ll := ascompatet(n.List, r.Type)
-		n = liststmt(ll)
-
-	// x, y = <-c
-	// order.stmt made sure x is addressable or blank.
-	case OAS2RECV:
-		init.AppendNodes(&n.Ninit)
-
-		r := n.Right
-		walkexprlistsafe(n.List.Slice(), init)
-		r.Left = walkexpr(r.Left, init)
-		var n1 *Node
-		if n.List.First().isBlank() {
-			n1 = nodnil()
-		} else {
-			n1 = nod(OADDR, n.List.First(), nil)
-		}
-		fn := chanfn("chanrecv2", 2, r.Left.Type)
-		ok := n.List.Second()
-		call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1)
-		n = nod(OAS, ok, call)
-		n = typecheck(n, ctxStmt)
-
-	// a,b = m[i]
-	case OAS2MAPR:
-		init.AppendNodes(&n.Ninit)
-
-		r := n.Right
-		walkexprlistsafe(n.List.Slice(), init)
-		r.Left = walkexpr(r.Left, init)
-		r.Right = walkexpr(r.Right, init)
-		t := r.Left.Type
-
-		fast := mapfast(t)
-		var key *Node
-		if fast != mapslow {
-			// fast versions take key by value
-			key = r.Right
-		} else {
-			// standard version takes key by reference
-			// order.expr made sure key is addressable.
-			key = nod(OADDR, r.Right, nil)
-		}
-
-		// from:
-		//   a,b = m[i]
-		// to:
-		//   var,b = mapaccess2*(t, m, i)
-		//   a = *var
-		a := n.List.First()
-
-		if w := t.Elem().Width; w <= zeroValSize {
-			fn := mapfn(mapaccess2[fast], t)
-			r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
-		} else {
-			fn := mapfn("mapaccess2_fat", t)
-			z := zeroaddr(w)
-			r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
-		}
-
-		// mapaccess2* returns a typed bool, but due to spec changes,
-		// the boolean result of i.(T) is now untyped, so we make it the
-		// same type as the variable on the lhs.
-		if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() {
-			r.Type.Field(1).Type = ok.Type
-		}
-		n.Right = r
-		n.Op = OAS2FUNC
-
-		// don't generate a = *var if a is _
-		if !a.isBlank() {
-			var_ := temp(types.NewPtr(t.Elem()))
-			var_.SetTypecheck(1)
-			var_.MarkNonNil() // mapaccess always returns a non-nil pointer
-			n.List.SetFirst(var_)
-			n = walkexpr(n, init)
-			init.Append(n)
-			n = nod(OAS, a, nod(ODEREF, var_, nil))
-		}
-
-		n = typecheck(n, ctxStmt)
-		n = walkexpr(n, init)
-
-	case ODELETE:
-		init.AppendNodes(&n.Ninit)
-		map_ := n.List.First()
-		key := n.List.Second()
-		map_ = walkexpr(map_, init)
-		key = walkexpr(key, init)
-
-		t := map_.Type
-		fast := mapfast(t)
-		if fast == mapslow {
-			// order.stmt made sure key is addressable.
-			key = nod(OADDR, key, nil)
-		}
-		n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
-
-	case OAS2DOTTYPE:
-		walkexprlistsafe(n.List.Slice(), init)
-		n.Right = walkexpr(n.Right, init)
-
-	case OCONVIFACE:
-		n.Left = walkexpr(n.Left, init)
-
-		fromType := n.Left.Type
-		toType := n.Type
-
-		if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _())
-			markTypeUsedInInterface(fromType, Curfn.Func.lsym)
-		}
-
-		// typeword generates the type word of the interface value.
-		typeword := func() *Node {
-			if toType.IsEmptyInterface() {
-				return typename(fromType)
-			}
-			return itabname(fromType, toType)
-		}
-
-		// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
-		if isdirectiface(fromType) {
-			l := nod(OEFACE, typeword(), n.Left)
-			l.Type = toType
-			l.SetTypecheck(n.Typecheck())
-			n = l
-			break
-		}
-
-		if staticuint64s == nil {
-			staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
-			staticuint64s.SetClass(PEXTERN)
-			// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
-			// individual bytes.
-			staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
-			zerobase = newname(Runtimepkg.Lookup("zerobase"))
-			zerobase.SetClass(PEXTERN)
-			zerobase.Type = types.Types[TUINTPTR]
-		}
-
-		// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
-		// by using an existing addressable value identical to n.Left
-		// or creating one on the stack.
-		var value *Node
-		switch {
-		case fromType.Size() == 0:
-			// n.Left is zero-sized. Use zerobase.
-			cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
-			value = zerobase
-		case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
-			// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
-			// and staticuint64s[n.Left * 8 + 7] on big-endian.
-			n.Left = cheapexpr(n.Left, init)
-			// byteindex widens n.Left so that the multiplication doesn't overflow.
-			index := nod(OLSH, byteindex(n.Left), nodintconst(3))
-			if thearch.LinkArch.ByteOrder == binary.BigEndian {
-				index = nod(OADD, index, nodintconst(7))
-			}
-			value = nod(OINDEX, staticuint64s, index)
-			value.SetBounded(true)
-		case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
-			// n.Left is a readonly global; use it directly.
-			value = n.Left
-		case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024:
-			// n.Left does not escape. Use a stack temporary initialized to n.Left.
-			value = temp(fromType)
-			init.Append(typecheck(nod(OAS, value, n.Left), ctxStmt))
-		}
-
-		if value != nil {
-			// Value is identical to n.Left.
-			// Construct the interface directly: {type/itab, &value}.
-			l := nod(OEFACE, typeword(), typecheck(nod(OADDR, value, nil), ctxExpr))
-			l.Type = toType
-			l.SetTypecheck(n.Typecheck())
-			n = l
-			break
-		}
-
-		// Implement interface to empty interface conversion.
-		// tmp = i.itab
-		// if tmp != nil {
-		//    tmp = tmp.type
-		// }
-		// e = iface{tmp, i.data}
-		if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
-			// Evaluate the input interface.
-			c := temp(fromType)
-			init.Append(nod(OAS, c, n.Left))
-
-			// Get the itab out of the interface.
-			tmp := temp(types.NewPtr(types.Types[TUINT8]))
-			init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), ctxExpr)))
-
-			// Get the type out of the itab.
-			nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), ctxExpr), nil)
-			nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
-			init.Append(nif)
-
-			// Build the result.
-			e := nod(OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[TUINT8])))
-			e.Type = toType // assign type manually, typecheck doesn't understand OEFACE.
-			e.SetTypecheck(1)
-			n = e
-			break
-		}
-
-		fnname, needsaddr := convFuncName(fromType, toType)
-
-		if !needsaddr && !fromType.IsInterface() {
-			// Use a specialized conversion routine that only returns a data pointer.
-			// ptr = convT2X(val)
-			// e = iface{typ/tab, ptr}
-			fn := syslook(fnname)
-			dowidth(fromType)
-			fn = substArgTypes(fn, fromType)
-			dowidth(fn.Type)
-			call := nod(OCALL, fn, nil)
-			call.List.Set1(n.Left)
-			call = typecheck(call, ctxExpr)
-			call = walkexpr(call, init)
-			call = safeexpr(call, init)
-			e := nod(OEFACE, typeword(), call)
-			e.Type = toType
-			e.SetTypecheck(1)
-			n = e
-			break
-		}
-
-		var tab *Node
-		if fromType.IsInterface() {
-			// convI2I
-			tab = typename(toType)
-		} else {
-			// convT2x
-			tab = typeword()
-		}
-
-		v := n.Left
-		if needsaddr {
-			// Types of large or unknown size are passed by reference.
-			// Orderexpr arranged for n.Left to be a temporary for all
-			// the conversions it could see. Comparison of an interface
-			// with a non-interface, especially in a switch on interface value
-			// with non-interface cases, is not visible to order.stmt, so we
-			// have to fall back on allocating a temp here.
-			if !islvalue(v) {
-				v = copyexpr(v, v.Type, init)
-			}
-			v = nod(OADDR, v, nil)
-		}
-
-		dowidth(fromType)
-		fn := syslook(fnname)
-		fn = substArgTypes(fn, fromType, toType)
-		dowidth(fn.Type)
-		n = nod(OCALL, fn, nil)
-		n.List.Set2(tab, v)
-		n = typecheck(n, ctxExpr)
-		n = walkexpr(n, init)
-
-	case OCONV, OCONVNOP:
-		n.Left = walkexpr(n.Left, init)
-		if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
-			if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
-				n = walkCheckPtrAlignment(n, init, nil)
-				break
-			}
-			if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
-				n = walkCheckPtrArithmetic(n, init)
-				break
-			}
-		}
-		param, result := rtconvfn(n.Left.Type, n.Type)
-		if param == Txxx {
-			break
-		}
-		fn := basicnames[param] + "to" + basicnames[result]
-		n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
-
-	case ODIV, OMOD:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-		// rewrite complex div into function call.
-		et := n.Left.Type.Etype
-
-		if isComplex[et] && n.Op == ODIV {
-			t := n.Type
-			n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
-			n = conv(n, t)
-			break
-		}
-
-		// Nothing to do for float divisions.
-		if isFloat[et] {
-			break
-		}
-
-		// rewrite 64-bit div and mod on 32-bit architectures.
-		// TODO: Remove this code once we can introduce
-		// runtime calls late in SSA processing.
-		if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
-			if n.Right.Op == OLITERAL {
-				// Leave div/mod by constant powers of 2 or small 16-bit constants.
-				// The SSA backend will handle those.
-				switch et {
-				case TINT64:
-					c := n.Right.Int64Val()
-					if c < 0 {
-						c = -c
-					}
-					if c != 0 && c&(c-1) == 0 {
-						break opswitch
-					}
-				case TUINT64:
-					c := uint64(n.Right.Int64Val())
-					if c < 1<<16 {
-						break opswitch
-					}
-					if c != 0 && c&(c-1) == 0 {
-						break opswitch
-					}
-				}
-			}
-			var fn string
-			if et == TINT64 {
-				fn = "int64"
-			} else {
-				fn = "uint64"
-			}
-			if n.Op == ODIV {
-				fn += "div"
-			} else {
-				fn += "mod"
-			}
-			n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
-		}
-
-	case OINDEX:
-		n.Left = walkexpr(n.Left, init)
-
-		// Save the original node for bounds-check elision.
-		// If it was an ODIV/OMOD, walk might rewrite it.
-		r := n.Right
-
-		n.Right = walkexpr(n.Right, init)
-
-		// if range of type cannot exceed static array bound,
-		// disable bounds check.
-		if n.Bounded() {
-			break
-		}
-		t := n.Left.Type
-		if t != nil && t.IsPtr() {
-			t = t.Elem()
-		}
-		if t.IsArray() {
-			n.SetBounded(bounded(r, t.NumElem()))
-			if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
-				Warn("index bounds check elided")
-			}
-			if smallintconst(n.Right) && !n.Bounded() {
-				yyerror("index out of bounds")
-			}
-		} else if Isconst(n.Left, CTSTR) {
-			n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
-			if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
-				Warn("index bounds check elided")
-			}
-			if smallintconst(n.Right) && !n.Bounded() {
-				yyerror("index out of bounds")
-			}
-		}
-
-		if Isconst(n.Right, CTINT) {
-			if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-				yyerror("index out of bounds")
-			}
-		}
-
-	case OINDEXMAP:
-		// Replace m[k] with *map{access1,assign}(maptype, m, &k)
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-		map_ := n.Left
-		key := n.Right
-		t := map_.Type
-		if n.IndexMapLValue() {
-			// This m[k] expression is on the left-hand side of an assignment.
-			fast := mapfast(t)
-			if fast == mapslow {
-				// standard version takes key by reference.
-				// order.expr made sure key is addressable.
-				key = nod(OADDR, key, nil)
-			}
-			n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
-		} else {
-			// m[k] is not the target of an assignment.
-			fast := mapfast(t)
-			if fast == mapslow {
-				// standard version takes key by reference.
-				// order.expr made sure key is addressable.
-				key = nod(OADDR, key, nil)
-			}
-
-			if w := t.Elem().Width; w <= zeroValSize {
-				n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
-			} else {
-				z := zeroaddr(w)
-				n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
-			}
-		}
-		n.Type = types.NewPtr(t.Elem())
-		n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
-		n = nod(ODEREF, n, nil)
-		n.Type = t.Elem()
-		n.SetTypecheck(1)
-
-	case ORECV:
-		Fatalf("walkexpr ORECV") // should see inside OAS only
-
-	case OSLICEHEADER:
-		n.Left = walkexpr(n.Left, init)
-		n.List.SetFirst(walkexpr(n.List.First(), init))
-		n.List.SetSecond(walkexpr(n.List.Second(), init))
-
-	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
-		checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
-		if checkSlice {
-			n.Left.Left = walkexpr(n.Left.Left, init)
-		} else {
-			n.Left = walkexpr(n.Left, init)
-		}
-		low, high, max := n.SliceBounds()
-		low = walkexpr(low, init)
-		if low != nil && isZero(low) {
-			// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
-			low = nil
-		}
-		high = walkexpr(high, init)
-		max = walkexpr(max, init)
-		n.SetSliceBounds(low, high, max)
-		if checkSlice {
-			n.Left = walkCheckPtrAlignment(n.Left, init, max)
-		}
-		if n.Op.IsSlice3() {
-			if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
-				// Reduce x[i:j:cap(x)] to x[i:j].
-				if n.Op == OSLICE3 {
-					n.Op = OSLICE
-				} else {
-					n.Op = OSLICEARR
-				}
-				n = reduceSlice(n)
-			}
-		} else {
-			n = reduceSlice(n)
-		}
-
-	case ONEW:
-		if n.Type.Elem().NotInHeap() {
-			yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
-		}
-		if n.Esc == EscNone {
-			if n.Type.Elem().Width >= maxImplicitStackVarSize {
-				Fatalf("large ONEW with EscNone: %v", n)
-			}
-			r := temp(n.Type.Elem())
-			r = nod(OAS, r, nil) // zero temp
-			r = typecheck(r, ctxStmt)
-			init.Append(r)
-			r = nod(OADDR, r.Left, nil)
-			r = typecheck(r, ctxExpr)
-			n = r
-		} else {
-			n = callnew(n.Type.Elem())
-		}
-
-	case OADDSTR:
-		n = addstr(n, init)
-
-	case OAPPEND:
-		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
-		Fatalf("append outside assignment")
-
-	case OCOPY:
-		n = copyany(n, init, instrumenting && !compiling_runtime)
-
-		// cannot use chanfn - closechan takes any, not chan any
-	case OCLOSE:
-		fn := syslook("closechan")
-
-		fn = substArgTypes(fn, n.Left.Type)
-		n = mkcall1(fn, nil, init, n.Left)
-
-	case OMAKECHAN:
-		// When size fits into int, use makechan instead of
-		// makechan64, which is faster and shorter on 32 bit platforms.
-		size := n.Left
-		fnname := "makechan64"
-		argtype := types.Types[TINT64]
-
-		// Type checking guarantees that TIDEAL size is positive and fits in an int.
-		// The case of size overflow when converting TUINT or TUINTPTR to TINT
-		// will be handled by the negative range checks in makechan during runtime.
-		if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
-			fnname = "makechan"
-			argtype = types.Types[TINT]
-		}
-
-		n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
-
-	case OMAKEMAP:
-		t := n.Type
-		hmapType := hmap(t)
-		hint := n.Left
-
-		// var h *hmap
-		var h *Node
-		if n.Esc == EscNone {
-			// Allocate hmap on stack.
-
-			// var hv hmap
-			hv := temp(hmapType)
-			zero := nod(OAS, hv, nil)
-			zero = typecheck(zero, ctxStmt)
-			init.Append(zero)
-			// h = &hv
-			h = nod(OADDR, hv, nil)
-
-			// Allocate one bucket pointed to by hmap.buckets on the stack if hint
-			// is not larger than BUCKETSIZE. In case hint is larger than
-			// BUCKETSIZE, runtime.makemap will allocate the buckets on the heap.
-			// Maximum key and elem size is 128 bytes; larger objects
-			// are stored with an indirection. So the max bucket size is 2048+eps.
-			if !Isconst(hint, CTINT) ||
-				hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
-
-				// In case hint is larger than BUCKETSIZE, runtime.makemap
-				// will allocate the buckets on the heap; see #20184.
-				//
-				// if hint <= BUCKETSIZE {
-				//     var bv bmap
-				//     b = &bv
-				//     h.buckets = b
-				// }
-
-				nif := nod(OIF, nod(OLE, hint, nodintconst(BUCKETSIZE)), nil)
-				nif.SetLikely(true)
-
-				// var bv bmap
-				bv := temp(bmap(t))
-				zero = nod(OAS, bv, nil)
-				nif.Nbody.Append(zero)
-
-				// b = &bv
-				b := nod(OADDR, bv, nil)
-
-				// h.buckets = b
-				bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
-				na := nod(OAS, nodSym(ODOT, h, bsym), b)
-				nif.Nbody.Append(na)
-
-				nif = typecheck(nif, ctxStmt)
-				nif = walkstmt(nif)
-				init.Append(nif)
-			}
-		}
-
-		if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
-			// Handling make(map[any]any) and
-			// make(map[any]any, hint) where hint <= BUCKETSIZE
-			// specially allows for faster map initialization and
-			// improves binary size by using calls with fewer arguments.
-			// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
-			// and no buckets will be allocated by makemap. Therefore,
-			// no buckets need to be allocated in this code path.
-			if n.Esc == EscNone {
-				// Only need to initialize h.hash0 since
-				// hmap h has been allocated on the stack already.
-				// h.hash0 = fastrand()
-				rand := mkcall("fastrand", types.Types[TUINT32], init)
-				hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
-				a := nod(OAS, nodSym(ODOT, h, hashsym), rand)
-				a = typecheck(a, ctxStmt)
-				a = walkexpr(a, init)
-				init.Append(a)
-				n = convnop(h, t)
-			} else {
-				// Call runtime.makehmap to allocate an
-				// hmap on the heap and initialize hmap's hash0 field.
-				fn := syslook("makemap_small")
-				fn = substArgTypes(fn, t.Key(), t.Elem())
-				n = mkcall1(fn, n.Type, init)
-			}
-		} else {
-			if n.Esc != EscNone {
-				h = nodnil()
-			}
-			// Map initialization with a variable or large hint is
-			// more complicated. We therefore generate a call to
-			// runtime.makemap to initialize hmap and allocate the
-			// map buckets.
-
-			// When hint fits into int, use makemap instead of
-			// makemap64, which is faster and shorter on 32 bit platforms.
-			fnname := "makemap64"
-			argtype := types.Types[TINT64]
-
-			// Type checking guarantees that TIDEAL hint is positive and fits in an int.
-			// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
-			// The case of hint overflow when converting TUINT or TUINTPTR to TINT
-			// will be handled by the negative range checks in makemap during runtime.
-			if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
-				fnname = "makemap"
-				argtype = types.Types[TINT]
-			}
-
-			fn := syslook(fnname)
-			fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
-			n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
-		}
-
-	case OMAKESLICE:
-		l := n.Left
-		r := n.Right
-		if r == nil {
-			r = safeexpr(l, init)
-			l = r
-		}
-		t := n.Type
-		if t.Elem().NotInHeap() {
-			yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
-		}
-		if n.Esc == EscNone {
-			if why := heapAllocReason(n); why != "" {
-				Fatalf("%v has EscNone, but %v", n, why)
-			}
-			// var arr [r]T
-			// n = arr[:l]
-			i := indexconst(r)
-			if i < 0 {
-				Fatalf("walkexpr: invalid index %v", r)
-			}
-
-			// cap is constrained to [0,2^31) or [0,2^63) depending on whether
-			// we're on a 32-bit or 64-bit system. So it's safe to do:
-			//
-			// if uint64(len) > cap {
-			//     if len < 0 { panicmakeslicelen() }
-			//     panicmakeslicecap()
-			// }
-			nif := nod(OIF, nod(OGT, conv(l, types.Types[TUINT64]), nodintconst(i)), nil)
-			niflen := nod(OIF, nod(OLT, l, nodintconst(0)), nil)
-			niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
-			nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init))
-			nif = typecheck(nif, ctxStmt)
-			init.Append(nif)
-
-			t = types.NewArray(t.Elem(), i) // [r]T
-			var_ := temp(t)
-			a := nod(OAS, var_, nil) // zero temp
-			a = typecheck(a, ctxStmt)
-			init.Append(a)
-			r := nod(OSLICE, var_, nil) // arr[:l]
-			r.SetSliceBounds(nil, l, nil)
-			r = conv(r, n.Type) // in case n.Type is named.
-			r = typecheck(r, ctxExpr)
-			r = walkexpr(r, init)
-			n = r
-		} else {
-			// n escapes; set up a call to makeslice.
-			// When len and cap can fit into int, use makeslice instead of
-			// makeslice64, which is faster and shorter on 32-bit platforms.
-
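-			// Note: len and cap shadow the builtin functions here; they are
-			// the length and capacity operands of the make call.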
-			len, cap := l, r
-
-			fnname := "makeslice64"
-			argtype := types.Types[TINT64]
-
-			// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
-			// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
-			// will be handled by the negative range checks in makeslice during runtime.
-			if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
-				(cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
-				fnname = "makeslice"
-				argtype = types.Types[TINT]
-			}
-
-			m := nod(OSLICEHEADER, nil, nil)
-			m.Type = t
-
-			fn := syslook(fnname)
-			m.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
-			m.Left.MarkNonNil()
-			m.List.Set2(conv(len, types.Types[TINT]), conv(cap, types.Types[TINT]))
-
-			m = typecheck(m, ctxExpr)
-			m = walkexpr(m, init)
-			n = m
-		}
-
-	case OMAKESLICECOPY:
-		if n.Esc == EscNone {
-			Fatalf("OMAKESLICECOPY with EscNone: %v", n)
-		}
-
-		t := n.Type
-		if t.Elem().NotInHeap() {
-			yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
-		}
-
-		length := conv(n.Left, types.Types[TINT])
-		copylen := nod(OLEN, n.Right, nil)
-		copyptr := nod(OSPTR, n.Right, nil)
-
-		if !t.Elem().HasPointers() && n.Bounded() {
-			// When len(to)==len(from) and elements have no pointers:
-			// replace make+copy with runtime.mallocgc+runtime.memmove.
-
-			// We do not check for overflow of len(to)*elem.Width here,
-			// since len(from) is the length of an existing, already-checked
-			// slice whose elements have the same elem.Width.
-			size := nod(OMUL, conv(length, types.Types[TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[TUINTPTR]))
-
-			// instantiate mallocgc(size uintptr, typ *byte, needzero bool) unsafe.Pointer
-			fn := syslook("mallocgc")
-			sh := nod(OSLICEHEADER, nil, nil)
-			sh.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, size, nodnil(), nodbool(false))
-			sh.Left.MarkNonNil()
-			sh.List.Set2(length, length)
-			sh.Type = t
-
-			s := temp(t)
-			r := typecheck(nod(OAS, s, sh), ctxStmt)
-			r = walkexpr(r, init)
-			init.Append(r)
-
-			// instantiate memmove(to *any, frm *any, size uintptr)
-			fn = syslook("memmove")
-			fn = substArgTypes(fn, t.Elem(), t.Elem())
-			ncopy := mkcall1(fn, nil, init, nod(OSPTR, s, nil), copyptr, size)
-			ncopy = typecheck(ncopy, ctxStmt)
-			ncopy = walkexpr(ncopy, init)
-			init.Append(ncopy)
-
-			n = s
-		} else { // Replace make+copy with runtime.makeslicecopy.
-			// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
-			fn := syslook("makeslicecopy")
-			s := nod(OSLICEHEADER, nil, nil)
-			s.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[TUNSAFEPTR]))
-			s.Left.MarkNonNil()
-			s.List.Set2(length, length)
-			s.Type = t
-			n = typecheck(s, ctxExpr)
-			n = walkexpr(n, init)
-		}
-
-	case ORUNESTR:
-		a := nodnil()
-		if n.Esc == EscNone {
-			t := types.NewArray(types.Types[TUINT8], 4)
-			a = nod(OADDR, temp(t), nil)
-		}
-		// intstring(*[4]byte, rune)
-		n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
-
-	case OBYTES2STR, ORUNES2STR:
-		a := nodnil()
-		if n.Esc == EscNone {
-			// Create temporary buffer for string on stack.
-			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
-			a = nod(OADDR, temp(t), nil)
-		}
-		if n.Op == ORUNES2STR {
-			// slicerunetostring(*[32]byte, []rune) string
-			n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
-		} else {
-			// slicebytetostring(*[32]byte, ptr *byte, n int) string
-			n.Left = cheapexpr(n.Left, init)
-			ptr, len := n.Left.backingArrayPtrLen()
-			n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
-		}
-
-	case OBYTES2STRTMP:
-		n.Left = walkexpr(n.Left, init)
-		if !instrumenting {
-			// Let the backend handle OBYTES2STRTMP directly
-			// to avoid a function call to slicebytetostringtmp.
-			break
-		}
-		// slicebytetostringtmp(ptr *byte, n int) string
-		n.Left = cheapexpr(n.Left, init)
-		ptr, len := n.Left.backingArrayPtrLen()
-		n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
-
-	case OSTR2BYTES:
-		s := n.Left
-		if Isconst(s, CTSTR) {
-			sc := s.StringVal()
-
-			// Allocate a [n]byte of the right size.
-			t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
-			var a *Node
-			if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
-				a = nod(OADDR, temp(t), nil)
-			} else {
-				a = callnew(t)
-			}
-			p := temp(t.PtrTo()) // *[n]byte
-			init.Append(typecheck(nod(OAS, p, a), ctxStmt))
-
-			// Copy from the static string data to the [n]byte.
-			if len(sc) > 0 {
-				as := nod(OAS,
-					nod(ODEREF, p, nil),
-					nod(ODEREF, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil))
-				as = typecheck(as, ctxStmt)
-				as = walkstmt(as)
-				init.Append(as)
-			}
-
-			// Slice the [n]byte to a []byte.
-			n.Op = OSLICEARR
-			n.Left = p
-			n = walkexpr(n, init)
-			break
-		}
-
-		a := nodnil()
-		if n.Esc == EscNone {
-			// Create temporary buffer for slice on stack.
-			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
-			a = nod(OADDR, temp(t), nil)
-		}
-		// stringtoslicebyte(*[32]byte, string) []byte
-		n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING]))
-
-	case OSTR2BYTESTMP:
-		// []byte(string) conversion that creates a slice
-		// referring to the actual string bytes.
-		// This conversion is handled later by the backend and
-		// is only for use by internal compiler optimizations
-		// that know that the slice won't be mutated.
-		// The only such case today is:
-		// for i, c := range []byte(string)
-		n.Left = walkexpr(n.Left, init)
-
-	case OSTR2RUNES:
-		a := nodnil()
-		if n.Esc == EscNone {
-			// Create temporary buffer for slice on stack.
-			t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
-			a = nod(OADDR, temp(t), nil)
-		}
-		// stringtoslicerune(*[32]rune, string) []rune
-		n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
-
-	case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
-		if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
-			// n can be directly represented in the read-only data section.
-			// Make direct reference to the static data. See issue 12841.
-			vstat := readonlystaticname(n.Type)
-			fixedlit(inInitFunction, initKindStatic, n, vstat, init)
-			n = vstat
-			n = typecheck(n, ctxExpr)
-			break
-		}
-		var_ := temp(n.Type)
-		anylit(n, var_, init)
-		n = var_
-
-	case OSEND:
-		n1 := n.Right
-		n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
-		n1 = walkexpr(n1, init)
-		n1 = nod(OADDR, n1, nil)
-		n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
-
-	case OCLOSURE:
-		n = walkclosure(n, init)
-
-	case OCALLPART:
-		n = walkpartialcall(n, init)
-	}
-
-	// Expressions that are constant at run time but not
-	// considered const by the language spec are not turned into
-	// constants until walk. For example, if n is y%1 == 0, the
-	// walk of y%1 may have replaced it by 0.
-	// Check whether n with its updated args is itself now a constant.
-	t := n.Type
-	evconst(n)
-	if n.Type != t {
-		Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
-	}
-	if n.Op == OLITERAL {
-		n = typecheck(n, ctxExpr)
-		// Emit the string symbol now, to avoid emitting
-		// string symbols concurrently during the backend.
-		if s, ok := n.Val().U.(string); ok {
-			_ = stringsym(n.Pos, s)
-		}
-	}
-
-	updateHasCall(n)
-
-	if Debug.w != 0 && n != nil {
-		Dump("after walk expr", n)
-	}
-
-	lineno = lno
-	return n
-}
-
-// markTypeUsedInInterface marks that type t is converted to an interface.
-// This information is used by the linker in dead method elimination.
-func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
-	tsym := typenamesym(t).Linksym()
-	// Emit a marker relocation. The linker will know the type is converted
-	// to an interface if "from" is reachable.
-	r := obj.Addrel(from)
-	r.Sym = tsym
-	r.Type = objabi.R_USEIFACE
-}
-
-// markUsedIfaceMethod marks that an interface method is used in the current
-// function. n is an OCALLINTER node.
-func markUsedIfaceMethod(n *Node) {
-	ityp := n.Left.Left.Type
-	tsym := typenamesym(ityp).Linksym()
-	r := obj.Addrel(Curfn.Func.lsym)
-	r.Sym = tsym
-	// n.Left.Xoffset is the method index * Widthptr (the offset of the code
-	// pointer in the itab).
-	midx := n.Left.Xoffset / int64(Widthptr)
-	r.Add = ifaceMethodOffset(ityp, midx)
-	r.Type = objabi.R_USEIFACEMETHOD
-}
-
-// rtconvfn returns the parameter and result types that will be used by a
-// runtime function to convert from type src to type dst. The runtime function
-// name can be derived from the names of the returned types.
-//
-// If no such function is necessary, it returns (Txxx, Txxx).
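-// For example, on ARM a float64-to-int64 conversion yields (TFLOAT64, TINT64),
-// which the caller turns into a call to the runtime's float64toint64 helper.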
-func rtconvfn(src, dst *types.Type) (param, result types.EType) {
-	if thearch.SoftFloat {
-		return Txxx, Txxx
-	}
-
-	switch thearch.LinkArch.Family {
-	case sys.ARM, sys.MIPS:
-		if src.IsFloat() {
-			switch dst.Etype {
-			case TINT64, TUINT64:
-				return TFLOAT64, dst.Etype
-			}
-		}
-		if dst.IsFloat() {
-			switch src.Etype {
-			case TINT64, TUINT64:
-				return src.Etype, TFLOAT64
-			}
-		}
-
-	case sys.I386:
-		if src.IsFloat() {
-			switch dst.Etype {
-			case TINT64, TUINT64:
-				return TFLOAT64, dst.Etype
-			case TUINT32, TUINT, TUINTPTR:
-				return TFLOAT64, TUINT32
-			}
-		}
-		if dst.IsFloat() {
-			switch src.Etype {
-			case TINT64, TUINT64:
-				return src.Etype, TFLOAT64
-			case TUINT32, TUINT, TUINTPTR:
-				return TUINT32, TFLOAT64
-			}
-		}
-	}
-	return Txxx, Txxx
-}
-
-// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *Node) *Node {
-	low, high, max := n.SliceBounds()
-	if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
-		// Reduce x[i:len(x)] to x[i:].
-		high = nil
-	}
-	n.SetSliceBounds(low, high, max)
-	if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
-		// Reduce x[:] to x.
-		if Debug_slice > 0 {
-			Warn("slice: omit slice operation")
-		}
-		return n.Left
-	}
-	return n
-}
-
-func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
-	// convas will turn map assigns into function calls,
-	// making it impossible for reorder3 to work.
-	n := nod(OAS, l, r)
-
-	if l.Op == OINDEXMAP {
-		return n
-	}
-
-	return convas(n, init)
-}
-
-func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
-	// Check assignment of an expression list to
-	// an expression list. Called for
-	//	expr-list = expr-list
-
-	// ensure order of evaluation for function calls
-	for i := range nl {
-		nl[i] = safeexpr(nl[i], init)
-	}
-	for i1 := range nr {
-		nr[i1] = safeexpr(nr[i1], init)
-	}
-
-	var nn []*Node
-	i := 0
-	for ; i < len(nl); i++ {
-		if i >= len(nr) {
-			break
-		}
-		// Do not generate 'x = x' during return. See issue 4014.
-		if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
-			continue
-		}
-		nn = append(nn, ascompatee1(nl[i], nr[i], init))
-	}
-
-	// cannot happen: caller checked that lists had same length
-	if i < len(nl) || i < len(nr) {
-		var nln, nrn Nodes
-		nln.Set(nl)
-		nrn.Set(nr)
-		Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
-	}
-	return nn
-}
-
-// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l *Node, rt *types.Type) bool {
-	if l.HasCall() || l.Op == OINDEXMAP {
-		return true
-	}
-	if types.Identical(l.Type, rt) {
-		return false
-	}
-	// There might be a conversion required, which might involve a runtime call.
-	return true
-}
-
-// ascompatet checks assignment of a function's result type list to
-// an expression list. Called for
-//	expr-list = func()
-func ascompatet(nl Nodes, nr *types.Type) []*Node {
-	if nl.Len() != nr.NumFields() {
-		Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
-	}
-
-	var nn, mm Nodes
-	for i, l := range nl.Slice() {
-		if l.isBlank() {
-			continue
-		}
-		r := nr.Field(i)
-
-		// Any assignment to an lvalue that might cause a function call must be
-		// deferred until all the returned values have been read.
-		if fncall(l, r.Type) {
-			tmp := temp(r.Type)
-			tmp = typecheck(tmp, ctxExpr)
-			a := nod(OAS, l, tmp)
-			a = convas(a, &mm)
-			mm.Append(a)
-			l = tmp
-		}
-
-		res := nod(ORESULT, nil, nil)
-		res.Xoffset = Ctxt.FixedFrameSize() + r.Offset
-		res.Type = r.Type
-		res.SetTypecheck(1)
-
-		a := nod(OAS, l, res)
-		a = convas(a, &nn)
-		updateHasCall(a)
-		if a.HasCall() {
-			Dump("ascompatet ucount", a)
-			Fatalf("ascompatet: too many function calls evaluating parameters")
-		}
-
-		nn.Append(a)
-	}
-	return append(nn.Slice(), mm.Slice()...)
-}
-
-// mkdotargslice packages all the arguments that match a ...T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []*Node) *Node {
-	var n *Node
-	if len(args) == 0 {
-		n = nodnil()
-		n.Type = typ
-	} else {
-		n = nod(OCOMPLIT, nil, typenod(typ))
-		n.List.Append(args...)
-		n.SetImplicit(true)
-	}
-
-	n = typecheck(n, ctxExpr)
-	if n.Type == nil {
-		Fatalf("mkdotargslice: typecheck failed")
-	}
-	return n
-}
-
-// fixVariadicCall rewrites calls to variadic functions to use an
-// explicit ... argument if one is not already present.
-func fixVariadicCall(call *Node) {
-	fntype := call.Left.Type
-	if !fntype.IsVariadic() || call.IsDDD() {
-		return
-	}
-
-	vi := fntype.NumParams() - 1
-	vt := fntype.Params().Field(vi).Type
-
-	args := call.List.Slice()
-	extra := args[vi:]
-	slice := mkdotargslice(vt, extra)
-	for i := range extra {
-		extra[i] = nil // allow GC
-	}
-
-	call.List.Set(append(args[:vi], slice))
-	call.SetIsDDD(true)
-}
-
-func walkCall(n *Node, init *Nodes) {
-	if n.Rlist.Len() != 0 {
-		return // already walked
-	}
-
-	params := n.Left.Type.Params()
-	args := n.List.Slice()
-
-	n.Left = walkexpr(n.Left, init)
-	walkexprlist(args, init)
-
-	// If this is a method call, add the receiver at the beginning of the args.
-	if n.Op == OCALLMETH {
-		withRecv := make([]*Node, len(args)+1)
-		withRecv[0] = n.Left.Left
-		n.Left.Left = nil
-		copy(withRecv[1:], args)
-		args = withRecv
-	}
-
-	// For any argument whose evaluation might require a function call,
-	// store that argument into a temporary variable,
-	// to prevent such calls from clobbering arguments already on the stack.
-	// When instrumenting, all arguments might require function calls.
-	var tempAssigns []*Node
-	for i, arg := range args {
-		updateHasCall(arg)
-		// Determine param type.
-		var t *types.Type
-		if n.Op == OCALLMETH {
-			if i == 0 {
-				t = n.Left.Type.Recv().Type
-			} else {
-				t = params.Field(i - 1).Type
-			}
-		} else {
-			t = params.Field(i).Type
-		}
-		if instrumenting || fncall(arg, t) {
-			// make assignment of fncall to tempAt
-			tmp := temp(t)
-			a := nod(OAS, tmp, arg)
-			a = convas(a, init)
-			tempAssigns = append(tempAssigns, a)
-			// replace arg with temp
-			args[i] = tmp
-		}
-	}
-
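-	// n.List now carries the temp assignments and n.Rlist the rewritten
-	// argument list; the n.Rlist.Len() check at the top of walkCall treats
-	// a non-empty Rlist as "already walked".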
-	n.List.Set(tempAssigns)
-	n.Rlist.Set(args)
-}
-
-// walkprint generates code for print and println.
-func walkprint(nn *Node, init *Nodes) *Node {
-	// Hoist all the argument evaluation up before the lock.
-	walkexprlistcheap(nn.List.Slice(), init)
-
-	// For println, add " " between elements and "\n" at the end.
-	if nn.Op == OPRINTN {
-		s := nn.List.Slice()
-		t := make([]*Node, 0, len(s)*2)
-		for i, n := range s {
-			if i != 0 {
-				t = append(t, nodstr(" "))
-			}
-			t = append(t, n)
-		}
-		t = append(t, nodstr("\n"))
-		nn.List.Set(t)
-	}
-
-	// Collapse runs of constant strings.
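-	// For example, println("a", "b") was expanded above to "a", " ", "b", "\n",
-	// which collapses here into the single constant "a b\n".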
-	s := nn.List.Slice()
-	t := make([]*Node, 0, len(s))
-	for i := 0; i < len(s); {
-		var strs []string
-		for i < len(s) && Isconst(s[i], CTSTR) {
-			strs = append(strs, s[i].StringVal())
-			i++
-		}
-		if len(strs) > 0 {
-			t = append(t, nodstr(strings.Join(strs, "")))
-		}
-		if i < len(s) {
-			t = append(t, s[i])
-			i++
-		}
-	}
-	nn.List.Set(t)
-
-	calls := []*Node{mkcall("printlock", nil, init)}
-	for i, n := range nn.List.Slice() {
-		if n.Op == OLITERAL {
-			switch n.Val().Ctype() {
-			case CTRUNE:
-				n = defaultlit(n, types.Runetype)
-
-			case CTINT:
-				n = defaultlit(n, types.Types[TINT64])
-
-			case CTFLT:
-				n = defaultlit(n, types.Types[TFLOAT64])
-			}
-		}
-
-		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
-			n = defaultlit(n, types.Types[TINT64])
-		}
-		n = defaultlit(n, nil)
-		nn.List.SetIndex(i, n)
-		if n.Type == nil || n.Type.Etype == TFORW {
-			continue
-		}
-
-		var on *Node
-		switch n.Type.Etype {
-		case TINTER:
-			if n.Type.IsEmptyInterface() {
-				on = syslook("printeface")
-			} else {
-				on = syslook("printiface")
-			}
-			on = substArgTypes(on, n.Type) // any-1
-		case TPTR:
-			if n.Type.Elem().NotInHeap() {
-				on = syslook("printuintptr")
-				n = nod(OCONV, n, nil)
-				n.Type = types.Types[TUNSAFEPTR]
-				n = nod(OCONV, n, nil)
-				n.Type = types.Types[TUINTPTR]
-				break
-			}
-			fallthrough
-		case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
-			on = syslook("printpointer")
-			on = substArgTypes(on, n.Type) // any-1
-		case TSLICE:
-			on = syslook("printslice")
-			on = substArgTypes(on, n.Type) // any-1
-		case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
-			if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
-				on = syslook("printhex")
-			} else {
-				on = syslook("printuint")
-			}
-		case TINT, TINT8, TINT16, TINT32, TINT64:
-			on = syslook("printint")
-		case TFLOAT32, TFLOAT64:
-			on = syslook("printfloat")
-		case TCOMPLEX64, TCOMPLEX128:
-			on = syslook("printcomplex")
-		case TBOOL:
-			on = syslook("printbool")
-		case TSTRING:
-			cs := ""
-			if Isconst(n, CTSTR) {
-				cs = n.StringVal()
-			}
-			switch cs {
-			case " ":
-				on = syslook("printsp")
-			case "\n":
-				on = syslook("printnl")
-			default:
-				on = syslook("printstring")
-			}
-		default:
-			badtype(OPRINT, n.Type, nil)
-			continue
-		}
-
-		r := nod(OCALL, on, nil)
-		if params := on.Type.Params().FieldSlice(); len(params) > 0 {
-			t := params[0].Type
-			if !types.Identical(t, n.Type) {
-				n = nod(OCONV, n, nil)
-				n.Type = t
-			}
-			r.List.Append(n)
-		}
-		calls = append(calls, r)
-	}
-
-	calls = append(calls, mkcall("printunlock", nil, init))
-
-	typecheckslice(calls, ctxStmt)
-	walkexprlist(calls, init)
-
-	r := nod(OEMPTY, nil, nil)
-	r = typecheck(r, ctxStmt)
-	r = walkexpr(r, init)
-	r.Ninit.Set(calls)
-	return r
-}
-
-func callnew(t *types.Type) *Node {
-	dowidth(t)
-	n := nod(ONEWOBJ, typename(t), nil)
-	n.Type = types.NewPtr(t)
-	n.SetTypecheck(1)
-	n.MarkNonNil()
-	return n
-}
-
-// isReflectHeaderDataField reports whether l is an expression p.Data
-// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l *Node) bool {
-	if l.Type != types.Types[TUINTPTR] {
-		return false
-	}
-
-	var tsym *types.Sym
-	switch l.Op {
-	case ODOT:
-		tsym = l.Left.Type.Sym
-	case ODOTPTR:
-		tsym = l.Left.Type.Elem().Sym
-	default:
-		return false
-	}
-
-	if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
-		return false
-	}
-	return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
-}
-
-func convas(n *Node, init *Nodes) *Node {
-	if n.Op != OAS {
-		Fatalf("convas: not OAS %v", n.Op)
-	}
-	defer updateHasCall(n)
-
-	n.SetTypecheck(1)
-
-	if n.Left == nil || n.Right == nil {
-		return n
-	}
-
-	lt := n.Left.Type
-	rt := n.Right.Type
-	if lt == nil || rt == nil {
-		return n
-	}
-
-	if n.Left.isBlank() {
-		n.Right = defaultlit(n.Right, nil)
-		return n
-	}
-
-	if !types.Identical(lt, rt) {
-		n.Right = assignconv(n.Right, lt, "assignment")
-		n.Right = walkexpr(n.Right, init)
-	}
-	dowidth(n.Right.Type)
-
-	return n
-}
-
-// reorder3 handles the simultaneous assignment
-//	a, b = c, d
-// from ascompat[ee]: there cannot be
-// a later use of an earlier lvalue.
-//
-// Function calls have already been removed.
-func reorder3(all []*Node) []*Node {
-	// If a needed expression may be affected by an
-	// earlier assignment, make an early copy of that
-	// expression and use the copy instead.
-	var early []*Node
-
-	var mapinit Nodes
-	for i, n := range all {
-		l := n.Left
-
-		// Save subexpressions needed on left side.
-		// Drill through non-dereferences.
-		for {
-			if l.Op == ODOT || l.Op == OPAREN {
-				l = l.Left
-				continue
-			}
-
-			if l.Op == OINDEX && l.Left.Type.IsArray() {
-				l.Right = reorder3save(l.Right, all, i, &early)
-				l = l.Left
-				continue
-			}
-
-			break
-		}
-
-		switch l.Op {
-		default:
-			Fatalf("reorder3 unexpected lvalue %#v", l.Op)
-
-		case ONAME:
-			break
-
-		case OINDEX, OINDEXMAP:
-			l.Left = reorder3save(l.Left, all, i, &early)
-			l.Right = reorder3save(l.Right, all, i, &early)
-			if l.Op == OINDEXMAP {
-				all[i] = convas(all[i], &mapinit)
-			}
-
-		case ODEREF, ODOTPTR:
-			l.Left = reorder3save(l.Left, all, i, &early)
-		}
-
-		// Save expression on right side.
-		all[i].Right = reorder3save(all[i].Right, all, i, &early)
-	}
-
-	early = append(mapinit.Slice(), early...)
-	return append(early, all...)
-}
-
-// If the evaluation of n would be affected by the
-// assignments in all up to but not including the ith assignment,
-// reorder3save copies n into a temporary, appends the copy's
-// initialization to *early, and returns the temporary.
-// The result of reorder3save MUST be assigned back to n, e.g.
-// 	n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
-	if !aliased(n, all[:i]) {
-		return n
-	}
-
-	q := temp(n.Type)
-	q = nod(OAS, q, n)
-	q = typecheck(q, ctxStmt)
-	*early = append(*early, q)
-	return q.Left
-}
-
-// outervalue returns the outermost value that a write to n affects:
-// the containing struct or array.
-func outervalue(n *Node) *Node {
-	for {
-		switch n.Op {
-		case OXDOT:
-			Fatalf("OXDOT in walk")
-		case ODOT, OPAREN, OCONVNOP:
-			n = n.Left
-			continue
-		case OINDEX:
-			if n.Left.Type != nil && n.Left.Type.IsArray() {
-				n = n.Left
-				continue
-			}
-		}
-
-		return n
-	}
-}
-
-// aliased reports whether the computation of r might be
-// affected by any of the assignments in all.
-func aliased(r *Node, all []*Node) bool {
-	if r == nil {
-		return false
-	}
-
-	// Treat all fields of a struct as referring to the whole struct.
-	// We could do better but we would have to keep track of the fields.
-	for r.Op == ODOT {
-		r = r.Left
-	}
-
-	// Look for obvious aliasing: a variable being assigned
-	// during the all list and appearing in n.
-	// Also record whether there are any writes to addressable
-	// memory (either main memory or variables whose addresses
-	// have been taken).
-	memwrite := false
-	for _, as := range all {
-		// We can ignore assignments to blank.
-		if as.Left.isBlank() {
-			continue
-		}
-
-		l := outervalue(as.Left)
-		if l.Op != ONAME {
-			memwrite = true
-			continue
-		}
-
-		switch l.Class() {
-		default:
-			Fatalf("unexpected class: %v, %v", l, l.Class())
-
-		case PAUTOHEAP, PEXTERN:
-			memwrite = true
-			continue
-
-		case PPARAMOUT:
-			// Assignments to a result parameter in a function with defers
-			// become visible early if evaluation of any later expression
-			// panics (#43835).
-			if Curfn.Func.HasDefer() {
-				return true
-			}
-			fallthrough
-		case PAUTO, PPARAM:
-			if l.Name.Addrtaken() {
-				memwrite = true
-				continue
-			}
-
-			if vmatch2(l, r) {
-				// Direct hit: l appears in r.
-				return true
-			}
-		}
-	}
-
-	// The variables being written do not appear in r.
-	// However, r might refer to computed addresses
-	// that are being written.
-
-	// If no computed addresses are affected by the writes, no aliasing.
-	if !memwrite {
-		return false
-	}
-
-	// If r does not refer to computed addresses
-	// (that is, if r only refers to variables whose addresses
-	// have not been taken), no aliasing.
-	if varexpr(r) {
-		return false
-	}
-
-	// Otherwise, both the writes and r refer to computed memory addresses.
-	// Assume that they might conflict.
-	return true
-}
-
-// varexpr reports whether the evaluation of n refers only to variables
-// whose addresses have not been taken
-// (and to no other memory).
-func varexpr(n *Node) bool {
-	if n == nil {
-		return true
-	}
-
-	switch n.Op {
-	case OLITERAL:
-		return true
-
-	case ONAME:
-		switch n.Class() {
-		case PAUTO, PPARAM, PPARAMOUT:
-			if !n.Name.Addrtaken() {
-				return true
-			}
-		}
-
-		return false
-
-	case OADD,
-		OSUB,
-		OOR,
-		OXOR,
-		OMUL,
-		ODIV,
-		OMOD,
-		OLSH,
-		ORSH,
-		OAND,
-		OANDNOT,
-		OPLUS,
-		ONEG,
-		OBITNOT,
-		OPAREN,
-		OANDAND,
-		OOROR,
-		OCONV,
-		OCONVNOP,
-		OCONVIFACE,
-		ODOTTYPE:
-		return varexpr(n.Left) && varexpr(n.Right)
-
-	case ODOT: // but not ODOTPTR
-		// Should have been handled in aliased.
-		Fatalf("varexpr unexpected ODOT")
-	}
-
-	// Be conservative.
-	return false
-}
-
-// vmatch2 reports whether the name l is mentioned in r.
-func vmatch2(l *Node, r *Node) bool {
-	if r == nil {
-		return false
-	}
-	switch r.Op {
-	// match each right given left
-	case ONAME:
-		return l == r
-
-	case OLITERAL:
-		return false
-	}
-
-	if vmatch2(l, r.Left) {
-		return true
-	}
-	if vmatch2(l, r.Right) {
-		return true
-	}
-	for _, n := range r.List.Slice() {
-		if vmatch2(l, n) {
-			return true
-		}
-	}
-	return false
-}
-
-// vmatch1 reports whether any name mentioned in l is also mentioned in r.
-// Called from sinit.go.
-func vmatch1(l *Node, r *Node) bool {
-	// isolate all left sides
-	if l == nil || r == nil {
-		return false
-	}
-	switch l.Op {
-	case ONAME:
-		switch l.Class() {
-		case PPARAM, PAUTO:
-			break
-
-		default:
-			// assignment to non-stack variable must be
-			// delayed if right has function calls.
-			if r.HasCall() {
-				return true
-			}
-		}
-
-		return vmatch2(l, r)
-
-	case OLITERAL:
-		return false
-	}
-
-	if vmatch1(l.Left, r) {
-		return true
-	}
-	if vmatch1(l.Right, r) {
-		return true
-	}
-	for _, n := range l.List.Slice() {
-		if vmatch1(n, r) {
-			return true
-		}
-	}
-	return false
-}
-
-// paramstoheap returns code to allocate memory for heap-escaped parameters
-// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []*Node {
-	var nn []*Node
-	for _, t := range params.Fields().Slice() {
-		v := asNode(t.Nname)
-		if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
-			v = nil
-		}
-		if v == nil {
-			continue
-		}
-
-		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
-			nn = append(nn, walkstmt(nod(ODCL, v, nil)))
-			if stackcopy.Class() == PPARAM {
-				nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), ctxStmt)))
-			}
-		}
-	}
-
-	return nn
-}
-
-// zeroResults zeros the return values at the start of the function.
-// We need to do this very early in the function.  Defer might stop a
-// panic and show the return values as they exist at the time of
-// panic.  For precise stacks, the garbage collector assumes results
-// are always live, so we need to zero them before any allocations,
-// even allocations to move params/results to the heap.
-// The generated code is added to Curfn's Enter list.
-func zeroResults() {
-	for _, f := range Curfn.Type.Results().Fields().Slice() {
-		v := asNode(f.Nname)
-		if v != nil && v.Name.Param.Heapaddr != nil {
-			// The local which points to the return value is the
-			// thing that needs zeroing. This is already handled
-			// by a Needzero annotation in plive.go:livenessepilogue.
-			continue
-		}
-		if v.isParamHeapCopy() {
-			// TODO(josharian/khr): Investigate whether we can switch to "continue" here,
-			// and document more in either case.
-			// In the review of CL 114797, Keith wrote (roughly):
-			// I don't think the zeroing below matters.
-			// The stack return value will never be marked as live anywhere in the function.
-			// It is not written to until deferreturn returns.
-			v = v.Name.Param.Stackcopy
-		}
-		// Zero the stack location containing f.
-		Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil))
-	}
-}
-
-// returnsfromheap returns code to copy values for heap-escaped parameters
-// back to the stack.
-func returnsfromheap(params *types.Type) []*Node {
-	var nn []*Node
-	for _, t := range params.Fields().Slice() {
-		v := asNode(t.Nname)
-		if v == nil {
-			continue
-		}
-		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
-			nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), ctxStmt)))
-		}
-	}
-
-	return nn
-}
-
-// heapmoves generates code to handle migrating heap-escaped parameters
-// between the stack and the heap. The generated code is added to Curfn's
-// Enter and Exit lists.
-func heapmoves() {
-	lno := lineno
-	lineno = Curfn.Pos
-	nn := paramstoheap(Curfn.Type.Recvs())
-	nn = append(nn, paramstoheap(Curfn.Type.Params())...)
-	nn = append(nn, paramstoheap(Curfn.Type.Results())...)
-	Curfn.Func.Enter.Append(nn...)
-	lineno = Curfn.Func.Endlineno
-	Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
-	lineno = lno
-}
-
-func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
-	if fn.Type == nil || fn.Type.Etype != TFUNC {
-		Fatalf("mkcall %v %v", fn, fn.Type)
-	}
-
-	n := fn.Type.NumParams()
-	if n != len(va) {
-		Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
-	}
-
-	r := nod(OCALL, fn, nil)
-	r.List.Set(va)
-	if fn.Type.NumResults() > 0 {
-		r = typecheck(r, ctxExpr|ctxMultiOK)
-	} else {
-		r = typecheck(r, ctxStmt)
-	}
-	r = walkexpr(r, init)
-	r.Type = t
-	return r
-}
-
-func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
-	return vmkcall(syslook(name), t, init, args)
-}
-
-func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
-	return vmkcall(fn, t, init, args)
-}
-
-func conv(n *Node, t *types.Type) *Node {
-	if types.Identical(n.Type, t) {
-		return n
-	}
-	n = nod(OCONV, n, nil)
-	n.Type = t
-	n = typecheck(n, ctxExpr)
-	return n
-}
-
-// convnop converts node n to type t using the OCONVNOP op
-// and typechecks the result with ctxExpr.
-func convnop(n *Node, t *types.Type) *Node {
-	if types.Identical(n.Type, t) {
-		return n
-	}
-	n = nod(OCONVNOP, n, nil)
-	n.Type = t
-	n = typecheck(n, ctxExpr)
-	return n
-}
-
-// byteindex converts n, which is byte-sized, to an int used to index into an array.
-// We cannot use conv, because we allow converting bool to int here,
-// which is forbidden in user code.
-func byteindex(n *Node) *Node {
-	// We cannot convert from bool to int directly.
-	// While converting from int8 to int is possible, it would yield
-	// the wrong result for negative values.
-	// Reinterpreting the value as an unsigned byte solves both cases.
-	if !types.Identical(n.Type, types.Types[TUINT8]) {
-		n = nod(OCONV, n, nil)
-		n.Type = types.Types[TUINT8]
-		n.SetTypecheck(1)
-	}
-	n = nod(OCONV, n, nil)
-	n.Type = types.Types[TINT]
-	n.SetTypecheck(1)
-	return n
-}
-
-func chanfn(name string, n int, t *types.Type) *Node {
-	if !t.IsChan() {
-		Fatalf("chanfn %v", t)
-	}
-	fn := syslook(name)
-	switch n {
-	default:
-		Fatalf("chanfn %d", n)
-	case 1:
-		fn = substArgTypes(fn, t.Elem())
-	case 2:
-		fn = substArgTypes(fn, t.Elem(), t.Elem())
-	}
-	return fn
-}
-
-func mapfn(name string, t *types.Type) *Node {
-	if !t.IsMap() {
-		Fatalf("mapfn %v", t)
-	}
-	fn := syslook(name)
-	fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
-	return fn
-}
-
-func mapfndel(name string, t *types.Type) *Node {
-	if !t.IsMap() {
-		Fatalf("mapfn %v", t)
-	}
-	fn := syslook(name)
-	fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
-	return fn
-}
-
-const (
-	mapslow = iota
-	mapfast32
-	mapfast32ptr
-	mapfast64
-	mapfast64ptr
-	mapfaststr
-	nmapfast
-)
-
-type mapnames [nmapfast]string
-
-func mkmapnames(base string, ptr string) mapnames {
-	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
-}
-
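-// mapaccess and mapdelete pass an empty ptr suffix, so their fast32ptr and
-// fast64ptr slots simply repeat the plain fast names; only mapassign has
-// dedicated ...ptr variants in the runtime.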
-var mapaccess1 = mkmapnames("mapaccess1", "")
-var mapaccess2 = mkmapnames("mapaccess2", "")
-var mapassign = mkmapnames("mapassign", "ptr")
-var mapdelete = mkmapnames("mapdelete", "")
-
-func mapfast(t *types.Type) int {
-	// Check runtime/map.go:maxElemSize before changing.
-	if t.Elem().Width > 128 {
-		return mapslow
-	}
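-	// Dispatch on the key's algorithm: 4-byte keys use the fast32 variants,
-	// 8-byte keys fast64, and string keys faststr.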
-	switch algtype(t.Key()) {
-	case AMEM32:
-		if !t.Key().HasPointers() {
-			return mapfast32
-		}
-		if Widthptr == 4 {
-			return mapfast32ptr
-		}
-		Fatalf("small pointer %v", t.Key())
-	case AMEM64:
-		if !t.Key().HasPointers() {
-			return mapfast64
-		}
-		if Widthptr == 8 {
-			return mapfast64ptr
-		}
-		// Two-word object, at least one of which is a pointer.
-		// Use the slow path.
-	case ASTRING:
-		return mapfaststr
-	}
-	return mapslow
-}
-
-func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
-	fn := syslook(name)
-	fn = substArgTypes(fn, l, r)
-	return fn
-}
-
-func addstr(n *Node, init *Nodes) *Node {
-	// order.expr rewrote OADDSTR to have a list of strings.
-	c := n.List.Len()
-
-	if c < 2 {
-		Fatalf("addstr count %d too small", c)
-	}
-
-	buf := nodnil()
-	if n.Esc == EscNone {
-		sz := int64(0)
-		for _, n1 := range n.List.Slice() {
-			if n1.Op == OLITERAL {
-				sz += int64(len(n1.StringVal()))
-			}
-		}
-
-		// Don't allocate the buffer if the result won't fit.
-		if sz < tmpstringbufsize {
-			// Create temporary buffer for result string on stack.
-			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
-			buf = nod(OADDR, temp(t), nil)
-		}
-	}
-
-	// build list of string arguments
-	args := []*Node{buf}
-	for _, n2 := range n.List.Slice() {
-		args = append(args, conv(n2, types.Types[TSTRING]))
-	}
-
-	var fn string
-	if c <= 5 {
-		// small numbers of strings use direct runtime helpers.
-		// note: order.expr knows this cutoff too.
-		fn = fmt.Sprintf("concatstring%d", c)
-	} else {
-		// large numbers of strings are passed to the runtime as a slice.
-		fn = "concatstrings"
-
-		t := types.NewSlice(types.Types[TSTRING])
-		slice := nod(OCOMPLIT, nil, typenod(t))
-		if prealloc[n] != nil {
-			prealloc[slice] = prealloc[n]
-		}
-		slice.List.Set(args[1:]) // skip buf arg
-		args = []*Node{buf, slice}
-		slice.Esc = EscNone
-	}
-
-	cat := syslook(fn)
-	r := nod(OCALL, cat, nil)
-	r.List.Set(args)
-	r = typecheck(r, ctxExpr)
-	r = walkexpr(r, init)
-	r.Type = n.Type
-
-	return r
-}
-
-func walkAppendArgs(n *Node, init *Nodes) {
-	walkexprlistsafe(n.List.Slice(), init)
-
-	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
-	// and n are names or literals, but those may index the slice we're
-	// modifying here. Fix explicitly.
-	ls := n.List.Slice()
-	for i1, n1 := range ls {
-		ls[i1] = cheapexpr(n1, init)
-	}
-}
-
-// expand append(l1, l2...) to
-//   init {
-//     s := l1
-//     n := len(s) + len(l2)
-//     // Compare as uint so growslice can panic on overflow.
-//     if uint(n) > uint(cap(s)) {
-//       s = growslice(s, n)
-//     }
-//     s = s[:n]
-//     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-//   }
-//   s
-//
-// l2 is allowed to be a string.
-func appendslice(n *Node, init *Nodes) *Node {
-	walkAppendArgs(n, init)
-
-	l1 := n.List.First()
-	l2 := n.List.Second()
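-	// l2's length and backing pointer are both needed below, so make it
-	// cheap to evaluate more than once.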
-	l2 = cheapexpr(l2, init)
-	n.List.SetSecond(l2)
-
-	var nodes Nodes
-
-	// var s []T
-	s := temp(l1.Type)
-	nodes.Append(nod(OAS, s, l1)) // s = l1
-
-	elemtype := s.Type.Elem()
-
-	// n := len(s) + len(l2)
-	nn := temp(types.Types[TINT])
-	nodes.Append(nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
-
-	// if uint(n) > uint(cap(s))
-	nif := nod(OIF, nil, nil)
-	nuint := conv(nn, types.Types[TUINT])
-	scapuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
-	nif.Left = nod(OGT, nuint, scapuint)
-
-	// instantiate growslice(typ *type, []any, int) []any
-	fn := syslook("growslice")
-	fn = substArgTypes(fn, elemtype, elemtype)
-
-	// s = growslice(T, s, n)
-	nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
-	nodes.Append(nif)
-
-	// s = s[:n]
-	nt := nod(OSLICE, s, nil)
-	nt.SetSliceBounds(nil, nn, nil)
-	nt.SetBounded(true)
-	nodes.Append(nod(OAS, s, nt))
-
-	var ncopy *Node
-	if elemtype.HasPointers() {
-		// copy(s[len(l1):], l2)
-		nptr1 := nod(OSLICE, s, nil)
-		nptr1.Type = s.Type
-		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
-		nptr1 = cheapexpr(nptr1, &nodes)
-
-		nptr2 := l2
-
-		Curfn.Func.setWBPos(n.Pos)
-
-		// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
-		fn := syslook("typedslicecopy")
-		fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
-		ptr1, len1 := nptr1.backingArrayPtrLen()
-		ptr2, len2 := nptr2.backingArrayPtrLen()
-		ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
-	} else if instrumenting && !compiling_runtime {
-		// rely on runtime to instrument:
-		//  copy(s[len(l1):], l2)
-		// l2 can be a slice or string.
-		nptr1 := nod(OSLICE, s, nil)
-		nptr1.Type = s.Type
-		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
-		nptr1 = cheapexpr(nptr1, &nodes)
-		nptr2 := l2
-
-		ptr1, len1 := nptr1.backingArrayPtrLen()
-		ptr2, len2 := nptr2.backingArrayPtrLen()
-
-		fn := syslook("slicecopy")
-		fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
-		ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
-	} else {
-		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-		nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
-		nptr1.SetBounded(true)
-		nptr1 = nod(OADDR, nptr1, nil)
-
-		nptr2 := nod(OSPTR, l2, nil)
-
-		nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &nodes)
-		nwid = nod(OMUL, nwid, nodintconst(elemtype.Width))
-
-		// instantiate func memmove(to *any, frm *any, length uintptr)
-		fn := syslook("memmove")
-		fn = substArgTypes(fn, elemtype, elemtype)
-		ncopy = mkcall1(fn, nil, &nodes, nptr1, nptr2, nwid)
-	}
-	ln := append(nodes.Slice(), ncopy)
-
-	typecheckslice(ln, ctxStmt)
-	walkstmtlist(ln)
-	init.Append(ln...)
-	return s
-}
-
-// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
-// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n *Node) bool {
-	if Debug.N != 0 || instrumenting {
-		return false
-	}
-
-	if n.Typecheck() == 0 {
-		Fatalf("missing typecheck: %+v", n)
-	}
-
-	if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 {
-		return false
-	}
-
-	second := n.List.Second()
-	if second.Op != OMAKESLICE || second.Right != nil {
-		return false
-	}
-
-	// y must be either an integer constant, or a variable whose largest
-	// possible positive value fits into a uint.
-
-	// typecheck already ensured that constant arguments to make are not negative and fit into an int.
-
-	// Overflow of the len argument to make is handled by an explicit check of int(len) < 0 at runtime.
-	y := second.Left
-	if !Isconst(y, CTINT) && maxintval[y.Type.Etype].Cmp(maxintval[TUINT]) > 0 {
-		return false
-	}
-
-	return true
-}
-
-// extendslice rewrites append(l1, make([]T, l2)...) to
-//   init {
-//     if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
-//     } else {
-//       panicmakeslicelen()
-//     }
-//     s := l1
-//     n := len(s) + l2
-//     // Compare n and cap(s) as uint so growslice can panic on overflow of len(s) + l2.
-//     // cap is a positive int and n can become negative when len(s) + l2
-//     // overflows int. Interpreting a negative n as a uint makes it larger
-//     // than cap(s). growslice will check the int n arg and panic if n is
-//     // negative. This prevents the overflow from going undetected.
-//     if uint(n) > uint(cap(s)) {
-//       s = growslice(T, s, n)
-//     }
-//     s = s[:n]
-//     lptr := &l1[0]
-//     sptr := &s[0]
-//     if lptr == sptr || !T.HasPointers() {
-//       // growslice did not clear the whole underlying array (or did not get called)
-//       hp := &s[len(l1)]
-//       hn := l2 * sizeof(T)
-//       memclr(hp, hn)
-//     }
-//   }
-//   s
-func extendslice(n *Node, init *Nodes) *Node {
-	// isAppendOfMake made sure all possible positive values of l2 fit into a uint.
-	// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
-	// check of l2 < 0 at runtime, which is generated below.
-	l2 := conv(n.List.Second().Left, types.Types[TINT])
-	l2 = typecheck(l2, ctxExpr)
-	n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
-
-	walkAppendArgs(n, init)
-
-	l1 := n.List.First()
-	l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
-
-	var nodes []*Node
-
-	// if l2 >= 0 (likely happens), do nothing
-	nifneg := nod(OIF, nod(OGE, l2, nodintconst(0)), nil)
-	nifneg.SetLikely(true)
-
-	// else panicmakeslicelen()
-	nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init))
-	nodes = append(nodes, nifneg)
-
-	// s := l1
-	s := temp(l1.Type)
-	nodes = append(nodes, nod(OAS, s, l1))
-
-	elemtype := s.Type.Elem()
-
-	// n := len(s) + l2
-	nn := temp(types.Types[TINT])
-	nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2)))
-
-	// if uint(n) > uint(cap(s))
-	nuint := conv(nn, types.Types[TUINT])
-	capuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
-	nif := nod(OIF, nod(OGT, nuint, capuint), nil)
-
-	// instantiate growslice(typ *type, old []any, newcap int) []any
-	fn := syslook("growslice")
-	fn = substArgTypes(fn, elemtype, elemtype)
-
-	// s = growslice(T, s, n)
-	nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
-	nodes = append(nodes, nif)
-
-	// s = s[:n]
-	nt := nod(OSLICE, s, nil)
-	nt.SetSliceBounds(nil, nn, nil)
-	nt.SetBounded(true)
-	nodes = append(nodes, nod(OAS, s, nt))
-
-	// lptr := &l1[0]
-	l1ptr := temp(l1.Type.Elem().PtrTo())
-	tmp := nod(OSPTR, l1, nil)
-	nodes = append(nodes, nod(OAS, l1ptr, tmp))
-
-	// sptr := &s[0]
-	sptr := temp(elemtype.PtrTo())
-	tmp = nod(OSPTR, s, nil)
-	nodes = append(nodes, nod(OAS, sptr, tmp))
-
-	// hp := &s[len(l1)]
-	hp := nod(OINDEX, s, nod(OLEN, l1, nil))
-	hp.SetBounded(true)
-	hp = nod(OADDR, hp, nil)
-	hp = convnop(hp, types.Types[TUNSAFEPTR])
-
-	// hn := l2 * sizeof(elem(s))
-	hn := nod(OMUL, l2, nodintconst(elemtype.Width))
-	hn = conv(hn, types.Types[TUINTPTR])
-
-	clrname := "memclrNoHeapPointers"
-	hasPointers := elemtype.HasPointers()
-	if hasPointers {
-		clrname = "memclrHasPointers"
-		Curfn.Func.setWBPos(n.Pos)
-	}
-
-	var clr Nodes
-	clrfn := mkcall(clrname, nil, &clr, hp, hn)
-	clr.Append(clrfn)
-
-	if hasPointers {
-		// if l1ptr == sptr
-		nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil)
-		nifclr.Nbody = clr
-		nodes = append(nodes, nifclr)
-	} else {
-		nodes = append(nodes, clr.Slice()...)
-	}
-
-	typecheckslice(nodes, ctxStmt)
-	walkstmtlist(nodes)
-	init.Append(nodes...)
-	return s
-}
-
-// Rewrite append(src, x, y, z) so that any side effects in
-// x, y, z (including runtime panics) are evaluated in
-// initialization statements before the append.
-// For normal code generation, stop there and leave the
-// rest to cgen_append.
-//
-// For race detector, expand append(src, a [, b]* ) to
-//
-//   init {
-//     s := src
-//     const argc = len(args) - 1
-//     if cap(s) - len(s) < argc {
-//	    s = growslice(s, len(s)+argc)
-//     }
-//     n := len(s)
-//     s = s[:n+argc]
-//     s[n] = a
-//     s[n+1] = b
-//     ...
-//   }
-//   s
-func walkappend(n *Node, init *Nodes, dst *Node) *Node {
-	if !samesafeexpr(dst, n.List.First()) {
-		n.List.SetFirst(safeexpr(n.List.First(), init))
-		n.List.SetFirst(walkexpr(n.List.First(), init))
-	}
-	walkexprlistsafe(n.List.Slice()[1:], init)
-
-	nsrc := n.List.First()
-
-	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
-	// and n are names or literals, but those may index the slice we're
-	// modifying here. Fix explicitly.
-	// Using cheapexpr also makes sure that the evaluation
-	// of all arguments (and especially any panics) happen
-	// before we begin to modify the slice in a visible way.
-	ls := n.List.Slice()[1:]
-	for i, n := range ls {
-		n = cheapexpr(n, init)
-		if !types.Identical(n.Type, nsrc.Type.Elem()) {
-			n = assignconv(n, nsrc.Type.Elem(), "append")
-			n = walkexpr(n, init)
-		}
-		ls[i] = n
-	}
-
-	argc := n.List.Len() - 1
-	if argc < 1 {
-		return nsrc
-	}
-
-	// General case, with no function calls left as arguments.
-	// Leave the rest for codegen, except that instrumentation requires the old form.
-	if !instrumenting || compiling_runtime {
-		return n
-	}
-
-	var l []*Node
-
-	ns := temp(nsrc.Type)
-	l = append(l, nod(OAS, ns, nsrc)) // s = src
-
-	na := nodintconst(int64(argc)) // const argc
-	nx := nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
-	nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
-
-	fn := syslook("growslice") //   growslice(<type>, old []T, mincap int) (ret []T)
-	fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
-
-	nx.Nbody.Set1(nod(OAS, ns,
-		mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
-			nod(OADD, nod(OLEN, ns, nil), na))))
-
-	l = append(l, nx)
-
-	nn := temp(types.Types[TINT])
-	l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
-
-	nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
-	nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
-	nx.SetBounded(true)
-	l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
-
-	ls = n.List.Slice()[1:]
-	for i, n := range ls {
-		nx = nod(OINDEX, ns, nn) // s[n] ...
-		nx.SetBounded(true)
-		l = append(l, nod(OAS, nx, n)) // s[n] = arg
-		if i+1 < len(ls) {
-			l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
-		}
-	}
-
-	typecheckslice(l, ctxStmt)
-	walkstmtlist(l)
-	init.Append(l...)
-	return ns
-}
-
-// Lower copy(a, b) to a memmove call or a runtime call.
-//
-// init {
-//   n := len(a)
-//   if n > len(b) { n = len(b) }
-//   if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
-// }
-// n;
-//
-// Also works if b is a string.
-//
-func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
-	if n.Left.Type.Elem().HasPointers() {
-		Curfn.Func.setWBPos(n.Pos)
-		fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
-		n.Left = cheapexpr(n.Left, init)
-		ptrL, lenL := n.Left.backingArrayPtrLen()
-		n.Right = cheapexpr(n.Right, init)
-		ptrR, lenR := n.Right.backingArrayPtrLen()
-		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
-	}
-
-	if runtimecall {
-		// rely on runtime to instrument:
-		//  copy(n.Left, n.Right)
-		// n.Right can be a slice or string.
-
-		n.Left = cheapexpr(n.Left, init)
-		ptrL, lenL := n.Left.backingArrayPtrLen()
-		n.Right = cheapexpr(n.Right, init)
-		ptrR, lenR := n.Right.backingArrayPtrLen()
-
-		fn := syslook("slicecopy")
-		fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
-
-		return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
-	}
-
-	n.Left = walkexpr(n.Left, init)
-	n.Right = walkexpr(n.Right, init)
-	nl := temp(n.Left.Type)
-	nr := temp(n.Right.Type)
-	var l []*Node
-	l = append(l, nod(OAS, nl, n.Left))
-	l = append(l, nod(OAS, nr, n.Right))
-
-	nfrm := nod(OSPTR, nr, nil)
-	nto := nod(OSPTR, nl, nil)
-
-	nlen := temp(types.Types[TINT])
-
-	// n = len(to)
-	l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
-
-	// if n > len(frm) { n = len(frm) }
-	nif := nod(OIF, nil, nil)
-
-	nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
-	nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
-	l = append(l, nif)
-
-	// if to.ptr != frm.ptr { memmove( ... ) }
-	ne := nod(OIF, nod(ONE, nto, nfrm), nil)
-	ne.SetLikely(true)
-	l = append(l, ne)
-
-	fn := syslook("memmove")
-	fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
-	nwid := temp(types.Types[TUINTPTR])
-	setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))
-	ne.Nbody.Append(setwid)
-	nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
-	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
-	ne.Nbody.Append(call)
-
-	typecheckslice(l, ctxStmt)
-	walkstmtlist(l)
-	init.Append(l...)
-	return nlen
-}
-
-func eqfor(t *types.Type) (n *Node, needsize bool) {
-	// Should only arrive here with large memory or
-	// a struct/array containing a non-memory field/element.
-	// Small memory is handled inline, and single non-memory
-	// is handled by walkcompare.
-	switch a, _ := algtype1(t); a {
-	case AMEM:
-		n := syslook("memequal")
-		n = substArgTypes(n, t, t)
-		return n, true
-	case ASPECIAL:
-		sym := typesymprefix(".eq", t)
-		n := newname(sym)
-		setNodeNameFunc(n)
-		n.Type = functype(nil, []*Node{
-			anonfield(types.NewPtr(t)),
-			anonfield(types.NewPtr(t)),
-		}, []*Node{
-			anonfield(types.Types[TBOOL]),
-		})
-		return n, false
-	}
-	Fatalf("eqfor %v", t)
-	return nil, false
-}
-
-// The result of walkcompare MUST be assigned back to n, e.g.
-// 	n.Left = walkcompare(n.Left, init)
-func walkcompare(n *Node, init *Nodes) *Node {
-	if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL {
-		return walkcompareInterface(n, init)
-	}
-
-	if n.Left.Type.IsString() && n.Right.Type.IsString() {
-		return walkcompareString(n, init)
-	}
-
-	n.Left = walkexpr(n.Left, init)
-	n.Right = walkexpr(n.Right, init)
-
-	// Given mixed interface/concrete comparison,
-	// rewrite into types-equal && data-equal.
-	// This is efficient, avoids allocations, and avoids runtime calls.
-	if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() {
-		// Preserve side-effects in case of short-circuiting; see #32187.
-		l := cheapexpr(n.Left, init)
-		r := cheapexpr(n.Right, init)
-		// Swap so that l is the interface value and r is the concrete value.
-		if n.Right.Type.IsInterface() {
-			l, r = r, l
-		}
-
-		// Handle both == and !=.
-		eq := n.Op
-		andor := OOROR
-		if eq == OEQ {
-			andor = OANDAND
-		}
-		// Check for types equal.
-		// For empty interface, this is:
-		//   l.tab == type(r)
-		// For non-empty interface, this is:
-		//   l.tab != nil && l.tab._type == type(r)
-		var eqtype *Node
-		tab := nod(OITAB, l, nil)
-		rtyp := typename(r.Type)
-		if l.Type.IsEmptyInterface() {
-			tab.Type = types.NewPtr(types.Types[TUINT8])
-			tab.SetTypecheck(1)
-			eqtype = nod(eq, tab, rtyp)
-		} else {
-			nonnil := nod(brcom(eq), nodnil(), tab)
-			match := nod(eq, itabType(tab), rtyp)
-			eqtype = nod(andor, nonnil, match)
-		}
-		// Check for data equal.
-		eqdata := nod(eq, ifaceData(n.Pos, l, r.Type), r)
-		// Put it all together.
-		expr := nod(andor, eqtype, eqdata)
-		n = finishcompare(n, expr, init)
-		return n
-	}
-
-	// Must be comparison of array or struct.
-	// Otherwise back end handles it.
-	// While we're here, decide whether to
-	// inline or call an eq alg.
-	t := n.Left.Type
-	var inline bool
-
-	maxcmpsize := int64(4)
-	unalignedLoad := canMergeLoads()
-	if unalignedLoad {
-		// Keep this low enough to generate less code than a function call.
-		maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
-	}
-
-	switch t.Etype {
-	default:
-		if Debug_libfuzzer != 0 && t.IsInteger() {
-			n.Left = cheapexpr(n.Left, init)
-			n.Right = cheapexpr(n.Right, init)
-
-			// If exactly one comparison operand is
-			// constant, invoke the constcmp functions
-			// instead, and arrange for the constant
-			// operand to be the first argument.
-			l, r := n.Left, n.Right
-			if r.Op == OLITERAL {
-				l, r = r, l
-			}
-			constcmp := l.Op == OLITERAL && r.Op != OLITERAL
-
-			var fn string
-			var paramType *types.Type
-			switch t.Size() {
-			case 1:
-				fn = "libfuzzerTraceCmp1"
-				if constcmp {
-					fn = "libfuzzerTraceConstCmp1"
-				}
-				paramType = types.Types[TUINT8]
-			case 2:
-				fn = "libfuzzerTraceCmp2"
-				if constcmp {
-					fn = "libfuzzerTraceConstCmp2"
-				}
-				paramType = types.Types[TUINT16]
-			case 4:
-				fn = "libfuzzerTraceCmp4"
-				if constcmp {
-					fn = "libfuzzerTraceConstCmp4"
-				}
-				paramType = types.Types[TUINT32]
-			case 8:
-				fn = "libfuzzerTraceCmp8"
-				if constcmp {
-					fn = "libfuzzerTraceConstCmp8"
-				}
-				paramType = types.Types[TUINT64]
-			default:
-				Fatalf("unexpected integer size %d for %v", t.Size(), t)
-			}
-			init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
-		}
-		return n
-	case TARRAY:
-		// We can compare several elements at once with 2-, 4-, or 8-byte integer compares.
-		inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
-	case TSTRUCT:
-		inline = t.NumComponents(types.IgnoreBlankFields) <= 4
-	}
-
-	cmpl := n.Left
-	for cmpl != nil && cmpl.Op == OCONVNOP {
-		cmpl = cmpl.Left
-	}
-	cmpr := n.Right
-	for cmpr != nil && cmpr.Op == OCONVNOP {
-		cmpr = cmpr.Left
-	}
-
-	// We chose not to inline; call the equality function directly.
-	if !inline {
-		// eq algs take pointers; cmpl and cmpr must be addressable
-		if !islvalue(cmpl) || !islvalue(cmpr) {
-			Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
-		}
-
-		fn, needsize := eqfor(t)
-		call := nod(OCALL, fn, nil)
-		call.List.Append(nod(OADDR, cmpl, nil))
-		call.List.Append(nod(OADDR, cmpr, nil))
-		if needsize {
-			call.List.Append(nodintconst(t.Width))
-		}
-		res := call
-		if n.Op != OEQ {
-			res = nod(ONOT, res, nil)
-		}
-		n = finishcompare(n, res, init)
-		return n
-	}
-
-	// inline: build boolean expression comparing element by element
-	andor := OANDAND
-	if n.Op == ONE {
-		andor = OOROR
-	}
-	var expr *Node
-	compare := func(el, er *Node) {
-		a := nod(n.Op, el, er)
-		if expr == nil {
-			expr = a
-		} else {
-			expr = nod(andor, expr, a)
-		}
-	}
-	cmpl = safeexpr(cmpl, init)
-	cmpr = safeexpr(cmpr, init)
-	if t.IsStruct() {
-		for _, f := range t.Fields().Slice() {
-			sym := f.Sym
-			if sym.IsBlank() {
-				continue
-			}
-			compare(
-				nodSym(OXDOT, cmpl, sym),
-				nodSym(OXDOT, cmpr, sym),
-			)
-		}
-	} else {
-		step := int64(1)
-		remains := t.NumElem() * t.Elem().Width
-		combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
-		combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
-		combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
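-		// The combine flags report whether adjacent small elements can be
-		// merged into a single 8-, 4-, or 2-byte compare when the target
-		// supports merged (unaligned) loads.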
-		for i := int64(0); remains > 0; {
-			var convType *types.Type
-			switch {
-			case remains >= 8 && combine64bit:
-				convType = types.Types[TINT64]
-				step = 8 / t.Elem().Width
-			case remains >= 4 && combine32bit:
-				convType = types.Types[TUINT32]
-				step = 4 / t.Elem().Width
-			case remains >= 2 && combine16bit:
-				convType = types.Types[TUINT16]
-				step = 2 / t.Elem().Width
-			default:
-				step = 1
-			}
-			if step == 1 {
-				compare(
-					nod(OINDEX, cmpl, nodintconst(i)),
-					nod(OINDEX, cmpr, nodintconst(i)),
-				)
-				i++
-				remains -= t.Elem().Width
-			} else {
-				elemType := t.Elem().ToUnsigned()
-				cmplw := nod(OINDEX, cmpl, nodintconst(i))
-				cmplw = conv(cmplw, elemType) // convert to unsigned
-				cmplw = conv(cmplw, convType) // widen
-				cmprw := nod(OINDEX, cmpr, nodintconst(i))
-				cmprw = conv(cmprw, elemType)
-				cmprw = conv(cmprw, convType)
-				// For code like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
-				// ssa will generate a single large load.
-				for offset := int64(1); offset < step; offset++ {
-					lb := nod(OINDEX, cmpl, nodintconst(i+offset))
-					lb = conv(lb, elemType)
-					lb = conv(lb, convType)
-					lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset))
-					cmplw = nod(OOR, cmplw, lb)
-					rb := nod(OINDEX, cmpr, nodintconst(i+offset))
-					rb = conv(rb, elemType)
-					rb = conv(rb, convType)
-					rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset))
-					cmprw = nod(OOR, cmprw, rb)
-				}
-				compare(cmplw, cmprw)
-				i += step
-				remains -= step * t.Elem().Width
-			}
-		}
-	}
-	if expr == nil {
-		expr = nodbool(n.Op == OEQ)
-		// We still need to use cmpl and cmpr, in case they contain
-		// an expression which might panic. See issue 23837.
-		t := temp(cmpl.Type)
-		a1 := nod(OAS, t, cmpl)
-		a1 = typecheck(a1, ctxStmt)
-		a2 := nod(OAS, t, cmpr)
-		a2 = typecheck(a2, ctxStmt)
-		init.Append(a1, a2)
-	}
-	n = finishcompare(n, expr, init)
-	return n
-}
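-
-// Illustrative sketch (not actual compiler output): with unaligned loads
-// available on a 64-bit target, walkcompare expands
-//
-//	var a, b [8]byte
-//	_ = a == b
-//
-// into roughly a single widened comparison,
-//
-//	int64(a[0])|int64(a[1])<<8|... == int64(b[0])|int64(b[1])<<8|...
-//
-// while a small struct is compared field by field, joined by && (or || for !=).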
-
-func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
-	// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
-	if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
-		n = copyexpr(n, n.Type, init)
-	}
-
-	return conv(n, t)
-}
-
-func walkcompareInterface(n *Node, init *Nodes) *Node {
-	n.Right = cheapexpr(n.Right, init)
-	n.Left = cheapexpr(n.Left, init)
-	eqtab, eqdata := eqinterface(n.Left, n.Right)
-	var cmp *Node
-	if n.Op == OEQ {
-		cmp = nod(OANDAND, eqtab, eqdata)
-	} else {
-		eqtab.Op = ONE
-		cmp = nod(OOROR, eqtab, nod(ONOT, eqdata, nil))
-	}
-	return finishcompare(n, cmp, init)
-}
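-
-// Illustrative sketch (not exact IR): for interface operands i1 == i2,
-// eqinterface yields roughly
-//
-//	i1.tab == i2.tab && runtime.ifaceeq(i1.tab, i1.data, i2.data)
-//
-// and != becomes i1.tab != i2.tab || !ifaceeq(i1.tab, i1.data, i2.data).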
-
-func walkcompareString(n *Node, init *Nodes) *Node {
-	// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
-	var cs, ncs *Node // const string, non-const string
-	switch {
-	case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
-		// ignore; will be constant evaluated
-	case Isconst(n.Left, CTSTR):
-		cs = n.Left
-		ncs = n.Right
-	case Isconst(n.Right, CTSTR):
-		cs = n.Right
-		ncs = n.Left
-	}
-	if cs != nil {
-		cmp := n.Op
-		// Our comparison below assumes that the non-constant string
-		// is on the left hand side, so rewrite "" cmp x to x cmp "".
-		// See issue 24817.
-		if Isconst(n.Left, CTSTR) {
-			cmp = brrev(cmp)
-		}
-
-		// maxRewriteLen was chosen empirically.
-		// It is the value that minimizes cmd/go file size
-		// across most architectures.
-		// See the commit description for CL 26758 for details.
-		maxRewriteLen := 6
-		// Some architectures can load an unaligned byte sequence as one word,
-		// so we can cover longer strings with the same amount of code.
-		canCombineLoads := canMergeLoads()
-		combine64bit := false
-		if canCombineLoads {
-			// Keep this low enough to generate less code than a function call.
-			maxRewriteLen = 2 * thearch.LinkArch.RegSize
-			combine64bit = thearch.LinkArch.RegSize >= 8
-		}
-
-		var and Op
-		switch cmp {
-		case OEQ:
-			and = OANDAND
-		case ONE:
-			and = OOROR
-		default:
-			// Don't do byte-wise comparisons for <, <=, etc.
-			// They're fairly complicated.
-			// Length-only checks are ok, though.
-			maxRewriteLen = 0
-		}
-		if s := cs.StringVal(); len(s) <= maxRewriteLen {
-			if len(s) > 0 {
-				ncs = safeexpr(ncs, init)
-			}
-			r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
-			remains := len(s)
-			for i := 0; remains > 0; {
-				if remains == 1 || !canCombineLoads {
-					cb := nodintconst(int64(s[i]))
-					ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
-					r = nod(and, r, nod(cmp, ncb, cb))
-					remains--
-					i++
-					continue
-				}
-				var step int
-				var convType *types.Type
-				switch {
-				case remains >= 8 && combine64bit:
-					convType = types.Types[TINT64]
-					step = 8
-				case remains >= 4:
-					convType = types.Types[TUINT32]
-					step = 4
-				case remains >= 2:
-					convType = types.Types[TUINT16]
-					step = 2
-				}
-				ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
-				ncsubstr = conv(ncsubstr, convType)
-				csubstr := int64(s[i])
-				// Calculate large constant from bytes as sequence of shifts and ors.
-				// Like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
-				// ssa will combine this into a single large load.
-				for offset := 1; offset < step; offset++ {
-					b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
-					b = conv(b, convType)
-					b = nod(OLSH, b, nodintconst(int64(8*offset)))
-					ncsubstr = nod(OOR, ncsubstr, b)
-					csubstr |= int64(s[i+offset]) << uint8(8*offset)
-				}
-				csubstrPart := nodintconst(csubstr)
-				// Compare "step" bytes at once.
-				r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
-				remains -= step
-				i += step
-			}
-			return finishcompare(n, r, init)
-		}
-	}
-
-	var r *Node
-	if n.Op == OEQ || n.Op == ONE {
-		// prepare for rewrite below
-		n.Left = cheapexpr(n.Left, init)
-		n.Right = cheapexpr(n.Right, init)
-		eqlen, eqmem := eqstring(n.Left, n.Right)
-		// quick check of len before full compare for == or !=.
-		// memequal then tests equality up to length len.
-		if n.Op == OEQ {
-			// len(left) == len(right) && memequal(left, right, len)
-			r = nod(OANDAND, eqlen, eqmem)
-		} else {
-			// len(left) != len(right) || !memequal(left, right, len)
-			eqlen.Op = ONE
-			r = nod(OOROR, eqlen, nod(ONOT, eqmem, nil))
-		}
-	} else {
-		// For <, <=, >, >=: compare cmpstring(s1, s2) against 0.
-		r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
-		r = nod(n.Op, r, nodintconst(0))
-	}
-
-	return finishcompare(n, r, init)
-}
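-
-// Illustrative sketch of the constant-string rewrite above: with the default
-// maxRewriteLen, s == "ab" becomes roughly
-//
-//	len(s) == 2 && s[0] == 'a' && s[1] == 'b'
-//
-// or, when loads can be combined,
-//
-//	len(s) == 2 && uint16(s[0])|uint16(s[1])<<8 == 0x6261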
-
-// The result of finishcompare MUST be assigned back to n, e.g.
-// 	n.Left = finishcompare(n.Left, r, init)
-func finishcompare(n, r *Node, init *Nodes) *Node {
-	r = typecheck(r, ctxExpr)
-	r = conv(r, n.Type)
-	r = walkexpr(r, init)
-	return r
-}
-
-// bounded reports whether integer n must be in range [0, max).
-func bounded(n *Node, max int64) bool {
-	if n.Type == nil || !n.Type.IsInteger() {
-		return false
-	}
-
-	sign := n.Type.IsSigned()
-	bits := int32(8 * n.Type.Width)
-
-	if smallintconst(n) {
-		v := n.Int64Val()
-		return 0 <= v && v < max
-	}
-
-	switch n.Op {
-	case OAND, OANDNOT:
-		v := int64(-1)
-		switch {
-		case smallintconst(n.Left):
-			v = n.Left.Int64Val()
-		case smallintconst(n.Right):
-			v = n.Right.Int64Val()
-			if n.Op == OANDNOT {
-				v = ^v
-				if !sign {
-					v &= 1<<uint(bits) - 1
-				}
-			}
-		}
-		if 0 <= v && v < max {
-			return true
-		}
-
-	case OMOD:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64Val()
-			if 0 <= v && v <= max {
-				return true
-			}
-		}
-
-	case ODIV:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64Val()
-			for bits > 0 && v >= 2 {
-				bits--
-				v >>= 1
-			}
-		}
-
-	case ORSH:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64Val()
-			if v > int64(bits) {
-				return true
-			}
-			bits -= int32(v)
-		}
-	}
-
-	if !sign && bits <= 62 && 1<<uint(bits) <= max {
-		return true
-	}
-
-	return false
-}
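-
-// For example (illustrative): for unsigned x, bounded reports true for
-// x&7 with max 8, for x%8 with max 8, and for x>>60 (64-bit x) with
-// max 16, since each result provably lies in [0, max).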
-
-// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *Node) {
-	t := n.Left.Type
-
-	// Looking for either of:
-	//	Method(int) reflect.Method
-	//	MethodByName(string) (reflect.Method, bool)
-	//
-	// TODO(crawshaw): improve precision of match by working out
-	//                 how to check the method name.
-	if n := t.NumParams(); n != 1 {
-		return
-	}
-	if n := t.NumResults(); n != 1 && n != 2 {
-		return
-	}
-	p0 := t.Params().Field(0)
-	res0 := t.Results().Field(0)
-	var res1 *types.Field
-	if t.NumResults() == 2 {
-		res1 = t.Results().Field(1)
-	}
-
-	if res1 == nil {
-		if p0.Type.Etype != TINT {
-			return
-		}
-	} else {
-		if !p0.Type.IsString() {
-			return
-		}
-		if !res1.Type.IsBoolean() {
-			return
-		}
-	}
-
-	// Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
-// Those functions may be alive via the itab, which should not force all methods
-// to be kept alive. We only want to mark their callers.
-	if myimportpath == "reflect" {
-		switch Curfn.Func.Nname.Sym.Name { // TODO: is there a better way than hardcoding the names?
-		case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
-			return
-		}
-	}
-
-	// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
-	//       (including global variables such as numImports - was issue #19028).
-	// Also need to check for reflect package itself (see Issue #38515).
-	if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
-		Curfn.Func.SetReflectMethod(true)
-		// The LSym is initialized at this point. We need to set the attribute on the LSym.
-		Curfn.Func.lsym.Set(obj.AttrReflectMethod, true)
-	}
-}
-
-func usefield(n *Node) {
-	if objabi.Fieldtrack_enabled == 0 {
-		return
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("usefield %v", n.Op)
-
-	case ODOT, ODOTPTR:
-		break
-	}
-	if n.Sym == nil {
-		// No field name.  This DOTPTR was built by the compiler for access
-		// to runtime data structures.  Ignore.
-		return
-	}
-
-	t := n.Left.Type
-	if t.IsPtr() {
-		t = t.Elem()
-	}
-	field := n.Opt().(*types.Field)
-	if field == nil {
-		Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
-	}
-	if field.Sym != n.Sym || field.Offset != n.Xoffset {
-		Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset)
-	}
-	if !strings.Contains(field.Note, "go:\"track\"") {
-		return
-	}
-
-	outer := n.Left.Type
-	if outer.IsPtr() {
-		outer = outer.Elem()
-	}
-	if outer.Sym == nil {
-		yyerror("tracked field must be in named struct type")
-	}
-	if !types.IsExported(field.Sym.Name) {
-		yyerror("tracked field must be exported (upper case)")
-	}
-
-	sym := tracksym(outer, field)
-	if Curfn.Func.FieldTrack == nil {
-		Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
-	}
-	Curfn.Func.FieldTrack[sym] = struct{}{}
-}
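-
-// Illustrative example (field tracking is experimental and off by default):
-// with a field tagged
-//
-//	type T struct {
-//		F int `go:"track"`
-//	}
-//
-// every use of t.F records the tracked symbol in the enclosing function's
-// FieldTrack map; the field must be exported and declared in a named struct type.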
-
-func candiscardlist(l Nodes) bool {
-	for _, n := range l.Slice() {
-		if !candiscard(n) {
-			return false
-		}
-	}
-	return true
-}
-
-func candiscard(n *Node) bool {
-	if n == nil {
-		return true
-	}
-
-	switch n.Op {
-	default:
-		return false
-
-		// Discardable as long as the subpieces are.
-	case ONAME,
-		ONONAME,
-		OTYPE,
-		OPACK,
-		OLITERAL,
-		OADD,
-		OSUB,
-		OOR,
-		OXOR,
-		OADDSTR,
-		OADDR,
-		OANDAND,
-		OBYTES2STR,
-		ORUNES2STR,
-		OSTR2BYTES,
-		OSTR2RUNES,
-		OCAP,
-		OCOMPLIT,
-		OMAPLIT,
-		OSTRUCTLIT,
-		OARRAYLIT,
-		OSLICELIT,
-		OPTRLIT,
-		OCONV,
-		OCONVIFACE,
-		OCONVNOP,
-		ODOT,
-		OEQ,
-		ONE,
-		OLT,
-		OLE,
-		OGT,
-		OGE,
-		OKEY,
-		OSTRUCTKEY,
-		OLEN,
-		OMUL,
-		OLSH,
-		ORSH,
-		OAND,
-		OANDNOT,
-		ONEW,
-		ONOT,
-		OBITNOT,
-		OPLUS,
-		ONEG,
-		OOROR,
-		OPAREN,
-		ORUNESTR,
-		OREAL,
-		OIMAG,
-		OCOMPLEX:
-		break
-
-		// Discardable as long as we know it's not division by zero.
-	case ODIV, OMOD:
-		if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
-			break
-		}
-		if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
-			break
-		}
-		return false
-
-		// Discardable as long as we know it won't fail because of a bad size.
-	case OMAKECHAN, OMAKEMAP:
-		if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
-			break
-		}
-		return false
-
-		// Difficult to tell what sizes are okay.
-	case OMAKESLICE:
-		return false
-
-	case OMAKESLICECOPY:
-		return false
-	}
-
-	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
-		return false
-	}
-
-	return true
-}
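-
-// For example (illustrative): the right-hand side of "_ = x + y" is
-// discardable, while "_ = x / y" is not unless y is a nonzero constant,
-// since the division could panic.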
-
-// Rewrite
-//	go builtin(x, y, z)
-// into
-//	go func(a1, a2, a3) {
-//		builtin(a1, a2, a3)
-//	}(x, y, z)
-// for print, println, and delete.
-//
-// Rewrite
-//	go f(x, y, uintptr(unsafe.Pointer(z)))
-// into
-//	go func(a1, a2, a3) {
-//		f(a1, a2, uintptr(a3))
-//	}(x, y, unsafe.Pointer(z))
-// for functions that contain unsafe-uintptr arguments.
-
-var wrapCall_prgen int
-
-// The result of wrapCall MUST be assigned back to n, e.g.
-// 	n.Left = wrapCall(n.Left, init)
-func wrapCall(n *Node, init *Nodes) *Node {
-	if n.Ninit.Len() != 0 {
-		walkstmtlist(n.Ninit.Slice())
-		init.AppendNodes(&n.Ninit)
-	}
-
-	isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
-
-	// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
-	if !isBuiltinCall && n.IsDDD() {
-		last := n.List.Len() - 1
-		if va := n.List.Index(last); va.Op == OSLICELIT {
-			n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
-			n.SetIsDDD(false)
-		}
-	}
-
-	// origArgs records which arguments are unsafe.Pointer-to-uintptr conversions.
-	origArgs := make([]*Node, n.List.Len())
-	t := nod(OTFUNC, nil, nil)
-	for i, arg := range n.List.Slice() {
-		s := lookupN("a", i)
-		if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
-			origArgs[i] = arg
-			arg = arg.Left
-			n.List.SetIndex(i, arg)
-		}
-		t.List.Append(symfield(s, arg.Type))
-	}
-
-	wrapCall_prgen++
-	sym := lookupN("wrap·", wrapCall_prgen)
-	fn := dclfunc(sym, t)
-
-	args := paramNnames(t.Type)
-	for i, origArg := range origArgs {
-		if origArg == nil {
-			continue
-		}
-		arg := nod(origArg.Op, args[i], nil)
-		arg.Type = origArg.Type
-		args[i] = arg
-	}
-	call := nod(n.Op, nil, nil)
-	if !isBuiltinCall {
-		call.Op = OCALL
-		call.Left = n.Left
-		call.SetIsDDD(n.IsDDD())
-	}
-	call.List.Set(args)
-	fn.Nbody.Set1(call)
-
-	funcbody()
-
-	fn = typecheck(fn, ctxStmt)
-	typecheckslice(fn.Nbody.Slice(), ctxStmt)
-	xtop = append(xtop, fn)
-
-	call = nod(OCALL, nil, nil)
-	call.Left = fn.Func.Nname
-	call.List.Set(n.List.Slice())
-	call = typecheck(call, ctxStmt)
-	call = walkexpr(call, init)
-	return call
-}
-
-// substArgTypes substitutes the given list of types for
-// successive occurrences of the "any" placeholder in the
-// type syntax expression n.Type.
-// The result of substArgTypes MUST be assigned back to old, e.g.
-// 	n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *Node, types_ ...*types.Type) *Node {
-	n := old.copy()
-
-	for _, t := range types_ {
-		dowidth(t)
-	}
-	n.Type = types.SubstAny(n.Type, &types_)
-	if len(types_) > 0 {
-		Fatalf("substArgTypes: too many argument types")
-	}
-	return n
-}
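-
-// For example (hypothetical signature): given a runtime helper declared as
-//
-//	func mapaccess(t *byte, m any, key any) any
-//
-// substArgTypes(fn, t1, t2, t3) substitutes t1, t2, t3 for the three "any"
-// placeholders in order; supplying more types than placeholders is a fatal error.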
-
-// canMergeLoads reports whether the backend optimization passes for
-// the current architecture can combine adjacent loads into a single
-// larger, possibly unaligned, load. Note that currently the
-// optimizations must be able to handle little endian byte order.
-func canMergeLoads() bool {
-	switch thearch.LinkArch.Family {
-	case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
-		return true
-	case sys.PPC64:
-		// Load combining only supported on ppc64le.
-		return thearch.LinkArch.ByteOrder == binary.LittleEndian
-	}
-	return false
-}
-
-// isRuneCount reports whether n is of the form len([]rune(string)).
-// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n *Node) bool {
-	return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
-}
-
-func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
-	if !n.Type.IsPtr() {
-		Fatalf("expected pointer type: %v", n.Type)
-	}
-	elem := n.Type.Elem()
-	if count != nil {
-		if !elem.IsArray() {
-			Fatalf("expected array type: %v", elem)
-		}
-		elem = elem.Elem()
-	}
-
-	size := elem.Size()
-	if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
-		return n
-	}
-
-	if count == nil {
-		count = nodintconst(1)
-	}
-
-	n.Left = cheapexpr(n.Left, init)
-	init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(elem), conv(count, types.Types[TUINTPTR])))
-	return n
-}
-
-var walkCheckPtrArithmeticMarker byte
-
-func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
-	// Calling cheapexpr(n, init) below leads to a recursive call
-	// to walkexpr, which leads us back here again. Use n.Opt to
-	// prevent infinite loops.
-	if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
-		return n
-	} else if opt != nil {
-		// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
-		// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
-		Fatalf("unexpected Opt: %v", opt)
-	}
-	n.SetOpt(&walkCheckPtrArithmeticMarker)
-	defer n.SetOpt(nil)
-
-	// TODO(mdempsky): Make stricter. We only need to exempt
-	// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
-	switch n.Left.Op {
-	case OCALLFUNC, OCALLMETH, OCALLINTER:
-		return n
-	}
-
-	if n.Left.Op == ODOTPTR && isReflectHeaderDataField(n.Left) {
-		return n
-	}
-
-	// Find original unsafe.Pointer operands involved in this
-	// arithmetic expression.
-	//
-	// "It is valid both to add and to subtract offsets from a
-	// pointer in this way. It is also valid to use &^ to round
-	// pointers, usually for alignment."
-	var originals []*Node
-	var walk func(n *Node)
-	walk = func(n *Node) {
-		switch n.Op {
-		case OADD:
-			walk(n.Left)
-			walk(n.Right)
-		case OSUB, OANDNOT:
-			walk(n.Left)
-		case OCONVNOP:
-			if n.Left.Type.IsUnsafePtr() {
-				n.Left = cheapexpr(n.Left, init)
-				originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
-			}
-		}
-	}
-	walk(n.Left)
-
-	n = cheapexpr(n, init)
-
-	slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals)
-	slice.Esc = EscNone
-
-	init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
-	// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
-	// the backing store for multiple calls to checkptrArithmetic.
-
-	return n
-}
-
-// checkPtr reports whether pointer checking should be enabled for
-// function fn at a given level. See debugHelpFooter for defined
-// levels.
-func checkPtr(fn *Node, level int) bool {
-	return Debug_checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0
-}
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
new file mode 100644
index 0000000..9f9bb87
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -0,0 +1,1420 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first CanInline determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then InlineCalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// making 1 the default and -l disable inlining. Additional levels (beyond -l) may be buggy and
+// are not supported.
+//      0: disabled
+//      1: 80-node leaf functions, one-liners, panic, lazy typechecking (default)
+//      2: (unassigned)
+//      3: (unassigned)
+//      4: allow non-leaf functions
+//
+// At some point this may get another default and become switch-offable with -N.
+//
+// The -d typecheckinl flag enables early typechecking of all imported bodies,
+// which is useful to flush out bugs.
+//
+// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
+// which calls get inlined or not; more is for debugging, and may go away at any point.
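+//
+// Typical usage of these flags from the command line (illustrative):
+//
+//	go build -gcflags=-m ./...   // report inlining decisions
+//	go build -gcflags=-l ./...   // disable inlining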
+
+package inline
+
+import (
+	"fmt"
+	"go/constant"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+// Inlining budget parameters, gathered in one place
+const (
+	inlineMaxBudget       = 80
+	inlineExtraAppendCost = 0
+	// The default effectively allows inlining only if there's at most one call. -l=4 overrides this by using 1 instead.
+	inlineExtraCallCost  = 57              // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
+	inlineExtraPanicCost = 1               // do not penalize inlining panics.
+	inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
+
+	inlineBigFunctionNodes   = 5000 // Functions with this many nodes are considered "big".
+	inlineBigFunctionMaxCost = 20   // Max cost of inlinee when inlining into a "big" function.
+)
+
+func InlinePackage() {
+	// Find functions that can be inlined and clone them before walk expands them.
+	ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+		numfns := numNonClosures(list)
+		for _, n := range list {
+			if !recursive || numfns > 1 {
+				// We allow inlining if there is no
+				// recursion, or the recursion cycle is
+				// across more than one function.
+				CanInline(n)
+			} else {
+				if base.Flag.LowerM > 1 {
+					fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
+				}
+			}
+			InlineCalls(n)
+		}
+	})
+}
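+
+// For example (illustrative): in a two-function recursion cycle such as
+//
+//	func even(n int) bool { if n == 0 { return true }; return odd(n - 1) }
+//	func odd(n int) bool { if n == 0 { return false }; return even(n - 1) }
+//
+// both functions are still considered by CanInline, since the cycle spans
+// more than one function; only directly self-recursive functions are
+// rejected outright here.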
+
+// CanInline determines whether fn is inlineable.
+// If so, CanInline saves a copy of fn.Body in fn.Inl so calls can later be expanded.
+// fn and fn.Body will already have been typechecked.
+func CanInline(fn *ir.Func) {
+	if fn.Nname == nil {
+		base.Fatalf("CanInline no nname %+v", fn)
+	}
+
+	var reason string // reason, if any, that the function was not inlined
+	if base.Flag.LowerM > 1 || logopt.Enabled() {
+		defer func() {
+			if reason != "" {
+				if base.Flag.LowerM > 1 {
+					fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
+				}
+				if logopt.Enabled() {
+					logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
+				}
+			}
+		}()
+	}
+
+	// If marked "go:noinline", don't inline
+	if fn.Pragma&ir.Noinline != 0 {
+		reason = "marked go:noinline"
+		return
+	}
+
+	// If marked "go:norace" and -race compilation, don't inline.
+	if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
+		reason = "marked go:norace with -race compilation"
+		return
+	}
+
+	// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
+	if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
+		reason = "marked go:nocheckptr"
+		return
+	}
+
+	// If marked "go:cgo_unsafe_args", don't inline, since the
+	// function makes assumptions about its argument frame layout.
+	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+		reason = "marked go:cgo_unsafe_args"
+		return
+	}
+
+	// If marked as "go:uintptrescapes", don't inline, since the
+	// escape information is lost during inlining.
+	if fn.Pragma&ir.UintptrEscapes != 0 {
+		reason = "marked as having an escaping uintptr argument"
+		return
+	}
+
+	// The nowritebarrierrec checker currently works at function
+	// granularity, so inlining yeswritebarrierrec functions can
+	// confuse it (#22342). As a workaround, disallow inlining
+	// them for now.
+	if fn.Pragma&ir.Yeswritebarrierrec != 0 {
+		reason = "marked go:yeswritebarrierrec"
+		return
+	}
+
+	// If fn has no body (it is defined outside of Go), it cannot be inlined.
+	if len(fn.Body) == 0 {
+		reason = "no function body"
+		return
+	}
+
+	if fn.Typecheck() == 0 {
+		base.Fatalf("CanInline on non-typechecked function %v", fn)
+	}
+
+	n := fn.Nname
+	if n.Func.InlinabilityChecked() {
+		return
+	}
+	defer n.Func.SetInlinabilityChecked(true)
+
+	cc := int32(inlineExtraCallCost)
+	if base.Flag.LowerL == 4 {
+		cc = 1 // this appears to yield better performance than 0.
+	}
+
+	// At this point in the game the function we're looking at may
+	// have "stale" autos, vars that still appear in the Dcl list, but
+	// which no longer have any uses in the function body (due to
+	// elimination by deadcode). We'd like to exclude these dead vars
+	// when creating the "Inline.Dcl" field below; to accomplish this,
+	// the hairyVisitor below builds up a map of used/referenced
+	// locals, and we use this map to produce a pruned Inline.Dcl
+	// list. See issue 25249 for more context.
+
+	visitor := hairyVisitor{
+		budget:        inlineMaxBudget,
+		extraCallCost: cc,
+	}
+	if visitor.tooHairy(fn) {
+		reason = visitor.reason
+		return
+	}
+
+	n.Func.Inl = &ir.Inline{
+		Cost: inlineMaxBudget - visitor.budget,
+		Dcl:  pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
+		Body: inlcopylist(fn.Body),
+	}
+
+	if base.Flag.LowerM > 1 {
+		fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
+	} else if base.Flag.LowerM != 0 {
+		fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
+	}
+	if logopt.Enabled() {
+		logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
+	}
+}
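+
+// For example (illustrative): with the default budget of 80, a small accessor
+//
+//	func (p *Point) X() int { return p.x }
+//
+// easily fits and has its body saved in Inl, whereas any function containing
+// recover, select, go, or defer is rejected regardless of its size.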
+
+// Inline_Flood marks n's inline body for export and recursively ensures
+// all called functions are marked too.
+func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
+	if n == nil {
+		return
+	}
+	if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+		base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
+	}
+	fn := n.Func
+	if fn == nil {
+		base.Fatalf("Inline_Flood: missing Func on %v", n)
+	}
+	if fn.Inl == nil {
+		return
+	}
+
+	if fn.ExportInline() {
+		return
+	}
+	fn.SetExportInline(true)
+
+	typecheck.ImportedBody(fn)
+
+	var doFlood func(n ir.Node)
+	doFlood = func(n ir.Node) {
+		switch n.Op() {
+		case ir.OMETHEXPR, ir.ODOTMETH:
+			Inline_Flood(ir.MethodExprName(n), exportsym)
+
+		case ir.ONAME:
+			n := n.(*ir.Name)
+			switch n.Class {
+			case ir.PFUNC:
+				Inline_Flood(n, exportsym)
+				exportsym(n)
+			case ir.PEXTERN:
+				exportsym(n)
+			}
+
+		case ir.OCALLPART:
+			// Okay, because we don't yet inline indirect
+			// calls to method values.
+		case ir.OCLOSURE:
+			// VisitList doesn't visit closure bodies, so force a
+			// recursive call to VisitList on the body of the closure.
+			ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+		}
+	}
+
+	// Recursively identify all referenced functions for
+	// reexport. We want to include even non-called functions,
+	// because after inlining they might be callable.
+	ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
+}
+
+// hairyVisitor visits a function body to determine its inlining
+// hairiness and whether or not it can be inlined.
+type hairyVisitor struct {
+	budget        int32
+	reason        string
+	extraCallCost int32
+	usedLocals    ir.NameSet
+	do            func(ir.Node) bool
+}
+
+func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
+	v.do = v.doNode // cache closure
+	if ir.DoChildren(fn, v.do) {
+		return true
+	}
+	if v.budget < 0 {
+		v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-v.budget, inlineMaxBudget)
+		return true
+	}
+	return false
+}
+
+func (v *hairyVisitor) doNode(n ir.Node) bool {
+	if n == nil {
+		return false
+	}
+	switch n.Op() {
+	// Call is okay if inlinable and we have the budget for the body.
+	case ir.OCALLFUNC:
+		n := n.(*ir.CallExpr)
+		// Functions that call runtime.getcaller{pc,sp} cannot be inlined
+		// because getcaller{pc,sp} expect a pointer to the caller's first argument.
+		//
+		// runtime.throw is a "cheap call" like panic in normal code.
+		if n.X.Op() == ir.ONAME {
+			name := n.X.(*ir.Name)
+			if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
+				fn := name.Sym().Name
+				if fn == "getcallerpc" || fn == "getcallersp" {
+					v.reason = "call to " + fn
+					return true
+				}
+				if fn == "throw" {
+					v.budget -= inlineExtraThrowCost
+					break
+				}
+			}
+		}
+
+		if ir.IsIntrinsicCall(n) {
+			// Treat like any other node.
+			break
+		}
+
+		if fn := inlCallee(n.X); fn != nil && fn.Inl != nil {
+			v.budget -= fn.Inl.Cost
+			break
+		}
+
+		// Call cost for non-leaf inlining.
+		v.budget -= v.extraCallCost
+
+	// Call is okay if inlinable and we have the budget for the body.
+	case ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		t := n.X.Type()
+		if t == nil {
+			base.Fatalf("no function type for [%p] %+v\n", n.X, n.X)
+		}
+		fn := ir.MethodExprName(n.X).Func
+		if types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
+			// Special case: explicitly allow
+			// mid-stack inlining of
+			// runtime.heapBits.next even though
+			// it calls slow-path
+			// runtime.heapBits.nextArena.
+			break
+		}
+		if fn.Inl != nil {
+			v.budget -= fn.Inl.Cost
+			break
+		}
+		// Call cost for non-leaf inlining.
+		v.budget -= v.extraCallCost
+
+	// Things that are too hairy, irrespective of the budget
+	case ir.OCALL, ir.OCALLINTER:
+		// Call cost for non-leaf inlining.
+		v.budget -= v.extraCallCost
+
+	case ir.OPANIC:
+		n := n.(*ir.UnaryExpr)
+		if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
+			// Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
+			// Before CL 284412, these conversions were introduced later in the
+			// compiler, so they didn't count against inlining budget.
+			v.budget++
+		}
+		v.budget -= inlineExtraPanicCost
+
+	case ir.ORECOVER:
+		// recover matches the argument frame pointer to find
+		// the right panic value, so it needs an argument frame.
+		v.reason = "call to recover"
+		return true
+
+	case ir.OCLOSURE:
+		// TODO(danscales) - fix some bugs when budget is lowered below 15
+		// Maybe make budget proportional to number of closure variables, e.g.:
+		//v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
+		v.budget -= 15
+		// Scan body of closure (which DoChildren doesn't automatically
+		// do) to check for disallowed ops in the body and include the
+		// body in the budget.
+		if doList(n.(*ir.ClosureExpr).Func.Body, v.do) {
+			return true
+		}
+
+	case ir.ORANGE,
+		ir.OSELECT,
+		ir.OGO,
+		ir.ODEFER,
+		ir.ODCLTYPE, // can't print yet
+		ir.OTAILCALL:
+		v.reason = "unhandled op " + n.Op().String()
+		return true
+
+	case ir.OAPPEND:
+		v.budget -= inlineExtraAppendCost
+
+	case ir.ODCLCONST, ir.OFALL:
+		// These nodes don't produce code; omit from inlining budget.
+		return false
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		if n.Label != nil {
+			v.reason = "labeled control"
+			return true
+		}
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		if n.Label != nil {
+			v.reason = "labeled control"
+			return true
+		}
+	// case ir.ORANGE, ir.OSELECT in "unhandled" above
+
+	case ir.OBREAK, ir.OCONTINUE:
+		n := n.(*ir.BranchStmt)
+		if n.Label != nil {
+			// Should have short-circuited due to labeled control error above.
+			base.Fatalf("unexpected labeled break/continue: %v", n)
+		}
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		if ir.IsConst(n.Cond, constant.Bool) {
+			// This if and the condition cost nothing.
+			// TODO(rsc): It seems strange that we visit the dead branch.
+			return doList(n.Init(), v.do) ||
+				doList(n.Body, v.do) ||
+				doList(n.Else, v.do)
+		}
+
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PAUTO {
+			v.usedLocals.Add(n)
+		}
+
+	case ir.OBLOCK:
+		// The only OBLOCK we should see at this point is an empty one.
+		// In any event, let the child traversal below take care of the statements,
+		// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
+		v.budget++
+
+	case ir.OCALLPART, ir.OSLICELIT:
+		v.budget-- // Hack for toolstash -cmp.
+
+	case ir.OMETHEXPR:
+		v.budget++ // Hack for toolstash -cmp.
+	}
+
+	v.budget--
+
+	// When debugging, don't stop early, to get full cost of inlining this function
+	if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
+		v.reason = "too expensive"
+		return true
+	}
+
+	return ir.DoChildren(n, v.do)
+}
+
+func isBigFunc(fn *ir.Func) bool {
+	budget := inlineBigFunctionNodes
+	return ir.Any(fn, func(n ir.Node) bool {
+		budget--
+		return budget <= 0
+	})
+}
+
+// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
+// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
+// the body and dcls of an inlineable function.
+func inlcopylist(ll []ir.Node) []ir.Node {
+	s := make([]ir.Node, len(ll))
+	for i, n := range ll {
+		s[i] = inlcopy(n)
+	}
+	return s
+}
+
+// inlcopy is like DeepCopy(), but does extra work to copy closures.
+func inlcopy(n ir.Node) ir.Node {
+	var edit func(ir.Node) ir.Node
+	edit = func(x ir.Node) ir.Node {
+		switch x.Op() {
+		case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
+			return x
+		}
+		m := ir.Copy(x)
+		ir.EditChildren(m, edit)
+		if x.Op() == ir.OCLOSURE {
+			x := x.(*ir.ClosureExpr)
+			// Need to save/duplicate x.Func.Nname,
+			// x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
+			// x.Func.Body for iexport and local inlining.
+			oldfn := x.Func
+			newfn := ir.NewFunc(oldfn.Pos())
+			if oldfn.ClosureCalled() {
+				newfn.SetClosureCalled(true)
+			}
+			m.(*ir.ClosureExpr).Func = newfn
+			newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
+			// XXX OK to share fn.Type() ??
+			newfn.Nname.SetType(oldfn.Nname.Type())
+			newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype)
+			newfn.Body = inlcopylist(oldfn.Body)
+			// Make shallow copy of the Dcl and ClosureVar slices
+			newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
+			newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
+		}
+		return m
+	}
+	return edit(n)
+}
+
+// InlineCalls (with inlnode) walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions. This is the external entry point.
+func InlineCalls(fn *ir.Func) {
+	savefn := ir.CurFunc
+	ir.CurFunc = fn
+	maxCost := int32(inlineMaxBudget)
+	if isBigFunc(fn) {
+		maxCost = inlineBigFunctionMaxCost
+	}
+	// Map to keep track of functions that have been inlined at a particular
+	// call site, in order to stop inlining when we reach the beginning of a
+	// recursion cycle again. We don't inline immediately recursive functions,
+	// but allow inlining if there is a recursion cycle of many functions.
+	// Most likely, the inlining will stop before we even hit the beginning of
+	// the cycle again, but the map catches the unusual case.
+	inlMap := make(map[*ir.Func]bool)
+	var edit func(ir.Node) ir.Node
+	edit = func(n ir.Node) ir.Node {
+		return inlnode(n, maxCost, inlMap, edit)
+	}
+	ir.EditChildren(fn, edit)
+	ir.CurFunc = savefn
+}
+
+// Turn an OINLCALL into a statement.
+func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
+	n := ir.NewBlockStmt(inlcall.Pos(), nil)
+	n.List = inlcall.Init()
+	n.List.Append(inlcall.Body.Take()...)
+	return n
+}
+
+// Turn an OINLCALL into a single valued expression.
+// The result of inlconv2expr MUST be assigned back to n, e.g.
+// 	n.Left = inlconv2expr(n.Left)
+func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
+	r := n.ReturnVars[0]
+	return ir.InitExpr(append(n.Init(), n.Body...), r)
+}
+
+// Turn the return values of the OINLCALL in n into an expression
+// list, folding the init and body (which contain the inlined
+// statements) into the first list element so that evaluation order
+// is preserved. Used in return, OAS2FUNC, and call statements.
+func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
+	if n.Op() != ir.OINLCALL || len(n.ReturnVars) == 0 {
+		base.Fatalf("inlconv2list %+v\n", n)
+	}
+
+	s := n.ReturnVars
+	s[0] = ir.InitExpr(append(n.Init(), n.Body...), s[0])
+	return s
+}
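+
+// Illustrative sketch: for "a, b := f(x)" where f was inlined, the OINLCALL's
+// init and body are folded into the first result, so the assignment becomes
+// roughly
+//
+//	a, b = (init; body; ~R0), ~R1
+//
+// preserving the evaluation order of the inlined statements.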
+
+// inlnode recurses over the tree to find inlineable calls, which will
+// be turned into OINLCALLs by mkinlcall. When the recursion comes
+// back up, it examines the node's children and uses one of the
+// inlconv/glue functions above to turn the OINLCALL into an
+// expression, a statement, or to patch it into this node's lists as
+// appropriate.
+// NOTE it makes no sense to pass the glue functions down the
+// recursion to the level where the OINLCALL gets created because they
+// have to edit /this/ n, so you'd have to push that one down as well,
+// but then you may as well do it here. So this is cleaner and
+// shorter and less complicated.
+// The result of inlnode MUST be assigned back to n, e.g.
+// 	n = inlnode(n, maxCost, inlMap, edit)
+func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
+	if n == nil {
+		return n
+	}
+
+	switch n.Op() {
+	case ir.ODEFER, ir.OGO:
+		n := n.(*ir.GoDeferStmt)
+		switch call := n.Call; call.Op() {
+		case ir.OCALLFUNC, ir.OCALLMETH:
+			call := call.(*ir.CallExpr)
+			call.NoInline = true
+		}
+
+	// TODO do them here (or earlier),
+	// so escape analysis can avoid more heapmoves.
+	case ir.OCLOSURE:
+		return n
+	case ir.OCALLMETH:
+		// Prevent inlining some reflect.Value methods when using checkptr,
+		// even when package reflect was compiled without it (#35073).
+		n := n.(*ir.CallExpr)
+		if s := ir.MethodExprName(n.X).Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+			return n
+		}
+	}
+
+	lno := ir.SetPos(n)
+
+	ir.EditChildren(n, edit)
+
+	if as := n; as.Op() == ir.OAS2FUNC {
+		as := as.(*ir.AssignListStmt)
+		if as.Rhs[0].Op() == ir.OINLCALL {
+			as.Rhs = inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))
+			as.SetOp(ir.OAS2)
+			as.SetTypecheck(0)
+			n = typecheck.Stmt(as)
+		}
+	}
+
+	// with all the branches out of the way, it is now time to
+	// transmogrify this node itself unless inhibited by the
+	// switch at the top of this function.
+	switch n.Op() {
+	case ir.OCALLFUNC, ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		if n.NoInline {
+			return n
+		}
+	}
+
+	var call *ir.CallExpr
+	switch n.Op() {
+	case ir.OCALLFUNC:
+		call = n.(*ir.CallExpr)
+		if base.Flag.LowerM > 3 {
+			fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
+		}
+		if ir.IsIntrinsicCall(call) {
+			break
+		}
+		if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
+			n = mkinlcall(call, fn, maxCost, inlMap, edit)
+		}
+
+	case ir.OCALLMETH:
+		call = n.(*ir.CallExpr)
+		if base.Flag.LowerM > 3 {
+			fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel)
+		}
+
+		// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
+		if call.X.Type() == nil {
+			base.Fatalf("no function type for [%p] %+v\n", call.X, call.X)
+		}
+
+		n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit)
+	}
+
+	base.Pos = lno
+
+	if n.Op() == ir.OINLCALL {
+		ic := n.(*ir.InlinedCallExpr)
+		switch call.Use {
+		default:
+			ir.Dump("call", call)
+			base.Fatalf("call missing use")
+		case ir.CallUseExpr:
+			n = inlconv2expr(ic)
+		case ir.CallUseStmt:
+			n = inlconv2stmt(ic)
+		case ir.CallUseList:
+			// leave for caller to convert
+		}
+	}
+
+	return n
+}
+
+// inlCallee takes a function-typed expression and returns the underlying function ONAME
+// that it refers to if statically known. Otherwise, it returns nil.
+func inlCallee(fn ir.Node) *ir.Func {
+	fn = ir.StaticValue(fn)
+	switch fn.Op() {
+	case ir.OMETHEXPR:
+		fn := fn.(*ir.SelectorExpr)
+		n := ir.MethodExprName(fn)
+		// Check that receiver type matches fn.X.
+		// TODO(mdempsky): Handle implicit dereference
+		// of pointer receiver argument?
+		if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
+			return nil
+		}
+		return n.Func
+	case ir.ONAME:
+		fn := fn.(*ir.Name)
+		if fn.Class == ir.PFUNC {
+			return fn.Func
+		}
+	case ir.OCLOSURE:
+		fn := fn.(*ir.ClosureExpr)
+		c := fn.Func
+		CanInline(c)
+		return c
+	}
+	return nil
+}
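+
+// For example (illustrative), inlCallee resolves each of these callees to a *Func:
+//
+//	f(x)                            // ONAME with Class PFUNC
+//	T.M(recv, x)                    // OMETHEXPR with matching receiver type
+//	func(y int) int { return y }(x) // immediately applied OCLOSURE
+//
+// but it returns nil for a call through a function-typed variable whose
+// value is not statically known.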
+
+func inlParam(t *types.Field, as ir.InitNode, inlvars map[*ir.Name]*ir.Name) ir.Node {
+	if t.Nname == nil {
+		return ir.BlankNode
+	}
+	n := t.Nname.(*ir.Name)
+	if ir.IsBlank(n) {
+		return ir.BlankNode
+	}
+	inlvar := inlvars[n]
+	if inlvar == nil {
+		base.Fatalf("missing inlvar for %v", n)
+	}
+	as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar))
+	inlvar.Name().Defn = as
+	return inlvar
+}
+
+var inlgen int
+
+// SSADumpInline gives the SSA back end a chance to dump the function
+// when producing output for debugging the compiler itself.
+var SSADumpInline = func(*ir.Func) {}
+
+// If n is a call node (OCALLFUNC or OCALLMETH), and fn is a function with an
+// inlinable body, return an OINLCALL node that can replace n.
+// The returned node's Init has the parameter assignments, the Body is the
+// inlined function body, and ReturnVars contains the result variables.
+// The result of mkinlcall MUST be assigned back to n, e.g.
+// 	n = mkinlcall(n, fn, maxCost, inlMap, edit)
+func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
+	if fn.Inl == nil {
+		if logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
+				fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
+		}
+		return n
+	}
+	if fn.Inl.Cost > maxCost {
+		// The inlined function body is too big. Typically we use this check to restrict
+		// inlining into very big functions. See issues 26546 and 17566.
+		if logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
+				fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
+		}
+		return n
+	}
+
+	if fn == ir.CurFunc {
+		// Can't recursively inline a function into itself.
+		if logopt.Enabled() {
+			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
+		}
+		return n
+	}
+
+	if base.Flag.Cfg.Instrumenting && types.IsRuntimePkg(fn.Sym().Pkg) {
+		// Runtime package must not be instrumented.
+		// Instrument skips runtime package. However, some runtime code can be
+		// inlined into other packages and instrumented there. To avoid this,
+		// we disable inlining of runtime functions when instrumenting.
+		// The example that we observed is inlining of LockOSThread,
+		// which led to false race reports on m contents.
+		return n
+	}
+
+	if inlMap[fn] {
+		if base.Flag.LowerM > 1 {
+			fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
+		}
+		return n
+	}
+	inlMap[fn] = true
+	defer func() {
+		inlMap[fn] = false
+	}()
+	if base.Debug.TypecheckInl == 0 {
+		typecheck.ImportedBody(fn)
+	}
+
+	// We have a function node, and it has an inlineable body.
+	if base.Flag.LowerM > 1 {
+		fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
+	} else if base.Flag.LowerM != 0 {
+		fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+	}
+	if base.Flag.LowerM > 2 {
+		fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
+	}
+
+	SSADumpInline(fn)
+
+	ninit := n.Init()
+
+	// For normal function calls, the function callee expression
+	// may contain side effects (e.g., added by addinit during
+	// inlconv2expr or inlconv2list). Make sure to preserve these,
+	// if necessary (#42703).
+	if n.Op() == ir.OCALLFUNC {
+		callee := n.X
+		for callee.Op() == ir.OCONVNOP {
+			conv := callee.(*ir.ConvExpr)
+			ninit.Append(ir.TakeInit(conv)...)
+			callee = conv.X
+		}
+		if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
+			base.Fatalf("unexpected callee expression: %v", callee)
+		}
+	}
+
+	// Make temp names to use instead of the originals.
+	inlvars := make(map[*ir.Name]*ir.Name)
+
+	// record formals/locals for later post-processing
+	var inlfvars []*ir.Name
+
+	for _, ln := range fn.Inl.Dcl {
+		if ln.Op() != ir.ONAME {
+			continue
+		}
+		if ln.Class == ir.PPARAMOUT { // return values handled below.
+			continue
+		}
+		inlf := typecheck.Expr(inlvar(ln)).(*ir.Name)
+		inlvars[ln] = inlf
+		if base.Flag.GenDwarfInl > 0 {
+			if ln.Class == ir.PPARAM {
+				inlf.Name().SetInlFormal(true)
+			} else {
+				inlf.Name().SetInlLocal(true)
+			}
+			inlf.SetPos(ln.Pos())
+			inlfvars = append(inlfvars, inlf)
+		}
+	}
+
+	nreturns := 0
+	ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
+		if n != nil && n.Op() == ir.ORETURN {
+			nreturns++
+		}
+	})
+
+	// We can delay declaring+initializing result parameters if:
+	// (1) there's only one "return" statement in the inlined
+	// function, and (2) the result parameters aren't named.
+	delayretvars := nreturns == 1
+
+	// temporaries for return values.
+	var retvars []ir.Node
+	for i, t := range fn.Type().Results().Fields().Slice() {
+		var m *ir.Name
+		if nn := t.Nname; nn != nil && !ir.IsBlank(nn.(*ir.Name)) && !strings.HasPrefix(nn.Sym().Name, "~r") {
+			n := nn.(*ir.Name)
+			m = inlvar(n)
+			m = typecheck.Expr(m).(*ir.Name)
+			inlvars[n] = m
+			delayretvars = false // found a named result parameter
+		} else {
+			// Anonymous return values: synthesize names for use in the assignment that replaces the return.
+			m = retvar(t, i)
+		}
+
+		if base.Flag.GenDwarfInl > 0 {
+			// Don't update the src.Pos on a return variable if it
+			// was manufactured by the inliner (e.g. "~R2"); such vars
+			// were not part of the original callee.
+			if !strings.HasPrefix(m.Sym().Name, "~R") {
+				m.Name().SetInlFormal(true)
+				m.SetPos(t.Pos)
+				inlfvars = append(inlfvars, m)
+			}
+		}
+
+		retvars = append(retvars, m)
+	}
+
+	// Assign arguments to the parameters' temp names.
+	as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+	as.Def = true
+	if n.Op() == ir.OCALLMETH {
+		sel := n.X.(*ir.SelectorExpr)
+		if sel.X == nil {
+			base.Fatalf("method call without receiver: %+v", n)
+		}
+		as.Rhs.Append(sel.X)
+	}
+	as.Rhs.Append(n.Args...)
+
+	// For non-dotted calls to variadic functions, we assign the
+	// variadic parameter's temp name separately.
+	var vas *ir.AssignStmt
+
+	if recv := fn.Type().Recv(); recv != nil {
+		as.Lhs.Append(inlParam(recv, as, inlvars))
+	}
+	for _, param := range fn.Type().Params().Fields().Slice() {
+		// For ordinary parameters or variadic parameters in
+		// dotted calls, just add the variable to the
+		// assignment list, and we're done.
+		if !param.IsDDD() || n.IsDDD {
+			as.Lhs.Append(inlParam(param, as, inlvars))
+			continue
+		}
+
+		// Otherwise, we need to collect the remaining values
+		// to pass as a slice.
+
+		x := len(as.Lhs)
+		for len(as.Lhs) < len(as.Rhs) {
+			as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
+		}
+		varargs := as.Lhs[x:]
+
+		vas = ir.NewAssignStmt(base.Pos, nil, nil)
+		vas.X = inlParam(param, vas, inlvars)
+		if len(varargs) == 0 {
+			vas.Y = typecheck.NodNil()
+			vas.Y.SetType(param.Type)
+		} else {
+			lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil)
+			lit.List = varargs
+			vas.Y = lit
+		}
+	}
+
+	if len(as.Rhs) != 0 {
+		ninit.Append(typecheck.Stmt(as))
+	}
+
+	if vas != nil {
+		ninit.Append(typecheck.Stmt(vas))
+	}
+
+	if !delayretvars {
+		// Zero the return parameters.
+		for _, n := range retvars {
+			ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+			ras := ir.NewAssignStmt(base.Pos, n, nil)
+			ninit.Append(typecheck.Stmt(ras))
+		}
+	}
+
+	retlabel := typecheck.AutoLabel(".i")
+
+	inlgen++
+
+	parent := -1
+	if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
+		parent = b.InliningIndex()
+	}
+
+	sym := fn.Linksym()
+	newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
+
+	// Add an inline mark just before the inlined body.
+	// This mark is inline in the code so that it's a reasonable spot
+	// to put a breakpoint. Not sure if that's really necessary or not
+	// (in which case it could go at the end of the function instead).
+	// Note issue 28603.
+	inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
+	inlMark.SetPos(n.Pos().WithIsStmt())
+	inlMark.Index = int64(newIndex)
+	ninit.Append(inlMark)
+
+	if base.Flag.GenDwarfInl > 0 {
+		if !sym.WasInlined() {
+			base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+			sym.Set(obj.AttrWasInlined, true)
+		}
+	}
+
+	subst := inlsubst{
+		retlabel:     retlabel,
+		retvars:      retvars,
+		delayretvars: delayretvars,
+		inlvars:      inlvars,
+		bases:        make(map[*src.PosBase]*src.PosBase),
+		newInlIndex:  newIndex,
+		fn:           fn,
+	}
+	subst.edit = subst.node
+
+	body := subst.list(ir.Nodes(fn.Inl.Body))
+
+	lab := ir.NewLabelStmt(base.Pos, retlabel)
+	body = append(body, lab)
+
+	typecheck.Stmts(body)
+
+	if base.Flag.GenDwarfInl > 0 {
+		for _, v := range inlfvars {
+			v.SetPos(subst.updatedPos(v.Pos()))
+		}
+	}
+
+	//dumplist("ninit post", ninit);
+
+	call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
+	*call.PtrInit() = ninit
+	call.Body = body
+	call.ReturnVars = retvars
+	call.SetType(n.Type())
+	call.SetTypecheck(1)
+
+	// transitive inlining
+	// might be nice to do this before exporting the body,
+	// but can't emit the body with inlining expanded.
+	// instead we emit the things that the body needs
+	// and each use must redo the inlining.
+	// luckily these are small.
+	ir.EditChildren(call, edit)
+
+	if base.Flag.LowerM > 2 {
+		fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
+	}
+
+	return call
+}
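+
+// Illustrative shape of the OINLCALL built above (a sketch, not exact IR):
+//
+//	OINLCALL
+//	  Init:       argument temps = call arguments; inline mark
+//	  Body:       substituted callee body, each return rewritten to
+//	              "retvars = results; goto retlabel", then the retlabel
+//	  ReturnVars: ~R0, ~R1, ...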
+
+// Every time we expand a function we generate a new set of tmpnames,
+// PAUTOs in the calling function, and link them off of the
+// PPARAMs, PAUTOs, and PPARAMOUTs of the called function.
+func inlvar(var_ *ir.Name) *ir.Name {
+	if base.Flag.LowerM > 3 {
+		fmt.Printf("inlvar %+v\n", var_)
+	}
+
+	n := typecheck.NewName(var_.Sym())
+	n.SetType(var_.Type())
+	n.Class = ir.PAUTO
+	n.SetUsed(true)
+	n.Curfn = ir.CurFunc // the calling function, not the called one
+	n.SetAddrtaken(var_.Addrtaken())
+
+	ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+	return n
+}
+
+// Synthesize a variable to store the inlined function's results in.
+func retvar(t *types.Field, i int) *ir.Name {
+	n := typecheck.NewName(typecheck.LookupNum("~R", i))
+	n.SetType(t.Type)
+	n.Class = ir.PAUTO
+	n.SetUsed(true)
+	n.Curfn = ir.CurFunc // the calling function, not the called one
+	ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+	return n
+}
+
+// Synthesize a variable to store the inlined function's arguments
+// when they come from a multiple return call.
+func argvar(t *types.Type, i int) ir.Node {
+	n := typecheck.NewName(typecheck.LookupNum("~arg", i))
+	n.SetType(t.Elem())
+	n.Class = ir.PAUTO
+	n.SetUsed(true)
+	n.Curfn = ir.CurFunc // the calling function, not the called one
+	ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+	return n
+}
+
+// The inlsubst type implements the actual inlining of a single
+// function call.
+type inlsubst struct {
+	// Target of the goto substituted in place of a return.
+	retlabel *types.Sym
+
+	// Temporary result variables.
+	retvars []ir.Node
+
+	// Whether result variables should be initialized at the
+	// "return" statement.
+	delayretvars bool
+
+	inlvars map[*ir.Name]*ir.Name
+
+	// bases maps from original PosBase to PosBase with an extra
+	// inlined call frame.
+	bases map[*src.PosBase]*src.PosBase
+
+	// newInlIndex is the index of the inlined call frame to
+	// insert for inlined nodes.
+	newInlIndex int
+
+	edit func(ir.Node) ir.Node // cached copy of subst.node method value closure
+
+	// If non-nil, we are inside a closure inside the inlined function, and
+	// newclofn is the Func of the new inlined closure.
+	newclofn *ir.Func
+
+	fn *ir.Func // For debug -- the func that is being inlined
+}
+
+// list inlines a list of nodes.
+func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
+	s := make([]ir.Node, 0, len(ll))
+	for _, n := range ll {
+		s = append(s, subst.node(n))
+	}
+	return s
+}
+
+// fields returns a list of the fields of a struct type representing receiver,
+// params, or results, after duplicating the field nodes and substituting the
+// Nname nodes inside the field nodes.
+func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
+	oldfields := oldt.FieldSlice()
+	newfields := make([]*types.Field, len(oldfields))
+	for i := range oldfields {
+		newfields[i] = oldfields[i].Copy()
+		if oldfields[i].Nname != nil {
+			newfields[i].Nname = subst.node(oldfields[i].Nname.(*ir.Name))
+		}
+	}
+	return newfields
+}
+
+// clovar creates a new ONAME node for a local variable or param of a closure
+// inside a function being inlined.
+func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
+	// TODO(danscales): want to get rid of this shallow copy, with code like the
+	// following, but it is hard to copy all the necessary flags in a maintainable way.
+	// m := ir.NewNameAt(n.Pos(), n.Sym())
+	// m.Class = n.Class
+	// m.SetType(n.Type())
+	// m.SetTypecheck(1)
+	//if n.IsClosureVar() {
+	//	m.SetIsClosureVar(true)
+	//}
+	m := &ir.Name{}
+	*m = *n
+	m.Curfn = subst.newclofn
+	if n.Defn != nil && n.Defn.Op() == ir.ONAME {
+		if !n.IsClosureVar() {
+			base.FatalfAt(n.Pos(), "want closure variable, got: %+v", n)
+		}
+		if n.Sym().Pkg != types.LocalPkg {
+			// If the closure came from inlining a function from
+			// another package, we must change the package of the captured
+			// variable to LocalPkg, so that the fields of the closure
+			// struct are in the local package and can be accessed even if
+			// the name is not exported. If you disable this code, you can
+			// reproduce the problem by running 'go test
+			// go/internal/srcimporter'. TODO(mdempsky) - maybe change
+			// how we create closure structs?
+			m.SetSym(types.LocalPkg.Lookup(n.Sym().Name))
+		}
+		// Make sure any inlvar which is the Defn
+		// of an ONAME closure var is rewritten
+		// during inlining. Don't substitute
+		// if Defn node is outside inlined function.
+		if subst.inlvars[n.Defn.(*ir.Name)] != nil {
+			m.Defn = subst.node(n.Defn)
+		}
+	}
+	if n.Outer != nil {
+		// Either the outer variable is defined in function being inlined,
+		// and we will replace it with the substituted variable, or it is
+		// defined outside the function being inlined, and we should just
+		// skip the outer variable (the closure variable of the function
+		// being inlined).
+		s := subst.node(n.Outer).(*ir.Name)
+		if s == n.Outer {
+			s = n.Outer.Outer
+		}
+		m.Outer = s
+	}
+	return m
+}
+
+// closure does the necessary substitutions for a ClosureExpr n and returns the new
+// closure node.
+func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
+	m := ir.Copy(n)
+	m.SetPos(subst.updatedPos(m.Pos()))
+	ir.EditChildren(m, subst.edit)
+
+	//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
+
+	// The following is similar to funcLit
+	oldfn := n.Func
+	newfn := ir.NewFunc(oldfn.Pos())
+	// These three lines are not strictly necessary, but just to be clear
+	// that the new function needs to redo typechecking and inlinability checks.
+	newfn.SetTypecheck(0)
+	newfn.SetInlinabilityChecked(false)
+	newfn.Inl = nil
+	newfn.SetIsHiddenClosure(true)
+	newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
+	newfn.Nname.Func = newfn
+	newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
+	newfn.Nname.Defn = newfn
+
+	m.(*ir.ClosureExpr).Func = newfn
+	newfn.OClosure = m.(*ir.ClosureExpr)
+
+	if subst.newclofn != nil {
+		//fmt.Printf("Inlining a closure with a nested closure\n")
+	}
+	prevxfunc := subst.newclofn
+
+	// Mark that we are now substituting within a closure (within the
+	// inlined function), and create new nodes for all the local
+	// vars/params inside this closure.
+	subst.newclofn = newfn
+	newfn.Dcl = nil
+	newfn.ClosureVars = nil
+	for _, oldv := range oldfn.Dcl {
+		newv := subst.clovar(oldv)
+		subst.inlvars[oldv] = newv
+		newfn.Dcl = append(newfn.Dcl, newv)
+	}
+	for _, oldv := range oldfn.ClosureVars {
+		newv := subst.clovar(oldv)
+		subst.inlvars[oldv] = newv
+		newfn.ClosureVars = append(newfn.ClosureVars, newv)
+	}
+
+	// Need to replace ONAME nodes in
+	// newfn.Type().FuncType().Receiver/Params/Results.FieldSlice().Nname
+	oldt := oldfn.Type()
+	newrecvs := subst.fields(oldt.Recvs())
+	var newrecv *types.Field
+	if len(newrecvs) > 0 {
+		newrecv = newrecvs[0]
+	}
+	newt := types.NewSignature(oldt.Pkg(), newrecv,
+		subst.fields(oldt.Params()), subst.fields(oldt.Results()))
+
+	newfn.Nname.SetType(newt)
+	newfn.Body = subst.list(oldfn.Body)
+
+	// Remove the nodes for the current closure from subst.inlvars
+	for _, oldv := range oldfn.Dcl {
+		delete(subst.inlvars, oldv)
+	}
+	for _, oldv := range oldfn.ClosureVars {
+		delete(subst.inlvars, oldv)
+	}
+	// Go back to previous closure func
+	subst.newclofn = prevxfunc
+
+	// Actually create the named function for the closure, now that
+	// the closure is inlined in a specific function.
+	m.SetTypecheck(0)
+	if oldfn.ClosureCalled() {
+		typecheck.Callee(m)
+	} else {
+		typecheck.Expr(m)
+	}
+	return m
+}
+
+// node recursively copies a node from the saved pristine body of the
+// inlined function, substituting references to input/output
+// parameters with ones to the tmpnames, and substituting returns with
+// assignments to the output.
+func (subst *inlsubst) node(n ir.Node) ir.Node {
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op() {
+	case ir.ONAME:
+		n := n.(*ir.Name)
+
+		// Handle captured variables when inlining closures.
+		if n.IsClosureVar() && subst.newclofn == nil {
+			o := n.Outer
+
+			// Deal with case where sequence of closures are inlined.
+			// TODO(danscales) - write test case to see if we need to
+			// go up multiple levels.
+			if o.Curfn != ir.CurFunc {
+				o = o.Outer
+			}
+
+			// make sure the outer param matches the inlining location
+			if o == nil || o.Curfn != ir.CurFunc {
+				base.Fatalf("%v: unresolvable capture %v\n", ir.Line(n), n)
+			}
+
+			if base.Flag.LowerM > 2 {
+				fmt.Printf("substituting captured name %+v  ->  %+v\n", n, o)
+			}
+			return o
+		}
+
+		if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
+			if base.Flag.LowerM > 2 {
+				fmt.Printf("substituting name %+v  ->  %+v\n", n, inlvar)
+			}
+			return inlvar
+		}
+
+		if base.Flag.LowerM > 2 {
+			fmt.Printf("not substituting name %+v\n", n)
+		}
+		return n
+
+	case ir.OMETHEXPR:
+		n := n.(*ir.SelectorExpr)
+		return n
+
+	case ir.OLITERAL, ir.ONIL, ir.OTYPE:
+		// If n is a named constant or type, we can continue
+		// using it in the inline copy. Otherwise, make a copy
+		// so we can update the line number.
+		if n.Sym() != nil {
+			return n
+		}
+
+	case ir.ORETURN:
+		if subst.newclofn != nil {
+			// Don't do special substitutions if inside a closure
+			break
+		}
+		// Because of the subst.newclofn check above, this return is
+		// guaranteed to belong to the function being inlined, not to
+		// a closure nested inside it.
+		n := n.(*ir.ReturnStmt)
+		init := subst.list(n.Init())
+		if len(subst.retvars) != 0 && len(n.Results) != 0 {
+			as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+
+			// Make a shallow copy of retvars.
+			// Otherwise OINLCALL.Rlist will be the same list,
+			// and later walk and typecheck may clobber it.
+			for _, n := range subst.retvars {
+				as.Lhs.Append(n)
+			}
+			as.Rhs = subst.list(n.Results)
+
+			if subst.delayretvars {
+				for _, n := range as.Lhs {
+					as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+					n.Name().Defn = as
+				}
+			}
+
+			init = append(init, typecheck.Stmt(as))
+		}
+		init = append(init, ir.NewBranchStmt(base.Pos, ir.OGOTO, subst.retlabel))
+		typecheck.Stmts(init)
+		return ir.NewBlockStmt(base.Pos, init)
+
+	case ir.OGOTO:
+		n := n.(*ir.BranchStmt)
+		m := ir.Copy(n).(*ir.BranchStmt)
+		m.SetPos(subst.updatedPos(m.Pos()))
+		*m.PtrInit() = nil
+		p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
+		m.Label = typecheck.Lookup(p)
+		return m
+
+	case ir.OLABEL:
+		if subst.newclofn != nil {
+			// Don't do special substitutions if inside a closure
+			break
+		}
+		n := n.(*ir.LabelStmt)
+		m := ir.Copy(n).(*ir.LabelStmt)
+		m.SetPos(subst.updatedPos(m.Pos()))
+		*m.PtrInit() = nil
+		p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
+		m.Label = typecheck.Lookup(p)
+		return m
+
+	case ir.OCLOSURE:
+		return subst.closure(n.(*ir.ClosureExpr))
+
+	}
+
+	m := ir.Copy(n)
+	m.SetPos(subst.updatedPos(m.Pos()))
+	ir.EditChildren(m, subst.edit)
+	return m
+}
+
+func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
+	pos := base.Ctxt.PosTable.Pos(xpos)
+	oldbase := pos.Base() // can be nil
+	newbase := subst.bases[oldbase]
+	if newbase == nil {
+		newbase = src.NewInliningBase(oldbase, subst.newInlIndex)
+		subst.bases[oldbase] = newbase
+	}
+	pos.SetBase(newbase)
+	return base.Ctxt.PosTable.XPos(pos)
+}
+
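updatedPos allocates one inlining wrapper per distinct position base and reuses it for every later position that shares the base. The same memoization in a self-contained toy; Base and InlBase are stand-ins for src.PosBase and the wrapper made by src.NewInliningBase:

	package main

	import "fmt"

	type Base struct{ file string }

	type InlBase struct {
		orig     *Base
		inlIndex int
	}

	type substituter struct {
		bases    map[*Base]*InlBase
		inlIndex int
	}

	// updatedBase wraps each distinct base exactly once and reuses the
	// wrapper afterwards, the same caching updatedPos performs.
	func (s *substituter) updatedBase(old *Base) *InlBase {
		nb := s.bases[old]
		if nb == nil {
			nb = &InlBase{orig: old, inlIndex: s.inlIndex}
			s.bases[old] = nb
		}
		return nb
	}

	func main() {
		a := &Base{file: "a.go"}
		s := &substituter{bases: make(map[*Base]*InlBase), inlIndex: 7}
		b1, b2 := s.updatedBase(a), s.updatedBase(a)
		fmt.Println(b1 == b2, b1.inlIndex) // true 7: one wrapper per base
	}
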
+func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
+	s := make([]*ir.Name, 0, len(ll))
+	for _, n := range ll {
+		if n.Class == ir.PAUTO {
+			if !vis.usedLocals.Has(n) {
+				continue
+			}
+		}
+		s = append(s, n)
+	}
+	return s
+}
+
+// numNonClosures returns the number of functions in list that are not closures.
+func numNonClosures(list []*ir.Func) int {
+	count := 0
+	for _, fn := range list {
+		if fn.OClosure == nil {
+			count++
+		}
+	}
+	return count
+}
+
+func doList(list []ir.Node, do func(ir.Node) bool) bool {
+	for _, x := range list {
+		if x != nil {
+			if do(x) {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/gc/bitset.go b/src/cmd/compile/internal/ir/bitset.go
similarity index 79%
rename from src/cmd/compile/internal/gc/bitset.go
rename to src/cmd/compile/internal/ir/bitset.go
index ed5eea0..0c7bd54 100644
--- a/src/cmd/compile/internal/gc/bitset.go
+++ b/src/cmd/compile/internal/ir/bitset.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package ir
 
 type bitset8 uint8
 
@@ -14,6 +14,18 @@
 	}
 }
 
+func (f bitset8) get2(shift uint8) uint8 {
+	return uint8(f>>shift) & 3
+}
+
+// set2 sets two bits in f using the bottom two bits of b.
+func (f *bitset8) set2(shift uint8, b uint8) {
+	// Clear old bits.
+	*(*uint8)(f) &^= 3 << shift
+	// Set new bits.
+	*(*uint8)(f) |= uint8(b&3) << shift
+}
+
 type bitset16 uint16
 
 func (f *bitset16) set(mask uint16, b bool) {
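The get2/set2 pair packs independent two-bit fields into a single byte: the shift selects the field, and &^ clears the old bits before OR-ing in the new value. A runnable standalone copy of the technique:

	package main

	import "fmt"

	type bitset8 uint8

	func (f bitset8) get2(shift uint8) uint8 {
		return uint8(f>>shift) & 3
	}

	func (f *bitset8) set2(shift uint8, b uint8) {
		*(*uint8)(f) &^= 3 << shift         // clear the old two bits
		*(*uint8)(f) |= uint8(b&3) << shift // set the new value
	}

	func main() {
		var f bitset8
		f.set2(0, 2)                      // field A in bits 0-1
		f.set2(2, 3)                      // field B in bits 2-3
		fmt.Println(f.get2(0), f.get2(2)) // 2 3
		f.set2(0, 1)                      // overwrite field A only
		fmt.Println(f.get2(0), f.get2(2)) // 1 3
	}
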
diff --git a/src/cmd/compile/internal/ir/cfg.go b/src/cmd/compile/internal/ir/cfg.go
new file mode 100644
index 0000000..d986ac3
--- /dev/null
+++ b/src/cmd/compile/internal/ir/cfg.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+var (
+	// MaxStackVarSize is the maximum size of a variable that we will
+	// allocate on the stack. This limit is for explicit variable
+	// declarations like "var x T" or "x := ...".
+	// Note: the flag smallframes can update this value.
+	MaxStackVarSize = int64(10 * 1024 * 1024)
+
+	// MaxImplicitStackVarSize is the maximum size of an implicit variable
+	// that we will allocate on the stack:
+	//   p := new(T)          allocating T on the stack
+	//   p := &T{}            allocating T on the stack
+	//   s := make([]T, n)    allocating [n]T on the stack
+	//   s := []byte("...")   allocating [n]byte on the stack
+	// Note: the flag smallframes can update this value.
+	MaxImplicitStackVarSize = int64(64 * 1024)
+
+	// MaxSmallArraySize is the maximum size of an array which is considered small.
+	// Small arrays will be initialized directly with a sequence of constant stores.
+	// Large arrays will be initialized by copying from a static temp.
+	// 256 bytes was chosen to minimize generated code + statictmp size.
+	MaxSmallArraySize = int64(256)
+)
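These limits are consulted elsewhere in the compiler; the following is only a hypothetical sketch of the decision they encode, with lowercase constants standing in for the exported variables:

	package main

	import "fmt"

	const (
		maxStackVarSize         = 10 * 1024 * 1024
		maxImplicitStackVarSize = 64 * 1024
	)

	// stackCandidate reports whether a variable of the given size may stay
	// on the stack: the larger limit applies to explicit declarations, the
	// smaller one to implicit allocations like new(T), &T{}, make([]T, n).
	func stackCandidate(explicit bool, size int64) bool {
		if explicit {
			return size < maxStackVarSize
		}
		return size < maxImplicitStackVarSize
	}

	func main() {
		fmt.Println(stackCandidate(true, 1<<20))  // true: explicit 1MiB variable
		fmt.Println(stackCandidate(false, 1<<20)) // false: implicit 1MiB allocation
	}
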
diff --git a/src/cmd/compile/internal/gc/class_string.go b/src/cmd/compile/internal/ir/class_string.go
similarity index 89%
rename from src/cmd/compile/internal/gc/class_string.go
rename to src/cmd/compile/internal/ir/class_string.go
index a4084a7..13b9bd4 100644
--- a/src/cmd/compile/internal/gc/class_string.go
+++ b/src/cmd/compile/internal/ir/class_string.go
@@ -1,6 +1,6 @@
-// Code generated by "stringer -type=Class"; DO NOT EDIT.
+// Code generated by "stringer -type=Class name.go"; DO NOT EDIT.
 
-package gc
+package ir
 
 import "strconv"
 
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
new file mode 100644
index 0000000..eaa4d5b
--- /dev/null
+++ b/src/cmd/compile/internal/ir/const.go
@@ -0,0 +1,99 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"go/constant"
+	"math"
+	"math/big"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+)
+
+func NewBool(b bool) Node {
+	return NewLiteral(constant.MakeBool(b))
+}
+
+func NewInt(v int64) Node {
+	return NewLiteral(constant.MakeInt64(v))
+}
+
+func NewString(s string) Node {
+	return NewLiteral(constant.MakeString(s))
+}
+
+const (
+	// ConstPrec is the maximum size in bits for big.Ints before
+	// signalling overflow, and also the mantissa precision for big.Floats.
+	ConstPrec = 512
+)
+
+func BigFloat(v constant.Value) *big.Float {
+	f := new(big.Float)
+	f.SetPrec(ConstPrec)
+	switch u := constant.Val(v).(type) {
+	case int64:
+		f.SetInt64(u)
+	case *big.Int:
+		f.SetInt(u)
+	case *big.Float:
+		f.Set(u)
+	case *big.Rat:
+		f.SetRat(u)
+	default:
+		base.Fatalf("unexpected: %v", u)
+	}
+	return f
+}
+
+// ConstOverflow reports whether constant value v is too large
+// to represent with type t.
+func ConstOverflow(v constant.Value, t *types.Type) bool {
+	switch {
+	case t.IsInteger():
+		bits := uint(8 * t.Size())
+		if t.IsUnsigned() {
+			x, ok := constant.Uint64Val(v)
+			return !ok || x>>bits != 0
+		}
+		x, ok := constant.Int64Val(v)
+		if x < 0 {
+			x = ^x
+		}
+		return !ok || x>>(bits-1) != 0
+	case t.IsFloat():
+		switch t.Size() {
+		case 4:
+			f, _ := constant.Float32Val(v)
+			return math.IsInf(float64(f), 0)
+		case 8:
+			f, _ := constant.Float64Val(v)
+			return math.IsInf(f, 0)
+		}
+	case t.IsComplex():
+		ft := types.FloatForComplex(t)
+		return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
+	}
+	base.Fatalf("ConstOverflow: %v, %v", v, t)
+	panic("unreachable")
+}
+
+// IsConstNode reports whether n is a Go language constant (as opposed to a
+// compile-time constant).
+//
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+func IsConstNode(n Node) bool {
+	return n.Op() == OLITERAL
+}
+
+func IsSmallIntConst(n Node) bool {
+	if n.Op() == OLITERAL {
+		v, ok := constant.Int64Val(n.Val())
+		return ok && int64(int32(v)) == v
+	}
+	return false
+}
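The unsigned-integer branch of ConstOverflow can be exercised directly with the public go/constant package. A runnable sketch of that one case:

	package main

	import (
		"fmt"
		"go/constant"
	)

	// uintOverflows mirrors the unsigned branch above: a value overflows a
	// width-bit unsigned type when it isn't an exact uint64 or has bits at
	// or above the width.
	func uintOverflows(v constant.Value, bits uint) bool {
		x, ok := constant.Uint64Val(v)
		return !ok || x>>bits != 0
	}

	func main() {
		fmt.Println(uintOverflows(constant.MakeInt64(255), 8)) // false: fits in uint8
		fmt.Println(uintOverflows(constant.MakeInt64(256), 8)) // true: needs 9 bits
		fmt.Println(uintOverflows(constant.MakeInt64(-1), 8))  // true: not a uint64
	}
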
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
new file mode 100644
index 0000000..7da9b24
--- /dev/null
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -0,0 +1,102 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/internal/src"
+)
+
+// A Node may implement the Orig and SetOrig method to
+// maintain a pointer to the "unrewritten" form of a Node.
+// If a Node does not implement OrigNode, it is its own Orig.
+//
+// Note that both SepCopy and Copy have definitions compatible
+// with a Node that does not implement OrigNode: such a Node
+// is its own Orig, and in that case, that's what both want to return
+// anyway (SepCopy unconditionally, and Copy only when the input
+// is its own Orig as well, but if the output does not implement
+// OrigNode, then neither does the input, making the condition true).
+type OrigNode interface {
+	Node
+	Orig() Node
+	SetOrig(Node)
+}
+
+// origNode may be embedded into a Node to make it implement OrigNode.
+type origNode struct {
+	orig Node `mknode:"-"`
+}
+
+func (n *origNode) Orig() Node     { return n.orig }
+func (n *origNode) SetOrig(o Node) { n.orig = o }
+
+// Orig returns the “original” node for n.
+// If n implements OrigNode, Orig returns n.Orig().
+// Otherwise Orig returns n itself.
+func Orig(n Node) Node {
+	if n, ok := n.(OrigNode); ok {
+		o := n.Orig()
+		if o == nil {
+			Dump("Orig nil", n)
+			base.Fatalf("Orig returned nil")
+		}
+		return o
+	}
+	return n
+}
+
+// SepCopy returns a separate shallow copy of n,
+// breaking any Orig link to any other nodes.
+func SepCopy(n Node) Node {
+	n = n.copy()
+	if n, ok := n.(OrigNode); ok {
+		n.SetOrig(n)
+	}
+	return n
+}
+
+// Copy returns a shallow copy of n.
+// If Orig(n) == n, then Orig(Copy(n)) == the copy.
+// Otherwise the Orig link is preserved as well.
+//
+// The specific semantics surrounding Orig are subtle but right for most uses.
+// See issues #26855 and #27765 for pitfalls.
+func Copy(n Node) Node {
+	c := n.copy()
+	if n, ok := n.(OrigNode); ok && n.Orig() == n {
+		c.(OrigNode).SetOrig(c)
+	}
+	return c
+}
+
+// DeepCopy returns a “deep” copy of n, with its entire structure copied
+// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE).
+// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos.
+func DeepCopy(pos src.XPos, n Node) Node {
+	var edit func(Node) Node
+	edit = func(x Node) Node {
+		switch x.Op() {
+		case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
+			return x
+		}
+		x = Copy(x)
+		if pos.IsKnown() {
+			x.SetPos(pos)
+		}
+		EditChildren(x, edit)
+		return x
+	}
+	return edit(n)
+}
+
+// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
+func DeepCopyList(pos src.XPos, list []Node) []Node {
+	var out []Node
+	for _, n := range list {
+		out = append(out, DeepCopy(pos, n))
+	}
+	return out
+}
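DeepCopy's shape (shallow-copy a node, then recursively rewrite its children, leaving shared leaves such as ONAME alone) in a self-contained toy:

	package main

	import "fmt"

	// node is a toy stand-in for ir.Node; "NAME" plays the role of the
	// shared leaf ops (ONAME, OLITERAL, ...) that DeepCopy leaves alone.
	type node struct {
		op   string
		kids []*node
	}

	func deepCopy(n *node) *node {
		if n == nil {
			return nil
		}
		if n.op == "NAME" {
			return n // shared leaves are not copied
		}
		c := *n // shallow copy, as ir.Copy does
		c.kids = make([]*node, len(n.kids))
		for i, k := range n.kids {
			c.kids[i] = deepCopy(k) // then edit children, as EditChildren does
		}
		return &c
	}

	func main() {
		x := &node{op: "NAME"}
		sum := &node{op: "ADD", kids: []*node{x, x}}
		cp := deepCopy(sum)
		fmt.Println(cp != sum, cp.kids[0] == x) // true true: tree copied, leaf shared
	}
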
diff --git a/src/cmd/compile/internal/gc/dump.go b/src/cmd/compile/internal/ir/dump.go
similarity index 90%
rename from src/cmd/compile/internal/gc/dump.go
rename to src/cmd/compile/internal/ir/dump.go
index 29eb1c1..fc995ce 100644
--- a/src/cmd/compile/internal/gc/dump.go
+++ b/src/cmd/compile/internal/ir/dump.go
@@ -6,21 +6,23 @@
 // for debugging purposes. The code is customized for Node graphs
 // and may be used for an alternative view of the node structure.
 
-package gc
+package ir
 
 import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
 	"fmt"
 	"io"
 	"os"
 	"reflect"
 	"regexp"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
 )
 
-// dump is like fdump but prints to stderr.
+// DumpAny is like FDumpAny but prints to stderr.
-func dump(root interface{}, filter string, depth int) {
-	fdump(os.Stderr, root, filter, depth)
+func DumpAny(root interface{}, filter string, depth int) {
+	FDumpAny(os.Stderr, root, filter, depth)
 }
 
-// fdump prints the structure of a rooted data structure
+// FDumpAny prints the structure of a rooted data structure
@@ -40,7 +42,7 @@
 // rather than their type; struct fields with zero values or
 // non-matching field names are omitted, and "…" means recursion
 // depth has been reached or struct fields have been omitted.
-func fdump(w io.Writer, root interface{}, filter string, depth int) {
+func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
 	if root == nil {
 		fmt.Fprintln(w, "nil")
 		return
@@ -138,19 +140,9 @@
 		return
 	}
 
-	// special cases
-	switch v := x.Interface().(type) {
-	case Nodes:
-		// unpack Nodes since reflect cannot look inside
-		// due to the unexported field in its struct
-		x = reflect.ValueOf(v.Slice())
-
-	case src.XPos:
-		p.printf("%s", linestr(v))
+	if pos, ok := x.Interface().(src.XPos); ok {
+		p.printf("%s", base.FmtPos(pos))
 		return
-
-	case *types.Node:
-		x = reflect.ValueOf(asNode(v))
 	}
 
 	switch x.Kind() {
@@ -203,7 +195,7 @@
 		isNode := false
 		if n, ok := x.Interface().(Node); ok {
 			isNode = true
-			p.printf("%s %s {", n.Op.String(), p.addr(x))
+			p.printf("%s %s {", n.Op().String(), p.addr(x))
 		} else {
 			p.printf("%s {", typ)
 		}
@@ -230,7 +222,7 @@
 					omitted = true
 					continue // exclude zero-valued fields
 				}
-				if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
+				if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
 					omitted = true
 					continue // exclude empty Nodes slices
 				}
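A much-reduced, runnable sketch of what FDumpAny does: reflect over a data structure, print struct fields, and stop at a depth limit. The real dumper additionally filters field names by regexp and elides zero values:

	package main

	import (
		"fmt"
		"reflect"
	)

	func dump(v reflect.Value, depth int) {
		if depth < 0 {
			fmt.Print("…")
			return
		}
		switch v.Kind() {
		case reflect.Ptr:
			if v.IsNil() {
				fmt.Print("nil")
				return
			}
			dump(v.Elem(), depth)
		case reflect.Struct:
			fmt.Printf("%s{", v.Type().Name())
			for i := 0; i < v.NumField(); i++ {
				fmt.Printf("%s: ", v.Type().Field(i).Name)
				dump(v.Field(i), depth-1)
				fmt.Print(" ")
			}
			fmt.Print("}")
		default:
			fmt.Printf("%v", v.Interface())
		}
	}

	type T struct {
		Name string
		Next *T
	}

	func main() {
		dump(reflect.ValueOf(&T{Name: "a", Next: &T{Name: "b"}}), 2)
		fmt.Println() // T{Name: a Next: T{Name: b Next: nil } }
	}
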
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
new file mode 100644
index 0000000..b32ed71
--- /dev/null
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -0,0 +1,1057 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"bytes"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+	"fmt"
+	"go/constant"
+	"go/token"
+)
+
+// An Expr is a Node that can appear as an expression.
+type Expr interface {
+	Node
+	isExpr()
+}
+
+// A miniExpr is a miniNode with extra fields common to expressions.
+// TODO(rsc): Once we are sure about the contents, compact the bools
+// into a bit field and leave extra bits available for implementations
+// embedding miniExpr. Right now there are ~60 unused bits sitting here.
+type miniExpr struct {
+	miniNode
+	typ   *types.Type
+	init  Nodes // TODO(rsc): Don't require every Node to have an init
+	flags bitset8
+}
+
+const (
+	miniExprNonNil = 1 << iota
+	miniExprTransient
+	miniExprBounded
+	miniExprImplicit // for use by implementations; not supported by every Expr
+	miniExprCheckPtr
+)
+
+func (*miniExpr) isExpr() {}
+
+func (n *miniExpr) Type() *types.Type     { return n.typ }
+func (n *miniExpr) SetType(x *types.Type) { n.typ = x }
+func (n *miniExpr) NonNil() bool          { return n.flags&miniExprNonNil != 0 }
+func (n *miniExpr) MarkNonNil()           { n.flags |= miniExprNonNil }
+func (n *miniExpr) Transient() bool       { return n.flags&miniExprTransient != 0 }
+func (n *miniExpr) SetTransient(b bool)   { n.flags.set(miniExprTransient, b) }
+func (n *miniExpr) Bounded() bool         { return n.flags&miniExprBounded != 0 }
+func (n *miniExpr) SetBounded(b bool)     { n.flags.set(miniExprBounded, b) }
+func (n *miniExpr) Init() Nodes           { return n.init }
+func (n *miniExpr) PtrInit() *Nodes       { return &n.init }
+func (n *miniExpr) SetInit(x Nodes)       { n.init = x }
+
+// An AddStringExpr is a string concatenation List[0] + List[1] + ... + List[len(List)-1].
+type AddStringExpr struct {
+	miniExpr
+	List     Nodes
+	Prealloc *Name
+}
+
+func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr {
+	n := &AddStringExpr{}
+	n.pos = pos
+	n.op = OADDSTR
+	n.List = list
+	return n
+}
+
+// An AddrExpr is an address-of expression &X.
+// It may end up being a normal address-of or an allocation of a composite literal.
+type AddrExpr struct {
+	miniExpr
+	X        Node
+	Prealloc *Name // preallocated storage if any
+}
+
+func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
+	n := &AddrExpr{X: x}
+	n.op = OADDR
+	n.pos = pos
+	return n
+}
+
+func (n *AddrExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
+func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (n *AddrExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OADDR, OPTRLIT:
+		n.op = op
+	}
+}
+
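This SetOp shape recurs throughout the file: each concrete Expr accepts only the ops it can legally represent and panics on anything else, so a miswired rewrite fails immediately. A standalone illustration with toy types:

	package main

	import "fmt"

	type Op int

	const (
		OADDR Op = iota
		OPTRLIT
		OCALL
	)

	type AddrExpr struct{ op Op }

	func (n *AddrExpr) SetOp(op Op) {
		switch op {
		default:
			panic(fmt.Sprintf("invalid Op %d for AddrExpr", op))
		case OADDR, OPTRLIT:
			n.op = op
		}
	}

	func main() {
		n := &AddrExpr{}
		n.SetOp(OPTRLIT)
		fmt.Println(n.op) // 1
		defer func() { fmt.Println("recovered:", recover() != nil) }()
		n.SetOp(OCALL) // not legal for an AddrExpr: panics
	}
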
+// A BasicLit is a literal of basic type.
+type BasicLit struct {
+	miniExpr
+	val constant.Value
+}
+
+func NewBasicLit(pos src.XPos, val constant.Value) Node {
+	n := &BasicLit{val: val}
+	n.op = OLITERAL
+	n.pos = pos
+	if k := val.Kind(); k != constant.Unknown {
+		n.SetType(idealType(k))
+	}
+	return n
+}
+
+func (n *BasicLit) Val() constant.Value       { return n.val }
+func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
+
+// A BinaryExpr is a binary expression X Op Y,
+// or Op(X, Y) for builtin functions that do not become calls.
+type BinaryExpr struct {
+	miniExpr
+	X Node
+	Y Node
+}
+
+func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr {
+	n := &BinaryExpr{X: x, Y: y}
+	n.pos = pos
+	n.SetOp(op)
+	return n
+}
+
+func (n *BinaryExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
+		OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
+		OCOPY, OCOMPLEX,
+		OEFACE:
+		n.op = op
+	}
+}
+
+// A CallUse records how the result of the call is used:
+type CallUse byte
+
+const (
+	_ CallUse = iota
+
+	CallUseExpr // single expression result is used
+	CallUseList // list of results is used
+	CallUseStmt // results not used - call is a statement
+)
+
+// A CallExpr is a function call X(Args).
+type CallExpr struct {
+	miniExpr
+	origNode
+	X         Node
+	Args      Nodes
+	KeepAlive []*Name // vars to be kept alive until call returns
+	IsDDD     bool
+	Use       CallUse
+	NoInline  bool
+}
+
+func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
+	n := &CallExpr{X: fun}
+	n.pos = pos
+	n.orig = n
+	n.SetOp(op)
+	n.Args = args
+	return n
+}
+
+func (*CallExpr) isStmt() {}
+
+func (n *CallExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+		OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
+		n.op = op
+	}
+}
+
+// A ClosureExpr is a function literal expression.
+type ClosureExpr struct {
+	miniExpr
+	Func     *Func `mknode:"-"`
+	Prealloc *Name
+}
+
+func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
+	n := &ClosureExpr{Func: fn}
+	n.op = OCLOSURE
+	n.pos = pos
+	return n
+}
+
+// A CompLitExpr is a composite literal Type{Vals}.
+// Before type-checking, the type is Ntype.
+type CompLitExpr struct {
+	miniExpr
+	origNode
+	Ntype    Ntype
+	List     Nodes // initialized values
+	Prealloc *Name
+	Len      int64 // backing array length for OSLICELIT
+}
+
+func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
+	n := &CompLitExpr{Ntype: typ}
+	n.pos = pos
+	n.SetOp(op)
+	n.List = list
+	n.orig = n
+	return n
+}
+
+func (n *CompLitExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
+func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (n *CompLitExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
+		n.op = op
+	}
+}
+
+type ConstExpr struct {
+	miniExpr
+	origNode
+	val constant.Value
+}
+
+func NewConstExpr(val constant.Value, orig Node) Node {
+	n := &ConstExpr{val: val}
+	n.op = OLITERAL
+	n.pos = orig.Pos()
+	n.orig = orig
+	n.SetType(orig.Type())
+	n.SetTypecheck(orig.Typecheck())
+	n.SetDiag(orig.Diag())
+	return n
+}
+
+func (n *ConstExpr) Sym() *types.Sym     { return n.orig.Sym() }
+func (n *ConstExpr) Val() constant.Value { return n.val }
+
+// A ConvExpr is a conversion Type(X).
+// It may end up being a value or a type.
+type ConvExpr struct {
+	miniExpr
+	X Node
+}
+
+func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
+	n := &ConvExpr{X: x}
+	n.pos = pos
+	n.typ = typ
+	n.SetOp(op)
+	return n
+}
+
+func (n *ConvExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
+func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *ConvExpr) CheckPtr() bool     { return n.flags&miniExprCheckPtr != 0 }
+func (n *ConvExpr) SetCheckPtr(b bool) { n.flags.set(miniExprCheckPtr, b) }
+
+func (n *ConvExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR:
+		n.op = op
+	}
+}
+
+// An IndexExpr is an index expression X[Y].
+type IndexExpr struct {
+	miniExpr
+	X        Node
+	Index    Node
+	Assigned bool
+}
+
+func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr {
+	n := &IndexExpr{X: x, Index: index}
+	n.pos = pos
+	n.op = OINDEX
+	return n
+}
+
+func (n *IndexExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OINDEX, OINDEXMAP:
+		n.op = op
+	}
+}
+
+// A KeyExpr is a Key: Value composite literal key.
+type KeyExpr struct {
+	miniExpr
+	Key   Node
+	Value Node
+}
+
+func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
+	n := &KeyExpr{Key: key, Value: value}
+	n.pos = pos
+	n.op = OKEY
+	return n
+}
+
+// A StructKeyExpr is a Field: Value composite literal key.
+type StructKeyExpr struct {
+	miniExpr
+	Field  *types.Sym
+	Value  Node
+	Offset int64
+}
+
+func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
+	n := &StructKeyExpr{Field: field, Value: value}
+	n.pos = pos
+	n.op = OSTRUCTKEY
+	n.Offset = types.BADWIDTH
+	return n
+}
+
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
+
+// An InlinedCallExpr is an inlined function call.
+type InlinedCallExpr struct {
+	miniExpr
+	Body       Nodes
+	ReturnVars Nodes
+}
+
+func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
+	n := &InlinedCallExpr{}
+	n.pos = pos
+	n.op = OINLCALL
+	n.Body = body
+	n.ReturnVars = retvars
+	return n
+}
+
+// A LogicalExpr is an expression X Op Y where Op is && or ||.
+// It is separate from BinaryExpr to make room for statements
+// that must be executed before Y but after X.
+type LogicalExpr struct {
+	miniExpr
+	X Node
+	Y Node
+}
+
+func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
+	n := &LogicalExpr{X: x, Y: y}
+	n.pos = pos
+	n.SetOp(op)
+	return n
+}
+
+func (n *LogicalExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OANDAND, OOROR:
+		n.op = op
+	}
+}
+
+// A MakeExpr is a make expression: make(Type[, Len[, Cap]]).
+// Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY,
+// but *not* OMAKE (that's a pre-typechecking CallExpr).
+type MakeExpr struct {
+	miniExpr
+	Len Node
+	Cap Node
+}
+
+func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
+	n := &MakeExpr{Len: len, Cap: cap}
+	n.pos = pos
+	n.SetOp(op)
+	return n
+}
+
+func (n *MakeExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
+		n.op = op
+	}
+}
+
+// A NilExpr represents the predefined untyped constant nil.
+// (It may be copied and assigned a type, though.)
+type NilExpr struct {
+	miniExpr
+	Sym_ *types.Sym // TODO: Remove
+}
+
+func NewNilExpr(pos src.XPos) *NilExpr {
+	n := &NilExpr{}
+	n.pos = pos
+	n.op = ONIL
+	return n
+}
+
+func (n *NilExpr) Sym() *types.Sym     { return n.Sym_ }
+func (n *NilExpr) SetSym(x *types.Sym) { n.Sym_ = x }
+
+// A ParenExpr is a parenthesized expression (X).
+// It may end up being a value or a type.
+type ParenExpr struct {
+	miniExpr
+	X Node
+}
+
+func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
+	n := &ParenExpr{X: x}
+	n.op = OPAREN
+	n.pos = pos
+	return n
+}
+
+func (n *ParenExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
+func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (*ParenExpr) CanBeNtype() {}
+
+// SetOTYPE changes n to be an OTYPE node returning t,
+// like all the type nodes in type.go.
+func (n *ParenExpr) SetOTYPE(t *types.Type) {
+	n.op = OTYPE
+	n.typ = t
+	t.SetNod(n)
+}
+
+// A ResultExpr represents a direct access to a result slot on the stack frame.
+type ResultExpr struct {
+	miniExpr
+	Offset int64
+}
+
+func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
+	n := &ResultExpr{Offset: offset}
+	n.pos = pos
+	n.op = ORESULT
+	n.typ = typ
+	return n
+}
+
+// A LinksymOffsetExpr refers to an offset within a global variable.
+// It is like a SelectorExpr but without the field name.
+type LinksymOffsetExpr struct {
+	miniExpr
+	Linksym *obj.LSym
+	Offset_ int64
+}
+
+func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
+	n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
+	n.pos = pos
+	n.typ = typ
+	n.op = OLINKSYMOFFSET
+	return n
+}
+
+// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0.
+func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr {
+	return NewLinksymOffsetExpr(pos, lsym, 0, typ)
+}
+
+// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name
+// representing a global variable instead of an *obj.LSym directly.
+func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr {
+	if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) {
+		base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name)
+	}
+	return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ)
+}
+
+// A SelectorExpr is a selector expression X.Sel.
+type SelectorExpr struct {
+	miniExpr
+	X         Node
+	Sel       *types.Sym
+	Selection *types.Field
+	Prealloc  *Name // preallocated storage for OCALLPART, if any
+}
+
+func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
+	n := &SelectorExpr{X: x, Sel: sel}
+	n.pos = pos
+	n.SetOp(op)
+	return n
+}
+
+func (n *SelectorExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OCALLPART, OMETHEXPR:
+		n.op = op
+	}
+}
+
+func (n *SelectorExpr) Sym() *types.Sym    { return n.Sel }
+func (n *SelectorExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
+func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *SelectorExpr) Offset() int64      { return n.Selection.Offset }
+
+func (n *SelectorExpr) FuncName() *Name {
+	if n.Op() != OMETHEXPR {
+		panic(n.no("FuncName"))
+	}
+	fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel))
+	fn.Class = PFUNC
+	fn.SetType(n.Type())
+	return fn
+}
+
+// Before type-checking, bytes.Buffer is a SelectorExpr.
+// After type-checking it becomes a Name.
+func (*SelectorExpr) CanBeNtype() {}
+
+// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max].
+type SliceExpr struct {
+	miniExpr
+	X    Node
+	Low  Node
+	High Node
+	Max  Node
+}
+
+func NewSliceExpr(pos src.XPos, op Op, x, low, high, max Node) *SliceExpr {
+	n := &SliceExpr{X: x, Low: low, High: high, Max: max}
+	n.pos = pos
+	n.op = op
+	return n
+}
+
+func (n *SliceExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+		n.op = op
+	}
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+	switch o {
+	case OSLICE, OSLICEARR, OSLICESTR:
+		return false
+	case OSLICE3, OSLICE3ARR:
+		return true
+	}
+	base.Fatalf("IsSlice3 op %v", o)
+	return false
+}
+
+// A SliceHeader expression constructs a slice header from its parts.
+type SliceHeaderExpr struct {
+	miniExpr
+	Ptr Node
+	Len Node
+	Cap Node
+}
+
+func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
+	n := &SliceHeaderExpr{Ptr: ptr, Len: len, Cap: cap}
+	n.pos = pos
+	n.op = OSLICEHEADER
+	n.typ = typ
+	return n
+}
+
+// A StarExpr is a dereference expression *X.
+// It may end up being a value or a type.
+type StarExpr struct {
+	miniExpr
+	X Node
+}
+
+func NewStarExpr(pos src.XPos, x Node) *StarExpr {
+	n := &StarExpr{X: x}
+	n.op = ODEREF
+	n.pos = pos
+	return n
+}
+
+func (n *StarExpr) Implicit() bool     { return n.flags&miniExprImplicit != 0 }
+func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (*StarExpr) CanBeNtype() {}
+
+// SetOTYPE changes n to be an OTYPE node returning t,
+// like all the type nodes in type.go.
+func (n *StarExpr) SetOTYPE(t *types.Type) {
+	n.op = OTYPE
+	n.X = nil
+	n.typ = t
+	t.SetNod(n)
+}
+
+// A TypeAssertExpr is a type assertion expression X.(Type).
+// Before type-checking, the type is Ntype.
+type TypeAssertExpr struct {
+	miniExpr
+	X     Node
+	Ntype Ntype
+
+	// Runtime type information provided by walkDotType for
+	// assertions from non-empty interface to concrete type.
+	Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
+}
+
+func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
+	n := &TypeAssertExpr{X: x, Ntype: typ}
+	n.pos = pos
+	n.op = ODOTTYPE
+	return n
+}
+
+func (n *TypeAssertExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case ODOTTYPE, ODOTTYPE2:
+		n.op = op
+	}
+}
+
+// A UnaryExpr is a unary expression Op X,
+// or Op(X) for a builtin function that does not end up being a call.
+type UnaryExpr struct {
+	miniExpr
+	X Node
+}
+
+func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr {
+	n := &UnaryExpr{X: x}
+	n.pos = pos
+	n.SetOp(op)
+	return n
+}
+
+func (n *UnaryExpr) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
+		OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW,
+		OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+		OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+		n.op = op
+	}
+}
+
+func IsZero(n Node) bool {
+	switch n.Op() {
+	case ONIL:
+		return true
+
+	case OLITERAL:
+		switch u := n.Val(); u.Kind() {
+		case constant.String:
+			return constant.StringVal(u) == ""
+		case constant.Bool:
+			return !constant.BoolVal(u)
+		default:
+			return constant.Sign(u) == 0
+		}
+
+	case OARRAYLIT:
+		n := n.(*CompLitExpr)
+		for _, n1 := range n.List {
+			if n1.Op() == OKEY {
+				n1 = n1.(*KeyExpr).Value
+			}
+			if !IsZero(n1) {
+				return false
+			}
+		}
+		return true
+
+	case OSTRUCTLIT:
+		n := n.(*CompLitExpr)
+		for _, n1 := range n.List {
+			n1 := n1.(*StructKeyExpr)
+			if !IsZero(n1.Value) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
+
+// IsAddressable reports whether n is an addressable expression (an lvalue).
+func IsAddressable(n Node) bool {
+	switch n.Op() {
+	case OINDEX:
+		n := n.(*IndexExpr)
+		if n.X.Type() != nil && n.X.Type().IsArray() {
+			return IsAddressable(n.X)
+		}
+		if n.X.Type() != nil && n.X.Type().IsString() {
+			return false
+		}
+		fallthrough
+	case ODEREF, ODOTPTR:
+		return true
+
+	case ODOT:
+		n := n.(*SelectorExpr)
+		return IsAddressable(n.X)
+
+	case ONAME:
+		n := n.(*Name)
+		if n.Class == PFUNC {
+			return false
+		}
+		return true
+
+	case OLINKSYMOFFSET:
+		return true
+	}
+
+	return false
+}
+
+// StaticValue skips OCONVNOP conversions and follows chains of local
+// variables that are initialized once and never reassigned (see
+// staticValue1), returning the earliest expression n is known to equal.
+func StaticValue(n Node) Node {
+	for {
+		if n.Op() == OCONVNOP {
+			n = n.(*ConvExpr).X
+			continue
+		}
+
+		n1 := staticValue1(n)
+		if n1 == nil {
+			return n
+		}
+		n = n1
+	}
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(nn Node) Node {
+	if nn.Op() != ONAME {
+		return nil
+	}
+	n := nn.(*Name)
+	if n.Class != PAUTO {
+		return nil
+	}
+
+	defn := n.Defn
+	if defn == nil {
+		return nil
+	}
+
+	var rhs Node
+FindRHS:
+	switch defn.Op() {
+	case OAS:
+		defn := defn.(*AssignStmt)
+		rhs = defn.Y
+	case OAS2:
+		defn := defn.(*AssignListStmt)
+		for i, lhs := range defn.Lhs {
+			if lhs == n {
+				rhs = defn.Rhs[i]
+				break FindRHS
+			}
+		}
+		base.Fatalf("%v missing from LHS of %v", n, defn)
+	default:
+		return nil
+	}
+	if rhs == nil {
+		base.Fatalf("RHS is nil: %v", defn)
+	}
+
+	if reassigned(n) {
+		return nil
+	}
+
+	return rhs
+}
+
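StaticValue and staticValue1 together follow chains of once-assigned variables back to an initializer. A toy version over a map environment; chains are acyclic because an initializer can only mention earlier variables:

	package main

	import "fmt"

	// env maps a variable to its initializer when the variable is
	// assigned exactly once; anything else is simply absent.
	type env map[string]string

	func staticValue(e env, x string) string {
		for {
			init, ok := e[x]
			if !ok {
				return x // not a once-assigned variable: stop here
			}
			x = init
		}
	}

	func main() {
		e := env{"b": "a", "c": "b"}
		fmt.Println(staticValue(e, "c")) // a: follows c -> b -> a
		fmt.Println(staticValue(e, "z")) // z: unknown name stays put
	}
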
+// reassigned takes an ONAME node, walks the function in which it is
+// defined, and reports whether the name has any assignments other than
+// its declaration.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and followed by a single assignment?
+func reassigned(name *Name) bool {
+	if name.Op() != ONAME {
+		base.Fatalf("reassigned %v", name)
+	}
+	// no way to reliably check for no-reassignment of globals, assume it can be
+	if name.Curfn == nil {
+		return true
+	}
+
+	// TODO(mdempsky): This is inefficient and becoming increasingly
+	// unwieldy. Figure out a way to generalize escape analysis's
+	// reassignment detection for use by inlining and devirtualization.
+
+	// isName reports whether n is a reference to name.
+	isName := func(x Node) bool {
+		n, ok := x.(*Name)
+		return ok && n.Canonical() == name
+	}
+
+	var do func(n Node) bool
+	do = func(n Node) bool {
+		switch n.Op() {
+		case OAS:
+			n := n.(*AssignStmt)
+			if isName(n.X) && n != name.Defn {
+				return true
+			}
+		case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+			n := n.(*AssignListStmt)
+			for _, p := range n.Lhs {
+				if isName(p) && n != name.Defn {
+					return true
+				}
+			}
+		case OADDR:
+			n := n.(*AddrExpr)
+			if isName(OuterValue(n.X)) {
+				return true
+			}
+		case OCLOSURE:
+			n := n.(*ClosureExpr)
+			if Any(n.Func, do) {
+				return true
+			}
+		}
+		return false
+	}
+	return Any(name.Curfn, do)
+}
+
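reassigned is a predicate walk: visit everything under the function and stop at the first offending assignment. The same shape in a self-contained toy, with any standing in for ir.Any:

	package main

	import "fmt"

	type node struct {
		op   string // "AS", "BLOCK", ...
		name string // for "AS": the variable written
		kids []*node
	}

	// any reports whether do returns true for n or any node below it.
	func any(n *node, do func(*node) bool) bool {
		if n == nil {
			return false
		}
		if do(n) {
			return true
		}
		for _, k := range n.kids {
			if any(k, do) {
				return true
			}
		}
		return false
	}

	// reassigned reports whether name is written anywhere other than defn.
	func reassigned(fn *node, name string, defn *node) bool {
		return any(fn, func(n *node) bool {
			return n.op == "AS" && n.name == name && n != defn
		})
	}

	func main() {
		decl := &node{op: "AS", name: "x"}
		write := &node{op: "AS", name: "x"}
		fn := &node{op: "BLOCK", kids: []*node{decl, write}}
		fmt.Println(reassigned(fn, "x", decl)) // true: a second write exists
		fmt.Println(reassigned(fn, "y", nil))  // false: y is never written
	}
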
+// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
+var IsIntrinsicCall = func(*CallExpr) bool { return false }
+
+// SameSafeExpr checks whether it is safe to reuse one of l and r
+// instead of computing both. SameSafeExpr assumes that l and r are
+// used in the same statement or expression. In order for it to be
+// safe to reuse l or r, they must:
+// * be the same expression
+// * not have side-effects (no function calls, no channel ops);
+//   however, panics are ok
+// * not cause inappropriate aliasing; e.g. two string-to-[]byte
+//   conversions must result in two distinct slices
+//
+// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
+// as an lvalue (map assignment) and an rvalue (map access). This is
+// currently OK, since the only place SameSafeExpr gets used on an
+// lvalue expression is for OSLICE and OAPPEND optimizations, and it
+// is correct in those settings.
+func SameSafeExpr(l Node, r Node) bool {
+	if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
+		return false
+	}
+
+	switch l.Op() {
+	case ONAME:
+		return l == r
+
+	case ODOT, ODOTPTR:
+		l := l.(*SelectorExpr)
+		r := r.(*SelectorExpr)
+		return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X)
+
+	case ODEREF:
+		l := l.(*StarExpr)
+		r := r.(*StarExpr)
+		return SameSafeExpr(l.X, r.X)
+
+	case ONOT, OBITNOT, OPLUS, ONEG:
+		l := l.(*UnaryExpr)
+		r := r.(*UnaryExpr)
+		return SameSafeExpr(l.X, r.X)
+
+	case OCONVNOP:
+		l := l.(*ConvExpr)
+		r := r.(*ConvExpr)
+		return SameSafeExpr(l.X, r.X)
+
+	case OCONV:
+		l := l.(*ConvExpr)
+		r := r.(*ConvExpr)
+		// Some conversions can't be reused, such as []byte(str).
+		// Allow only numeric-ish types. This is a bit conservative.
+		return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X)
+
+	case OINDEX, OINDEXMAP:
+		l := l.(*IndexExpr)
+		r := r.(*IndexExpr)
+		return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index)
+
+	case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+		l := l.(*BinaryExpr)
+		r := r.(*BinaryExpr)
+		return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y)
+
+	case OLITERAL:
+		return constant.Compare(l.Val(), token.EQL, r.Val())
+
+	case ONIL:
+		return true
+	}
+
+	return false
+}
+
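The OLITERAL case above leans on Compare from the public go/constant package; a quick runnable check of that call:

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		a := constant.MakeInt64(42)
		b := constant.MakeInt64(42)
		fmt.Println(constant.Compare(a, token.EQL, b))                          // true
		fmt.Println(constant.Compare(a, token.LSS, constant.MakeInt64(1)))      // false
		s := constant.MakeString("go")
		fmt.Println(constant.Compare(s, token.EQL, constant.MakeString("go"))) // true
	}
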
+// ShouldCheckPtr reports whether pointer checking should be enabled for
+// function fn at a given level. See debugHelpFooter for defined
+// levels.
+func ShouldCheckPtr(fn *Func, level int) bool {
+	return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0
+}
+
+// IsReflectHeaderDataField reports whether l is an expression p.Data
+// where p has type reflect.SliceHeader or reflect.StringHeader.
+func IsReflectHeaderDataField(l Node) bool {
+	if l.Type() != types.Types[types.TUINTPTR] {
+		return false
+	}
+
+	var tsym *types.Sym
+	switch l.Op() {
+	case ODOT:
+		l := l.(*SelectorExpr)
+		tsym = l.X.Type().Sym()
+	case ODOTPTR:
+		l := l.(*SelectorExpr)
+		tsym = l.X.Type().Elem().Sym()
+	default:
+		return false
+	}
+
+	if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
+		return false
+	}
+	return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
+func ParamNames(ft *types.Type) []Node {
+	args := make([]Node, ft.NumParams())
+	for i, f := range ft.Params().FieldSlice() {
+		args[i] = AsNode(f.Nname)
+	}
+	return args
+}
+
+// MethodSym returns the method symbol representing a method name
+// associated with a specific receiver type.
+//
+// Method symbols can be used to distinguish the same method appearing
+// in different method sets. For example, T.M and (*T).M have distinct
+// method symbols.
+//
+// The returned symbol will be marked as a function.
+func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
+	sym := MethodSymSuffix(recv, msym, "")
+	sym.SetFunc(true)
+	return sym
+}
+
+// MethodSymSuffix is like MethodSym, but allows attaching a
+// distinguisher suffix. To avoid collisions, the suffix must not
+// start with a letter, number, or period.
+func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
+	if msym.IsBlank() {
+		base.Fatalf("blank method name")
+	}
+
+	rsym := recv.Sym()
+	if recv.IsPtr() {
+		if rsym != nil {
+			base.Fatalf("declared pointer receiver type: %v", recv)
+		}
+		rsym = recv.Elem().Sym()
+	}
+
+	// Find the package the receiver type appeared in. For
+	// anonymous receiver types (i.e., anonymous structs with
+	// embedded fields), use the "go" pseudo-package instead.
+	rpkg := Pkgs.Go
+	if rsym != nil {
+		rpkg = rsym.Pkg
+	}
+
+	var b bytes.Buffer
+	if recv.IsPtr() {
+		// The parentheses aren't really necessary, but
+		// they're pretty traditional at this point.
+		fmt.Fprintf(&b, "(%-S)", recv)
+	} else {
+		fmt.Fprintf(&b, "%-S", recv)
+	}
+
+	// A particular receiver type may have multiple non-exported
+	// methods with the same name. To disambiguate them, include a
+	// package qualifier for names that came from a different
+	// package than the receiver type.
+	if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
+		b.WriteString(".")
+		b.WriteString(msym.Pkg.Prefix)
+	}
+
+	b.WriteString(".")
+	b.WriteString(msym.Name)
+	b.WriteString(suffix)
+
+	return rpkg.LookupBytes(b.Bytes())
+}
+
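A toy rendering of the names MethodSymSuffix builds: parenthesized pointer receivers, plus a package qualifier for unexported methods declared outside the receiver's package. Plain strings stand in for *types.Type and *types.Sym:

	package main

	import (
		"bytes"
		"fmt"
	)

	func methodSym(recv string, ptr bool, pkgPrefix, method, suffix string, exported, samePkg bool) string {
		var b bytes.Buffer
		if ptr {
			fmt.Fprintf(&b, "(*%s)", recv) // traditional parentheses
		} else {
			b.WriteString(recv)
		}
		if !exported && !samePkg {
			b.WriteString("." + pkgPrefix) // disambiguate unexported names
		}
		b.WriteString("." + method + suffix)
		return b.String()
	}

	func main() {
		fmt.Println(methodSym("T", false, "", "M", "", true, true))           // T.M
		fmt.Println(methodSym("T", true, "", "M", "", true, true))            // (*T).M
		fmt.Println(methodSym("T", false, "otherpkg", "m", "", false, false)) // T.otherpkg.m
	}
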
+// MethodExprName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func MethodExprName(n Node) *Name {
+	name, _ := MethodExprFunc(n).Nname.(*Name)
+	return name
+}
+
+// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
+func MethodExprFunc(n Node) *types.Field {
+	switch n.Op() {
+	case ODOTMETH, OMETHEXPR, OCALLPART:
+		return n.(*SelectorExpr).Selection
+	}
+	base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+	panic("unreachable")
+}
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
new file mode 100644
index 0000000..1a05079
--- /dev/null
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -0,0 +1,1331 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"bytes"
+	"fmt"
+	"go/constant"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"unicode/utf8"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// Op
+
+var OpNames = []string{
+	OADDR:     "&",
+	OADD:      "+",
+	OADDSTR:   "+",
+	OALIGNOF:  "unsafe.Alignof",
+	OANDAND:   "&&",
+	OANDNOT:   "&^",
+	OAND:      "&",
+	OAPPEND:   "append",
+	OAS:       "=",
+	OAS2:      "=",
+	OBREAK:    "break",
+	OCALL:     "function call", // not actual syntax
+	OCAP:      "cap",
+	OCASE:     "case",
+	OCLOSE:    "close",
+	OCOMPLEX:  "complex",
+	OBITNOT:   "^",
+	OCONTINUE: "continue",
+	OCOPY:     "copy",
+	ODELETE:   "delete",
+	ODEFER:    "defer",
+	ODIV:      "/",
+	OEQ:       "==",
+	OFALL:     "fallthrough",
+	OFOR:      "for",
+	OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.892
+	OGE:       ">=",
+	OGOTO:     "goto",
+	OGT:       ">",
+	OIF:       "if",
+	OIMAG:     "imag",
+	OINLMARK:  "inlmark",
+	ODEREF:    "*",
+	OLEN:      "len",
+	OLE:       "<=",
+	OLSH:      "<<",
+	OLT:       "<",
+	OMAKE:     "make",
+	ONEG:      "-",
+	OMOD:      "%",
+	OMUL:      "*",
+	ONEW:      "new",
+	ONE:       "!=",
+	ONOT:      "!",
+	OOFFSETOF: "unsafe.Offsetof",
+	OOROR:     "||",
+	OOR:       "|",
+	OPANIC:    "panic",
+	OPLUS:     "+",
+	OPRINTN:   "println",
+	OPRINT:    "print",
+	ORANGE:    "range",
+	OREAL:     "real",
+	ORECV:     "<-",
+	ORECOVER:  "recover",
+	ORETURN:   "return",
+	ORSH:      ">>",
+	OSELECT:   "select",
+	OSEND:     "<-",
+	OSIZEOF:   "unsafe.Sizeof",
+	OSUB:      "-",
+	OSWITCH:   "switch",
+	OXOR:      "^",
+}
+
+// GoString returns the Go syntax for the Op, or else its name.
+func (o Op) GoString() string {
+	if int(o) < len(OpNames) && OpNames[o] != "" {
+		return OpNames[o]
+	}
+	return o.String()
+}
+
+// Format implements formatting for an Op.
+// The valid formats are:
+//
+//	%v	Go syntax ("+", "<-", "print")
+//	%+v	Debug syntax ("ADD", "RECV", "PRINT")
+//
+func (o Op) Format(s fmt.State, verb rune) {
+	switch verb {
+	default:
+		fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
+	case 'v':
+		if s.Flag('+') {
+			// %+v is OMUL instead of "*"
+			io.WriteString(s, o.String())
+			return
+		}
+		io.WriteString(s, o.GoString())
+	}
+}
+
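Op.Format uses the standard fmt.Formatter interface, so one value prints as Go syntax under %v and as its enum name under %+v. The same trick, runnable on its own:

	package main

	import (
		"fmt"
		"io"
	)

	type op int

	const opAdd op = iota

	func (o op) name() string     { return "ADD" }
	func (o op) goString() string { return "+" }

	// Format satisfies fmt.Formatter: the '+' flag selects debug syntax.
	func (o op) Format(s fmt.State, verb rune) {
		switch verb {
		default:
			fmt.Fprintf(s, "%%!%c(op=%d)", verb, int(o))
		case 'v':
			if s.Flag('+') {
				io.WriteString(s, o.name())
				return
			}
			io.WriteString(s, o.goString())
		}
	}

	func main() {
		fmt.Printf("%v %+v\n", opAdd, opAdd) // + ADD
	}
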
+// Node
+
+// fmtNode implements formatting for a Node n.
+// Every Node implementation must define a Format method that calls fmtNode.
+// The valid formats are:
+//
+//	%v	Go syntax
+//	%L	Go syntax followed by " (type T)" if type is known.
+//	%+v	Debug syntax, as in Dump.
+//
+func fmtNode(n Node, s fmt.State, verb rune) {
+	// %+v prints Dump.
+	// Otherwise we print Go syntax.
+	if s.Flag('+') && verb == 'v' {
+		dumpNode(s, n, 1)
+		return
+	}
+
+	if verb != 'v' && verb != 'S' && verb != 'L' {
+		fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
+		return
+	}
+
+	if n == nil {
+		fmt.Fprint(s, "<nil>")
+		return
+	}
+
+	t := n.Type()
+	if verb == 'L' && t != nil {
+		if t.Kind() == types.TNIL {
+			fmt.Fprint(s, "nil")
+		} else if n.Op() == ONAME && n.Name().AutoTemp() {
+			fmt.Fprintf(s, "%v value", t)
+		} else {
+			fmt.Fprintf(s, "%v (type %v)", n, t)
+		}
+		return
+	}
+
+	// TODO: inlining produces expressions with ninits. We can't print these yet.
+
+	if OpPrec[n.Op()] < 0 {
+		stmtFmt(n, s)
+		return
+	}
+
+	exprFmt(n, s, 0)
+}
+
+var OpPrec = []int{
+	OALIGNOF:       8,
+	OAPPEND:        8,
+	OBYTES2STR:     8,
+	OARRAYLIT:      8,
+	OSLICELIT:      8,
+	ORUNES2STR:     8,
+	OCALLFUNC:      8,
+	OCALLINTER:     8,
+	OCALLMETH:      8,
+	OCALL:          8,
+	OCAP:           8,
+	OCLOSE:         8,
+	OCOMPLIT:       8,
+	OCONVIFACE:     8,
+	OCONVNOP:       8,
+	OCONV:          8,
+	OCOPY:          8,
+	ODELETE:        8,
+	OGETG:          8,
+	OLEN:           8,
+	OLITERAL:       8,
+	OMAKESLICE:     8,
+	OMAKESLICECOPY: 8,
+	OMAKE:          8,
+	OMAPLIT:        8,
+	ONAME:          8,
+	ONEW:           8,
+	ONIL:           8,
+	ONONAME:        8,
+	OOFFSETOF:      8,
+	OPACK:          8,
+	OPANIC:         8,
+	OPAREN:         8,
+	OPRINTN:        8,
+	OPRINT:         8,
+	ORUNESTR:       8,
+	OSIZEOF:        8,
+	OSTR2BYTES:     8,
+	OSTR2RUNES:     8,
+	OSTRUCTLIT:     8,
+	OTARRAY:        8,
+	OTSLICE:        8,
+	OTCHAN:         8,
+	OTFUNC:         8,
+	OTINTER:        8,
+	OTMAP:          8,
+	OTSTRUCT:       8,
+	OTYPE:          8,
+	OINDEXMAP:      8,
+	OINDEX:         8,
+	OSLICE:         8,
+	OSLICESTR:      8,
+	OSLICEARR:      8,
+	OSLICE3:        8,
+	OSLICE3ARR:     8,
+	OSLICEHEADER:   8,
+	ODOTINTER:      8,
+	ODOTMETH:       8,
+	ODOTPTR:        8,
+	ODOTTYPE2:      8,
+	ODOTTYPE:       8,
+	ODOT:           8,
+	OXDOT:          8,
+	OCALLPART:      8,
+	OMETHEXPR:      8,
+	OPLUS:          7,
+	ONOT:           7,
+	OBITNOT:        7,
+	ONEG:           7,
+	OADDR:          7,
+	ODEREF:         7,
+	ORECV:          7,
+	OMUL:           6,
+	ODIV:           6,
+	OMOD:           6,
+	OLSH:           6,
+	ORSH:           6,
+	OAND:           6,
+	OANDNOT:        6,
+	OADD:           5,
+	OSUB:           5,
+	OOR:            5,
+	OXOR:           5,
+	OEQ:            4,
+	OLT:            4,
+	OLE:            4,
+	OGE:            4,
+	OGT:            4,
+	ONE:            4,
+	OSEND:          3,
+	OANDAND:        2,
+	OOROR:          1,
+
+	// Statements handled by stmtFmt
+	OAS:         -1,
+	OAS2:        -1,
+	OAS2DOTTYPE: -1,
+	OAS2FUNC:    -1,
+	OAS2MAPR:    -1,
+	OAS2RECV:    -1,
+	OASOP:       -1,
+	OBLOCK:      -1,
+	OBREAK:      -1,
+	OCASE:       -1,
+	OCONTINUE:   -1,
+	ODCL:        -1,
+	ODEFER:      -1,
+	OFALL:       -1,
+	OFOR:        -1,
+	OFORUNTIL:   -1,
+	OGOTO:       -1,
+	OIF:         -1,
+	OLABEL:      -1,
+	OGO:         -1,
+	ORANGE:      -1,
+	ORETURN:     -1,
+	OSELECT:     -1,
+	OSWITCH:     -1,
+
+	OEND: 0,
+}
+
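OpPrec drives parenthesization: exprFmt below wraps a subexpression in parentheses only when the context's precedence exceeds the node's own. A miniature of that logic:

	package main

	import "fmt"

	type expr struct {
		op   string // "+", "*", or "" for a leaf
		x, y *expr
		lit  string
	}

	var prec = map[string]int{"+": 5, "*": 6}

	// format parenthesizes n only if the surrounding context binds
	// tighter than n's own operator, just like exprFmt's prec > nprec.
	func format(n *expr, ctx int) string {
		if n.op == "" {
			return n.lit
		}
		p := prec[n.op]
		s := format(n.x, p) + n.op + format(n.y, p+1)
		if ctx > p {
			return "(" + s + ")"
		}
		return s
	}

	func main() {
		a, b, c := &expr{lit: "a"}, &expr{lit: "b"}, &expr{lit: "c"}
		fmt.Println(format(&expr{op: "*", x: &expr{op: "+", x: a, y: b}, y: c}, 0)) // (a+b)*c
		fmt.Println(format(&expr{op: "+", x: a, y: &expr{op: "*", x: b, y: c}}, 0)) // a+b*c
	}
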
+// StmtWithInit reports whether op is a statement with an explicit init list.
+func StmtWithInit(op Op) bool {
+	switch op {
+	case OIF, OFOR, OFORUNTIL, OSWITCH:
+		return true
+	}
+	return false
+}
+
+func stmtFmt(n Node, s fmt.State) {
+	// NOTE(rsc): This code used to support the text-based export format,
+	// which was more aggressive about printing full Go syntax
+	// (for example, an actual loop instead of "for loop").
+	// The code is preserved for now in case we want to expand
+	// any of those shortenings later. Or maybe we will delete
+	// the code. But for now, keep it.
+	const exportFormat = false
+
+	// Some statements allow for an init, but at most one;
+	// we may have an arbitrary number added, e.g. by typecheck
+	// and inlining. If it doesn't fit the syntax, emit an enclosing
+	// block starting with the init statements.
+
+	// if we can just say "for" n->ninit; ... then do so
+	simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op())
+
+	// otherwise, print the inits as separate statements
+	complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat
+
+	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+	extrablock := complexinit && StmtWithInit(n.Op())
+
+	if extrablock {
+		fmt.Fprint(s, "{")
+	}
+
+	if complexinit {
+		fmt.Fprintf(s, " %v; ", n.Init())
+	}
+
+	switch n.Op() {
+	case ODCL:
+		n := n.(*Decl)
+		fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type())
+
+	// Don't export "v = <N>" initializing statements, hope they're always
+	// preceded by the DCL which will be re-parsed and typechecked to reproduce
+	// the "v = <N>" again.
+	case OAS:
+		n := n.(*AssignStmt)
+		if n.Def && !complexinit {
+			fmt.Fprintf(s, "%v := %v", n.X, n.Y)
+		} else {
+			fmt.Fprintf(s, "%v = %v", n.X, n.Y)
+		}
+
+	case OASOP:
+		n := n.(*AssignOpStmt)
+		if n.IncDec {
+			if n.AsOp == OADD {
+				fmt.Fprintf(s, "%v++", n.X)
+			} else {
+				fmt.Fprintf(s, "%v--", n.X)
+			}
+			break
+		}
+
+		fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y)
+
+	case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+		n := n.(*AssignListStmt)
+		if n.Def && !complexinit {
+			fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs)
+		} else {
+			fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs)
+		}
+
+	case OBLOCK:
+		n := n.(*BlockStmt)
+		if len(n.List) != 0 {
+			fmt.Fprintf(s, "%v", n.List)
+		}
+
+	case ORETURN:
+		n := n.(*ReturnStmt)
+		fmt.Fprintf(s, "return %.v", n.Results)
+
+	case OTAILCALL:
+		n := n.(*TailCallStmt)
+		fmt.Fprintf(s, "tailcall %v", n.Target)
+
+	case OINLMARK:
+		n := n.(*InlineMarkStmt)
+		fmt.Fprintf(s, "inlmark %d", n.Index)
+
+	case OGO:
+		n := n.(*GoDeferStmt)
+		fmt.Fprintf(s, "go %v", n.Call)
+
+	case ODEFER:
+		n := n.(*GoDeferStmt)
+		fmt.Fprintf(s, "defer %v", n.Call)
+
+	case OIF:
+		n := n.(*IfStmt)
+		if simpleinit {
+			fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body)
+		} else {
+			fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body)
+		}
+		if len(n.Else) != 0 {
+			fmt.Fprintf(s, " else { %v }", n.Else)
+		}
+
+	case OFOR, OFORUNTIL:
+		n := n.(*ForStmt)
+		opname := "for"
+		if n.Op() == OFORUNTIL {
+			opname = "foruntil"
+		}
+		if !exportFormat { // TODO maybe only if FmtShort, same below
+			fmt.Fprintf(s, "%s loop", opname)
+			break
+		}
+
+		fmt.Fprint(s, opname)
+		if simpleinit {
+			fmt.Fprintf(s, " %v;", n.Init()[0])
+		} else if n.Post != nil {
+			fmt.Fprint(s, " ;")
+		}
+
+		if n.Cond != nil {
+			fmt.Fprintf(s, " %v", n.Cond)
+		}
+
+		if n.Post != nil {
+			fmt.Fprintf(s, "; %v", n.Post)
+		} else if simpleinit {
+			fmt.Fprint(s, ";")
+		}
+
+		if n.Op() == OFORUNTIL && len(n.Late) != 0 {
+			fmt.Fprintf(s, "; %v", n.Late)
+		}
+
+		fmt.Fprintf(s, " { %v }", n.Body)
+
+	case ORANGE:
+		n := n.(*RangeStmt)
+		if !exportFormat {
+			fmt.Fprint(s, "for loop")
+			break
+		}
+
+		fmt.Fprint(s, "for")
+		if n.Key != nil {
+			fmt.Fprintf(s, " %v", n.Key)
+			if n.Value != nil {
+				fmt.Fprintf(s, ", %v", n.Value)
+			}
+			fmt.Fprint(s, " =")
+		}
+		fmt.Fprintf(s, " range %v { %v }", n.X, n.Body)
+
+	case OSELECT:
+		n := n.(*SelectStmt)
+		if !exportFormat {
+			fmt.Fprintf(s, "%v statement", n.Op())
+			break
+		}
+		fmt.Fprintf(s, "select { %v }", n.Cases)
+
+	case OSWITCH:
+		n := n.(*SwitchStmt)
+		if !exportFormat {
+			fmt.Fprintf(s, "%v statement", n.Op())
+			break
+		}
+		fmt.Fprintf(s, "switch")
+		if simpleinit {
+			fmt.Fprintf(s, " %v;", n.Init()[0])
+		}
+		if n.Tag != nil {
+			fmt.Fprintf(s, " %v ", n.Tag)
+		}
+		fmt.Fprintf(s, " { %v }", n.Cases)
+
+	case OCASE:
+		n := n.(*CaseClause)
+		if len(n.List) != 0 {
+			fmt.Fprintf(s, "case %.v", n.List)
+		} else {
+			fmt.Fprint(s, "default")
+		}
+		fmt.Fprintf(s, ": %v", n.Body)
+
+	case OBREAK, OCONTINUE, OGOTO, OFALL:
+		n := n.(*BranchStmt)
+		if n.Label != nil {
+			fmt.Fprintf(s, "%v %v", n.Op(), n.Label)
+		} else {
+			fmt.Fprintf(s, "%v", n.Op())
+		}
+
+	case OLABEL:
+		n := n.(*LabelStmt)
+		fmt.Fprintf(s, "%v: ", n.Label)
+	}
+
+	if extrablock {
+		fmt.Fprint(s, "}")
+	}
+}
+
+func exprFmt(n Node, s fmt.State, prec int) {
+	// NOTE(rsc): This code used to support the text-based export format,
+	// which was more aggressive about printing full Go syntax
+	// (for example, an actual loop instead of "for loop").
+	// The code is preserved for now in case we want to expand
+	// any of those shortenings later. Or maybe we will delete
+	// the code. But for now, keep it.
+	const exportFormat = false
+
+	for {
+		if n == nil {
+			fmt.Fprint(s, "<nil>")
+			return
+		}
+
+		// We always want the original, if any.
+		if o := Orig(n); o != n {
+			n = o
+			continue
+		}
+
+		// Skip implicit operations introduced during typechecking.
+		switch nn := n; nn.Op() {
+		case OADDR:
+			nn := nn.(*AddrExpr)
+			if nn.Implicit() {
+				n = nn.X
+				continue
+			}
+		case ODEREF:
+			nn := nn.(*StarExpr)
+			if nn.Implicit() {
+				n = nn.X
+				continue
+			}
+		case OCONV, OCONVNOP, OCONVIFACE:
+			nn := nn.(*ConvExpr)
+			if nn.Implicit() {
+				n = nn.X
+				continue
+			}
+		}
+
+		break
+	}
+
+	nprec := OpPrec[n.Op()]
+	if n.Op() == OTYPE && n.Type().IsPtr() {
+		nprec = OpPrec[ODEREF]
+	}
+
+	if prec > nprec {
+		fmt.Fprintf(s, "(%v)", n)
+		return
+	}
+
+	switch n.Op() {
+	case OPAREN:
+		n := n.(*ParenExpr)
+		fmt.Fprintf(s, "(%v)", n.X)
+
+	case ONIL:
+		fmt.Fprint(s, "nil")
+
+	case OLITERAL: // this is a bit of a mess
+		if !exportFormat && n.Sym() != nil {
+			fmt.Fprint(s, n.Sym())
+			return
+		}
+
+		needUnparen := false
+		if n.Type() != nil && !n.Type().IsUntyped() {
+			// Need parens when type begins with what might
+			// be misinterpreted as a unary operator: * or <-.
+			if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
+				fmt.Fprintf(s, "(%v)(", n.Type())
+			} else {
+				fmt.Fprintf(s, "%v(", n.Type())
+			}
+			needUnparen = true
+		}
+
+		if n.Type() == types.UntypedRune {
+			switch x, ok := constant.Uint64Val(n.Val()); {
+			case !ok:
+				fallthrough
+			default:
+				fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
+
+			case x < utf8.RuneSelf:
+				fmt.Fprintf(s, "%q", x)
+
+			case x < 1<<16:
+				fmt.Fprintf(s, "'\\u%04x'", x)
+
+			case x <= utf8.MaxRune:
+				fmt.Fprintf(s, "'\\U%08x'", x)
+			}
+		} else {
+			fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
+		}
+
+		if needUnparen {
+			fmt.Fprintf(s, ")")
+		}
+
+	case ODCLFUNC:
+		n := n.(*Func)
+		if sym := n.Sym(); sym != nil {
+			fmt.Fprint(s, sym)
+			return
+		}
+		fmt.Fprintf(s, "<unnamed Func>")
+
+	case ONAME:
+		n := n.(*Name)
+		// Special case: name used as local variable in export.
+		// _ becomes ~b%d internally; print as _ for export
+		if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
+			fmt.Fprint(s, "_")
+			return
+		}
+		fallthrough
+	case OPACK, ONONAME:
+		fmt.Fprint(s, n.Sym())
+
+	case OLINKSYMOFFSET:
+		n := n.(*LinksymOffsetExpr)
+		fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_)
+
+	case OTYPE:
+		if n.Type() == nil && n.Sym() != nil {
+			fmt.Fprint(s, n.Sym())
+			return
+		}
+		fmt.Fprintf(s, "%v", n.Type())
+
+	case OTSLICE:
+		n := n.(*SliceType)
+		if n.DDD {
+			fmt.Fprintf(s, "...%v", n.Elem)
+		} else {
+			fmt.Fprintf(s, "[]%v", n.Elem) // happens before typecheck
+		}
+
+	case OTARRAY:
+		n := n.(*ArrayType)
+		if n.Len == nil {
+			fmt.Fprintf(s, "[...]%v", n.Elem)
+		} else {
+			fmt.Fprintf(s, "[%v]%v", n.Len, n.Elem)
+		}
+
+	case OTMAP:
+		n := n.(*MapType)
+		fmt.Fprintf(s, "map[%v]%v", n.Key, n.Elem)
+
+	case OTCHAN:
+		n := n.(*ChanType)
+		switch n.Dir {
+		case types.Crecv:
+			fmt.Fprintf(s, "<-chan %v", n.Elem)
+
+		case types.Csend:
+			fmt.Fprintf(s, "chan<- %v", n.Elem)
+
+		default:
+			if n.Elem != nil && n.Elem.Op() == OTCHAN && n.Elem.(*ChanType).Dir == types.Crecv {
+				fmt.Fprintf(s, "chan (%v)", n.Elem)
+			} else {
+				fmt.Fprintf(s, "chan %v", n.Elem)
+			}
+		}
+
+	case OTSTRUCT:
+		fmt.Fprint(s, "<struct>")
+
+	case OTINTER:
+		fmt.Fprint(s, "<inter>")
+
+	case OTFUNC:
+		fmt.Fprint(s, "<func>")
+
+	case OCLOSURE:
+		n := n.(*ClosureExpr)
+		if !exportFormat {
+			fmt.Fprint(s, "func literal")
+			return
+		}
+		fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
+
+	case OCOMPLIT:
+		n := n.(*CompLitExpr)
+		if !exportFormat {
+			if n.Implicit() {
+				fmt.Fprintf(s, "... argument")
+				return
+			}
+			if n.Ntype != nil {
+				fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
+				return
+			}
+
+			fmt.Fprint(s, "composite literal")
+			return
+		}
+		fmt.Fprintf(s, "(%v{ %.v })", n.Ntype, n.List)
+
+	case OPTRLIT:
+		n := n.(*AddrExpr)
+		fmt.Fprintf(s, "&%v", n.X)
+
+	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+		n := n.(*CompLitExpr)
+		if !exportFormat {
+			fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
+			return
+		}
+		fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
+
+	case OKEY:
+		n := n.(*KeyExpr)
+		if n.Key != nil && n.Value != nil {
+			fmt.Fprintf(s, "%v:%v", n.Key, n.Value)
+			return
+		}
+
+		if n.Key == nil && n.Value != nil {
+			fmt.Fprintf(s, ":%v", n.Value)
+			return
+		}
+		if n.Key != nil && n.Value == nil {
+			fmt.Fprintf(s, "%v:", n.Key)
+			return
+		}
+		fmt.Fprint(s, ":")
+
+	case OSTRUCTKEY:
+		n := n.(*StructKeyExpr)
+		fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
+
+	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OCALLPART, OMETHEXPR:
+		n := n.(*SelectorExpr)
+		exprFmt(n.X, s, nprec)
+		if n.Sel == nil {
+			fmt.Fprint(s, ".<nil>")
+			return
+		}
+		fmt.Fprintf(s, ".%s", n.Sel.Name)
+
+	case ODOTTYPE, ODOTTYPE2:
+		n := n.(*TypeAssertExpr)
+		exprFmt(n.X, s, nprec)
+		if n.Ntype != nil {
+			fmt.Fprintf(s, ".(%v)", n.Ntype)
+			return
+		}
+		fmt.Fprintf(s, ".(%v)", n.Type())
+
+	case OINDEX, OINDEXMAP:
+		n := n.(*IndexExpr)
+		exprFmt(n.X, s, nprec)
+		fmt.Fprintf(s, "[%v]", n.Index)
+
+	case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+		n := n.(*SliceExpr)
+		exprFmt(n.X, s, nprec)
+		fmt.Fprint(s, "[")
+		if n.Low != nil {
+			fmt.Fprint(s, n.Low)
+		}
+		fmt.Fprint(s, ":")
+		if n.High != nil {
+			fmt.Fprint(s, n.High)
+		}
+		if n.Op().IsSlice3() {
+			fmt.Fprint(s, ":")
+			if n.Max != nil {
+				fmt.Fprint(s, n.Max)
+			}
+		}
+		fmt.Fprint(s, "]")
+
+	case OSLICEHEADER:
+		n := n.(*SliceHeaderExpr)
+		fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.Len, n.Cap)
+
+	case OCOMPLEX, OCOPY:
+		n := n.(*BinaryExpr)
+		fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y)
+
+	case OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		OBYTES2STR,
+		ORUNES2STR,
+		OSTR2BYTES,
+		OSTR2RUNES,
+		ORUNESTR:
+		n := n.(*ConvExpr)
+		if n.Type() == nil || n.Type().Sym() == nil {
+			fmt.Fprintf(s, "(%v)", n.Type())
+		} else {
+			fmt.Fprintf(s, "%v", n.Type())
+		}
+		fmt.Fprintf(s, "(%v)", n.X)
+
+	case OREAL,
+		OIMAG,
+		OCAP,
+		OCLOSE,
+		OLEN,
+		ONEW,
+		OPANIC,
+		OALIGNOF,
+		OOFFSETOF,
+		OSIZEOF:
+		n := n.(*UnaryExpr)
+		fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
+
+	case OAPPEND,
+		ODELETE,
+		OMAKE,
+		ORECOVER,
+		OPRINT,
+		OPRINTN:
+		n := n.(*CallExpr)
+		if n.IsDDD {
+			fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
+			return
+		}
+		fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args)
+
+	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
+		n := n.(*CallExpr)
+		exprFmt(n.X, s, nprec)
+		if n.IsDDD {
+			fmt.Fprintf(s, "(%.v...)", n.Args)
+			return
+		}
+		fmt.Fprintf(s, "(%.v)", n.Args)
+
+	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+		n := n.(*MakeExpr)
+		if n.Cap != nil {
+			fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap)
+			return
+		}
+		if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) {
+			fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len)
+			return
+		}
+		fmt.Fprintf(s, "make(%v)", n.Type())
+
+	case OMAKESLICECOPY:
+		n := n.(*MakeExpr)
+		fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap)
+
+	case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
+		// Unary
+		n := n.(*UnaryExpr)
+		fmt.Fprintf(s, "%v", n.Op())
+		if n.X != nil && n.X.Op() == n.Op() {
+			fmt.Fprint(s, " ")
+		}
+		exprFmt(n.X, s, nprec+1)
+
+	case OADDR:
+		n := n.(*AddrExpr)
+		fmt.Fprintf(s, "%v", n.Op())
+		if n.X != nil && n.X.Op() == n.Op() {
+			fmt.Fprint(s, " ")
+		}
+		exprFmt(n.X, s, nprec+1)
+
+	case ODEREF:
+		n := n.(*StarExpr)
+		fmt.Fprintf(s, "%v", n.Op())
+		exprFmt(n.X, s, nprec+1)
+
+		// Binary
+	case OADD,
+		OAND,
+		OANDNOT,
+		ODIV,
+		OEQ,
+		OGE,
+		OGT,
+		OLE,
+		OLT,
+		OLSH,
+		OMOD,
+		OMUL,
+		ONE,
+		OOR,
+		ORSH,
+		OSUB,
+		OXOR:
+		n := n.(*BinaryExpr)
+		exprFmt(n.X, s, nprec)
+		fmt.Fprintf(s, " %v ", n.Op())
+		exprFmt(n.Y, s, nprec+1)
+
+	case OANDAND,
+		OOROR:
+		n := n.(*LogicalExpr)
+		exprFmt(n.X, s, nprec)
+		fmt.Fprintf(s, " %v ", n.Op())
+		exprFmt(n.Y, s, nprec+1)
+
+	case OSEND:
+		n := n.(*SendStmt)
+		exprFmt(n.Chan, s, nprec)
+		fmt.Fprintf(s, " <- ")
+		exprFmt(n.Value, s, nprec+1)
+
+	case OADDSTR:
+		n := n.(*AddStringExpr)
+		for i, n1 := range n.List {
+			if i != 0 {
+				fmt.Fprint(s, " + ")
+			}
+			exprFmt(n1, s, nprec)
+		}
+	default:
+		fmt.Fprintf(s, "<node %v>", n.Op())
+	}
+}
+
+func ellipsisIf(b bool) string {
+	if b {
+		return "..."
+	}
+	return ""
+}
+
+// Nodes
+
+// Format implements formatting for a Nodes.
+// The valid formats are:
+//
+//	%v	Go syntax, semicolon-separated
+//	%.v	Go syntax, comma-separated
+//	%+v	Debug syntax, as in DumpList.
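+//
+// For example, a two-element list [x, y] prints as "x; y" with %v and as
+// "x, y" with %.v (illustrative; each element is rendered by its own
+// Format method).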
+//
+func (l Nodes) Format(s fmt.State, verb rune) {
+	if s.Flag('+') && verb == 'v' {
+		// %+v is DumpList output
+		dumpNodes(s, l, 1)
+		return
+	}
+
+	if verb != 'v' {
+		fmt.Fprintf(s, "%%!%c(Nodes)", verb)
+		return
+	}
+
+	sep := "; "
+	if _, ok := s.Precision(); ok { // %.v is expr list
+		sep = ", "
+	}
+
+	for i, n := range l {
+		fmt.Fprint(s, n)
+		if i+1 < len(l) {
+			fmt.Fprint(s, sep)
+		}
+	}
+}
+
+// Dump
+
+// Dump prints the message s followed by a debug dump of n.
+func Dump(s string, n Node) {
+	fmt.Printf("%s [%p]%+v\n", s, n, n)
+}
+
+// DumpList prints the message s followed by a debug dump of each node in the list.
+func DumpList(s string, list Nodes) {
+	var buf bytes.Buffer
+	FDumpList(&buf, s, list)
+	os.Stdout.Write(buf.Bytes())
+}
+
+// FDumpList prints to w the message s followed by a debug dump of each node in the list.
+func FDumpList(w io.Writer, s string, list Nodes) {
+	io.WriteString(w, s)
+	dumpNodes(w, list, 1)
+	io.WriteString(w, "\n")
+}
+
+// indent prints indentation to w.
+func indent(w io.Writer, depth int) {
+	fmt.Fprint(w, "\n")
+	for i := 0; i < depth; i++ {
+		fmt.Fprint(w, ".   ")
+	}
+}
+
+// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
+var EscFmt func(n Node) string
+
+// dumpNodeHeader prints the debug-format node header line to w.
+func dumpNodeHeader(w io.Writer, n Node) {
+	// Useful to see which nodes in an AST printout are actually identical
+	if base.Debug.DumpPtrs != 0 {
+		fmt.Fprintf(w, " p(%p)", n)
+	}
+
+	if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil {
+		// Useful to see where Defn is set and what node it points to
+		fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
+	}
+
+	if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil {
+		// Useful to see where Curfn is set and what node it points to
+		fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn)
+	}
+	if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil {
+		// Useful to see where Outer is set and what node it points to
+		fmt.Fprintf(w, " outer(%p)", n.Name().Outer)
+	}
+
+	if EscFmt != nil {
+		if esc := EscFmt(n); esc != "" {
+			fmt.Fprintf(w, " %s", esc)
+		}
+	}
+
+	if n.Typecheck() != 0 {
+		fmt.Fprintf(w, " tc(%d)", n.Typecheck())
+	}
+
+	// Print Node-specific fields of basic type in header line.
+	v := reflect.ValueOf(n).Elem()
+	t := v.Type()
+	nf := t.NumField()
+	for i := 0; i < nf; i++ {
+		tf := t.Field(i)
+		if tf.PkgPath != "" {
+			// skip unexported field - Interface will fail
+			continue
+		}
+		k := tf.Type.Kind()
+		if reflect.Bool <= k && k <= reflect.Complex128 {
+			name := strings.TrimSuffix(tf.Name, "_")
+			vf := v.Field(i)
+			vfi := vf.Interface()
+			if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && isZero(vf) {
+				continue
+			}
+			if vfi == true {
+				fmt.Fprintf(w, " %s", name)
+			} else {
+				fmt.Fprintf(w, " %s:%+v", name, vf.Interface())
+			}
+		}
+	}
+
+	// Print Node-specific booleans by looking for methods.
+	// Different v, t from above - want *Struct not Struct, for methods.
+	v = reflect.ValueOf(n)
+	t = v.Type()
+	nm := t.NumMethod()
+	for i := 0; i < nm; i++ {
+		tm := t.Method(i)
+		if tm.PkgPath != "" {
+			// skip unexported method - call will fail
+			continue
+		}
+		m := v.Method(i)
+		mt := m.Type()
+		if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool {
+			// TODO(rsc): Remove the func/defer/recover wrapping,
+			// which is guarding against panics in miniExpr,
+			// once we get down to the simpler state in which
+			// nodes have no getter methods that aren't allowed to be called.
+			func() {
+				defer func() { recover() }()
+				if m.Call(nil)[0].Bool() {
+					name := strings.TrimSuffix(tm.Name, "_")
+					fmt.Fprintf(w, " %s", name)
+				}
+			}()
+		}
+	}
+
+	if n.Op() == OCLOSURE {
+		n := n.(*ClosureExpr)
+		if fn := n.Func; fn != nil && fn.Nname.Sym() != nil {
+			fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
+		}
+	}
+
+	if n.Type() != nil {
+		if n.Op() == OTYPE {
+			fmt.Fprintf(w, " type")
+		}
+		fmt.Fprintf(w, " %+v", n.Type())
+	}
+
+	if n.Pos().IsKnown() {
+		pfx := ""
+		switch n.Pos().IsStmt() {
+		case src.PosNotStmt:
+			pfx = "_" // "-" would be confusing
+		case src.PosIsStmt:
+			pfx = "+"
+		}
+		pos := base.Ctxt.PosTable.Pos(n.Pos())
+		file := filepath.Base(pos.Filename())
+		fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
+	}
+}
+
+func dumpNode(w io.Writer, n Node, depth int) {
+	indent(w, depth)
+	if depth > 40 {
+		fmt.Fprint(w, "...")
+		return
+	}
+
+	if n == nil {
+		fmt.Fprint(w, "NilIrNode")
+		return
+	}
+
+	if len(n.Init()) != 0 {
+		fmt.Fprintf(w, "%+v-init", n.Op())
+		dumpNodes(w, n.Init(), depth+1)
+		indent(w, depth)
+	}
+
+	switch n.Op() {
+	default:
+		fmt.Fprintf(w, "%+v", n.Op())
+		dumpNodeHeader(w, n)
+
+	case OLITERAL:
+		fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val())
+		dumpNodeHeader(w, n)
+		return
+
+	case ONAME, ONONAME:
+		if n.Sym() != nil {
+			fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym())
+		} else {
+			fmt.Fprintf(w, "%+v", n.Op())
+		}
+		dumpNodeHeader(w, n)
+		if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil {
+			indent(w, depth)
+			fmt.Fprintf(w, "%+v-ntype", n.Op())
+			dumpNode(w, n.Name().Ntype, depth+1)
+		}
+		return
+
+	case OASOP:
+		n := n.(*AssignOpStmt)
+		fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp)
+		dumpNodeHeader(w, n)
+
+	case OTYPE:
+		fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym())
+		dumpNodeHeader(w, n)
+		if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil {
+			indent(w, depth)
+			fmt.Fprintf(w, "%+v-ntype", n.Op())
+			dumpNode(w, n.Name().Ntype, depth+1)
+		}
+		return
+
+	case OCLOSURE:
+		fmt.Fprintf(w, "%+v", n.Op())
+		dumpNodeHeader(w, n)
+
+	case ODCLFUNC:
+		// Func has many fields we don't want to print.
+		// Bypass reflection and just print what we want.
+		n := n.(*Func)
+		fmt.Fprintf(w, "%+v", n.Op())
+		dumpNodeHeader(w, n)
+		fn := n
+		if len(fn.Dcl) > 0 {
+			indent(w, depth)
+			fmt.Fprintf(w, "%+v-Dcl", n.Op())
+			for _, dcl := range n.Dcl {
+				dumpNode(w, dcl, depth+1)
+			}
+		}
+		if len(fn.ClosureVars) > 0 {
+			indent(w, depth)
+			fmt.Fprintf(w, "%+v-ClosureVars", n.Op())
+			for _, cv := range fn.ClosureVars {
+				dumpNode(w, cv, depth+1)
+			}
+		}
+		if len(fn.Enter) > 0 {
+			indent(w, depth)
+			fmt.Fprintf(w, "%+v-Enter", n.Op())
+			dumpNodes(w, fn.Enter, depth+1)
+		}
+		if len(fn.Body) > 0 {
+			indent(w, depth)
+			fmt.Fprintf(w, "%+v-body", n.Op())
+			dumpNodes(w, fn.Body, depth+1)
+		}
+		return
+	}
+
+	if n.Sym() != nil {
+		fmt.Fprintf(w, " %+v", n.Sym())
+	}
+	if n.Type() != nil {
+		fmt.Fprintf(w, " %+v", n.Type())
+	}
+
+	v := reflect.ValueOf(n).Elem()
+	t := reflect.TypeOf(n).Elem()
+	nf := t.NumField()
+	for i := 0; i < nf; i++ {
+		tf := t.Field(i)
+		vf := v.Field(i)
+		if tf.PkgPath != "" {
+			// skip unexported field - Interface will fail
+			continue
+		}
+		switch tf.Type.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Slice:
+			if vf.IsNil() {
+				continue
+			}
+		}
+		name := strings.TrimSuffix(tf.Name, "_")
+		// Do not bother with field name header lines for the
+		// most common positional arguments: unary, binary expr,
+		// index expr, send stmt, go and defer call expression.
+		switch name {
+		case "X", "Y", "Index", "Chan", "Value", "Call":
+			name = ""
+		}
+		switch val := vf.Interface().(type) {
+		case Node:
+			if name != "" {
+				indent(w, depth)
+				fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+			}
+			dumpNode(w, val, depth+1)
+		case Nodes:
+			if len(val) == 0 {
+				continue
+			}
+			if name != "" {
+				indent(w, depth)
+				fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+			}
+			dumpNodes(w, val, depth+1)
+		default:
+			if vf.Kind() == reflect.Slice && vf.Type().Elem().Implements(nodeType) {
+				if vf.Len() == 0 {
+					continue
+				}
+				if name != "" {
+					indent(w, depth)
+					fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+				}
+				for i, n := 0, vf.Len(); i < n; i++ {
+					dumpNode(w, vf.Index(i).Interface().(Node), depth+1)
+				}
+			}
+		}
+	}
+}
+
+var nodeType = reflect.TypeOf((*Node)(nil)).Elem()
+
+func dumpNodes(w io.Writer, list Nodes, depth int) {
+	if len(list) == 0 {
+		fmt.Fprintf(w, " <nil>")
+		return
+	}
+
+	for _, n := range list {
+		dumpNode(w, n, depth)
+	}
+}
+
+// reflect.Value.IsZero is not available in Go 1.4 (it was added in Go 1.13), so we use this copy instead.
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return math.Float64bits(v.Float()) == 0
+	case reflect.Complex64, reflect.Complex128:
+		c := v.Complex()
+		return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if !isZero(v.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		return v.IsNil()
+	case reflect.String:
+		return v.Len() == 0
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	default:
+		return false
+	}
+}
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
new file mode 100644
index 0000000..0a9db92
--- /dev/null
+++ b/src/cmd/compile/internal/ir/func.go
@@ -0,0 +1,284 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+// A Func corresponds to a single function in a Go program
+// (and vice versa: each function is denoted by exactly one *Func).
+//
+// There are multiple nodes that represent a Func in the IR.
+//
+// The ONAME node (Func.Nname) is used for plain references to it.
+// The ODCLFUNC node (the Func itself) is used for its declaration code.
+// The OCLOSURE node (Func.OClosure) is used for a reference to a
+// function literal.
+//
+// An imported function will have an ONAME node which points to a Func
+// with an empty body.
+// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME.
+// A function literal is represented directly by an OCLOSURE, but it also
+// has an ODCLFUNC (and a matching ONAME) representing the compiled
+// underlying form of the closure, which accesses the captured variables
+// using a special data structure passed in a register.
+//
+// A method declaration is represented like functions, except f.Sym
+// will be the qualified method name (e.g., "T.m") and
+// f.Shortname is the bare method name (e.g., "m").
+//
+// A method expression (T.M) is represented as an OMETHEXPR node,
+// in which n.Left and n.Right point to the type and method, respectively.
+// Each distinct mention of a method expression in the source code
+// constructs a fresh node.
+//
+// A method value (t.M) is represented by ODOTMETH/ODOTINTER
+// when it is called directly and by OCALLPART otherwise.
+// These are like method expressions, except that for ODOTMETH/ODOTINTER,
+// the method name is stored in Sym instead of Right.
+// Each OCALLPART ends up being implemented as a new
+// function, a bit like a closure, with its own ODCLFUNC.
+// The OCALLPART uses n.Func to record the linkage to
+// the generated ODCLFUNC, but there is no
+// pointer from the Func back to the OCALLPART.
+type Func struct {
+	miniNode
+	Body Nodes
+	Iota int64
+
+	Nname    *Name        // ONAME node
+	OClosure *ClosureExpr // OCLOSURE node
+
+	Shortname *types.Sym
+
+	// Extra entry code for the function. For example, allocate and initialize
+	// memory for escaping parameters.
+	Enter Nodes
+	Exit  Nodes
+
+	// Dcl holds the ONAME nodes for all params/locals of this func/closure.
+	// It does NOT include closure variables until closures are transformed
+	// during walk.
+	// Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
+	// with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
+	// However, as anonymous or blank PPARAMs are not actually declared,
+	// they are omitted from Dcl.
+	// Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively.
+	Dcl []*Name
+
+	// ClosureVars lists the free variables that are used within a
+	// function literal, but formally declared in an enclosing
+	// function. The variables in this slice are the closure function's
+	// own copy of the variables, which are used within its function
+	// body. They will also each have IsClosureVar set, and will have
+	// Byval set if they're captured by value.
+	ClosureVars []*Name
+
+	// Enclosed functions that need to be compiled.
+	// Populated during walk.
+	Closures []*Func
+
+	// Parents records the parent scope of each scope within a
+	// function. The root scope (0) has no parent, so the i'th
+	// scope's parent is stored at Parents[i-1].
+	Parents []ScopeID
+
+	// Marks records scope boundary changes.
+	Marks []Mark
+
+	FieldTrack map[*obj.LSym]struct{}
+	DebugInfo  interface{}
+	LSym       *obj.LSym
+
+	Inl *Inline
+
+	// Closgen tracks how many closures have been generated within
+	// this function. Used by closurename for creating unique
+	// function names.
+	Closgen int32
+
+	Label int32 // largest auto-generated label in this function
+
+	Endlineno src.XPos
+	WBPos     src.XPos // position of first write barrier; see SetWBPos
+
+	Pragma PragmaFlag // go:xxx function annotations
+
+	flags      bitset16
+	NumDefers  int32 // number of defer calls in the function
+	NumReturns int32 // number of explicit returns in the function
+
+	// nwbrCalls records the LSyms of functions called by this
+	// function for go:nowritebarrierrec analysis. Only filled in
+	// if nowritebarrierrecCheck != nil.
+	NWBRCalls *[]SymAndPos
+}
+
+func NewFunc(pos src.XPos) *Func {
+	f := new(Func)
+	f.pos = pos
+	f.op = ODCLFUNC
+	f.Iota = -1
+	return f
+}
+
+func (f *Func) isStmt() {}
+
+func (n *Func) copy() Node                         { panic(n.no("copy")) }
+func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) }
+func (n *Func) editChildren(edit func(Node) Node)  { editNodes(n.Body, edit) }
+
+func (f *Func) Type() *types.Type                { return f.Nname.Type() }
+func (f *Func) Sym() *types.Sym                  { return f.Nname.Sym() }
+func (f *Func) Linksym() *obj.LSym               { return f.Nname.Linksym() }
+func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) }
+
+// An Inline holds fields used for function bodies that can be inlined.
+type Inline struct {
+	Cost int32 // heuristic cost of inlining this function
+
+	// Copies of Func.Dcl and Nbody for use during inlining.
+	Dcl  []*Name
+	Body []Node
+}
+
+// A Mark represents a scope boundary.
+type Mark struct {
+	// Pos is the position of the token that marks the scope
+	// change.
+	Pos src.XPos
+
+	// Scope identifies the innermost scope to the right of Pos.
+	Scope ScopeID
+}
+
+// A ScopeID represents a lexical scope within a function.
+type ScopeID int32
+
+const (
+	funcDupok         = 1 << iota // duplicate definitions ok
+	funcWrapper                   // is method wrapper
+	funcNeedctxt                  // function uses context register (has closure variables)
+	funcReflectMethod             // function calls reflect.Type.Method or MethodByName
+	// true for a closure defined inside a function; false for an ordinary
+	// function or for a closure in a global variable initialization
+	funcIsHiddenClosure
+	funcHasDefer                 // contains a defer statement
+	funcNilCheckDisabled         // disable nil checks when compiling this function
+	funcInlinabilityChecked      // inliner has already determined whether the function is inlinable
+	funcExportInline             // include inline body in export data
+	funcInstrumentBody           // add race/msan instrumentation during SSA construction
+	funcOpenCodedDeferDisallowed // can't do open-coded defers
+	funcClosureCalled            // closure is only immediately called
+)
+
+type SymAndPos struct {
+	Sym *obj.LSym // LSym of callee
+	Pos src.XPos  // line of call
+}
+
+func (f *Func) Dupok() bool                    { return f.flags&funcDupok != 0 }
+func (f *Func) Wrapper() bool                  { return f.flags&funcWrapper != 0 }
+func (f *Func) Needctxt() bool                 { return f.flags&funcNeedctxt != 0 }
+func (f *Func) ReflectMethod() bool            { return f.flags&funcReflectMethod != 0 }
+func (f *Func) IsHiddenClosure() bool          { return f.flags&funcIsHiddenClosure != 0 }
+func (f *Func) HasDefer() bool                 { return f.flags&funcHasDefer != 0 }
+func (f *Func) NilCheckDisabled() bool         { return f.flags&funcNilCheckDisabled != 0 }
+func (f *Func) InlinabilityChecked() bool      { return f.flags&funcInlinabilityChecked != 0 }
+func (f *Func) ExportInline() bool             { return f.flags&funcExportInline != 0 }
+func (f *Func) InstrumentBody() bool           { return f.flags&funcInstrumentBody != 0 }
+func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
+func (f *Func) ClosureCalled() bool            { return f.flags&funcClosureCalled != 0 }
+
+func (f *Func) SetDupok(b bool)                    { f.flags.set(funcDupok, b) }
+func (f *Func) SetWrapper(b bool)                  { f.flags.set(funcWrapper, b) }
+func (f *Func) SetNeedctxt(b bool)                 { f.flags.set(funcNeedctxt, b) }
+func (f *Func) SetReflectMethod(b bool)            { f.flags.set(funcReflectMethod, b) }
+func (f *Func) SetIsHiddenClosure(b bool)          { f.flags.set(funcIsHiddenClosure, b) }
+func (f *Func) SetHasDefer(b bool)                 { f.flags.set(funcHasDefer, b) }
+func (f *Func) SetNilCheckDisabled(b bool)         { f.flags.set(funcNilCheckDisabled, b) }
+func (f *Func) SetInlinabilityChecked(b bool)      { f.flags.set(funcInlinabilityChecked, b) }
+func (f *Func) SetExportInline(b bool)             { f.flags.set(funcExportInline, b) }
+func (f *Func) SetInstrumentBody(b bool)           { f.flags.set(funcInstrumentBody, b) }
+func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
+func (f *Func) SetClosureCalled(b bool)            { f.flags.set(funcClosureCalled, b) }
+
+func (f *Func) SetWBPos(pos src.XPos) {
+	if base.Debug.WB != 0 {
+		base.WarnfAt(pos, "write barrier")
+	}
+	if !f.WBPos.IsKnown() {
+		f.WBPos = pos
+	}
+}
+
+// FuncName returns the name (without the package) of the function f.
+func FuncName(f *Func) string {
+	if f == nil || f.Nname == nil {
+		return "<nil>"
+	}
+	return f.Sym().Name
+}
+
+// PkgFuncName returns the name of the function referenced by f, with the
+// package prepended. This differs from the compiler's internal convention,
+// where local functions lack a package, because the ultimate consumer here
+// is a human looking at an IDE; the package is empty only if the
+// compilation package is actually the empty string.
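+//
+// For example, assuming a function Sqrt declared in a package with path
+// "math", PkgFuncName returns "math.Sqrt".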
+func PkgFuncName(f *Func) string {
+	if f == nil || f.Nname == nil {
+		return "<nil>"
+	}
+	s := f.Sym()
+	pkg := s.Pkg
+
+	p := base.Ctxt.Pkgpath
+	if pkg != nil && pkg.Path != "" {
+		p = pkg.Path
+	}
+	if p == "" {
+		return s.Name
+	}
+	return p + "." + s.Name
+}
+
+var CurFunc *Func
+
+func FuncSymName(s *types.Sym) string {
+	return s.Name + "·f"
+}
+
+// MarkFunc marks a node as a function.
+func MarkFunc(n *Name) {
+	if n.Op() != ONAME || n.Class != Pxxx {
+		base.Fatalf("expected ONAME/Pxxx node, got %v", n)
+	}
+
+	n.Class = PFUNC
+	n.Sym().SetFunc(true)
+}
+
+// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
+// and for compiling the runtime.
+func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
+	if base.Debug.Closure > 0 {
+		if clo.Esc() == EscHeap {
+			base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
+		} else {
+			base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
+		}
+	}
+	if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
+		base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
+	}
+}
+
+// IsTrivialClosure reports whether closure clo has an
+// empty list of captured vars.
+func IsTrivialClosure(clo *ClosureExpr) bool {
+	return len(clo.Func.ClosureVars) == 0
+}
diff --git a/src/cmd/compile/internal/ir/ir.go b/src/cmd/compile/internal/ir/ir.go
new file mode 100644
index 0000000..82224ca
--- /dev/null
+++ b/src/cmd/compile/internal/ir/ir.go
@@ -0,0 +1,5 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
new file mode 100644
index 0000000..a7ff4ac
--- /dev/null
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -0,0 +1,92 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run -mod=mod mknode.go
+
+package ir
+
+import (
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"fmt"
+	"go/constant"
+)
+
+// A miniNode is a minimal node implementation,
+// meant to be embedded as the first field in a larger node implementation,
+// at a cost of 8 bytes.
+//
+// A miniNode is NOT a valid Node by itself: the embedding struct
+// must at least provide:
+//
+//	func (n *MyNode) String() string { return fmt.Sprint(n) }
+//	func (n *MyNode) rawCopy() Node { c := *n; return &c }
+//	func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+//
+// The embedding struct should also fill in n.op in its constructor,
+// for more useful panic messages when invalid methods are called,
+// instead of implementing Op itself.
+//
+type miniNode struct {
+	pos  src.XPos // uint32
+	op   Op       // uint8
+	bits bitset8
+	esc  uint16
+}
+
+// posOr returns pos if known, or else n.pos.
+// For use in DeepCopy.
+func (n *miniNode) posOr(pos src.XPos) src.XPos {
+	if pos.IsKnown() {
+		return pos
+	}
+	return n.pos
+}
+
+// op can be read, but not written.
+// An embedding implementation can provide a SetOp if desired.
+// (The panicking SetOp is with the other panics below.)
+func (n *miniNode) Op() Op            { return n.op }
+func (n *miniNode) Pos() src.XPos     { return n.pos }
+func (n *miniNode) SetPos(x src.XPos) { n.pos = x }
+func (n *miniNode) Esc() uint16       { return n.esc }
+func (n *miniNode) SetEsc(x uint16)   { n.esc = x }
+
+const (
+	miniWalkdefShift   = 0 // TODO(mdempsky): Move to Name.flags.
+	miniTypecheckShift = 2
+	miniDiag           = 1 << 4
+	miniWalked         = 1 << 5 // to prevent/catch re-walking
+)
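+
+// To illustrate the bit layout above: bits 0-1 of miniNode.bits hold the
+// Walkdef state, bits 2-3 hold the Typecheck state, bit 4 is Diag, and
+// bit 5 is Walked, so all of this per-node state fits in a single bitset8.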
+
+func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
+func (n *miniNode) SetTypecheck(x uint8) {
+	if x > 3 {
+		panic(fmt.Sprintf("cannot SetTypecheck %d", x))
+	}
+	n.bits.set2(miniTypecheckShift, x)
+}
+
+func (n *miniNode) Diag() bool     { return n.bits&miniDiag != 0 }
+func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
+
+func (n *miniNode) Walked() bool     { return n.bits&miniWalked != 0 }
+func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
+
+// Empty, immutable graph structure.
+
+func (n *miniNode) Init() Nodes { return Nodes{} }
+
+// Additional functionality unavailable.
+
+func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
+
+func (n *miniNode) Type() *types.Type       { return nil }
+func (n *miniNode) SetType(*types.Type)     { panic(n.no("SetType")) }
+func (n *miniNode) Name() *Name             { return nil }
+func (n *miniNode) Sym() *types.Sym         { return nil }
+func (n *miniNode) Val() constant.Value     { panic(n.no("Val")) }
+func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
+func (n *miniNode) NonNil() bool            { return false }
+func (n *miniNode) MarkNonNil()             { panic(n.no("MarkNonNil")) }
diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go
new file mode 100644
index 0000000..326f491
--- /dev/null
+++ b/src/cmd/compile/internal/ir/mknode.go
@@ -0,0 +1,228 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/format"
+	"go/types"
+	"io/ioutil"
+	"log"
+	"reflect"
+	"sort"
+	"strings"
+
+	"golang.org/x/tools/go/packages"
+)
+
+var irPkg *types.Package
+var buf bytes.Buffer
+
+func main() {
+	cfg := &packages.Config{
+		Mode: packages.NeedSyntax | packages.NeedTypes,
+	}
+	pkgs, err := packages.Load(cfg, "cmd/compile/internal/ir")
+	if err != nil {
+		log.Fatal(err)
+	}
+	irPkg = pkgs[0].Types
+
+	fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.")
+	fmt.Fprintln(&buf)
+	fmt.Fprintln(&buf, "package ir")
+	fmt.Fprintln(&buf)
+	fmt.Fprintln(&buf, `import "fmt"`)
+
+	scope := irPkg.Scope()
+	for _, name := range scope.Names() {
+		if strings.HasPrefix(name, "mini") {
+			continue
+		}
+
+		obj, ok := scope.Lookup(name).(*types.TypeName)
+		if !ok {
+			continue
+		}
+		typ := obj.Type().(*types.Named)
+		if !implementsNode(types.NewPointer(typ)) {
+			continue
+		}
+
+		fmt.Fprintf(&buf, "\n")
+		fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name)
+
+		switch name {
+		case "Name", "Func":
+			// Too specialized to automate.
+			continue
+		}
+
+		forNodeFields(typ,
+			"func (n *%[1]s) copy() Node { c := *n\n",
+			"",
+			"c.%[1]s = copy%[2]s(c.%[1]s)",
+			"return &c }\n")
+
+		forNodeFields(typ,
+			"func (n *%[1]s) doChildren(do func(Node) bool) bool {\n",
+			"if n.%[1]s != nil && do(n.%[1]s) { return true }",
+			"if do%[2]s(n.%[1]s, do) { return true }",
+			"return false }\n")
+
+		forNodeFields(typ,
+			"func (n *%[1]s) editChildren(edit func(Node) Node) {\n",
+			"if n.%[1]s != nil { n.%[1]s = edit(n.%[1]s).(%[2]s) }",
+			"edit%[2]s(n.%[1]s, edit)",
+			"}\n")
+	}
+
+	makeHelpers()
+
+	out, err := format.Source(buf.Bytes())
+	if err != nil {
+		// write out mangled source so we can see the bug.
+		out = buf.Bytes()
+	}
+
+	err = ioutil.WriteFile("node_gen.go", out, 0666)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+// needHelper maps the base name of each needed slice helper to its
+// slice-element type.
+var needHelper = map[string]string{}
+
+func makeHelpers() {
+	var names []string
+	for name := range needHelper {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	for _, name := range names {
+		fmt.Fprintf(&buf, sliceHelperTmpl, name, needHelper[name])
+	}
+}
+
+const sliceHelperTmpl = `
+func copy%[1]s(list []%[2]s) []%[2]s {
+	if list == nil {
+		return nil
+	}
+	c := make([]%[2]s, len(list))
+	copy(c, list)
+	return c
+}
+func do%[1]s(list []%[2]s, do func(Node) bool) bool {
+	for _, x := range list {
+		if x != nil && do(x) {
+			return true
+		}
+	}
+	return false
+}
+func edit%[1]s(list []%[2]s, edit func(Node) Node) {
+	for i, x := range list {
+		if x != nil {
+			list[i] = edit(x).(%[2]s)
+		}
+	}
+}
+`
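+
+// For example, once a []Node field is seen below, needHelper gains the
+// entry "Nodes" -> "Node", and the template above expands into copyNodes,
+// doNodes, and editNodes, the helpers referenced by hand-written files
+// such as func.go.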
+
+func forNodeFields(named *types.Named, prologue, singleTmpl, sliceTmpl, epilogue string) {
+	fmt.Fprintf(&buf, prologue, named.Obj().Name())
+
+	anyField(named.Underlying().(*types.Struct), func(f *types.Var) bool {
+		if f.Embedded() {
+			return false
+		}
+		name, typ := f.Name(), f.Type()
+
+		slice, _ := typ.Underlying().(*types.Slice)
+		if slice != nil {
+			typ = slice.Elem()
+		}
+
+		tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg))
+		if implementsNode(typ) {
+			if slice != nil {
+				helper := strings.TrimPrefix(what, "*") + "s"
+				needHelper[helper] = what
+				tmpl, what = sliceTmpl, helper
+			}
+		} else if what == "*Field" {
+			// Special case for *Field.
+			tmpl = sliceTmpl
+			if slice != nil {
+				what = "Fields"
+			} else {
+				what = "Field"
+			}
+		} else {
+			return false
+		}
+
+		if tmpl == "" {
+			return false
+		}
+
+		// Allow template to not use all arguments without
+		// upsetting fmt.Printf.
+		s := fmt.Sprintf(tmpl+"\x00 %[1]s %[2]s", name, what)
+		fmt.Fprintln(&buf, s[:strings.LastIndex(s, "\x00")])
+		return false
+	})
+
+	fmt.Fprintf(&buf, epilogue)
+}
+
+func implementsNode(typ types.Type) bool {
+	if _, ok := typ.Underlying().(*types.Interface); ok {
+		// TODO(mdempsky): Check the interface implements Node.
+		// Worst case, node_gen.go will fail to compile if we're wrong.
+		return true
+	}
+
+	if ptr, ok := typ.(*types.Pointer); ok {
+		if str, ok := ptr.Elem().Underlying().(*types.Struct); ok {
+			return anyField(str, func(f *types.Var) bool {
+				return f.Embedded() && f.Name() == "miniNode"
+			})
+		}
+	}
+
+	return false
+}
+
+func anyField(typ *types.Struct, pred func(f *types.Var) bool) bool {
+	for i, n := 0, typ.NumFields(); i < n; i++ {
+		if value, ok := reflect.StructTag(typ.Tag(i)).Lookup("mknode"); ok {
+			if value != "-" {
+				panic(fmt.Sprintf("unexpected tag value: %q", value))
+			}
+			continue
+		}
+
+		f := typ.Field(i)
+		if pred(f) {
+			return true
+		}
+		if f.Embedded() {
+			if typ, ok := f.Type().Underlying().(*types.Struct); ok {
+				if anyField(typ, pred) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
new file mode 100644
index 0000000..fa06396
--- /dev/null
+++ b/src/cmd/compile/internal/ir/name.go
@@ -0,0 +1,512 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+	"fmt"
+
+	"go/constant"
+)
+
+// An Ident is an identifier, possibly qualified.
+type Ident struct {
+	miniExpr
+	sym *types.Sym
+}
+
+func NewIdent(pos src.XPos, sym *types.Sym) *Ident {
+	n := new(Ident)
+	n.op = ONONAME
+	n.pos = pos
+	n.sym = sym
+	return n
+}
+
+func (n *Ident) Sym() *types.Sym { return n.sym }
+
+func (*Ident) CanBeNtype() {}
+
+// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL).
+type Name struct {
+	miniExpr
+	BuiltinOp Op         // uint8
+	Class     Class      // uint8
+	pragma    PragmaFlag // int16
+	flags     bitset16
+	sym       *types.Sym
+	Func      *Func
+	Offset_   int64
+	val       constant.Value
+	Opt       interface{} // for use by escape analysis
+	Embed     *[]Embed    // list of embedded files, for ONAME var
+
+	PkgName *PkgName // real package for import . names
+	// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
+	// For a closure var, the ONAME node of the outer captured variable
+	Defn Node
+
+	// The function, method, or closure in which local variable or param is declared.
+	Curfn *Func
+
+	Ntype    Ntype
+	Heapaddr *Name // temp holding heap address of param
+
+	// ONAME closure linkage
+	// Consider:
+	//
+	//	func f() {
+	//		x := 1 // x1
+	//		func() {
+	//			use(x) // x2
+	//			func() {
+	//				use(x) // x3
+	//				--- parser is here ---
+	//			}()
+	//		}()
+	//	}
+	//
+	// There is an original declaration of x and then a chain of mentions of x
+	// leading into the current function. Each time x is mentioned in a new closure,
+	// we create a variable representing x for use in that specific closure,
+	// since the way you get to x is different in each closure.
+	//
+	// Let's number the specific variables as shown in the code:
+	// x1 is the original x, x2 is when mentioned in the closure,
+	// and x3 is when mentioned in the closure in the closure.
+	//
+	// We keep these linked (assume N > 1):
+	//
+	//   - x1.Defn = original declaration statement for x (like most variables)
+	//   - x1.Innermost = current innermost closure x (in this case x3), or nil for none
+	//   - x1.IsClosureVar() = false
+	//
+	//   - xN.Defn = x1, N > 1
+	//   - xN.IsClosureVar() = true, N > 1
+	//   - x2.Outer = nil
+	//   - xN.Outer = x(N-1), N > 2
+	//
+	//
+	// When we look up x in the symbol table, we always get x1.
+	// Then we can use x1.Innermost (if not nil) to get the x
+	// for the innermost known closure function,
+	// but the first reference in a closure will find either no x1.Innermost
+	// or an x1.Innermost with .Funcdepth < Funcdepth.
+	// In that case, a new xN must be created, linked in with:
+	//
+	//     xN.Defn = x1
+	//     xN.Outer = x1.Innermost
+	//     x1.Innermost = xN
+	//
+	// When we finish the function, we'll process its closure variables
+	// and find xN and pop it off the list using:
+	//
+	//     x1 := xN.Defn
+	//     x1.Innermost = xN.Outer
+	//
+	// We leave x1.Innermost set so that we can still get to the original
+	// variable quickly. Not shown here, but once we're
+	// done parsing a function and no longer need xN.Outer for the
+	// lexical x reference links as described above, funcLit
+	// recomputes xN.Outer as the semantic x reference link tree,
+	// even filling in x in intermediate closures that might not
+	// have mentioned it along the way to inner closures that did.
+	// See funcLit for details.
+	//
+	// During the eventual compilation, then, for closure variables we have:
+	//
+	//     xN.Defn = original variable
+	//     xN.Outer = variable captured in next outward scope
+	//                to make closure where xN appears
+	//
+	// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
+	// and x.Innermost/Outer means x.Name.Innermost/Outer.
+	Innermost *Name
+	Outer     *Name
+}
+
+func (n *Name) isExpr() {}
+
+func (n *Name) copy() Node                         { panic(n.no("copy")) }
+func (n *Name) doChildren(do func(Node) bool) bool { return false }
+func (n *Name) editChildren(edit func(Node) Node)  {}
+
+// TypeDefn returns the type definition for a named OTYPE.
+// That is, given "type T Defn", it returns Defn.
+// It is used by package types.
+func (n *Name) TypeDefn() *types.Type {
+	return n.Ntype.Type()
+}
+
+// RecordFrameOffset records the frame offset for the name.
+// It is used by package types when laying out function arguments.
+func (n *Name) RecordFrameOffset(offset int64) {
+	n.SetFrameOffset(offset)
+}
+
+// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
+	if sym == nil {
+		base.Fatalf("NewNameAt nil")
+	}
+	return newNameAt(pos, ONAME, sym)
+}
+
+// NewIota returns a new OIOTA Node.
+func NewIota(pos src.XPos, sym *types.Sym) *Name {
+	if sym == nil {
+		base.Fatalf("NewIota nil")
+	}
+	return newNameAt(pos, OIOTA, sym)
+}
+
+// NewDeclNameAt returns a new Name associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+	if sym == nil {
+		base.Fatalf("NewDeclNameAt nil")
+	}
+	switch op {
+	case ONAME, OTYPE, OLITERAL:
+		// ok
+	default:
+		base.Fatalf("NewDeclNameAt op %v", op)
+	}
+	return newNameAt(pos, op, sym)
+}
+
+// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos.
+func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name {
+	if sym == nil {
+		base.Fatalf("NewConstAt nil")
+	}
+	n := newNameAt(pos, OLITERAL, sym)
+	n.SetType(typ)
+	n.SetVal(val)
+	return n
+}
+
+// newNameAt is like NewNameAt but allows sym == nil.
+func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+	n := new(Name)
+	n.op = op
+	n.pos = pos
+	n.sym = sym
+	return n
+}
+
+func (n *Name) Name() *Name         { return n }
+func (n *Name) Sym() *types.Sym     { return n.sym }
+func (n *Name) SetSym(x *types.Sym) { n.sym = x }
+func (n *Name) SubOp() Op           { return n.BuiltinOp }
+func (n *Name) SetSubOp(x Op)       { n.BuiltinOp = x }
+func (n *Name) SetFunc(x *Func)     { n.Func = x }
+func (n *Name) Offset() int64       { panic("Name.Offset") }
+func (n *Name) SetOffset(x int64) {
+	if x != 0 {
+		panic("Name.SetOffset")
+	}
+}
+func (n *Name) FrameOffset() int64     { return n.Offset_ }
+func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
+func (n *Name) Iota() int64            { return n.Offset_ }
+func (n *Name) SetIota(x int64)        { n.Offset_ = x }
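+
+// Note that Offset_ is deliberately overloaded: FrameOffset/SetFrameOffset
+// use it for a variable's stack frame offset, while Iota/SetIota use it for
+// the iota value bound to an OIOTA name.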
+func (n *Name) Walkdef() uint8         { return n.bits.get2(miniWalkdefShift) }
+func (n *Name) SetWalkdef(x uint8) {
+	if x > 3 {
+		panic(fmt.Sprintf("cannot SetWalkdef %d", x))
+	}
+	n.bits.set2(miniWalkdefShift, x)
+}
+
+func (n *Name) Linksym() *obj.LSym               { return n.sym.Linksym() }
+func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) }
+
+func (*Name) CanBeNtype()    {}
+func (*Name) CanBeAnSSASym() {}
+func (*Name) CanBeAnSSAAux() {}
+
+// Pragma returns the PragmaFlag for n, which must be for an OTYPE.
+func (n *Name) Pragma() PragmaFlag { return n.pragma }
+
+// SetPragma sets the PragmaFlag for n, which must be for an OTYPE.
+func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag }
+
+// Alias reports whether n, which must be for an OTYPE, is a type alias.
+func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
+
+// SetAlias sets whether n, which must be for an OTYPE, is a type alias.
+func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
+
+const (
+	nameReadonly              = 1 << iota
+	nameByval                 // the variable is captured by value, not by reference
+	nameNeedzero              // if it contains pointers, needs to be zeroed on function entry
+	nameAutoTemp              // the variable is a compiler temporary (implies no DWARF info; reset if it escapes to the heap)
+	nameUsed                  // the variable was used (for the "declared and not used" error)
+	nameIsClosureVar          // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn
+	nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
+	nameAddrtaken             // address taken, even if not moved to heap
+	nameInlFormal             // PAUTO created by inliner, derived from callee formal
+	nameInlLocal              // PAUTO created by inliner, derived from callee local
+	nameOpenDeferSlot         // if temporary var storing info for open-coded defers
+	nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
+	nameAlias                 // is type name an alias
+)
+
+func (n *Name) Readonly() bool              { return n.flags&nameReadonly != 0 }
+func (n *Name) Needzero() bool              { return n.flags&nameNeedzero != 0 }
+func (n *Name) AutoTemp() bool              { return n.flags&nameAutoTemp != 0 }
+func (n *Name) Used() bool                  { return n.flags&nameUsed != 0 }
+func (n *Name) IsClosureVar() bool          { return n.flags&nameIsClosureVar != 0 }
+func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
+func (n *Name) Addrtaken() bool             { return n.flags&nameAddrtaken != 0 }
+func (n *Name) InlFormal() bool             { return n.flags&nameInlFormal != 0 }
+func (n *Name) InlLocal() bool              { return n.flags&nameInlLocal != 0 }
+func (n *Name) OpenDeferSlot() bool         { return n.flags&nameOpenDeferSlot != 0 }
+func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
+
+func (n *Name) setReadonly(b bool)              { n.flags.set(nameReadonly, b) }
+func (n *Name) SetNeedzero(b bool)              { n.flags.set(nameNeedzero, b) }
+func (n *Name) SetAutoTemp(b bool)              { n.flags.set(nameAutoTemp, b) }
+func (n *Name) SetUsed(b bool)                  { n.flags.set(nameUsed, b) }
+func (n *Name) SetIsClosureVar(b bool)          { n.flags.set(nameIsClosureVar, b) }
+func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
+func (n *Name) SetAddrtaken(b bool)             { n.flags.set(nameAddrtaken, b) }
+func (n *Name) SetInlFormal(b bool)             { n.flags.set(nameInlFormal, b) }
+func (n *Name) SetInlLocal(b bool)              { n.flags.set(nameInlLocal, b) }
+func (n *Name) SetOpenDeferSlot(b bool)         { n.flags.set(nameOpenDeferSlot, b) }
+func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
+
+// OnStack reports whether variable n may reside on the stack.
+func (n *Name) OnStack() bool {
+	if n.Op() == ONAME {
+		switch n.Class {
+		case PPARAM, PPARAMOUT, PAUTO:
+			return n.Esc() != EscHeap
+		case PEXTERN, PAUTOHEAP:
+			return false
+		}
+	}
+	// Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed
+	// methods, but it can only recover from panics, not Fatalf.
+	panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n))
+}
+
+// MarkReadonly indicates that n is an ONAME with readonly contents.
+func (n *Name) MarkReadonly() {
+	if n.Op() != ONAME {
+		base.Fatalf("Node.MarkReadonly %v", n.Op())
+	}
+	n.setReadonly(true)
+	// Mark the linksym as readonly immediately
+	// so that the SSA backend can use this information.
+	// It will be overridden later during dumpglobls.
+	n.Linksym().Type = objabi.SRODATA
+}
+
+// Val returns the constant.Value for the node.
+func (n *Name) Val() constant.Value {
+	if n.val == nil {
+		return constant.MakeUnknown()
+	}
+	return n.val
+}
+
+// SetVal sets the constant.Value for the node.
+func (n *Name) SetVal(v constant.Value) {
+	if n.op != OLITERAL {
+		panic(n.no("SetVal"))
+	}
+	AssertValidTypeForConst(n.Type(), v)
+	n.val = v
+}
+
+// Canonical returns the logical declaration that n represents. If n
+// is a closure variable, then Canonical returns the original Name as
+// it appears in the function that immediately contains the
+// declaration. Otherwise, Canonical simply returns n itself.
+func (n *Name) Canonical() *Name {
+	if n.IsClosureVar() && n.Defn != nil {
+		n = n.Defn.(*Name)
+	}
+	return n
+}
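+
+// For example, if x is a variable captured by a closure, the closure's
+// copy c has c.Defn set to the original x, so c.Canonical() == x, while
+// x.Canonical() == x itself.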
+
+func (n *Name) SetByval(b bool) {
+	if n.Canonical() != n {
+		base.Fatalf("SetByval called on non-canonical variable: %v", n)
+	}
+	n.flags.set(nameByval, b)
+}
+
+func (n *Name) Byval() bool {
+	// We require byval to be set on the canonical variable, but we
+	// allow it to be accessed from any instance.
+	return n.Canonical().flags&nameByval != 0
+}
+
+// CaptureName returns a Name suitable for referring to n from within function
+// fn or from the package block if fn is nil. If n is a free variable declared
+// within a function that encloses fn, then CaptureName returns a closure
+// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply
+// returns n.
+func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
+	if n.IsClosureVar() {
+		base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
+	}
+	if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn {
+		return n // okay to use directly
+	}
+	if fn == nil {
+		base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
+	}
+
+	c := n.Innermost
+	if c != nil && c.Curfn == fn {
+		return c
+	}
+
+	// Do not have a closure var for the active closure yet; make one.
+	c = NewNameAt(pos, n.Sym())
+	c.Curfn = fn
+	c.Class = PAUTOHEAP
+	c.SetIsClosureVar(true)
+	c.Defn = n
+
+	// Link into list of active closure variables.
+	// Popped from list in FinishCaptureNames.
+	c.Outer = n.Innermost
+	n.Innermost = c
+	fn.ClosureVars = append(fn.ClosureVars, c)
+
+	return c
+}
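+
+// A typical use (a sketch, not a prescribed API): while resolving an
+// identifier inside a function literal, call CaptureName with the current
+// function and use the returned Name; once the literal's body is complete,
+// call FinishCaptureNames with the literal and its immediately enclosing
+// function to fix up the Outer links.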
+
+// FinishCaptureNames handles any work leftover from calling CaptureName
+// earlier. outerfn should be the function that immediately encloses fn.
+func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) {
+	// closure-specific variables are hanging off the
+	// ordinary ones; see CaptureName above.
+	// unhook them.
+	// make the list of pointers for the closure call.
+	for _, cv := range fn.ClosureVars {
+		// Unlink from n; see comment in syntax.go type Param for these fields.
+		n := cv.Defn.(*Name)
+		n.Innermost = cv.Outer
+
+		// If the closure usage of n is not dense, we need to make it
+		// dense by recapturing n within the enclosing function.
+		//
+		// That is, suppose we just finished parsing the innermost
+		// closure f4 in this code:
+		//
+		//	func f() {
+		//		n := 1
+		//		func() { // f2
+		//			use(n)
+		//			func() { // f3
+		//				func() { // f4
+		//					use(n)
+		//				}()
+		//			}()
+		//		}()
+		//	}
+		//
+		// At this point cv.Outer is f2's n; there is no n for f3. To
+		// construct the closure f4 from within f3, we need to use f3's
+		// n and in this case we need to create f3's n with CaptureName.
+		//
+		// We'll decide later in walk whether to use v directly or &v.
+		cv.Outer = CaptureName(pos, outerfn, n)
+	}
+}
+
+// SameSource reports whether two nodes refer to the same source
+// element.
+//
+// It exists to help incrementally migrate the compiler towards
+// allowing the introduction of IdentExpr (#42990). Once we have
+// IdentExpr, it will no longer be safe to directly compare Node
+// values to tell if they refer to the same Name. Instead, code will
+// need to explicitly get references to the underlying Name object(s),
+// and compare those instead.
+//
+// It will still be safe to compare Nodes directly for checking if two
+// nodes are syntactically the same. The SameSource function exists to
+// indicate code that intentionally compares Nodes for syntactic
+// equality as opposed to code that has yet to be updated in
+// preparation for IdentExpr.
+func SameSource(n1, n2 Node) bool {
+	return n1 == n2
+}
+
+// Uses reports whether expression x is a (direct) use of the given
+// variable.
+func Uses(x Node, v *Name) bool {
+	if v == nil || v.Op() != ONAME {
+		base.Fatalf("Uses bad Name: %v", v)
+	}
+	return x.Op() == ONAME && x.Name() == v
+}
+
+// DeclaredBy reports whether expression x refers (directly) to a
+// variable that was declared by the given statement.
+func DeclaredBy(x, stmt Node) bool {
+	if stmt == nil {
+		base.Fatalf("DeclaredBy nil")
+	}
+	return x.Op() == ONAME && SameSource(x.Name().Defn, stmt)
+}
+
+// A Class describes the "storage class" of a variable or function.
+// During parsing, storage classes are called declaration contexts.
+type Class uint8
+
+//go:generate stringer -type=Class name.go
+const (
+	Pxxx      Class = iota // no class; used during ssa conversion to indicate pseudo-variables
+	PEXTERN                // global variables
+	PAUTO                  // local variables
+	PAUTOHEAP              // local variables or parameters moved to heap
+	PPARAM                 // input arguments
+	PPARAMOUT              // output results
+	PFUNC                  // global functions
+
+	// Careful: Class is stored in three bits in Node.flags.
+	_ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
+)
+
+type Embed struct {
+	Pos      src.XPos
+	Patterns []string
+}
+
+// A PkgName is an identifier referring to an imported package.
+type PkgName struct {
+	miniNode
+	sym  *types.Sym
+	Pkg  *types.Pkg
+	Used bool
+}
+
+func (p *PkgName) Sym() *types.Sym { return p.sym }
+
+func (*PkgName) CanBeNtype() {}
+
+func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
+	p := &PkgName{sym: sym, Pkg: pkg}
+	p.op = OPACK
+	p.pos = pos
+	return p
+}
+
+var RegFP *Name
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
new file mode 100644
index 0000000..ffa7daf
--- /dev/null
+++ b/src/cmd/compile/internal/ir/node.go
@@ -0,0 +1,591 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// “Abstract” syntax representation.
+
+package ir
+
+import (
+	"fmt"
+	"go/constant"
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// A Node is the abstract interface to an IR node.
+type Node interface {
+	// Formatting
+	Format(s fmt.State, verb rune)
+
+	// Source position.
+	Pos() src.XPos
+	SetPos(x src.XPos)
+
+	// For making copies. For Copy and SepCopy.
+	copy() Node
+
+	doChildren(func(Node) bool) bool
+	editChildren(func(Node) Node)
+
+	// Abstract graph structure, for generic traversals.
+	Op() Op
+	Init() Nodes
+
+	// Fields specific to certain Ops only.
+	Type() *types.Type
+	SetType(t *types.Type)
+	Name() *Name
+	Sym() *types.Sym
+	Val() constant.Value
+	SetVal(v constant.Value)
+
+	// Storage for analysis passes.
+	Esc() uint16
+	SetEsc(x uint16)
+	Diag() bool
+	SetDiag(x bool)
+	Typecheck() uint8
+	SetTypecheck(x uint8)
+	NonNil() bool
+	MarkNonNil()
+}
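+
+// As a usage sketch, analysis passes can be written against this interface
+// alone, dispatching on n.Op() and reading shared state such as Type,
+// Esc, and Typecheck without depending on the concrete node type.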
+
+// Line returns n's position as a string. If n has been inlined,
+// it uses the outermost position where n has been inlined.
+func Line(n Node) string {
+	return base.FmtPos(n.Pos())
+}
+
+func IsSynthetic(n Node) bool {
+	name := n.Sym().Name
+	return name[0] == '.' || name[0] == '~'
+}
+
+// IsAutoTmp reports whether n was created by the compiler as a temporary,
+// based on the AutoTemp flag in n's Name.
+func IsAutoTmp(n Node) bool {
+	if n == nil || n.Op() != ONAME {
+		return false
+	}
+	return n.Name().AutoTemp()
+}
+
+// MayBeShared reports whether n may occur in multiple places in the AST.
+// Extra care must be taken when mutating such a node.
+func MayBeShared(n Node) bool {
+	switch n.Op() {
+	case ONAME, OLITERAL, ONIL, OTYPE:
+		return true
+	}
+	return false
+}
+
+type InitNode interface {
+	Node
+	PtrInit() *Nodes
+	SetInit(x Nodes)
+}
+
+func TakeInit(n Node) Nodes {
+	init := n.Init()
+	if len(init) != 0 {
+		n.(InitNode).SetInit(nil)
+	}
+	return init
+}
+
+//go:generate stringer -type=Op -trimprefix=O node.go
+
+type Op uint8
+
+// Node ops.
+const (
+	OXXX Op = iota
+
+	// names
+	ONAME // var or func name
+	// Unnamed arg or return value: f(int, string) (int, error) { etc }
+	// Also used for a qualified package identifier that hasn't been resolved yet.
+	ONONAME
+	OTYPE    // type name
+	OPACK    // import
+	OLITERAL // literal
+	ONIL     // nil
+
+	// expressions
+	OADD          // Left + Right
+	OSUB          // Left - Right
+	OOR           // Left | Right
+	OXOR          // Left ^ Right
+	OADDSTR       // +{List} (string addition, list elements are strings)
+	OADDR         // &Left
+	OANDAND       // Left && Right
+	OAPPEND       // append(List); after walk, Left may contain elem type descriptor
+	OBYTES2STR    // Type(Left) (Type is string, Left is a []byte)
+	OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
+	ORUNES2STR    // Type(Left) (Type is string, Left is a []rune)
+	OSTR2BYTES    // Type(Left) (Type is []byte, Left is a string)
+	OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
+	OSTR2RUNES    // Type(Left) (Type is []rune, Left is a string)
+	// Left = Right or (if Colas=true) Left := Right
+	// If Colas, then Ninit includes a DCL node for Left.
+	OAS
+	// List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist
+	// If Colas, then Ninit includes DCL nodes for List
+	OAS2
+	OAS2DOTTYPE // List = Right (x, ok = I.(int))
+	OAS2FUNC    // List = Right (x, y = f())
+	OAS2MAPR    // List = Right (x, ok = m["foo"])
+	OAS2RECV    // List = Right (x, ok = <-c)
+	OASOP       // Left Etype= Right (x += y)
+	OCALL       // Left(List) (function call, method call or type conversion)
+
+	// OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+	// Prior to walk, they are: Left(List), where List is all regular arguments.
+	// After walk, List is a series of assignments to temporaries,
+	// and Rlist is an updated set of arguments.
+	// Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
+	// TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
+	OCALLFUNC  // Left(List/Rlist) (function call f(args))
+	OCALLMETH  // Left(List/Rlist) (direct method call x.Method(args))
+	OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
+	OCALLPART  // Left.Right (method expression x.Method, not called)
+	OCAP       // cap(Left)
+	OCLOSE     // close(Left)
+	OCLOSURE   // func Type { Func.Closure.Nbody } (func literal)
+	OCOMPLIT   // Right{List} (composite literal, not yet lowered to specific form)
+	OMAPLIT    // Type{List} (composite literal, Type is map)
+	OSTRUCTLIT // Type{List} (composite literal, Type is struct)
+	OARRAYLIT  // Type{List} (composite literal, Type is array)
+	OSLICELIT  // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
+	OPTRLIT    // &Left (left is composite literal)
+	OCONV      // Type(Left) (type conversion)
+	OCONVIFACE // Type(Left) (type conversion, to interface)
+	OCONVNOP   // Type(Left) (type conversion, no effect)
+	OCOPY      // copy(Left, Right)
+	ODCL       // var Left (declares Left of type Left.Type)
+
+	// Used during parsing but don't last.
+	ODCLFUNC  // func f() or func (r) f()
+	ODCLCONST // const pi = 3.14
+	ODCLTYPE  // type Int int or type Int = int
+
+	ODELETE        // delete(List)
+	ODOT           // Left.Sym (Left is of struct type)
+	ODOTPTR        // Left.Sym (Left is of pointer to struct type)
+	ODOTMETH       // Left.Sym (Left is non-interface, Right is method name)
+	ODOTINTER      // Left.Sym (Left is interface, Right is method name)
+	OXDOT          // Left.Sym (before rewrite to one of the preceding)
+	ODOTTYPE       // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor
+	ODOTTYPE2      // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor
+	OEQ            // Left == Right
+	ONE            // Left != Right
+	OLT            // Left < Right
+	OLE            // Left <= Right
+	OGE            // Left >= Right
+	OGT            // Left > Right
+	ODEREF         // *Left
+	OINDEX         // Left[Right] (index of array or slice)
+	OINDEXMAP      // Left[Right] (index of map)
+	OKEY           // Left:Right (key:value in struct/array/map literal)
+	OSTRUCTKEY     // Sym:Left (key:value in struct literal, after type checking)
+	OLEN           // len(Left)
+	OMAKE          // make(List) (before type checking converts to one of the following)
+	OMAKECHAN      // make(Type, Left) (type is chan)
+	OMAKEMAP       // make(Type, Left) (type is map)
+	OMAKESLICE     // make(Type, Left, Right) (type is slice)
+	OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is length and Right is the slice to copy from)
+	// OMAKESLICECOPY is created by the order pass and corresponds to:
+	//  s = make(Type, Left); copy(s, Right)
+	//
+	// Bounded can be set on the node when Left == len(Right) is known at compile time.
+	//
+	// This node is created so the walk pass can optimize this pattern which would
+	// otherwise be hard to detect after the order pass.
+	OMUL         // Left * Right
+	ODIV         // Left / Right
+	OMOD         // Left % Right
+	OLSH         // Left << Right
+	ORSH         // Left >> Right
+	OAND         // Left & Right
+	OANDNOT      // Left &^ Right
+	ONEW         // new(Left); corresponds to calls to new in source code
+	ONOT         // !Left
+	OBITNOT      // ^Left
+	OPLUS        // +Left
+	ONEG         // -Left
+	OOROR        // Left || Right
+	OPANIC       // panic(Left)
+	OPRINT       // print(List)
+	OPRINTN      // println(List)
+	OPAREN       // (Left)
+	OSEND        // Left <- Right
+	OSLICE       // Left[List[0] : List[1]] (Left is untypechecked or slice)
+	OSLICEARR    // Left[List[0] : List[1]] (Left is pointer to array)
+	OSLICESTR    // Left[List[0] : List[1]] (Left is string)
+	OSLICE3      // Left[List[0] : List[1] : List[2]] (Left is untypechecked or slice)
+	OSLICE3ARR   // Left[List[0] : List[1] : List[2]] (Left is pointer to array)
+	OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
+	ORECOVER     // recover()
+	ORECV        // <-Left
+	ORUNESTR     // Type(Left) (Type is string, Left is rune)
+	OSELRECV2    // like OAS2: List = Rlist where len(List)=2, len(Rlist)=1, Rlist[0].Op = ORECV (appears as .Left of OCASE)
+	OIOTA        // iota
+	OREAL        // real(Left)
+	OIMAG        // imag(Left)
+	OCOMPLEX     // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
+	OALIGNOF     // unsafe.Alignof(Left)
+	OOFFSETOF    // unsafe.Offsetof(Left)
+	OSIZEOF      // unsafe.Sizeof(Left)
+	OMETHEXPR    // method expression
+	OSTMTEXPR    // statement expression (Init; Left)
+
+	// statements
+	OBLOCK // { List } (block of code)
+	OBREAK // break [Sym]
+	// OCASE:  case List: Nbody (List==nil means default)
+	//   For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
+	//   for nil), and, if a type-switch variable is specified, Rlist is an
+	//   ONAME for the version of the type-switch variable with the specified
+	//   type.
+	OCASE
+	OCONTINUE // continue [Sym]
+	ODEFER    // defer Left (Left must be call)
+	OFALL     // fallthrough
+	OFOR      // for Ninit; Left; Right { Nbody }
+	// OFORUNTIL is like OFOR, but the test (Left) is applied after the body:
+	// 	Ninit
+	// 	top: { Nbody }   // Execute the body at least once
+	// 	cont: Right
+	// 	if Left {        // And then test the loop condition
+	// 		List     // Before looping to top, execute List
+	// 		goto top
+	// 	}
+	// OFORUNTIL is created by walk. There's no way to write this in Go code.
+	OFORUNTIL
+	OGOTO   // goto Sym
+	OIF     // if Ninit; Left { Nbody } else { Rlist }
+	OLABEL  // Sym:
+	OGO     // go Left (Left must be call)
+	ORANGE  // for List = range Right { Nbody }
+	ORETURN // return List
+	OSELECT // select { List } (List is list of OCASE)
+	OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
+	// OTYPESW:  Left := Right.(type) (appears as .Left of OSWITCH)
+	//   Left is nil if there is no type-switch variable
+	OTYPESW
+
+	// types
+	OTCHAN   // chan int
+	OTMAP    // map[string]int
+	OTSTRUCT // struct{}
+	OTINTER  // interface{}
+	// OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
+	// list of result fields.
+	OTFUNC
+	OTARRAY // [8]int or [...]int
+	OTSLICE // []int
+
+	// misc
+	// intermediate representation of an inlined call.  Uses Init (assignments
+	// for the captured variables, parameters, retvars, & INLMARK op),
+	// Body (body of the inlined function), and ReturnVars (list of
+	// return values)
+	OINLCALL       // intermediate representation of an inlined call.
+	OEFACE         // itable and data words of an empty-interface value.
+	OITAB          // itable word of an interface value.
+	OIDATA         // data word of an interface value in Left
+	OSPTR          // base pointer of a slice or string.
+	OCFUNC         // reference to c function pointer (not go func value)
+	OCHECKNIL      // emit code to ensure pointer/interface not nil
+	OVARDEF        // variable is about to be fully initialized
+	OVARKILL       // variable is dead
+	OVARLIVE       // variable is alive
+	ORESULT        // result of a function call; Xoffset is stack offset
+	OINLMARK       // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+	OLINKSYMOFFSET // offset within a name
+
+	// arch-specific opcodes
+	OTAILCALL // tail call to another function
+	OGETG     // runtime.getg() (read g pointer)
+
+	OEND
+)
+
+// Nodes is a slice of Node.
+type Nodes []Node
+
+// Append appends entries to Nodes.
+func (n *Nodes) Append(a ...Node) {
+	if len(a) == 0 {
+		return
+	}
+	*n = append(*n, a...)
+}
+
+// Prepend prepends entries to Nodes.
+// If a slice is passed in, this will take ownership of it.
+func (n *Nodes) Prepend(a ...Node) {
+	if len(a) == 0 {
+		return
+	}
+	*n = append(a, *n...)
+}
+
+// Take clears n, returning its former contents.
+func (n *Nodes) Take() []Node {
+	ret := *n
+	*n = nil
+	return ret
+}
+
+// Copy returns a copy of the content of the slice.
+func (n Nodes) Copy() Nodes {
+	if n == nil {
+		return nil
+	}
+	c := make(Nodes, len(n))
+	copy(c, n)
+	return c
+}
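+
+// A brief usage sketch (stmt1, stmt2, initStmt are assumed Node values):
+//
+//	var list Nodes
+//	list.Append(stmt1, stmt2)
+//	list.Prepend(initStmt) // initStmt now precedes stmt1
+//	saved := list.Take()   // returns the contents; list becomes nil
+//	_ = saved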
+
+// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is
+// a ready-to-use empty queue.
+type NameQueue struct {
+	ring       []*Name
+	head, tail int
+}
+
+// Empty reports whether q contains no Names.
+func (q *NameQueue) Empty() bool {
+	return q.head == q.tail
+}
+
+// PushRight appends n to the right of the queue.
+func (q *NameQueue) PushRight(n *Name) {
+	if len(q.ring) == 0 {
+		q.ring = make([]*Name, 16)
+	} else if q.head+len(q.ring) == q.tail {
+		// Grow the ring.
+		nring := make([]*Name, len(q.ring)*2)
+		// Copy the old elements.
+		part := q.ring[q.head%len(q.ring):]
+		if q.tail-q.head <= len(part) {
+			part = part[:q.tail-q.head]
+			copy(nring, part)
+		} else {
+			pos := copy(nring, part)
+			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+		}
+		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+	}
+
+	q.ring[q.tail%len(q.ring)] = n
+	q.tail++
+}
+
+// PopLeft pops a Name from the left of the queue. It panics if q is
+// empty.
+func (q *NameQueue) PopLeft() *Name {
+	if q.Empty() {
+		panic("dequeue empty")
+	}
+	n := q.ring[q.head%len(q.ring)]
+	q.head++
+	return n
+}
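+
+// A FIFO sketch (process is an assumed per-Name helper):
+//
+//	var q NameQueue
+//	q.PushRight(n)
+//	for !q.Empty() {
+//		process(q.PopLeft())
+//	}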
+
+// NameSet is a set of Names.
+type NameSet map[*Name]struct{}
+
+// Has reports whether s contains n.
+func (s NameSet) Has(n *Name) bool {
+	_, isPresent := s[n]
+	return isPresent
+}
+
+// Add adds n to s.
+func (s *NameSet) Add(n *Name) {
+	if *s == nil {
+		*s = make(map[*Name]struct{})
+	}
+	(*s)[n] = struct{}{}
+}
+
+// Sorted returns s sorted according to less.
+func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
+	var res []*Name
+	for n := range s {
+		res = append(res, n)
+	}
+	sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
+	return res
+}
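+
+// A brief sketch; the zero NameSet is ready for use via Add (the less
+// function shown is an illustrative assumption):
+//
+//	var seen NameSet
+//	seen.Add(n)
+//	if seen.Has(n) {
+//		sorted := seen.Sorted(func(a, b *Name) bool {
+//			return a.Sym().Name < b.Sym().Name
+//		})
+//		_ = sorted
+//	}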
+
+type PragmaFlag int16
+
+const (
+	// Func pragmas.
+	Nointerface    PragmaFlag = 1 << iota
+	Noescape                  // func parameters don't escape
+	Norace                    // func must not have race detector annotations
+	Nosplit                   // func should not execute on separate stack
+	Noinline                  // func should not be inlined
+	NoCheckPtr                // func should not be instrumented by checkptr
+	CgoUnsafeArgs             // treat a pointer to one arg as a pointer to them all
+	UintptrEscapes            // pointers converted to uintptr escape
+
+	// Runtime-only func pragmas.
+	// See ../../../../runtime/README.md for detailed descriptions.
+	Systemstack        // func must run on system stack
+	Nowritebarrier     // emit compiler error instead of write barrier
+	Nowritebarrierrec  // error on write barrier in this or recursive callees
+	Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
+
+	// Runtime and cgo type pragmas
+	NotInHeap // values of this type must not be heap allocated
+
+	// Go command pragmas
+	GoBuildPragma
+
+	RegisterParams // TODO: remove after register ABI is working
+)
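+
+// Pragma flags are bit masks, so passes test them with &; a sketch
+// (fn is an assumed *Func whose Pragma field is assumed to carry
+// these bits):
+//
+//	if fn.Pragma&Noinline != 0 {
+//		// do not inline fn
+//	}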
+
+func AsNode(n types.Object) Node {
+	if n == nil {
+		return nil
+	}
+	return n.(Node)
+}
+
+var BlankNode Node
+
+func IsConst(n Node, ct constant.Kind) bool {
+	return ConstType(n) == ct
+}
+
+// IsNil reports whether n represents the universal untyped zero value "nil".
+func IsNil(n Node) bool {
+	// Check n.Orig because constant propagation may produce typed nil constants,
+	// which don't exist in the Go spec.
+	return n != nil && Orig(n).Op() == ONIL
+}
+
+func IsBlank(n Node) bool {
+	if n == nil {
+		return false
+	}
+	return n.Sym().IsBlank()
+}
+
+// IsMethod reports whether n is a method.
+// n must be a function or a method.
+func IsMethod(n Node) bool {
+	return n.Type().Recv() != nil
+}
+
+func HasNamedResults(fn *Func) bool {
+	typ := fn.Type()
+	return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
+}
+
+// HasUniquePos reports whether n has a unique position that can be
+// used for reporting error messages.
+//
+// It's primarily used to distinguish references to named objects,
+// whose Pos will point back to their declaration position rather than
+// their usage position.
+func HasUniquePos(n Node) bool {
+	switch n.Op() {
+	case ONAME, OPACK:
+		return false
+	case OLITERAL, ONIL, OTYPE:
+		if n.Sym() != nil {
+			return false
+		}
+	}
+
+	if !n.Pos().IsKnown() {
+		if base.Flag.K != 0 {
+			base.Warn("setlineno: unknown position (line 0)")
+		}
+		return false
+	}
+
+	return true
+}
+
+func SetPos(n Node) src.XPos {
+	lno := base.Pos
+	if n != nil && HasUniquePos(n) {
+		base.Pos = n.Pos()
+	}
+	return lno
+}
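+
+// SetPos is typically used in a save/restore pattern around processing
+// n (a sketch; base.Pos is the global position SetPos saves):
+//
+//	lno := SetPos(n)
+//	defer func() { base.Pos = lno }()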
+
+// The result of InitExpr MUST be assigned back to expr, e.g.
+// 	n.X = InitExpr(init, n.X)
+func InitExpr(init []Node, expr Node) Node {
+	if len(init) == 0 {
+		return expr
+	}
+
+	n, ok := expr.(InitNode)
+	if !ok || MayBeShared(n) {
+		// Introduce OCONVNOP to hold init list.
+		n = NewConvExpr(base.Pos, OCONVNOP, nil, expr)
+		n.SetType(expr.Type())
+		n.SetTypecheck(1)
+	}
+
+	n.PtrInit().Prepend(init...)
+	return n
+}
+
+// OuterValue returns the outer value that a write to n affects,
+// where "outer value" means the containing struct or array.
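+//
+// For example, for a write such as s.f[3] = x, where s is a struct
+// variable with an array-valued field f, OuterValue steps through the
+// OINDEX and ODOT nodes and returns the node for s (the expression is
+// illustrative, not from this CL).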
+func OuterValue(n Node) Node {
+	for {
+		switch nn := n; nn.Op() {
+		case OXDOT:
+			base.Fatalf("OXDOT in walk")
+		case ODOT:
+			nn := nn.(*SelectorExpr)
+			n = nn.X
+			continue
+		case OPAREN:
+			nn := nn.(*ParenExpr)
+			n = nn.X
+			continue
+		case OCONVNOP:
+			nn := nn.(*ConvExpr)
+			n = nn.X
+			continue
+		case OINDEX:
+			nn := nn.(*IndexExpr)
+			if nn.X.Type() == nil {
+				base.Fatalf("OuterValue needs type for %v", nn.X)
+			}
+			if nn.X.Type().IsArray() {
+				n = nn.X
+				continue
+			}
+		}
+
+		return n
+	}
+}
+
+const (
+	EscUnknown = iota
+	EscNone    // Does not escape to heap, result, or parameters.
+	EscHeap    // Reachable from the heap.
+	EscNever   // By construction will not escape.
+)
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
new file mode 100644
index 0000000..fe436867
--- /dev/null
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -0,0 +1,1425 @@
+// Code generated by mknode.go. DO NOT EDIT.
+
+package ir
+
+import "fmt"
+
+func (n *AddStringExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AddStringExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.List = copyNodes(c.List)
+	return &c
+}
+func (n *AddStringExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if doNodes(n.List, do) {
+		return true
+	}
+	if n.Prealloc != nil && do(n.Prealloc) {
+		return true
+	}
+	return false
+}
+func (n *AddStringExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	editNodes(n.List, edit)
+	if n.Prealloc != nil {
+		n.Prealloc = edit(n.Prealloc).(*Name)
+	}
+}
+
+func (n *AddrExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AddrExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *AddrExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Prealloc != nil && do(n.Prealloc) {
+		return true
+	}
+	return false
+}
+func (n *AddrExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Prealloc != nil {
+		n.Prealloc = edit(n.Prealloc).(*Name)
+	}
+}
+
+func (n *ArrayType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ArrayType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *ArrayType) doChildren(do func(Node) bool) bool {
+	if n.Len != nil && do(n.Len) {
+		return true
+	}
+	if n.Elem != nil && do(n.Elem) {
+		return true
+	}
+	return false
+}
+func (n *ArrayType) editChildren(edit func(Node) Node) {
+	if n.Len != nil {
+		n.Len = edit(n.Len).(Node)
+	}
+	if n.Elem != nil {
+		n.Elem = edit(n.Elem).(Ntype)
+	}
+}
+
+func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignListStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Lhs = copyNodes(c.Lhs)
+	c.Rhs = copyNodes(c.Rhs)
+	return &c
+}
+func (n *AssignListStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if doNodes(n.Lhs, do) {
+		return true
+	}
+	if doNodes(n.Rhs, do) {
+		return true
+	}
+	return false
+}
+func (n *AssignListStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	editNodes(n.Lhs, edit)
+	editNodes(n.Rhs, edit)
+}
+
+func (n *AssignOpStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignOpStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *AssignOpStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Y != nil && do(n.Y) {
+		return true
+	}
+	return false
+}
+func (n *AssignOpStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Y != nil {
+		n.Y = edit(n.Y).(Node)
+	}
+}
+
+func (n *AssignStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *AssignStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Y != nil && do(n.Y) {
+		return true
+	}
+	return false
+}
+func (n *AssignStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Y != nil {
+		n.Y = edit(n.Y).(Node)
+	}
+}
+
+func (n *BasicLit) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BasicLit) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *BasicLit) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *BasicLit) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *BinaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BinaryExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *BinaryExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Y != nil && do(n.Y) {
+		return true
+	}
+	return false
+}
+func (n *BinaryExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Y != nil {
+		n.Y = edit(n.Y).(Node)
+	}
+}
+
+func (n *BlockStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BlockStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.List = copyNodes(c.List)
+	return &c
+}
+func (n *BlockStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if doNodes(n.List, do) {
+		return true
+	}
+	return false
+}
+func (n *BlockStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	editNodes(n.List, edit)
+}
+
+func (n *BranchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BranchStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *BranchStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *BranchStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *CallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CallExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Args = copyNodes(c.Args)
+	c.KeepAlive = copyNames(c.KeepAlive)
+	return &c
+}
+func (n *CallExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if doNodes(n.Args, do) {
+		return true
+	}
+	if doNames(n.KeepAlive, do) {
+		return true
+	}
+	return false
+}
+func (n *CallExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	editNodes(n.Args, edit)
+	editNames(n.KeepAlive, edit)
+}
+
+func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CaseClause) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.List = copyNodes(c.List)
+	c.Body = copyNodes(c.Body)
+	return &c
+}
+func (n *CaseClause) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Var != nil && do(n.Var) {
+		return true
+	}
+	if doNodes(n.List, do) {
+		return true
+	}
+	if doNodes(n.Body, do) {
+		return true
+	}
+	return false
+}
+func (n *CaseClause) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Var != nil {
+		n.Var = edit(n.Var).(*Name)
+	}
+	editNodes(n.List, edit)
+	editNodes(n.Body, edit)
+}
+
+func (n *ChanType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ChanType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *ChanType) doChildren(do func(Node) bool) bool {
+	if n.Elem != nil && do(n.Elem) {
+		return true
+	}
+	return false
+}
+func (n *ChanType) editChildren(edit func(Node) Node) {
+	if n.Elem != nil {
+		n.Elem = edit(n.Elem).(Ntype)
+	}
+}
+
+func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ClosureExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *ClosureExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Prealloc != nil && do(n.Prealloc) {
+		return true
+	}
+	return false
+}
+func (n *ClosureExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Prealloc != nil {
+		n.Prealloc = edit(n.Prealloc).(*Name)
+	}
+}
+
+func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CommClause) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Body = copyNodes(c.Body)
+	return &c
+}
+func (n *CommClause) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Comm != nil && do(n.Comm) {
+		return true
+	}
+	if doNodes(n.Body, do) {
+		return true
+	}
+	return false
+}
+func (n *CommClause) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Comm != nil {
+		n.Comm = edit(n.Comm).(Node)
+	}
+	editNodes(n.Body, edit)
+}
+
+func (n *CompLitExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CompLitExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.List = copyNodes(c.List)
+	return &c
+}
+func (n *CompLitExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Ntype != nil && do(n.Ntype) {
+		return true
+	}
+	if doNodes(n.List, do) {
+		return true
+	}
+	if n.Prealloc != nil && do(n.Prealloc) {
+		return true
+	}
+	return false
+}
+func (n *CompLitExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Ntype != nil {
+		n.Ntype = edit(n.Ntype).(Ntype)
+	}
+	editNodes(n.List, edit)
+	if n.Prealloc != nil {
+		n.Prealloc = edit(n.Prealloc).(*Name)
+	}
+}
+
+func (n *ConstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ConstExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *ConstExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *ConstExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ConvExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *ConvExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	return false
+}
+func (n *ConvExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+}
+
+func (n *Decl) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *Decl) copy() Node {
+	c := *n
+	return &c
+}
+func (n *Decl) doChildren(do func(Node) bool) bool {
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	return false
+}
+func (n *Decl) editChildren(edit func(Node) Node) {
+	if n.X != nil {
+		n.X = edit(n.X).(*Name)
+	}
+}
+
+func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ForStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Late = copyNodes(c.Late)
+	c.Body = copyNodes(c.Body)
+	return &c
+}
+func (n *ForStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Cond != nil && do(n.Cond) {
+		return true
+	}
+	if doNodes(n.Late, do) {
+		return true
+	}
+	if n.Post != nil && do(n.Post) {
+		return true
+	}
+	if doNodes(n.Body, do) {
+		return true
+	}
+	return false
+}
+func (n *ForStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Cond != nil {
+		n.Cond = edit(n.Cond).(Node)
+	}
+	editNodes(n.Late, edit)
+	if n.Post != nil {
+		n.Post = edit(n.Post).(Node)
+	}
+	editNodes(n.Body, edit)
+}
+
+func (n *Func) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+
+func (n *FuncType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *FuncType) copy() Node {
+	c := *n
+	c.Recv = copyField(c.Recv)
+	c.Params = copyFields(c.Params)
+	c.Results = copyFields(c.Results)
+	return &c
+}
+func (n *FuncType) doChildren(do func(Node) bool) bool {
+	if doField(n.Recv, do) {
+		return true
+	}
+	if doFields(n.Params, do) {
+		return true
+	}
+	if doFields(n.Results, do) {
+		return true
+	}
+	return false
+}
+func (n *FuncType) editChildren(edit func(Node) Node) {
+	editField(n.Recv, edit)
+	editFields(n.Params, edit)
+	editFields(n.Results, edit)
+}
+
+func (n *GoDeferStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *GoDeferStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *GoDeferStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Call != nil && do(n.Call) {
+		return true
+	}
+	return false
+}
+func (n *GoDeferStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Call != nil {
+		n.Call = edit(n.Call).(Node)
+	}
+}
+
+func (n *Ident) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *Ident) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *Ident) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *Ident) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *IfStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *IfStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Body = copyNodes(c.Body)
+	c.Else = copyNodes(c.Else)
+	return &c
+}
+func (n *IfStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Cond != nil && do(n.Cond) {
+		return true
+	}
+	if doNodes(n.Body, do) {
+		return true
+	}
+	if doNodes(n.Else, do) {
+		return true
+	}
+	return false
+}
+func (n *IfStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Cond != nil {
+		n.Cond = edit(n.Cond).(Node)
+	}
+	editNodes(n.Body, edit)
+	editNodes(n.Else, edit)
+}
+
+func (n *IndexExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *IndexExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *IndexExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Index != nil && do(n.Index) {
+		return true
+	}
+	return false
+}
+func (n *IndexExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Index != nil {
+		n.Index = edit(n.Index).(Node)
+	}
+}
+
+func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InlineMarkStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *InlineMarkStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *InlineMarkStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InlinedCallExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Body = copyNodes(c.Body)
+	c.ReturnVars = copyNodes(c.ReturnVars)
+	return &c
+}
+func (n *InlinedCallExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if doNodes(n.Body, do) {
+		return true
+	}
+	if doNodes(n.ReturnVars, do) {
+		return true
+	}
+	return false
+}
+func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	editNodes(n.Body, edit)
+	editNodes(n.ReturnVars, edit)
+}
+
+func (n *InterfaceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InterfaceType) copy() Node {
+	c := *n
+	c.Methods = copyFields(c.Methods)
+	return &c
+}
+func (n *InterfaceType) doChildren(do func(Node) bool) bool {
+	if doFields(n.Methods, do) {
+		return true
+	}
+	return false
+}
+func (n *InterfaceType) editChildren(edit func(Node) Node) {
+	editFields(n.Methods, edit)
+}
+
+func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *KeyExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *KeyExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Key != nil && do(n.Key) {
+		return true
+	}
+	if n.Value != nil && do(n.Value) {
+		return true
+	}
+	return false
+}
+func (n *KeyExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Key != nil {
+		n.Key = edit(n.Key).(Node)
+	}
+	if n.Value != nil {
+		n.Value = edit(n.Value).(Node)
+	}
+}
+
+func (n *LabelStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LabelStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *LabelStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *LabelStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LinksymOffsetExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LogicalExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *LogicalExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Y != nil && do(n.Y) {
+		return true
+	}
+	return false
+}
+func (n *LogicalExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Y != nil {
+		n.Y = edit(n.Y).(Node)
+	}
+}
+
+func (n *MakeExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *MakeExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *MakeExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Len != nil && do(n.Len) {
+		return true
+	}
+	if n.Cap != nil && do(n.Cap) {
+		return true
+	}
+	return false
+}
+func (n *MakeExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Len != nil {
+		n.Len = edit(n.Len).(Node)
+	}
+	if n.Cap != nil {
+		n.Cap = edit(n.Cap).(Node)
+	}
+}
+
+func (n *MapType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *MapType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *MapType) doChildren(do func(Node) bool) bool {
+	if n.Key != nil && do(n.Key) {
+		return true
+	}
+	if n.Elem != nil && do(n.Elem) {
+		return true
+	}
+	return false
+}
+func (n *MapType) editChildren(edit func(Node) Node) {
+	if n.Key != nil {
+		n.Key = edit(n.Key).(Ntype)
+	}
+	if n.Elem != nil {
+		n.Elem = edit(n.Elem).(Ntype)
+	}
+}
+
+func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+
+func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *NilExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *NilExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *NilExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ParenExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *ParenExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	return false
+}
+func (n *ParenExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+}
+
+func (n *PkgName) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *PkgName) copy() Node {
+	c := *n
+	return &c
+}
+func (n *PkgName) doChildren(do func(Node) bool) bool {
+	return false
+}
+func (n *PkgName) editChildren(edit func(Node) Node) {
+}
+
+func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RangeStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Body = copyNodes(c.Body)
+	return &c
+}
+func (n *RangeStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Key != nil && do(n.Key) {
+		return true
+	}
+	if n.Value != nil && do(n.Value) {
+		return true
+	}
+	if doNodes(n.Body, do) {
+		return true
+	}
+	if n.Prealloc != nil && do(n.Prealloc) {
+		return true
+	}
+	return false
+}
+func (n *RangeStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Key != nil {
+		n.Key = edit(n.Key).(Node)
+	}
+	if n.Value != nil {
+		n.Value = edit(n.Value).(Node)
+	}
+	editNodes(n.Body, edit)
+	if n.Prealloc != nil {
+		n.Prealloc = edit(n.Prealloc).(*Name)
+	}
+}
+
+func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ResultExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *ResultExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	return false
+}
+func (n *ResultExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+}
+
+func (n *ReturnStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ReturnStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Results = copyNodes(c.Results)
+	return &c
+}
+func (n *ReturnStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if doNodes(n.Results, do) {
+		return true
+	}
+	return false
+}
+func (n *ReturnStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	editNodes(n.Results, edit)
+}
+
+func (n *SelectStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SelectStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Cases = copyCommClauses(c.Cases)
+	c.Compiled = copyNodes(c.Compiled)
+	return &c
+}
+func (n *SelectStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if doCommClauses(n.Cases, do) {
+		return true
+	}
+	if doNodes(n.Compiled, do) {
+		return true
+	}
+	return false
+}
+func (n *SelectStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	editCommClauses(n.Cases, edit)
+	editNodes(n.Compiled, edit)
+}
+
+func (n *SelectorExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SelectorExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *SelectorExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Prealloc != nil && do(n.Prealloc) {
+		return true
+	}
+	return false
+}
+func (n *SelectorExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Prealloc != nil {
+		n.Prealloc = edit(n.Prealloc).(*Name)
+	}
+}
+
+func (n *SendStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SendStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *SendStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Chan != nil && do(n.Chan) {
+		return true
+	}
+	if n.Value != nil && do(n.Value) {
+		return true
+	}
+	return false
+}
+func (n *SendStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Chan != nil {
+		n.Chan = edit(n.Chan).(Node)
+	}
+	if n.Value != nil {
+		n.Value = edit(n.Value).(Node)
+	}
+}
+
+func (n *SliceExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *SliceExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Low != nil && do(n.Low) {
+		return true
+	}
+	if n.High != nil && do(n.High) {
+		return true
+	}
+	if n.Max != nil && do(n.Max) {
+		return true
+	}
+	return false
+}
+func (n *SliceExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Low != nil {
+		n.Low = edit(n.Low).(Node)
+	}
+	if n.High != nil {
+		n.High = edit(n.High).(Node)
+	}
+	if n.Max != nil {
+		n.Max = edit(n.Max).(Node)
+	}
+}
+
+func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceHeaderExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *SliceHeaderExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Ptr != nil && do(n.Ptr) {
+		return true
+	}
+	if n.Len != nil && do(n.Len) {
+		return true
+	}
+	if n.Cap != nil && do(n.Cap) {
+		return true
+	}
+	return false
+}
+func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Ptr != nil {
+		n.Ptr = edit(n.Ptr).(Node)
+	}
+	if n.Len != nil {
+		n.Len = edit(n.Len).(Node)
+	}
+	if n.Cap != nil {
+		n.Cap = edit(n.Cap).(Node)
+	}
+}
+
+func (n *SliceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceType) copy() Node {
+	c := *n
+	return &c
+}
+func (n *SliceType) doChildren(do func(Node) bool) bool {
+	if n.Elem != nil && do(n.Elem) {
+		return true
+	}
+	return false
+}
+func (n *SliceType) editChildren(edit func(Node) Node) {
+	if n.Elem != nil {
+		n.Elem = edit(n.Elem).(Ntype)
+	}
+}
+
+func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StarExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *StarExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	return false
+}
+func (n *StarExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+}
+
+func (n *StructKeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StructKeyExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *StructKeyExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Value != nil && do(n.Value) {
+		return true
+	}
+	return false
+}
+func (n *StructKeyExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Value != nil {
+		n.Value = edit(n.Value).(Node)
+	}
+}
+
+func (n *StructType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StructType) copy() Node {
+	c := *n
+	c.Fields = copyFields(c.Fields)
+	return &c
+}
+func (n *StructType) doChildren(do func(Node) bool) bool {
+	if doFields(n.Fields, do) {
+		return true
+	}
+	return false
+}
+func (n *StructType) editChildren(edit func(Node) Node) {
+	editFields(n.Fields, edit)
+}
+
+func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SwitchStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	c.Cases = copyCaseClauses(c.Cases)
+	c.Compiled = copyNodes(c.Compiled)
+	return &c
+}
+func (n *SwitchStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Tag != nil && do(n.Tag) {
+		return true
+	}
+	if doCaseClauses(n.Cases, do) {
+		return true
+	}
+	if doNodes(n.Compiled, do) {
+		return true
+	}
+	return false
+}
+func (n *SwitchStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Tag != nil {
+		n.Tag = edit(n.Tag).(Node)
+	}
+	editCaseClauses(n.Cases, edit)
+	editNodes(n.Compiled, edit)
+}
+
+func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TailCallStmt) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *TailCallStmt) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.Target != nil && do(n.Target) {
+		return true
+	}
+	return false
+}
+func (n *TailCallStmt) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.Target != nil {
+		n.Target = edit(n.Target).(*Name)
+	}
+}
+
+func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TypeAssertExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *TypeAssertExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	if n.Ntype != nil && do(n.Ntype) {
+		return true
+	}
+	return false
+}
+func (n *TypeAssertExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+	if n.Ntype != nil {
+		n.Ntype = edit(n.Ntype).(Ntype)
+	}
+}
+
+func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TypeSwitchGuard) copy() Node {
+	c := *n
+	return &c
+}
+func (n *TypeSwitchGuard) doChildren(do func(Node) bool) bool {
+	if n.Tag != nil && do(n.Tag) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	return false
+}
+func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
+	if n.Tag != nil {
+		n.Tag = edit(n.Tag).(*Ident)
+	}
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+}
+
+func (n *UnaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *UnaryExpr) copy() Node {
+	c := *n
+	c.init = copyNodes(c.init)
+	return &c
+}
+func (n *UnaryExpr) doChildren(do func(Node) bool) bool {
+	if doNodes(n.init, do) {
+		return true
+	}
+	if n.X != nil && do(n.X) {
+		return true
+	}
+	return false
+}
+func (n *UnaryExpr) editChildren(edit func(Node) Node) {
+	editNodes(n.init, edit)
+	if n.X != nil {
+		n.X = edit(n.X).(Node)
+	}
+}
+
+func (n *typeNode) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *typeNode) copy() Node {
+	c := *n
+	return &c
+}
+func (n *typeNode) doChildren(do func(Node) bool) bool {
+	return false
+}
+func (n *typeNode) editChildren(edit func(Node) Node) {
+}
+
+func copyCaseClauses(list []*CaseClause) []*CaseClause {
+	if list == nil {
+		return nil
+	}
+	c := make([]*CaseClause, len(list))
+	copy(c, list)
+	return c
+}
+func doCaseClauses(list []*CaseClause, do func(Node) bool) bool {
+	for _, x := range list {
+		if x != nil && do(x) {
+			return true
+		}
+	}
+	return false
+}
+func editCaseClauses(list []*CaseClause, edit func(Node) Node) {
+	for i, x := range list {
+		if x != nil {
+			list[i] = edit(x).(*CaseClause)
+		}
+	}
+}
+
+func copyCommClauses(list []*CommClause) []*CommClause {
+	if list == nil {
+		return nil
+	}
+	c := make([]*CommClause, len(list))
+	copy(c, list)
+	return c
+}
+func doCommClauses(list []*CommClause, do func(Node) bool) bool {
+	for _, x := range list {
+		if x != nil && do(x) {
+			return true
+		}
+	}
+	return false
+}
+func editCommClauses(list []*CommClause, edit func(Node) Node) {
+	for i, x := range list {
+		if x != nil {
+			list[i] = edit(x).(*CommClause)
+		}
+	}
+}
+
+func copyNames(list []*Name) []*Name {
+	if list == nil {
+		return nil
+	}
+	c := make([]*Name, len(list))
+	copy(c, list)
+	return c
+}
+func doNames(list []*Name, do func(Node) bool) bool {
+	for _, x := range list {
+		if x != nil && do(x) {
+			return true
+		}
+	}
+	return false
+}
+func editNames(list []*Name, edit func(Node) Node) {
+	for i, x := range list {
+		if x != nil {
+			list[i] = edit(x).(*Name)
+		}
+	}
+}
+
+func copyNodes(list []Node) []Node {
+	if list == nil {
+		return nil
+	}
+	c := make([]Node, len(list))
+	copy(c, list)
+	return c
+}
+func doNodes(list []Node, do func(Node) bool) bool {
+	for _, x := range list {
+		if x != nil && do(x) {
+			return true
+		}
+	}
+	return false
+}
+func editNodes(list []Node, edit func(Node) Node) {
+	for i, x := range list {
+		if x != nil {
+			list[i] = edit(x).(Node)
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
new file mode 100644
index 0000000..15c60ba
--- /dev/null
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -0,0 +1,174 @@
+// Code generated by "stringer -type=Op -trimprefix=O node.go"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[OXXX-0]
+	_ = x[ONAME-1]
+	_ = x[ONONAME-2]
+	_ = x[OTYPE-3]
+	_ = x[OPACK-4]
+	_ = x[OLITERAL-5]
+	_ = x[ONIL-6]
+	_ = x[OADD-7]
+	_ = x[OSUB-8]
+	_ = x[OOR-9]
+	_ = x[OXOR-10]
+	_ = x[OADDSTR-11]
+	_ = x[OADDR-12]
+	_ = x[OANDAND-13]
+	_ = x[OAPPEND-14]
+	_ = x[OBYTES2STR-15]
+	_ = x[OBYTES2STRTMP-16]
+	_ = x[ORUNES2STR-17]
+	_ = x[OSTR2BYTES-18]
+	_ = x[OSTR2BYTESTMP-19]
+	_ = x[OSTR2RUNES-20]
+	_ = x[OAS-21]
+	_ = x[OAS2-22]
+	_ = x[OAS2DOTTYPE-23]
+	_ = x[OAS2FUNC-24]
+	_ = x[OAS2MAPR-25]
+	_ = x[OAS2RECV-26]
+	_ = x[OASOP-27]
+	_ = x[OCALL-28]
+	_ = x[OCALLFUNC-29]
+	_ = x[OCALLMETH-30]
+	_ = x[OCALLINTER-31]
+	_ = x[OCALLPART-32]
+	_ = x[OCAP-33]
+	_ = x[OCLOSE-34]
+	_ = x[OCLOSURE-35]
+	_ = x[OCOMPLIT-36]
+	_ = x[OMAPLIT-37]
+	_ = x[OSTRUCTLIT-38]
+	_ = x[OARRAYLIT-39]
+	_ = x[OSLICELIT-40]
+	_ = x[OPTRLIT-41]
+	_ = x[OCONV-42]
+	_ = x[OCONVIFACE-43]
+	_ = x[OCONVNOP-44]
+	_ = x[OCOPY-45]
+	_ = x[ODCL-46]
+	_ = x[ODCLFUNC-47]
+	_ = x[ODCLCONST-48]
+	_ = x[ODCLTYPE-49]
+	_ = x[ODELETE-50]
+	_ = x[ODOT-51]
+	_ = x[ODOTPTR-52]
+	_ = x[ODOTMETH-53]
+	_ = x[ODOTINTER-54]
+	_ = x[OXDOT-55]
+	_ = x[ODOTTYPE-56]
+	_ = x[ODOTTYPE2-57]
+	_ = x[OEQ-58]
+	_ = x[ONE-59]
+	_ = x[OLT-60]
+	_ = x[OLE-61]
+	_ = x[OGE-62]
+	_ = x[OGT-63]
+	_ = x[ODEREF-64]
+	_ = x[OINDEX-65]
+	_ = x[OINDEXMAP-66]
+	_ = x[OKEY-67]
+	_ = x[OSTRUCTKEY-68]
+	_ = x[OLEN-69]
+	_ = x[OMAKE-70]
+	_ = x[OMAKECHAN-71]
+	_ = x[OMAKEMAP-72]
+	_ = x[OMAKESLICE-73]
+	_ = x[OMAKESLICECOPY-74]
+	_ = x[OMUL-75]
+	_ = x[ODIV-76]
+	_ = x[OMOD-77]
+	_ = x[OLSH-78]
+	_ = x[ORSH-79]
+	_ = x[OAND-80]
+	_ = x[OANDNOT-81]
+	_ = x[ONEW-82]
+	_ = x[ONOT-83]
+	_ = x[OBITNOT-84]
+	_ = x[OPLUS-85]
+	_ = x[ONEG-86]
+	_ = x[OOROR-87]
+	_ = x[OPANIC-88]
+	_ = x[OPRINT-89]
+	_ = x[OPRINTN-90]
+	_ = x[OPAREN-91]
+	_ = x[OSEND-92]
+	_ = x[OSLICE-93]
+	_ = x[OSLICEARR-94]
+	_ = x[OSLICESTR-95]
+	_ = x[OSLICE3-96]
+	_ = x[OSLICE3ARR-97]
+	_ = x[OSLICEHEADER-98]
+	_ = x[ORECOVER-99]
+	_ = x[ORECV-100]
+	_ = x[ORUNESTR-101]
+	_ = x[OSELRECV2-102]
+	_ = x[OIOTA-103]
+	_ = x[OREAL-104]
+	_ = x[OIMAG-105]
+	_ = x[OCOMPLEX-106]
+	_ = x[OALIGNOF-107]
+	_ = x[OOFFSETOF-108]
+	_ = x[OSIZEOF-109]
+	_ = x[OMETHEXPR-110]
+	_ = x[OSTMTEXPR-111]
+	_ = x[OBLOCK-112]
+	_ = x[OBREAK-113]
+	_ = x[OCASE-114]
+	_ = x[OCONTINUE-115]
+	_ = x[ODEFER-116]
+	_ = x[OFALL-117]
+	_ = x[OFOR-118]
+	_ = x[OFORUNTIL-119]
+	_ = x[OGOTO-120]
+	_ = x[OIF-121]
+	_ = x[OLABEL-122]
+	_ = x[OGO-123]
+	_ = x[ORANGE-124]
+	_ = x[ORETURN-125]
+	_ = x[OSELECT-126]
+	_ = x[OSWITCH-127]
+	_ = x[OTYPESW-128]
+	_ = x[OTCHAN-129]
+	_ = x[OTMAP-130]
+	_ = x[OTSTRUCT-131]
+	_ = x[OTINTER-132]
+	_ = x[OTFUNC-133]
+	_ = x[OTARRAY-134]
+	_ = x[OTSLICE-135]
+	_ = x[OINLCALL-136]
+	_ = x[OEFACE-137]
+	_ = x[OITAB-138]
+	_ = x[OIDATA-139]
+	_ = x[OSPTR-140]
+	_ = x[OCFUNC-141]
+	_ = x[OCHECKNIL-142]
+	_ = x[OVARDEF-143]
+	_ = x[OVARKILL-144]
+	_ = x[OVARLIVE-145]
+	_ = x[ORESULT-146]
+	_ = x[OINLMARK-147]
+	_ = x[OLINKSYMOFFSET-148]
+	_ = x[OTAILCALL-149]
+	_ = x[OGETG-150]
+	_ = x[OEND-151]
+}
+
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 860, 864, 867}
+
+func (i Op) String() string {
+	if i >= Op(len(_Op_index)-1) {
+		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go
new file mode 100644
index 0000000..3896e2b
--- /dev/null
+++ b/src/cmd/compile/internal/ir/package.go
@@ -0,0 +1,35 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import "cmd/compile/internal/types"
+
+// A Package holds information about the package being compiled.
+type Package struct {
+	// Imports, listed in source order.
+	// See golang.org/issue/31636.
+	Imports []*types.Pkg
+
+	// Init functions, listed in source order.
+	Inits []*Func
+
+	// Top-level declarations.
+	Decls []Node
+
+	// Extern (package global) declarations.
+	Externs []Node
+
+	// Assembly function declarations.
+	Asms []*Name
+
+	// Cgo directives.
+	CgoPragmas [][]string
+
+	// Variables with //go:embed lines.
+	Embeds []*Name
+
+	// Exported (or re-exported) symbols.
+	Exports []*Name
+}
diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/ir/scc.go
similarity index 74%
rename from src/cmd/compile/internal/gc/scc.go
rename to src/cmd/compile/internal/ir/scc.go
index 5c7935a..83c6074 100644
--- a/src/cmd/compile/internal/gc/scc.go
+++ b/src/cmd/compile/internal/ir/scc.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package ir
 
 // Strongly connected components.
 //
@@ -30,13 +30,13 @@
 // when analyzing a set of mutually recursive functions.
 
 type bottomUpVisitor struct {
-	analyze  func([]*Node, bool)
+	analyze  func([]*Func, bool)
 	visitgen uint32
-	nodeID   map[*Node]uint32
-	stack    []*Node
+	nodeID   map[*Func]uint32
+	stack    []*Func
 }
 
-// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
+// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
 // It calls analyze with successive groups of functions, working from
 // the bottom of the call graph upward. Each time analyze is called with
 // a list of functions, every function on that list only calls other functions
@@ -49,18 +49,21 @@
 // If recursive is false, the list consists of only a single function and its closures.
 // If recursive is true, the list may still contain only a single function,
 // if that function is itself recursive.
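+//
+// A sketch of a caller (decls and the analysis body are assumptions):
+//
+//	VisitFuncsBottomUp(decls, func(list []*Func, recursive bool) {
+//		for _, fn := range list {
+//			analyzeFunc(fn, recursive) // assumed per-function analysis
+//		}
+//	})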
-func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
+func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
 	var v bottomUpVisitor
 	v.analyze = analyze
-	v.nodeID = make(map[*Node]uint32)
+	v.nodeID = make(map[*Func]uint32)
 	for _, n := range list {
-		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
-			v.visit(n)
+		if n.Op() == ODCLFUNC {
+			n := n.(*Func)
+			if !n.IsHiddenClosure() {
+				v.visit(n)
+			}
 		}
 	}
 }
 
-func (v *bottomUpVisitor) visit(n *Node) uint32 {
+func (v *bottomUpVisitor) visit(n *Func) uint32 {
 	if id := v.nodeID[n]; id > 0 {
 		// already visited
 		return id
@@ -73,42 +76,31 @@
 	min := v.visitgen
 	v.stack = append(v.stack, n)
 
-	inspectList(n.Nbody, func(n *Node) bool {
-		switch n.Op {
-		case ONAME:
-			if n.Class() == PFUNC {
-				if n.isMethodExpression() {
-					n = asNode(n.Type.Nname())
-				}
-				if n != nil && n.Name.Defn != nil {
-					if m := v.visit(n.Name.Defn); m < min {
-						min = m
-					}
-				}
-			}
-		case ODOTMETH:
-			fn := asNode(n.Type.Nname())
-			if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
-				if m := v.visit(fn.Name.Defn); m < min {
-					min = m
-				}
-			}
-		case OCALLPART:
-			fn := asNode(callpartMethod(n).Type.Nname())
-			if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
-				if m := v.visit(fn.Name.Defn); m < min {
-					min = m
-				}
-			}
-		case OCLOSURE:
-			if m := v.visit(n.Func.Closure); m < min {
+	do := func(defn Node) {
+		if defn != nil {
+			if m := v.visit(defn.(*Func)); m < min {
 				min = m
 			}
 		}
-		return true
+	}
+
+	Visit(n, func(n Node) {
+		switch n.Op() {
+		case ONAME:
+			if n := n.(*Name); n.Class == PFUNC {
+				do(n.Defn)
+			}
+		case ODOTMETH, OCALLPART, OMETHEXPR:
+			if fn := MethodExprName(n); fn != nil {
+				do(fn.Defn)
+			}
+		case OCLOSURE:
+			n := n.(*ClosureExpr)
+			do(n.Func)
+		}
 	})
 
-	if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
+	if (min == id || min == id+1) && !n.IsHiddenClosure() {
 		// This node is the root of a strongly connected component.
 
 		// The original min passed to visitcodelist was v.nodeID[n]+1.
diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
similarity index 88%
rename from src/cmd/compile/internal/gc/sizeof_test.go
rename to src/cmd/compile/internal/ir/sizeof_test.go
index ce4a216..d8c1518 100644
--- a/src/cmd/compile/internal/gc/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package ir
 
 import (
 	"reflect"
@@ -20,10 +20,8 @@
 		_32bit uintptr     // size on 32bit platforms
 		_64bit uintptr     // size on 64bit platforms
 	}{
-		{Func{}, 124, 224},
-		{Name{}, 32, 56},
-		{Param{}, 24, 48},
-		{Node{}, 76, 128},
+		{Func{}, 188, 328},
+		{Name{}, 112, 200},
 	}
 
 	for _, tt := range tests {
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
new file mode 100644
index 0000000..c304867
--- /dev/null
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -0,0 +1,414 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
+type Decl struct {
+	miniNode
+	X *Name // the thing being declared
+}
+
+func NewDecl(pos src.XPos, op Op, x *Name) *Decl {
+	n := &Decl{X: x}
+	n.pos = pos
+	switch op {
+	default:
+		panic("invalid Decl op " + op.String())
+	case ODCL, ODCLCONST, ODCLTYPE:
+		n.op = op
+	}
+	return n
+}
+
+func (*Decl) isStmt() {}
+
+// A Stmt is a Node that can appear as a statement.
+// This includes statement-like expressions such as f().
+//
+// (It's possible it should include <-c, but that would require
+// splitting ORECV out of UnaryExpr, which hasn't yet been
+// necessary. Maybe instead we will introduce ExprStmt at
+// some point.)
+type Stmt interface {
+	Node
+	isStmt()
+}
+
+// A miniStmt is a miniNode with extra fields common to statements.
+type miniStmt struct {
+	miniNode
+	init Nodes
+}
+
+func (*miniStmt) isStmt() {}
+
+func (n *miniStmt) Init() Nodes     { return n.init }
+func (n *miniStmt) SetInit(x Nodes) { n.init = x }
+func (n *miniStmt) PtrInit() *Nodes { return &n.init }
+
+// An AssignListStmt is an assignment statement with
+// more than one item on at least one side: Lhs = Rhs.
+// If Def is true, the assignment is a :=.
+type AssignListStmt struct {
+	miniStmt
+	Lhs Nodes
+	Def bool
+	Rhs Nodes
+}
+
+func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
+	n := &AssignListStmt{}
+	n.pos = pos
+	n.SetOp(op)
+	n.Lhs = lhs
+	n.Rhs = rhs
+	return n
+}
+
+func (n *AssignListStmt) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
+		n.op = op
+	}
+}
+
+// An AssignStmt is a simple assignment statement: X = Y.
+// If Def is true, the assignment is a :=.
+type AssignStmt struct {
+	miniStmt
+	X   Node
+	Def bool
+	Y   Node
+}
+
+func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
+	n := &AssignStmt{X: x, Y: y}
+	n.pos = pos
+	n.op = OAS
+	return n
+}
+
+func (n *AssignStmt) SetOp(op Op) {
+	switch op {
+	default:
+		panic(n.no("SetOp " + op.String()))
+	case OAS:
+		n.op = op
+	}
+}
+
+// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y.
+type AssignOpStmt struct {
+	miniStmt
+	X      Node
+	AsOp   Op // OADD etc
+	Y      Node
+	IncDec bool // actually ++ or --
+}
+
+func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
+	n := &AssignOpStmt{AsOp: asOp, X: x, Y: y}
+	n.pos = pos
+	n.op = OASOP
+	return n
+}
+
+// A BlockStmt is a block: { List }.
+type BlockStmt struct {
+	miniStmt
+	List Nodes
+}
+
+func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
+	n := &BlockStmt{}
+	n.pos = pos
+	if !pos.IsKnown() {
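+		// The block itself has no position: use the first statement's
+		// position when there is one, otherwise the current base.Pos.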
+		n.pos = base.Pos
+		if len(list) > 0 {
+			n.pos = list[0].Pos()
+		}
+	}
+	n.op = OBLOCK
+	n.List = list
+	return n
+}
+
+// A BranchStmt is a break, continue, fallthrough, or goto statement.
+type BranchStmt struct {
+	miniStmt
+	Label *types.Sym // label if present
+}
+
+func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
+	switch op {
+	case OBREAK, OCONTINUE, OFALL, OGOTO:
+		// ok
+	default:
+		panic("NewBranch " + op.String())
+	}
+	n := &BranchStmt{Label: label}
+	n.pos = pos
+	n.op = op
+	return n
+}
+
+func (n *BranchStmt) Sym() *types.Sym { return n.Label }
+
+// A CaseClause is a case statement in a switch or select: case List: Body.
+type CaseClause struct {
+	miniStmt
+	Var  *Name // declared variable for this case in type switch
+	List Nodes // list of expressions for switch, early select
+	Body Nodes
+}
+
+func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause {
+	n := &CaseClause{List: list, Body: body}
+	n.pos = pos
+	n.op = OCASE
+	return n
+}
+
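+// A CommClause is a case clause in a select statement: case Comm: Body.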
+type CommClause struct {
+	miniStmt
+	Comm Node // communication case
+	Body Nodes
+}
+
+func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause {
+	n := &CommClause{Comm: comm, Body: body}
+	n.pos = pos
+	n.op = OCASE
+	return n
+}
+
+// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
+// Op can be OFOR or OFORUNTIL (!Cond).
+type ForStmt struct {
+	miniStmt
+	Label    *types.Sym
+	Cond     Node
+	Late     Nodes
+	Post     Node
+	Body     Nodes
+	HasBreak bool
+}
+
+func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node) *ForStmt {
+	n := &ForStmt{Cond: cond, Post: post}
+	n.pos = pos
+	n.op = OFOR
+	if init != nil {
+		n.init = []Node{init}
+	}
+	n.Body = body
+	return n
+}
+
+func (n *ForStmt) SetOp(op Op) {
+	if op != OFOR && op != OFORUNTIL {
+		panic(n.no("SetOp " + op.String()))
+	}
+	n.op = op
+}
+
+// A GoDeferStmt is a go or defer statement: go Call / defer Call.
+//
+// The two opcodes use a single syntax because the implementations
+// are very similar: both are concerned with saving Call and running it
+// in a different context (a separate goroutine or a later time).
+type GoDeferStmt struct {
+	miniStmt
+	Call Node
+}
+
+func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
+	n := &GoDeferStmt{Call: call}
+	n.pos = pos
+	switch op {
+	case ODEFER, OGO:
+		n.op = op
+	default:
+		panic("NewGoDeferStmt " + op.String())
+	}
+	return n
+}
+
+// An IfStmt is an if statement: if Init; Cond { Then } else { Else }.
+type IfStmt struct {
+	miniStmt
+	Cond   Node
+	Body   Nodes
+	Else   Nodes
+	Likely bool // code layout hint
+}
+
+func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
+	n := &IfStmt{Cond: cond}
+	n.pos = pos
+	n.op = OIF
+	n.Body = body
+	n.Else = els
+	return n
+}
+
+// An InlineMarkStmt is a marker placed just before an inlined body.
+type InlineMarkStmt struct {
+	miniStmt
+	Index int64
+}
+
+func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
+	n := &InlineMarkStmt{Index: index}
+	n.pos = pos
+	n.op = OINLMARK
+	return n
+}
+
+func (n *InlineMarkStmt) Offset() int64     { return n.Index }
+func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
+
+// A LabelStmt is a label statement (just the label, not including the statement it labels).
+type LabelStmt struct {
+	miniStmt
+	Label *types.Sym // "Label:"
+}
+
+func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
+	n := &LabelStmt{Label: label}
+	n.pos = pos
+	n.op = OLABEL
+	return n
+}
+
+func (n *LabelStmt) Sym() *types.Sym { return n.Label }
+
+// A RangeStmt is a range loop: for Key, Value = range X { Body }
+type RangeStmt struct {
+	miniStmt
+	Label    *types.Sym
+	Def      bool
+	X        Node
+	Key      Node
+	Value    Node
+	Body     Nodes
+	HasBreak bool
+	Prealloc *Name
+}
+
+func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt {
+	n := &RangeStmt{X: x, Key: key, Value: value}
+	n.pos = pos
+	n.op = ORANGE
+	n.Body = body
+	return n
+}
+
+// A ReturnStmt is a return statement.
+type ReturnStmt struct {
+	miniStmt
+	origNode       // for typecheckargs rewrite
+	Results  Nodes // return list
+}
+
+func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
+	n := &ReturnStmt{}
+	n.pos = pos
+	n.op = ORETURN
+	n.orig = n
+	n.Results = results
+	return n
+}
+
+// A SelectStmt is a select statement: select { Cases }.
+type SelectStmt struct {
+	miniStmt
+	Label    *types.Sym
+	Cases    []*CommClause
+	HasBreak bool
+
+	// TODO(rsc): Instead of recording here, replace with a block?
+	Compiled Nodes // compiled form, after walkSwitch
+}
+
+func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt {
+	n := &SelectStmt{Cases: cases}
+	n.pos = pos
+	n.op = OSELECT
+	return n
+}
+
+// A SendStmt is a send statement: X <- Y.
+type SendStmt struct {
+	miniStmt
+	Chan  Node
+	Value Node
+}
+
+func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
+	n := &SendStmt{Chan: ch, Value: value}
+	n.pos = pos
+	n.op = OSEND
+	return n
+}
+
+// A SwitchStmt is a switch statement: switch Init; Tag { Cases }.
+type SwitchStmt struct {
+	miniStmt
+	Tag      Node
+	Cases    []*CaseClause
+	Label    *types.Sym
+	HasBreak bool
+
+	// TODO(rsc): Instead of recording here, replace with a block?
+	Compiled Nodes // compiled form, after walkSwitch
+}
+
+func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
+	n := &SwitchStmt{Tag: tag, Cases: cases}
+	n.pos = pos
+	n.op = OSWITCH
+	return n
+}
+
+// A TailCallStmt is a tail call statement, which is used for back-end
+// code generation to jump directly to another function entirely.
+type TailCallStmt struct {
+	miniStmt
+	Target *Name
+}
+
+func NewTailCallStmt(pos src.XPos, target *Name) *TailCallStmt {
+	if target.Op() != ONAME || target.Class != PFUNC {
+		base.FatalfAt(pos, "tail call to non-func %v", target)
+	}
+	n := &TailCallStmt{Target: target}
+	n.pos = pos
+	n.op = OTAILCALL
+	return n
+}
+
+// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
+type TypeSwitchGuard struct {
+	miniNode
+	Tag  *Ident
+	X    Node
+	Used bool
+}
+
+func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard {
+	n := &TypeSwitchGuard{Tag: tag, X: x}
+	n.pos = pos
+	n.op = OTYPESW
+	return n
+}
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
new file mode 100644
index 0000000..61727fb
--- /dev/null
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -0,0 +1,72 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+// Syms holds known symbols.
+var Syms struct {
+	AssertE2I       *obj.LSym
+	AssertE2I2      *obj.LSym
+	AssertI2I       *obj.LSym
+	AssertI2I2      *obj.LSym
+	Deferproc       *obj.LSym
+	DeferprocStack  *obj.LSym
+	Deferreturn     *obj.LSym
+	Duffcopy        *obj.LSym
+	Duffzero        *obj.LSym
+	GCWriteBarrier  *obj.LSym
+	Goschedguarded  *obj.LSym
+	Growslice       *obj.LSym
+	Msanread        *obj.LSym
+	Msanwrite       *obj.LSym
+	Msanmove        *obj.LSym
+	Newobject       *obj.LSym
+	Newproc         *obj.LSym
+	Panicdivide     *obj.LSym
+	Panicshift      *obj.LSym
+	PanicdottypeE   *obj.LSym
+	PanicdottypeI   *obj.LSym
+	Panicnildottype *obj.LSym
+	Panicoverflow   *obj.LSym
+	Raceread        *obj.LSym
+	Racereadrange   *obj.LSym
+	Racewrite       *obj.LSym
+	Racewriterange  *obj.LSym
+	// Wasm
+	SigPanic        *obj.LSym
+	Staticuint64s   *obj.LSym
+	Typedmemclr     *obj.LSym
+	Typedmemmove    *obj.LSym
+	Udiv            *obj.LSym
+	WriteBarrier    *obj.LSym
+	Zerobase        *obj.LSym
+	ARM64HasATOMICS *obj.LSym
+	ARMHasVFPv4     *obj.LSym
+	X86HasFMA       *obj.LSym
+	X86HasPOPCNT    *obj.LSym
+	X86HasSSE41     *obj.LSym
+	// Wasm
+	WasmDiv    *obj.LSym
+	WasmMove   *obj.LSym
+	WasmZero   *obj.LSym
+	WasmTruncS *obj.LSym
+	WasmTruncU *obj.LSym
+}
+
+// Pkgs holds known packages.
+var Pkgs struct {
+	Go      *types.Pkg
+	Itab    *types.Pkg
+	Runtime *types.Pkg
+	Unsafe  *types.Pkg
+}
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
new file mode 100644
index 0000000..a903ea8
--- /dev/null
+++ b/src/cmd/compile/internal/ir/type.go
@@ -0,0 +1,310 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"fmt"
+)
+
+// Nodes that represent the syntax of a type before type-checking.
+// After type-checking, they serve only as shells around a *types.Type.
+// Calling TypeNode converts a *types.Type to a Node shell.
+
+// An Ntype is a Node that syntactically looks like a type.
+// It can be the raw syntax for a type before typechecking,
+// or it can be an OTYPE with Type() set to a *types.Type.
+// Note that syntax doesn't guarantee it's a type: an expression
+// like *fmt is an Ntype (we don't know whether names are types yet),
+// but at least 1+1 is not an Ntype.
+type Ntype interface {
+	Node
+	CanBeNtype()
+}
+
+// A miniType is a minimal type syntax Node implementation,
+// to be embedded as the first field in a larger node implementation.
+type miniType struct {
+	miniNode
+	typ *types.Type
+}
+
+func (*miniType) CanBeNtype() {}
+
+func (n *miniType) Type() *types.Type { return n.typ }
+
+// setOTYPE changes n to be an OTYPE node returning t.
+// Rewriting the node in place this way should not be strictly
+// necessary (we should be able to update the uses with
+// proper OTYPE nodes), but it's mostly harmless and easy
+// to keep doing for now.
+//
+// setOTYPE also records t.Nod = self if t.Nod is not already set.
+// (Some types are shared by multiple OTYPE nodes, so only
+// the first such node is used as t.Nod.)
+func (n *miniType) setOTYPE(t *types.Type, self Ntype) {
+	if n.typ != nil {
+		panic(n.op.String() + " SetType: type already set")
+	}
+	n.op = OTYPE
+	n.typ = t
+	t.SetNod(self)
+}
+
+func (n *miniType) Sym() *types.Sym { return nil }   // for Format OTYPE
+func (n *miniType) Implicit() bool  { return false } // for Format OTYPE
+
+// A ChanType represents a chan Elem syntax with the direction Dir.
+type ChanType struct {
+	miniType
+	Elem Ntype
+	Dir  types.ChanDir
+}
+
+func NewChanType(pos src.XPos, elem Ntype, dir types.ChanDir) *ChanType {
+	n := &ChanType{Elem: elem, Dir: dir}
+	n.op = OTCHAN
+	n.pos = pos
+	return n
+}
+
+func (n *ChanType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Elem = nil
+}
+
+// A MapType represents a map[Key]Value type syntax.
+type MapType struct {
+	miniType
+	Key  Ntype
+	Elem Ntype
+}
+
+func NewMapType(pos src.XPos, key, elem Ntype) *MapType {
+	n := &MapType{Key: key, Elem: elem}
+	n.op = OTMAP
+	n.pos = pos
+	return n
+}
+
+func (n *MapType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Key = nil
+	n.Elem = nil
+}
+
+// A StructType represents a struct { ... } type syntax.
+type StructType struct {
+	miniType
+	Fields []*Field
+}
+
+func NewStructType(pos src.XPos, fields []*Field) *StructType {
+	n := &StructType{Fields: fields}
+	n.op = OTSTRUCT
+	n.pos = pos
+	return n
+}
+
+func (n *StructType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Fields = nil
+}
+
+// An InterfaceType represents an interface { ... } type syntax.
+type InterfaceType struct {
+	miniType
+	Methods []*Field
+}
+
+func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
+	n := &InterfaceType{Methods: methods}
+	n.op = OTINTER
+	n.pos = pos
+	return n
+}
+
+func (n *InterfaceType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Methods = nil
+}
+
+// A FuncType represents a func(Args) Results type syntax.
+type FuncType struct {
+	miniType
+	Recv    *Field
+	Params  []*Field
+	Results []*Field
+}
+
+func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType {
+	n := &FuncType{Recv: rcvr, Params: args, Results: results}
+	n.op = OTFUNC
+	n.pos = pos
+	return n
+}
+
+func (n *FuncType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Recv = nil
+	n.Params = nil
+	n.Results = nil
+}
+
+// A Field is a declared struct field, interface method, or function argument.
+// It is not a Node.
+type Field struct {
+	Pos      src.XPos
+	Sym      *types.Sym
+	Ntype    Ntype
+	Type     *types.Type
+	Embedded bool
+	IsDDD    bool
+	Note     string
+	Decl     *Name
+}
+
+func NewField(pos src.XPos, sym *types.Sym, ntyp Ntype, typ *types.Type) *Field {
+	return &Field{Pos: pos, Sym: sym, Ntype: ntyp, Type: typ}
+}
+
+func (f *Field) String() string {
+	var typ string
+	if f.Type != nil {
+		typ = fmt.Sprint(f.Type)
+	} else {
+		typ = fmt.Sprint(f.Ntype)
+	}
+	if f.Sym != nil {
+		return fmt.Sprintf("%v %v", f.Sym, typ)
+	}
+	return typ
+}
+
+// TODO(mdempsky): Make Field a Node again so these can be generated?
+// Fields are Nodes in go/ast and cmd/compile/internal/syntax.
+
+func copyField(f *Field) *Field {
+	if f == nil {
+		return nil
+	}
+	c := *f
+	return &c
+}
+func doField(f *Field, do func(Node) bool) bool {
+	if f == nil {
+		return false
+	}
+	if f.Decl != nil && do(f.Decl) {
+		return true
+	}
+	if f.Ntype != nil && do(f.Ntype) {
+		return true
+	}
+	return false
+}
+func editField(f *Field, edit func(Node) Node) {
+	if f == nil {
+		return
+	}
+	if f.Decl != nil {
+		f.Decl = edit(f.Decl).(*Name)
+	}
+	if f.Ntype != nil {
+		f.Ntype = edit(f.Ntype).(Ntype)
+	}
+}
+
+func copyFields(list []*Field) []*Field {
+	out := make([]*Field, len(list))
+	for i, f := range list {
+		out[i] = copyField(f)
+	}
+	return out
+}
+func doFields(list []*Field, do func(Node) bool) bool {
+	for _, x := range list {
+		if doField(x, do) {
+			return true
+		}
+	}
+	return false
+}
+func editFields(list []*Field, edit func(Node) Node) {
+	for _, f := range list {
+		editField(f, edit)
+	}
+}
+
+// A SliceType represents a []Elem type syntax.
+// If DDD is true, it's the ...Elem at the end of a function parameter list.
+type SliceType struct {
+	miniType
+	Elem Ntype
+	DDD  bool
+}
+
+func NewSliceType(pos src.XPos, elem Ntype) *SliceType {
+	n := &SliceType{Elem: elem}
+	n.op = OTSLICE
+	n.pos = pos
+	return n
+}
+
+func (n *SliceType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Elem = nil
+}
+
+// An ArrayType represents a [Len]Elem type syntax.
+// If Len is nil, the type is a [...]Elem in an array literal.
+type ArrayType struct {
+	miniType
+	Len  Node
+	Elem Ntype
+}
+
+func NewArrayType(pos src.XPos, len Node, elem Ntype) *ArrayType {
+	n := &ArrayType{Len: len, Elem: elem}
+	n.op = OTARRAY
+	n.pos = pos
+	return n
+}
+
+func (n *ArrayType) SetOTYPE(t *types.Type) {
+	n.setOTYPE(t, n)
+	n.Len = nil
+	n.Elem = nil
+}
+
+// A typeNode is a Node wrapper for type t.
+type typeNode struct {
+	miniNode
+	typ *types.Type
+}
+
+func newTypeNode(pos src.XPos, typ *types.Type) *typeNode {
+	n := &typeNode{typ: typ}
+	n.pos = pos
+	n.op = OTYPE
+	return n
+}
+
+func (n *typeNode) Type() *types.Type { return n.typ }
+func (n *typeNode) Sym() *types.Sym   { return n.typ.Sym() }
+func (n *typeNode) CanBeNtype()       {}
+
+// TypeNode returns the Node representing the type t.
+func TypeNode(t *types.Type) Ntype {
+	if n := t.Obj(); n != nil {
+		if n.Type() != t {
+			base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
+		}
+		return n.(Ntype)
+	}
+	return newTypeNode(src.NoXPos, t)
+}
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
new file mode 100644
index 0000000..ff45f31
--- /dev/null
+++ b/src/cmd/compile/internal/ir/val.go
@@ -0,0 +1,171 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+	"go/constant"
+	"math"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/types"
+)
+
+func ConstType(n Node) constant.Kind {
+	if n == nil || n.Op() != OLITERAL {
+		return constant.Unknown
+	}
+	return n.Val().Kind()
+}
+
+// ConstValue returns the constant value stored in n as an interface{}.
+// It returns int64s for ints and runes, float64s for floats,
+// and complex128s for complex values.
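+// For example, the rune constant 'a' yields int64(97).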
+func ConstValue(n Node) interface{} {
+	switch v := n.Val(); v.Kind() {
+	default:
+		base.Fatalf("unexpected constant: %v", v)
+		panic("unreachable")
+	case constant.Bool:
+		return constant.BoolVal(v)
+	case constant.String:
+		return constant.StringVal(v)
+	case constant.Int:
+		return IntVal(n.Type(), v)
+	case constant.Float:
+		return Float64Val(v)
+	case constant.Complex:
+		return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v)))
+	}
+}
+
+// IntVal returns v converted to int64.
+// Note: if t is uint64, very large values will be converted to negative int64.
+func IntVal(t *types.Type, v constant.Value) int64 {
+	if t.IsUnsigned() {
+		if x, ok := constant.Uint64Val(v); ok {
+			return int64(x)
+		}
+	} else {
+		if x, ok := constant.Int64Val(v); ok {
+			return x
+		}
+	}
+	base.Fatalf("%v out of range for %v", v, t)
+	panic("unreachable")
+}
+
+func Float64Val(v constant.Value) float64 {
+	if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) {
+		return x + 0 // avoid -0 (should not be needed, but be conservative)
+	}
+	base.Fatalf("bad float64 value: %v", v)
+	panic("unreachable")
+}
+
+func AssertValidTypeForConst(t *types.Type, v constant.Value) {
+	if !ValidTypeForConst(t, v) {
+		base.Fatalf("%v does not represent %v", t, v)
+	}
+}
+
+func ValidTypeForConst(t *types.Type, v constant.Value) bool {
+	switch v.Kind() {
+	case constant.Unknown:
+		return OKForConst[t.Kind()]
+	case constant.Bool:
+		return t.IsBoolean()
+	case constant.String:
+		return t.IsString()
+	case constant.Int:
+		return t.IsInteger()
+	case constant.Float:
+		return t.IsFloat()
+	case constant.Complex:
+		return t.IsComplex()
+	}
+
+	base.Fatalf("unexpected constant kind: %v", v)
+	panic("unreachable")
+}
+
+// NewLiteral returns a new untyped constant with value v.
+func NewLiteral(v constant.Value) Node {
+	return NewBasicLit(base.Pos, v)
+}
+
+func idealType(ct constant.Kind) *types.Type {
+	switch ct {
+	case constant.String:
+		return types.UntypedString
+	case constant.Bool:
+		return types.UntypedBool
+	case constant.Int:
+		return types.UntypedInt
+	case constant.Float:
+		return types.UntypedFloat
+	case constant.Complex:
+		return types.UntypedComplex
+	}
+	base.Fatalf("unexpected Ctype: %v", ct)
+	return nil
+}
+
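+// OKForConst records, for each type kind, whether that kind is valid
+// for a constant of unknown kind.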
+var OKForConst [types.NTYPE]bool
+
+// CanInt64 reports whether it is safe to call Int64Val() on n.
+func CanInt64(n Node) bool {
+	if !IsConst(n, constant.Int) {
+		return false
+	}
+
+	// if the value inside n cannot be represented as an int64, the
+	// return value of Int64 is undefined
+	_, ok := constant.Int64Val(n.Val())
+	return ok
+}
+
+// Int64Val returns n as an int64.
+// n must be an integer or rune constant.
+func Int64Val(n Node) int64 {
+	if !IsConst(n, constant.Int) {
+		base.Fatalf("Int64Val(%v)", n)
+	}
+	x, ok := constant.Int64Val(n.Val())
+	if !ok {
+		base.Fatalf("Int64Val(%v)", n)
+	}
+	return x
+}
+
+// Uint64Val returns n as a uint64.
+// n must be an integer or rune constant.
+func Uint64Val(n Node) uint64 {
+	if !IsConst(n, constant.Int) {
+		base.Fatalf("Uint64Val(%v)", n)
+	}
+	x, ok := constant.Uint64Val(n.Val())
+	if !ok {
+		base.Fatalf("Uint64Val(%v)", n)
+	}
+	return x
+}
+
+// BoolVal returns n as a bool.
+// n must be a boolean constant.
+func BoolVal(n Node) bool {
+	if !IsConst(n, constant.Bool) {
+		base.Fatalf("BoolVal(%v)", n)
+	}
+	return constant.BoolVal(n.Val())
+}
+
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func StringVal(n Node) string {
+	if !IsConst(n, constant.String) {
+		base.Fatalf("StringVal(%v)", n)
+	}
+	return constant.StringVal(n.Val())
+}
diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go
new file mode 100644
index 0000000..c1b3d4e
--- /dev/null
+++ b/src/cmd/compile/internal/ir/visit.go
@@ -0,0 +1,186 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IR visitors for walking the IR tree.
+//
+// The lowest-level helpers are DoChildren and EditChildren, which
+// nodes help implement and which provide control over whether and
+// when recursion happens during the walk of the IR.
+//
+// Although these are both useful directly, two simpler patterns
+// are fairly common and also provided: Visit and Any.
+
+package ir
+
+// DoChildren calls do(x) on each of n's non-nil child nodes x.
+// If any call returns true, DoChildren stops and returns true.
+// Otherwise, DoChildren returns false.
+//
+// Note that DoChildren(n, do) only calls do(x) for n's immediate children.
+// If x's children should be processed, then do(x) must call DoChildren(x, do).
+//
+// DoChildren allows constructing general traversals of the IR graph
+// that can stop early if needed. The most general usage is:
+//
+//	var do func(ir.Node) bool
+//	do = func(x ir.Node) bool {
+//		... processing BEFORE visiting children ...
+//		if ... should visit children ... {
+//			ir.DoChildren(x, do)
+//			... processing AFTER visiting children ...
+//		}
+//		if ... should stop parent DoChildren call from visiting siblings ... {
+//			return true
+//		}
+//		return false
+//	}
+//	do(root)
+//
+// Since DoChildren does not return true itself, if the do function
+// never wants to stop the traversal, it can assume that DoChildren
+// itself will always return false, simplifying to:
+//
+//	var do func(ir.Node) bool
+//	do = func(x ir.Node) bool {
+//		... processing BEFORE visiting children ...
+//		if ... should visit children ... {
+//			ir.DoChildren(x, do)
+//		}
+//		... processing AFTER visiting children ...
+//		return false
+//	}
+//	do(root)
+//
+// The Visit function illustrates a further simplification of the pattern,
+// only processing before visiting children and never stopping:
+//
+//	func Visit(n ir.Node, visit func(ir.Node)) {
+//		if n == nil {
+//			return
+//		}
+//		var do func(ir.Node) bool
+//		do = func(x ir.Node) bool {
+//			visit(x)
+//			return ir.DoChildren(x, do)
+//		}
+//		do(n)
+//	}
+//
+// The Any function illustrates a different simplification of the pattern,
+// visiting each node and then its children, recursively, until finding
+// a node x for which cond(x) returns true, at which point the entire
+// traversal stops and returns true.
+//
+//	func Any(n ir.Node, cond func(ir.Node) bool) bool {
+//		if n == nil {
+//			return false
+//		}
+//		var do func(ir.Node) bool
+//		do = func(x ir.Node) bool {
+//			return cond(x) || ir.DoChildren(x, do)
+//		}
+//		return do(n)
+//	}
+//
+// Visit and Any are presented above as examples of how to use
+// DoChildren effectively, but of course, usage that fits within the
+// simplifications captured by Visit or Any will be best served
+// by directly calling the ones provided by this package.
+func DoChildren(n Node, do func(Node) bool) bool {
+	if n == nil {
+		return false
+	}
+	return n.doChildren(do)
+}
+
+// Visit visits each non-nil node x in the IR tree rooted at n
+// in a depth-first preorder traversal, calling visit on each node visited.
+func Visit(n Node, visit func(Node)) {
+	if n == nil {
+		return
+	}
+	var do func(Node) bool
+	do = func(x Node) bool {
+		visit(x)
+		return DoChildren(x, do)
+	}
+	do(n)
+}
+
+// VisitList calls Visit(x, visit) for each node x in the list.
+func VisitList(list Nodes, visit func(Node)) {
+	for _, x := range list {
+		Visit(x, visit)
+	}
+}
+
+// Any looks for a non-nil node x in the IR tree rooted at n
+// for which cond(x) returns true.
+// Any considers nodes in a depth-first, preorder traversal.
+// When Any finds a node x such that cond(x) is true,
+// Any ends the traversal and returns true immediately.
+// Otherwise Any returns false after completing the entire traversal.
+func Any(n Node, cond func(Node) bool) bool {
+	if n == nil {
+		return false
+	}
+	var do func(Node) bool
+	do = func(x Node) bool {
+		return cond(x) || DoChildren(x, do)
+	}
+	return do(n)
+}
+
+// AnyList calls Any(x, cond) for each node x in the list, in order.
+// If any call returns true, AnyList stops and returns true.
+// Otherwise, AnyList returns false after calling Any(x, cond)
+// for every x in the list.
+func AnyList(list Nodes, cond func(Node) bool) bool {
+	for _, x := range list {
+		if Any(x, cond) {
+			return true
+		}
+	}
+	return false
+}
+
+// EditChildren edits the child nodes of n, replacing each child x with edit(x).
+//
+// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
+// If x's children should be processed, then edit(x) must call EditChildren(x, edit).
+//
+// EditChildren allows constructing general editing passes of the IR graph.
+// The most general usage is:
+//
+//	var edit func(ir.Node) ir.Node
+//	edit = func(x ir.Node) ir.Node {
+//		... processing BEFORE editing children ...
+//		if ... should edit children ... {
+//			EditChildren(x, edit)
+//			... processing AFTER editing children ...
+//		}
+//		... return x ...
+//	}
+//	n = edit(n)
+//
+// EditChildren edits the node in place. To edit a copy, call Copy first.
+// As an example, a simple deep copy implementation would be:
+//
+//	func deepCopy(n ir.Node) ir.Node {
+//		var edit func(ir.Node) ir.Node
+//		edit = func(x ir.Node) ir.Node {
+//			x = ir.Copy(x)
+//			ir.EditChildren(x, edit)
+//			return x
+//		}
+//		return edit(n)
+//	}
+//
+// Of course, in this case it is better to call ir.DeepCopy than to build one anew.
+func EditChildren(n Node, edit func(Node) Node) {
+	if n == nil {
+		return
+	}
+	n.editChildren(edit)
+}
diff --git a/src/cmd/compile/internal/liveness/bvset.go b/src/cmd/compile/internal/liveness/bvset.go
new file mode 100644
index 0000000..3431f54
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/bvset.go
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package liveness
+
+import "cmd/compile/internal/bitvec"
+
+// FNV-1 hash function constants.
+const (
+	h0 = 2166136261
+	hp = 16777619
+)
+
+// bvecSet is a set of bvecs, in initial insertion order.
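+// It is implemented as an open-addressed hash table with linear probing.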
+type bvecSet struct {
+	index []int           // hash -> uniq index. -1 indicates empty slot.
+	uniq  []bitvec.BitVec // unique bvecs, in insertion order
+}
+
+func (m *bvecSet) grow() {
+	// Allocate new index.
+	n := len(m.index) * 2
+	if n == 0 {
+		n = 32
+	}
+	newIndex := make([]int, n)
+	for i := range newIndex {
+		newIndex[i] = -1
+	}
+
+	// Rehash into newIndex.
+	for i, bv := range m.uniq {
+		h := hashbitmap(h0, bv) % uint32(len(newIndex))
+		for {
+			j := newIndex[h]
+			if j < 0 {
+				newIndex[h] = i
+				break
+			}
+			h++
+			if h == uint32(len(newIndex)) {
+				h = 0
+			}
+		}
+	}
+	m.index = newIndex
+}
+
+// add adds bv to the set and returns its index in m.extractUnique.
+// The caller must not modify bv after this.
+func (m *bvecSet) add(bv bitvec.BitVec) int {
+	if len(m.uniq)*4 >= len(m.index) {
+		m.grow()
+	}
+
+	index := m.index
+	h := hashbitmap(h0, bv) % uint32(len(index))
+	for {
+		j := index[h]
+		if j < 0 {
+			// New bvec.
+			index[h] = len(m.uniq)
+			m.uniq = append(m.uniq, bv)
+			return len(m.uniq) - 1
+		}
+		jlive := m.uniq[j]
+		if bv.Eq(jlive) {
+			// Existing bvec.
+			return j
+		}
+
+		h++
+		if h == uint32(len(index)) {
+			h = 0
+		}
+	}
+}
+
+// extractUnique returns the slice of unique bit vectors in m, as
+// indexed by the result of bvecSet.add.
+func (m *bvecSet) extractUnique() []bitvec.BitVec {
+	return m.uniq
+}
+
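+// hashbitmap extends the FNV-1 hash h with the bytes of bv's bitmap words.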
+func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
+	n := int((bv.N + 31) / 32)
+	for i := 0; i < n; i++ {
+		w := bv.B[i]
+		h = (h * hp) ^ (w & 0xff)
+		h = (h * hp) ^ ((w >> 8) & 0xff)
+		h = (h * hp) ^ ((w >> 16) & 0xff)
+		h = (h * hp) ^ ((w >> 24) & 0xff)
+	}
+
+	return h
+}
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/liveness/plive.go
similarity index 71%
rename from src/cmd/compile/internal/gc/plive.go
rename to src/cmd/compile/internal/liveness/plive.go
index a48173e..53ae797 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -12,16 +12,24 @@
 //
 // Each level includes the earlier output as well.
 
-package gc
+package liveness
 
 import (
+	"crypto/md5"
+	"fmt"
+	"sort"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/bitvec"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/typebits"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
-	"crypto/md5"
-	"fmt"
-	"strings"
 )
 
 // OpVarDef is an annotation for the liveness analysis, marking a place
@@ -79,117 +87,87 @@
 // so the compiler can allocate two temps to the same location. Here it's now
 // useless since the introduction of stack objects.
 
-// BlockEffects summarizes the liveness effects on an SSA block.
-type BlockEffects struct {
+// blockEffects summarizes the liveness effects on an SSA block.
+type blockEffects struct {
 	// Computed during Liveness.prologue using only the content of
 	// individual blocks:
 	//
 	//	uevar: upward exposed variables (used before set in block)
 	//	varkill: killed variables (set in block)
-	uevar   bvec
-	varkill bvec
+	uevar   bitvec.BitVec
+	varkill bitvec.BitVec
 
 	// Computed during Liveness.solve using control flow information:
 	//
 	//	livein: variables live at block entry
 	//	liveout: variables live at block exit
-	livein  bvec
-	liveout bvec
+	livein  bitvec.BitVec
+	liveout bitvec.BitVec
 }
 
 // A collection of global state used by liveness analysis.
-type Liveness struct {
-	fn         *Node
+type liveness struct {
+	fn         *ir.Func
 	f          *ssa.Func
-	vars       []*Node
-	idx        map[*Node]int32
+	vars       []*ir.Name
+	idx        map[*ir.Name]int32
 	stkptrsize int64
 
-	be []BlockEffects
+	be []blockEffects
 
 	// allUnsafe indicates that all points in this function are
 	// unsafe-points.
 	allUnsafe bool
 	// unsafePoints bit i is set if Value ID i is an unsafe-point
 	// (preemption is not allowed). Only valid if !allUnsafe.
-	unsafePoints bvec
+	unsafePoints bitvec.BitVec
 
 	// An array with a bit vector for each safe point in the
 	// current Block during Liveness.epilogue. Indexed in Value
 	// order for that block. Additionally, for the entry block
 	// livevars[0] is the entry bitmap. Liveness.compact moves
 	// these to stackMaps.
-	livevars []bvec
+	livevars []bitvec.BitVec
 
 	// livenessMap maps from safe points (i.e., CALLs) to their
 	// liveness map indexes.
-	livenessMap LivenessMap
+	livenessMap Map
 	stackMapSet bvecSet
-	stackMaps   []bvec
+	stackMaps   []bitvec.BitVec
 
 	cache progeffectscache
 }
 
-// LivenessMap maps from *ssa.Value to LivenessIndex.
-type LivenessMap struct {
-	vals map[ssa.ID]LivenessIndex
-	// The set of live, pointer-containing variables at the deferreturn
+// Map maps from *ssa.Value to LivenessIndex.
+type Map struct {
+	Vals map[ssa.ID]objw.LivenessIndex
+	// The set of live, pointer-containing variables at the DeferReturn
 	// call (only set when open-coded defers are used).
-	deferreturn LivenessIndex
+	DeferReturn objw.LivenessIndex
 }
 
-func (m *LivenessMap) reset() {
-	if m.vals == nil {
-		m.vals = make(map[ssa.ID]LivenessIndex)
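+// reset clears m for reuse, retaining the existing Vals map allocation
+// when possible.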
+func (m *Map) reset() {
+	if m.Vals == nil {
+		m.Vals = make(map[ssa.ID]objw.LivenessIndex)
 	} else {
-		for k := range m.vals {
-			delete(m.vals, k)
+		for k := range m.Vals {
+			delete(m.Vals, k)
 		}
 	}
-	m.deferreturn = LivenessDontCare
+	m.DeferReturn = objw.LivenessDontCare
 }
 
-func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
-	m.vals[v.ID] = i
+func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) {
+	m.Vals[v.ID] = i
 }
 
-func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
+func (m Map) Get(v *ssa.Value) objw.LivenessIndex {
 	// If v isn't in the map, then it's a "don't care" and not an
 	// unsafe-point.
-	if idx, ok := m.vals[v.ID]; ok {
+	if idx, ok := m.Vals[v.ID]; ok {
 		return idx
 	}
-	return LivenessIndex{StackMapDontCare, false}
-}
-
-// LivenessIndex stores the liveness map information for a Value.
-type LivenessIndex struct {
-	stackMapIndex int
-
-	// isUnsafePoint indicates that this is an unsafe-point.
-	//
-	// Note that it's possible for a call Value to have a stack
-	// map while also being an unsafe-point. This means it cannot
-	// be preempted at this instruction, but that a preemption or
-	// stack growth may happen in the called function.
-	isUnsafePoint bool
-}
-
-// LivenessDontCare indicates that the liveness information doesn't
-// matter. Currently it is used in deferreturn liveness when we don't
-// actually need it. It should never be emitted to the PCDATA stream.
-var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
-
-// StackMapDontCare indicates that the stack map index at a Value
-// doesn't matter.
-//
-// This is a sentinel value that should never be emitted to the PCDATA
-// stream. We use -1000 because that's obviously never a valid stack
-// index (but -1 is).
-const StackMapDontCare = -1000
-
-func (idx LivenessIndex) StackMapValid() bool {
-	return idx.stackMapIndex != StackMapDontCare
+	return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
 }
 
 type progeffectscache struct {
@@ -198,42 +176,42 @@
 	initialized bool
 }
 
-// livenessShouldTrack reports whether the liveness analysis
+// shouldTrack reports whether the liveness analysis
 // should track the variable n.
 // We don't care about variables that have no pointers,
 // nor do we care about non-local variables,
 // nor do we care about empty structs (handled by the pointer check),
 // nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(n *Node) bool {
-	return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
+func shouldTrack(n *ir.Name) bool {
+	return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
 }
 
 // getvariables returns the list of on-stack variables that we need to track
 // and a map for looking up indices by *ir.Name.
-func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
-	var vars []*Node
-	for _, n := range fn.Func.Dcl {
-		if livenessShouldTrack(n) {
+func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
+	var vars []*ir.Name
+	for _, n := range fn.Dcl {
+		if shouldTrack(n) {
 			vars = append(vars, n)
 		}
 	}
-	idx := make(map[*Node]int32, len(vars))
+	idx := make(map[*ir.Name]int32, len(vars))
 	for i, n := range vars {
 		idx[n] = int32(i)
 	}
 	return vars, idx
 }
 
-func (lv *Liveness) initcache() {
+func (lv *liveness) initcache() {
 	if lv.cache.initialized {
-		Fatalf("liveness cache initialized twice")
+		base.Fatalf("liveness cache initialized twice")
 		return
 	}
 	lv.cache.initialized = true
 
 	for i, node := range lv.vars {
-		switch node.Class() {
-		case PPARAM:
+		switch node.Class {
+		case ir.PPARAM:
 			// A return instruction with a p.to is a tail return, which brings
 			// the stack pointer back up (if it ever went down) and then jumps
 			// to a new function entirely. That form of instruction must read
@@ -242,7 +220,7 @@
 			// function runs.
 			lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
 
-		case PPARAMOUT:
+		case ir.PPARAMOUT:
 			// All results are live at every return point.
 			// Note that this point is after escaping return values
 			// are copied back to the stack using their PAUTOHEAP references.
@@ -268,19 +246,18 @@
 // valueEffects returns the index of a variable in lv.vars and the
 // liveness effects v has on that variable.
 // If v does not affect any tracked variables, it returns -1, 0.
-func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
-	n, e := affectedNode(v)
-	if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
+func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
+	n, e := affectedVar(v)
+	if e == 0 || n == nil { // cheapest checks first
 		return -1, 0
 	}
-
 	// AllocFrame has dropped unused variables from
 	// lv.fn.Func.Dcl, but they might still be referenced by
 	// OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
 	// variable" ICEs (issue 19632).
 	switch v.Op {
 	case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
-		if !n.Name.Used() {
+		if !n.Used() {
 			return -1, 0
 		}
 	}
@@ -295,7 +272,7 @@
 	if e&(ssa.SymRead|ssa.SymAddr) != 0 {
 		effect |= uevar
 	}
-	if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
+	if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
 		effect |= varkill
 	}
 
@@ -309,23 +286,23 @@
 	return -1, 0
 }
 
-// affectedNode returns the *Node affected by v
-func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
+// affectedVar returns the *ir.Name node affected by v
+func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) {
 	// Special cases.
 	switch v.Op {
 	case ssa.OpLoadReg:
-		n, _ := AutoVar(v.Args[0])
+		n, _ := ssa.AutoVar(v.Args[0])
 		return n, ssa.SymRead
 	case ssa.OpStoreReg:
-		n, _ := AutoVar(v)
+		n, _ := ssa.AutoVar(v)
 		return n, ssa.SymWrite
 
 	case ssa.OpVarLive:
-		return v.Aux.(*Node), ssa.SymRead
+		return v.Aux.(*ir.Name), ssa.SymRead
 	case ssa.OpVarDef, ssa.OpVarKill:
-		return v.Aux.(*Node), ssa.SymWrite
+		return v.Aux.(*ir.Name), ssa.SymWrite
 	case ssa.OpKeepAlive:
-		n, _ := AutoVar(v.Args[0])
+		n, _ := ssa.AutoVar(v.Args[0])
 		return n, ssa.SymRead
 	}
 
@@ -338,24 +315,24 @@
 	case nil, *obj.LSym:
 		// ok, but no node
 		return nil, e
-	case *Node:
+	case *ir.Name:
 		return a, e
 	default:
-		Fatalf("weird aux: %s", v.LongString())
+		base.Fatalf("weird aux: %s", v.LongString())
 		return nil, e
 	}
 }
 
 type livenessFuncCache struct {
-	be          []BlockEffects
-	livenessMap LivenessMap
+	be          []blockEffects
+	livenessMap Map
 }
 
 // newliveness constructs a new liveness structure used to hold the global
 // state of the liveness computation. The vars argument is the slice of
 // variables to track, and idx maps each variable to its index in vars.
-func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
-	lv := &Liveness{
+func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness {
+	lv := &liveness{
 		fn:         fn,
 		f:          f,
 		vars:       vars,
@@ -373,23 +350,23 @@
 		if cap(lc.be) >= f.NumBlocks() {
 			lv.be = lc.be[:f.NumBlocks()]
 		}
-		lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
-		lc.livenessMap.vals = nil
+		lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare}
+		lc.livenessMap.Vals = nil
 	}
 	if lv.be == nil {
-		lv.be = make([]BlockEffects, f.NumBlocks())
+		lv.be = make([]blockEffects, f.NumBlocks())
 	}
 
 	nblocks := int32(len(f.Blocks))
 	nvars := int32(len(vars))
-	bulk := bvbulkalloc(nvars, nblocks*7)
+	bulk := bitvec.NewBulk(nvars, nblocks*7)
 	for _, b := range f.Blocks {
 		be := lv.blockEffects(b)
 
-		be.uevar = bulk.next()
-		be.varkill = bulk.next()
-		be.livein = bulk.next()
-		be.liveout = bulk.next()
+		be.uevar = bulk.Next()
+		be.varkill = bulk.Next()
+		be.livein = bulk.Next()
+		be.liveout = bulk.Next()
 	}
 	lv.livenessMap.reset()
 
@@ -397,109 +374,33 @@
 	return lv
 }
 
-func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
+func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects {
 	return &lv.be[b.ID]
 }
 
-// NOTE: The bitmap for a specific type t could be cached in t after
-// the first run and then simply copied into bv at the correct offset
-// on future calls with the same type t.
-func onebitwalktype1(t *types.Type, off int64, bv bvec) {
-	if t.Align > 0 && off&int64(t.Align-1) != 0 {
-		Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
-	}
-	if !t.HasPointers() {
-		// Note: this case ensures that pointers to go:notinheap types
-		// are not considered pointers by garbage collection and stack copying.
-		return
-	}
-
-	switch t.Etype {
-	case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
-		if off&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid alignment, %v", t)
-		}
-		bv.Set(int32(off / int64(Widthptr))) // pointer
-
-	case TSTRING:
-		// struct { byte *str; intgo len; }
-		if off&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid alignment, %v", t)
-		}
-		bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
-
-	case TINTER:
-		// struct { Itab *tab;	void *data; }
-		// or, when isnilinter(t)==true:
-		// struct { Type *type; void *data; }
-		if off&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid alignment, %v", t)
-		}
-		// The first word of an interface is a pointer, but we don't
-		// treat it as such.
-		// 1. If it is a non-empty interface, the pointer points to an itab
-		//    which is always in persistentalloc space.
-		// 2. If it is an empty interface, the pointer points to a _type.
-		//   a. If it is a compile-time-allocated type, it points into
-		//      the read-only data section.
-		//   b. If it is a reflect-allocated type, it points into the Go heap.
-		//      Reflect is responsible for keeping a reference to
-		//      the underlying type so it won't be GCd.
-		// If we ever have a moving GC, we need to change this for 2b (as
-		// well as scan itabs to update their itab._type fields).
-		bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
-
-	case TSLICE:
-		// struct { byte *array; uintgo len; uintgo cap; }
-		if off&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
-		}
-		bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
-
-	case TARRAY:
-		elt := t.Elem()
-		if elt.Width == 0 {
-			// Short-circuit for #20739.
-			break
-		}
-		for i := int64(0); i < t.NumElem(); i++ {
-			onebitwalktype1(elt, off, bv)
-			off += elt.Width
-		}
-
-	case TSTRUCT:
-		for _, f := range t.Fields().Slice() {
-			onebitwalktype1(f.Type, off+f.Offset, bv)
-		}
-
-	default:
-		Fatalf("onebitwalktype1: unexpected type, %v", t)
-	}
-}
-
 // Generates live pointer value maps for arguments and local variables. The
 // this argument and the in arguments are always assumed live. The vars
 // argument is a slice of *ir.Name.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
+func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
 	for i := int32(0); ; i++ {
 		i = liveout.Next(i)
 		if i < 0 {
 			break
 		}
 		node := vars[i]
-		switch node.Class() {
-		case PAUTO:
-			onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
+		switch node.Class {
+		case ir.PAUTO:
+			typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
 
-		case PPARAM, PPARAMOUT:
-			onebitwalktype1(node.Type, node.Xoffset, args)
+		case ir.PPARAM, ir.PPARAMOUT:
+			typebits.Set(node.Type(), node.FrameOffset(), args)
 		}
 	}
 }
 
-// allUnsafe indicates that all points in this function are
+// IsUnsafe reports whether all points in this function are
 // unsafe-points.
-func allUnsafe(f *ssa.Func) bool {
+func IsUnsafe(f *ssa.Func) bool {
 	// The runtime assumes the only safe-points are function
 	// prologues (because that's how it used to be). We could and
 	// should improve that, but for now keep considering all points
@@ -509,18 +410,18 @@
 	// go:nosplit functions are similar. Since safe points used to
 	// be coupled with stack checks, go:nosplit often actually
 	// means "no safe points in this function".
-	return compiling_runtime || f.NoSplit
+	return base.Flag.CompilingRuntime || f.NoSplit
 }
 
 // markUnsafePoints finds unsafe points and computes lv.unsafePoints.
-func (lv *Liveness) markUnsafePoints() {
-	if allUnsafe(lv.f) {
+func (lv *liveness) markUnsafePoints() {
+	if IsUnsafe(lv.f) {
 		// No complex analysis necessary.
 		lv.allUnsafe = true
 		return
 	}
 
-	lv.unsafePoints = bvalloc(int32(lv.f.NumValues()))
+	lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
 
 	// Mark architecture-specific unsafe points.
 	for _, b := range lv.f.Blocks {
@@ -564,7 +465,7 @@
 		var load *ssa.Value
 		v := wbBlock.Controls[0]
 		for {
-			if sym, ok := v.Aux.(*obj.LSym); ok && sym == writeBarrier {
+			if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
 				load = v
 				break
 			}
@@ -631,11 +532,11 @@
 	// nice to only flood as far as the unsafe.Pointer -> uintptr
 	// conversion, but it's hard to know which argument of an Add
 	// or Sub to follow.
-	var flooded bvec
+	var flooded bitvec.BitVec
 	var flood func(b *ssa.Block, vi int)
 	flood = func(b *ssa.Block, vi int) {
-		if flooded.n == 0 {
-			flooded = bvalloc(int32(lv.f.NumBlocks()))
+		if flooded.N == 0 {
+			flooded = bitvec.New(int32(lv.f.NumBlocks()))
 		}
 		if flooded.Get(int32(b.ID)) {
 			return
@@ -676,14 +577,14 @@
 // This does not necessarily mean the instruction is a safe-point. In
 // particular, call Values can have a stack map in case the callee
 // grows the stack, but not themselves be a safe-point.
-func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
+func (lv *liveness) hasStackMap(v *ssa.Value) bool {
 	if !v.Op.IsCall() {
 		return false
 	}
 	// typedmemclr and typedmemmove are write barriers and
 	// deeply non-preemptible. They are unsafe points and
 	// hence should not have liveness maps.
-	if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
+	if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
 		return false
 	}
 	return true
@@ -692,7 +593,7 @@
 // Initializes the sets for solving the live variables. Visits all the
 // instructions in each basic block to summarize the information at each basic
 // block.
-func (lv *Liveness) prologue() {
+func (lv *liveness) prologue() {
 	lv.initcache()
 
 	for _, b := range lv.f.Blocks {
@@ -714,12 +615,12 @@
 }
 
 // Solve the liveness dataflow equations.
-func (lv *Liveness) solve() {
+func (lv *liveness) solve() {
 	// These temporary bitvectors exist to avoid successive allocations and
 	// frees within the loop.
 	nvars := int32(len(lv.vars))
-	newlivein := bvalloc(nvars)
-	newliveout := bvalloc(nvars)
+	newlivein := bitvec.New(nvars)
+	newliveout := bitvec.New(nvars)
 
 	// Walk blocks in postorder ordering. This improves convergence.
 	po := lv.f.Postorder()
@@ -774,10 +675,10 @@
 
 // Visits all instructions in a basic block and computes a bit vector of live
 // variables at each safe point location.
-func (lv *Liveness) epilogue() {
+func (lv *liveness) epilogue() {
 	nvars := int32(len(lv.vars))
-	liveout := bvalloc(nvars)
-	livedefer := bvalloc(nvars) // always-live variables
+	liveout := bitvec.New(nvars)
+	livedefer := bitvec.New(nvars) // always-live variables
 
 	// If there is a defer (that could recover), then all output
 	// parameters are live all the time.  In addition, any locals
@@ -786,14 +687,14 @@
 	// pointers to copy values back to the stack).
 	// TODO: if the output parameter is heap-allocated, then we
 	// don't need to keep the stack copy live?
-	if lv.fn.Func.HasDefer() {
+	if lv.fn.HasDefer() {
 		for i, n := range lv.vars {
-			if n.Class() == PPARAMOUT {
-				if n.Name.IsOutputParamHeapAddr() {
+			if n.Class == ir.PPARAMOUT {
+				if n.IsOutputParamHeapAddr() {
 					// Just to be paranoid.  Heap addresses are PAUTOs.
-					Fatalf("variable %v both output param and heap output param", n)
+					base.Fatalf("variable %v both output param and heap output param", n)
 				}
-				if n.Name.Param.Heapaddr != nil {
+				if n.Heapaddr != nil {
 					// If this variable moved to the heap, then
 					// its stack copy is not live.
 					continue
@@ -801,22 +702,22 @@
 				// Note: zeroing is handled by zeroResults in walk.go.
 				livedefer.Set(int32(i))
 			}
-			if n.Name.IsOutputParamHeapAddr() {
+			if n.IsOutputParamHeapAddr() {
 				// This variable will be overwritten early in the function
 				// prologue (from the result of a mallocgc) but we need to
 				// zero it in case that malloc causes a stack scan.
-				n.Name.SetNeedzero(true)
+				n.SetNeedzero(true)
 				livedefer.Set(int32(i))
 			}
-			if n.Name.OpenDeferSlot() {
+			if n.OpenDeferSlot() {
 				// Open-coded defer args slots must be live
 				// everywhere in a function, since a panic can
 				// occur (almost) anywhere. Because it is live
 				// everywhere, it must be zeroed on entry.
 				livedefer.Set(int32(i))
 				// It was already marked as Needzero when created.
-				if !n.Name.Needzero() {
-					Fatalf("all pointer-containing defer arg slots should have Needzero set")
+				if !n.Needzero() {
+					base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
 				}
 			}
 		}
@@ -831,7 +732,7 @@
 
 	{
 		// Reserve an entry for function entry.
-		live := bvalloc(nvars)
+		live := bitvec.New(nvars)
 		lv.livevars = append(lv.livevars, live)
 	}
 
@@ -845,7 +746,7 @@
 				continue
 			}
 
-			live := bvalloc(nvars)
+			live := bitvec.New(nvars)
 			lv.livevars = append(lv.livevars, live)
 		}
 
@@ -878,7 +779,7 @@
 
 		if b == lv.f.Entry {
 			if index != 0 {
-				Fatalf("bad index for entry point: %v", index)
+				base.Fatalf("bad index for entry point: %v", index)
 			}
 
 			// Check to make sure only input variables are live.
@@ -886,10 +787,10 @@
 				if !liveout.Get(int32(i)) {
 					continue
 				}
-				if n.Class() == PPARAM {
+				if n.Class == ir.PPARAM {
 					continue // ok
 				}
-				Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
+				base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n)
 			}
 
 			// Record live variables.
@@ -902,25 +803,25 @@
 	}
 
 	// If we have an open-coded deferreturn call, make a liveness map for it.
-	if lv.fn.Func.OpenCodedDeferDisallowed() {
-		lv.livenessMap.deferreturn = LivenessDontCare
+	if lv.fn.OpenCodedDeferDisallowed() {
+		lv.livenessMap.DeferReturn = objw.LivenessDontCare
 	} else {
-		lv.livenessMap.deferreturn = LivenessIndex{
-			stackMapIndex: lv.stackMapSet.add(livedefer),
-			isUnsafePoint: false,
+		lv.livenessMap.DeferReturn = objw.LivenessIndex{
+			StackMapIndex: lv.stackMapSet.add(livedefer),
+			IsUnsafePoint: false,
 		}
 	}
 
 	// Done compacting. Throw out the stack map set.
-	lv.stackMaps = lv.stackMapSet.extractUniqe()
+	lv.stackMaps = lv.stackMapSet.extractUnique()
 	lv.stackMapSet = bvecSet{}
 
 	// Useful sanity check: on entry to the function,
 	// the only things that can possibly be live are the
 	// input parameters.
 	for j, n := range lv.vars {
-		if n.Class() != PPARAM && lv.stackMaps[0].Get(int32(j)) {
-			lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
+		if n.Class != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
+			lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
 		}
 	}
 }
@@ -941,7 +842,7 @@
 // is actually a net loss: we save about 50k of argument bitmaps but the new
 // PCDATA tables cost about 100k. So for now we keep using a single index for
 // both bitmap lists.
-func (lv *Liveness) compact(b *ssa.Block) {
+func (lv *liveness) compact(b *ssa.Block) {
 	pos := 0
 	if b == lv.f.Entry {
 		// Handle entry stack map.
@@ -951,9 +852,9 @@
 	for _, v := range b.Values {
 		hasStackMap := lv.hasStackMap(v)
 		isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
-		idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
+		idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
 		if hasStackMap {
-			idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
+			idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
 			pos++
 		}
 		if hasStackMap || isUnsafePoint {
@@ -965,8 +866,8 @@
 	lv.livevars = lv.livevars[:0]
 }
 
-func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
-	if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
+func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
+	if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
 		return
 	}
 	if !(v == nil || v.Op.IsCall()) {
@@ -978,14 +879,14 @@
 		return
 	}
 
-	pos := lv.fn.Func.Nname.Pos
+	pos := lv.fn.Nname.Pos()
 	if v != nil {
 		pos = v.Pos
 	}
 
 	s := "live at "
 	if v == nil {
-		s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
+		s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn))
 	} else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
 		fn := sym.Fn.Name
 		if pos := strings.Index(fn, "."); pos >= 0 {
@@ -1002,10 +903,10 @@
 		}
 	}
 
-	Warnl(pos, s)
+	base.WarnfAt(pos, s)
 }
 
-func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
+func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
 	if live.IsEmpty() {
 		return printed
 	}
@@ -1022,14 +923,14 @@
 		if !live.Get(int32(i)) {
 			continue
 		}
-		fmt.Printf("%s%s", comma, n.Sym.Name)
+		fmt.Printf("%s%s", comma, n.Sym().Name)
 		comma = ","
 	}
 	return true
 }
 
 // printeffect is like printbvec, but for valueEffects.
-func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
+func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
 	if !x {
 		return printed
 	}
@@ -1040,7 +941,7 @@
 	}
 	fmt.Printf("%s=", name)
 	if x {
-		fmt.Printf("%s", lv.vars[pos].Sym.Name)
+		fmt.Printf("%s", lv.vars[pos].Sym().Name)
 	}
 
 	return true
@@ -1049,8 +950,8 @@
 // Prints the computed liveness information and inputs, for debugging.
 // This format synthesizes the information used during the multiple passes
 // into a single presentation.
-func (lv *Liveness) printDebug() {
-	fmt.Printf("liveness: %s\n", lv.fn.funcname())
+func (lv *liveness) printDebug() {
+	fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
 
 	for i, b := range lv.f.Blocks {
 		if i > 0 {
@@ -1088,7 +989,7 @@
 
 		if b == lv.f.Entry {
 			live := lv.stackMaps[0]
-			fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
+			fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos()))
 			fmt.Printf("\tlive=")
 			printed = false
 			for j, n := range lv.vars {
@@ -1105,7 +1006,7 @@
 		}
 
 		for _, v := range b.Values {
-			fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
+			fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString())
 
 			pcdata := lv.livenessMap.Get(v)
 
@@ -1121,7 +1022,7 @@
 				fmt.Printf("\tlive=")
 				printed = false
 				if pcdata.StackMapValid() {
-					live := lv.stackMaps[pcdata.stackMapIndex]
+					live := lv.stackMaps[pcdata.StackMapIndex]
 					for j, n := range lv.vars {
 						if !live.Get(int32(j)) {
 							continue
@@ -1136,7 +1037,7 @@
 				fmt.Printf("\n")
 			}
 
-			if pcdata.isUnsafePoint {
+			if pcdata.IsUnsafePoint {
 				fmt.Printf("\tunsafe-point\n")
 			}
 		}
@@ -1158,15 +1059,15 @@
 // first word dumped is the total number of bitmaps. The second word is the
 // length of the bitmaps. All bitmaps are assumed to be of equal length. The
 // remaining bytes are the raw bitmaps.
-func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
+func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
 	// Size args bitmaps to be just large enough to hold the largest pointer.
 	// First, find the largest Xoffset node we care about.
-	// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
-	var maxArgNode *Node
+	// (Nodes without pointers aren't in lv.vars; see shouldTrack.)
+	var maxArgNode *ir.Name
 	for _, n := range lv.vars {
-		switch n.Class() {
-		case PPARAM, PPARAMOUT:
-			if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset {
+		switch n.Class {
+		case ir.PPARAM, ir.PPARAMOUT:
+			if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
 				maxArgNode = n
 			}
 		}
@@ -1174,7 +1075,7 @@
 	// Next, find the offset of the largest pointer in the largest node.
 	var maxArgs int64
 	if maxArgNode != nil {
-		maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type)
+		maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type())
 	}
 
 	// Size locals bitmaps to be stkptrsize sized.
@@ -1189,13 +1090,13 @@
 	// Temporary symbols for encoding bitmaps.
 	var argsSymTmp, liveSymTmp obj.LSym
 
-	args := bvalloc(int32(maxArgs / int64(Widthptr)))
-	aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
-	aoff = duint32(&argsSymTmp, aoff, uint32(args.n))          // number of bits in each bitmap
+	args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
+	aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+	aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N))          // number of bits in each bitmap
 
-	locals := bvalloc(int32(maxLocals / int64(Widthptr)))
-	loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
-	loff = duint32(&liveSymTmp, loff, uint32(locals.n))        // number of bits in each bitmap
+	locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
+	loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+	loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N))        // number of bits in each bitmap
 
 	for _, live := range lv.stackMaps {
 		args.Clear()
@@ -1203,8 +1104,8 @@
 
 		lv.pointerMap(live, lv.vars, args, locals)
 
-		aoff = dbvec(&argsSymTmp, aoff, args)
-		loff = dbvec(&liveSymTmp, loff, locals)
+		aoff = objw.BitVec(&argsSymTmp, aoff, args)
+		loff = objw.BitVec(&liveSymTmp, loff, locals)
 	}
 
 	// Give these LSyms content-addressable names,
@@ -1214,7 +1115,7 @@
 	// These symbols will be added to Ctxt.Data by addGCLocals
 	// after parallel compilation is done.
 	makeSym := func(tmpSym *obj.LSym) *obj.LSym {
-		return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
+		return base.Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
 			lsym.P = tmpSym.P
 			lsym.Set(obj.AttrContentAddressable, true)
 		})
@@ -1222,30 +1123,30 @@
 	return makeSym(&argsSymTmp), makeSym(&liveSymTmp)
 }
 
-// Entry pointer for liveness analysis. Solves for the liveness of
+// Compute is the entry point for liveness analysis. It solves for the liveness of
 // pointer variables in the function and emits a runtime data
 // structure read by the garbage collector.
 // Returns a map from GC safe points to their corresponding stack map index.
-func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
+func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map {
 	// Construct the global liveness state.
-	vars, idx := getvariables(e.curfn)
-	lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
+	vars, idx := getvariables(curfn)
+	lv := newliveness(curfn, f, vars, idx, stkptrsize)
 
 	// Run the dataflow framework.
 	lv.prologue()
 	lv.solve()
 	lv.epilogue()
-	if debuglive > 0 {
+	if base.Flag.Live > 0 {
 		lv.showlive(nil, lv.stackMaps[0])
 		for _, b := range f.Blocks {
 			for _, val := range b.Values {
 				if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
-					lv.showlive(val, lv.stackMaps[idx.stackMapIndex])
+					lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
 				}
 			}
 		}
 	}
-	if debuglive >= 2 {
+	if base.Flag.Live >= 2 {
 		lv.printDebug()
 	}
 
@@ -1254,35 +1155,80 @@
 		cache := f.Cache.Liveness.(*livenessFuncCache)
 		if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices.
 			for i := range lv.be {
-				lv.be[i] = BlockEffects{}
+				lv.be[i] = blockEffects{}
 			}
 			cache.be = lv.be
 		}
-		if len(lv.livenessMap.vals) < 2000 {
+		if len(lv.livenessMap.Vals) < 2000 {
 			cache.livenessMap = lv.livenessMap
 		}
 	}
 
 	// Emit the live pointer map data structures
-	ls := e.curfn.Func.lsym
+	ls := curfn.LSym
 	fninfo := ls.Func()
 	fninfo.GCArgs, fninfo.GCLocals = lv.emit()
 
 	p := pp.Prog(obj.AFUNCDATA)
-	Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
+	p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps)
 	p.To.Type = obj.TYPE_MEM
 	p.To.Name = obj.NAME_EXTERN
 	p.To.Sym = fninfo.GCArgs
 
 	p = pp.Prog(obj.AFUNCDATA)
-	Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
+	p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps)
 	p.To.Type = obj.TYPE_MEM
 	p.To.Name = obj.NAME_EXTERN
 	p.To.Sym = fninfo.GCLocals
 
+	if x := lv.emitStackObjects(); x != nil {
+		p := pp.Prog(obj.AFUNCDATA)
+		p.From.SetConst(objabi.FUNCDATA_StackObjects)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = x
+	}
+
 	return lv.livenessMap
 }
 
+func (lv *liveness) emitStackObjects() *obj.LSym {
+	var vars []*ir.Name
+	for _, n := range lv.fn.Dcl {
+		if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap {
+			vars = append(vars, n)
+		}
+	}
+	if len(vars) == 0 {
+		return nil
+	}
+
+	// Sort variables from lowest to highest address.
+	sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() })
+
+	// Populate the stack object data.
+	// Format must match runtime/stack.go:stackObjectRecord.
+	x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj")
+	lv.fn.LSym.Func().StackObjects = x
+	off := 0
+	off = objw.Uintptr(x, off, uint64(len(vars)))
+	for _, v := range vars {
+		// Note: arguments and return values have a non-negative frame offset,
+		// in which case the offset is relative to argp.
+		// Locals have a negative frame offset, in which case the offset is relative to varp.
+		off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
+		off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0)
+	}
+
+	if base.Flag.Live != 0 {
+		for _, v := range vars {
+			base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type())
+		}
+	}
+
+	return x
+}
+
 // isfat reports whether a variable of type t needs multiple assignments to initialize.
 // For example:
 //
@@ -1298,17 +1244,17 @@
 // to fully initialize t.
 func isfat(t *types.Type) bool {
 	if t != nil {
-		switch t.Etype {
-		case TSLICE, TSTRING,
-			TINTER: // maybe remove later
+		switch t.Kind() {
+		case types.TSLICE, types.TSTRING,
+			types.TINTER: // maybe remove later
 			return true
-		case TARRAY:
+		case types.TARRAY:
 			// Array of 1 element, check if element is fat
 			if t.NumElem() == 1 {
 				return isfat(t.Elem())
 			}
 			return true
-		case TSTRUCT:
+		case types.TSTRUCT:
 			// Struct with 1 field, check if field is fat
 			if t.NumFields() == 1 {
 				return isfat(t.Field(0).Type)
@@ -1319,3 +1265,34 @@
 
 	return false
 }
+
+func WriteFuncMap(fn *ir.Func) {
+	if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
+		return
+	}
+	types.CalcSize(fn.Type())
+	lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
+	nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize))
+	bv := bitvec.New(int32(nptr) * 2)
+	nbitmap := 1
+	if fn.Type().NumResults() > 0 {
+		nbitmap = 2
+	}
+	off := objw.Uint32(lsym, 0, uint32(nbitmap))
+	off = objw.Uint32(lsym, off, uint32(bv.N))
+
+	if ir.IsMethod(fn) {
+		typebits.Set(fn.Type().Recvs(), 0, bv)
+	}
+	if fn.Type().NumParams() > 0 {
+		typebits.Set(fn.Type().Params(), 0, bv)
+	}
+	off = objw.BitVec(lsym, off, bv)
+
+	if fn.Type().NumResults() > 0 {
+		typebits.Set(fn.Type().Results(), 0, bv)
+		off = objw.BitVec(lsym, off, bv)
+	}
+
+	objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
+}
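
The GC bitmaps written by emit and WriteFuncMap above share one encoding:
a uint32 count of bitmaps, a uint32 bit length, then each bitmap packed
into 32-bit words. A minimal self-contained sketch of that layout (this is
not the compiler's code; objw.Uint32 and objw.BitVec write the same bytes
into an obj.LSym):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// appendUint32 plays the role of objw.Uint32: append v in little-endian order.
	func appendUint32(buf []byte, v uint32) []byte {
		var b [4]byte
		binary.LittleEndian.PutUint32(b[:], v)
		return append(buf, b[:]...)
	}

	// encodeStackMaps lays out: number of bitmaps, bits per bitmap, raw bitmaps.
	func encodeStackMaps(nbits int, maps [][]bool) []byte {
		buf := appendUint32(nil, uint32(len(maps)))
		buf = appendUint32(buf, uint32(nbits))
		for _, m := range maps {
			words := make([]uint32, (nbits+31)/32)
			for i, live := range m {
				if live {
					words[i/32] |= 1 << (i % 32) // bit i set => slot i holds a live pointer
				}
			}
			for _, w := range words {
				buf = appendUint32(buf, w)
			}
		}
		return buf
	}

	func main() {
		// Two stack maps over three tracked pointer slots.
		fmt.Printf("%% x: % x\n", encodeStackMaps(3, [][]bool{{true, false, true}, {false, true, false}}))
	}
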
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
index e121c1a..7197617 100644
--- a/src/cmd/compile/internal/logopt/logopt_test.go
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -132,7 +132,7 @@
 	// Check at both 1 and 8-byte alignments.
 	t.Run("Copy", func(t *testing.T) {
 		const copyCode = `package x
-func s128a1(x *[128]int8) [128]int8 { 
+func s128a1(x *[128]int8) [128]int8 {
 	return *x
 }
 func s127a1(x *[127]int8) [127]int8 {
@@ -219,7 +219,7 @@
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from \u0026y.b (address-of)"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
-			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from ~R0 = \u003cN\u003e (assign-pair)"},`+
+			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow:      from ~R0 = \u0026y.b (assign-pair)"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow:    flow: ~r2 = ~R0:"},`+
 			`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow:      from return (*int)(~R0) (return)"}]}`)
 	})
diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go
index be40c16..5991635 100644
--- a/src/cmd/compile/internal/mips/galign.go
+++ b/src/cmd/compile/internal/mips/galign.go
@@ -5,13 +5,13 @@
 package mips
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/mips"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &mips.Linkmips
 	if objabi.GOARCH == "mipsle" {
 		arch.LinkArch = &mips.Linkmipsle
@@ -22,7 +22,7 @@
 	arch.ZeroRange = zerorange
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
-	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
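
The galign.go files now hand each backend a *ssagen.ArchInfo to fill in
instead of a *gc.Arch. A self-contained analogue of that function-table
pattern (the struct and names below are modeled on the diff, not the real
compiler types):

	package main

	import "fmt"

	// archInfo models ssagen.ArchInfo: a table of hooks that the portable
	// code generator calls into for one target architecture.
	type archInfo struct {
		name        string
		ssaGenValue func(op string)
	}

	// initMIPS models mips.Init: the backend fills in its own hooks.
	func initMIPS(a *archInfo) {
		a.name = "mips"
		a.ssaGenValue = func(op string) { fmt.Println("mips lowering for", op) }
	}

	func main() {
		var arch archInfo
		initMIPS(&arch)
		arch.ssaGenValue("OpMIPSADD")
	}
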
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
index 5e86772..1a51252 100644
--- a/src/cmd/compile/internal/mips/ggen.go
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -5,20 +5,22 @@
 package mips
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
 )
 
 // TODO(mips): implement DUFFZERO
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 
 	if cnt == 0 {
 		return p
 	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
 		}
 	} else {
 		//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
@@ -28,22 +30,22 @@
 		//	MOVW	R0, (Widthptr)r1
 		//	ADD 	$Widthptr, r1
 		//	BNE		r1, r2, loop
-		p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
 		p.Reg = mips.REGSP
-		p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
 		p.Reg = mips.REGRT1
-		p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
+		p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
 		p1 := p
-		p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
-		p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+		p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
 		p.Reg = mips.REGRT2
-		gc.Patch(p, p1)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(mips.ANOR)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = mips.REG_R0
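
zerorange above picks its strategy by size: counts under four pointer
words get unrolled MOVW stores, anything larger gets the ADD/MOVW/ADD/BNE
loop shown in the comment. A sketch of just that decision (threshold taken
from the backend above; the strategy strings are descriptive only):

	package main

	import "fmt"

	const ptrSize = 4 // 32-bit mips

	func zeroStrategy(cnt int64) string {
		switch {
		case cnt == 0:
			return "nothing to emit"
		case cnt < 4*ptrSize:
			return "unrolled MOVW stores, one per pointer word"
		default:
			return "loop: ADD/MOVW/ADD/BNE until the end address is reached"
		}
	}

	func main() {
		for _, cnt := range []int64{0, 8, 64} {
			fmt.Printf("cnt=%-3d %s\n", cnt, zeroStrategy(cnt))
		}
	}
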
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index 9d11c6b..f1cdbd3 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -7,9 +7,11 @@
 import (
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
@@ -75,7 +77,7 @@
 	panic("bad store type")
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpMIPSMOVWreg:
 		t := v.Type
@@ -121,7 +123,7 @@
 		}
 		r := v.Reg()
 		p := s.Prog(loadByType(v.Type, r))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 		if isHILO(r) {
@@ -151,7 +153,7 @@
 		p := s.Prog(storeByType(v.Type, r))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpMIPSADD,
 		ssa.OpMIPSSUB,
 		ssa.OpMIPSAND,
@@ -286,10 +288,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVW $off(SP), R
 			wantreg = "SP"
@@ -310,7 +312,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpMIPSMOVBstore,
@@ -323,7 +325,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpMIPSMOVBstorezero,
 		ssa.OpMIPSMOVHstorezero,
 		ssa.OpMIPSMOVWstorezero:
@@ -332,7 +334,7 @@
 		p.From.Reg = mips.REGZERO
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpMIPSMOVBreg,
 		ssa.OpMIPSMOVBUreg,
 		ssa.OpMIPSMOVHreg,
@@ -425,7 +427,7 @@
 		p4.From.Reg = v.Args[1].Reg()
 		p4.Reg = mips.REG_R1
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p2)
+		p4.To.SetTarget(p2)
 	case ssa.OpMIPSLoweredMove:
 		// SUBU	$4, R1
 		// MOVW	4(R1), Rtmp
@@ -478,7 +480,7 @@
 		p6.From.Reg = v.Args[2].Reg()
 		p6.Reg = mips.REG_R1
 		p6.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p6, p2)
+		p6.To.SetTarget(p2)
 	case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
 		s.Call(v)
 	case ssa.OpMIPSLoweredWB:
@@ -490,13 +492,13 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(8) // space used in callee args area by assembly stubs
 	case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
 		s.UseArgs(12) // space used in callee args area by assembly stubs
 	case ssa.OpMIPSLoweredAtomicLoad8,
 		ssa.OpMIPSLoweredAtomicLoad32:
@@ -575,7 +577,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 		s.Prog(mips.ASYNC)
 	case ssa.OpMIPSLoweredAtomicAdd:
@@ -611,7 +613,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 		s.Prog(mips.ASYNC)
 
@@ -655,7 +657,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 		s.Prog(mips.ASYNC)
 
@@ -699,7 +701,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 		s.Prog(mips.ASYNC)
 
@@ -748,26 +750,26 @@
 		p5.From.Type = obj.TYPE_REG
 		p5.From.Reg = v.Reg0()
 		p5.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p5, p1)
+		p5.To.SetTarget(p1)
 
 		s.Prog(mips.ASYNC)
 
 		p6 := s.Prog(obj.ANOP)
-		gc.Patch(p2, p6)
+		p2.To.SetTarget(p6)
 
 	case ssa.OpMIPSLoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(mips.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = mips.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpMIPSFPFlagTrue,
 		ssa.OpMIPSFPFlagFalse:
@@ -791,12 +793,12 @@
 
 	case ssa.OpMIPSLoweredGetClosurePtr:
 		// Closure pointer is R22 (mips.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpMIPSLoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(mips.AMOVW)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -824,13 +826,13 @@
 	ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockDefer:
 		// defer returns in R1:
@@ -841,11 +843,11 @@
 		p.From.Reg = mips.REGZERO
 		p.Reg = mips.REG_R1
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
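
A change that recurs throughout these backends replaces gc.Patch(p, target)
with p.To.SetTarget(target): a branch records its destination as a direct
pointer to another Prog. A toy analogue of that linking (not the obj
package's real types):

	package main

	import "fmt"

	// prog stands in for obj.Prog; target stands in for the branch
	// destination held in p.To.
	type prog struct {
		as     string
		target *prog
	}

	// setTarget mirrors p.To.SetTarget: point the branch straight at its
	// destination instruction rather than patching it in a later pass.
	func (p *prog) setTarget(t *prog) { p.target = t }

	func main() {
		loopHead := &prog{as: "MOVW"}
		branch := &prog{as: "BNE"}
		branch.setTarget(loopHead) // analogous to p4.To.SetTarget(p2) above
		fmt.Println(branch.as, "branches to", branch.target.as)
	}
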
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
index 90c381a..fc0a342 100644
--- a/src/cmd/compile/internal/mips64/galign.go
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -5,13 +5,13 @@
 package mips64
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/mips"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &mips.Linkmips64
 	if objabi.GOARCH == "mips64le" {
 		arch.LinkArch = &mips.Linkmips64le
@@ -23,7 +23,7 @@
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
 
-	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
index 04e7a66..37bb871 100644
--- a/src/cmd/compile/internal/mips64/ggen.go
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -5,26 +5,28 @@
 package mips64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthptr) {
-		p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+	} else if cnt <= int64(128*types.PtrSize) {
+		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
 		p.Reg = mips.REGSP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
 	} else {
 		//	ADDV	$(8+frame+lo-8), SP, r1
 		//	ADDV	$cnt, r1, r2
@@ -32,22 +34,22 @@
 		//	MOVV	R0, (Widthptr)r1
 		//	ADDV	$Widthptr, r1
 		//	BNE		r1, r2, loop
-		p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
 		p.Reg = mips.REGSP
-		p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
 		p.Reg = mips.REGRT1
-		p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
+		p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
 		p1 := p
-		p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
-		p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
 		p.Reg = mips.REGRT2
-		gc.Patch(p, p1)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(mips.ANOR)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = mips.REG_R0
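
The DUFFZERO path above computes its entry offset as 8 * (128 - cnt/PtrSize):
duffzero on mips64 is a Duff's device of 128 pointer-sized store steps, and
(assuming 8 bytes of instruction encoding per step, as the formula implies)
jumping further in leaves exactly cnt/8 stores to execute. A worked example:

	package main

	import "fmt"

	const ptrSize = 8 // mips64

	// duffzeroOffset reproduces the offset arithmetic from zerorange above.
	func duffzeroOffset(cnt int64) int64 {
		return 8 * (128 - cnt/ptrSize)
	}

	func main() {
		// Zeroing 64 bytes needs 64/8 = 8 stores, so enter the 128-step
		// device 120 steps in: 8*120 = 960 bytes past its start.
		fmt.Println(duffzeroOffset(64)) // 960
	}
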
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index 2727c4d..14cf7af 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -7,9 +7,11 @@
 import (
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/mips"
@@ -83,7 +85,7 @@
 	panic("bad store type")
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
 		if v.Type.IsMemory() {
@@ -124,7 +126,7 @@
 		}
 		r := v.Reg()
 		p := s.Prog(loadByType(v.Type, r))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 		if isHILO(r) {
@@ -154,7 +156,7 @@
 		p := s.Prog(storeByType(v.Type, r))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpMIPS64ADDV,
 		ssa.OpMIPS64SUBV,
 		ssa.OpMIPS64AND,
@@ -260,10 +262,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVV $off(SP), R
 			wantreg = "SP"
@@ -286,7 +288,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpMIPS64MOVBstore,
@@ -300,7 +302,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpMIPS64MOVBstorezero,
 		ssa.OpMIPS64MOVHstorezero,
 		ssa.OpMIPS64MOVWstorezero,
@@ -310,7 +312,7 @@
 		p.From.Reg = mips.REGZERO
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpMIPS64MOVBreg,
 		ssa.OpMIPS64MOVBUreg,
 		ssa.OpMIPS64MOVHreg,
@@ -381,7 +383,7 @@
 		p = s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.OpMIPS64LoweredZero:
 		// SUBV	$8, R1
@@ -426,12 +428,12 @@
 		p4.From.Reg = v.Args[1].Reg()
 		p4.Reg = mips.REG_R1
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p2)
+		p4.To.SetTarget(p2)
 	case ssa.OpMIPS64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 	case ssa.OpMIPS64LoweredMove:
 		// SUBV	$8, R1
@@ -488,7 +490,7 @@
 		p6.From.Reg = v.Args[2].Reg()
 		p6.Reg = mips.REG_R1
 		p6.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p6, p2)
+		p6.To.SetTarget(p2)
 	case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
 		s.Call(v)
 	case ssa.OpMIPS64LoweredWB:
@@ -500,7 +502,7 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(16) // space used in callee args area by assembly stubs
 	case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
 		as := mips.AMOVV
@@ -577,7 +579,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 		s.Prog(mips.ASYNC)
 	case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
 		// SYNC
@@ -614,7 +616,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 		s.Prog(mips.ASYNC)
 		p4 := s.Prog(mips.AADDVU)
 		p4.From.Type = obj.TYPE_REG
@@ -657,7 +659,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 		s.Prog(mips.ASYNC)
 		p4 := s.Prog(mips.AADDVU)
 		p4.From.Type = obj.TYPE_CONST
@@ -710,22 +712,22 @@
 		p5.From.Type = obj.TYPE_REG
 		p5.From.Reg = v.Reg0()
 		p5.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p5, p1)
+		p5.To.SetTarget(p1)
 		p6 := s.Prog(mips.ASYNC)
-		gc.Patch(p2, p6)
+		p2.To.SetTarget(p6)
 	case ssa.OpMIPS64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(mips.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = mips.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpMIPS64FPFlagTrue,
 		ssa.OpMIPS64FPFlagFalse:
@@ -749,15 +751,15 @@
 		p3.To.Type = obj.TYPE_REG
 		p3.To.Reg = v.Reg()
 		p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
-		gc.Patch(p2, p4)
+		p2.To.SetTarget(p4)
 	case ssa.OpMIPS64LoweredGetClosurePtr:
 		// Closure pointer is R22 (mips.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpMIPS64LoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(mips.AMOVV)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -785,13 +787,13 @@
 	ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockDefer:
 		// defer returns in R1:
@@ -802,11 +804,11 @@
 		p.From.Reg = mips.REGZERO
 		p.Reg = mips.REG_R1
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
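
The atomic lowerings in this file all share one shape: SYNC, LL to load,
compute, SC to store conditionally, and a branch back if the store failed.
A Go-level analogue of that retry loop, written with compare-and-swap since
LL/SC is not expressible directly in Go:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// atomicAdd retries until its update lands, mirroring the emitted
	// load-linked/add/store-conditional/branch-on-failure sequence.
	func atomicAdd(p *int32, delta int32) int32 {
		for {
			old := atomic.LoadInt32(p)
			if atomic.CompareAndSwapInt32(p, old, old+delta) {
				return old + delta
			}
		}
	}

	func main() {
		var n int32
		fmt.Println(atomicAdd(&n, 5)) // 5
	}
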
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
new file mode 100644
index 0000000..747c30e
--- /dev/null
+++ b/src/cmd/compile/internal/noder/import.go
@@ -0,0 +1,480 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	pathpkg "path"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/syntax"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/archive"
+	"cmd/internal/bio"
+	"cmd/internal/goobj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+func isDriveLetter(b byte) bool {
+	return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
+}
+
+// islocalname reports whether the path is a local name: one that begins with /, ./, or ../ (or a Windows drive-letter path).
+func islocalname(name string) bool {
+	return strings.HasPrefix(name, "/") ||
+		runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
+		strings.HasPrefix(name, "./") || name == "." ||
+		strings.HasPrefix(name, "../") || name == ".."
+}
+
+func openPackage(path string) (*os.File, error) {
+	if islocalname(path) {
+		if base.Flag.NoLocalImports {
+			return nil, errors.New("local imports disallowed")
+		}
+
+		if base.Flag.Cfg.PackageFile != nil {
+			return os.Open(base.Flag.Cfg.PackageFile[path])
+		}
+
+		// Try .a before .o. This is important when building libraries:
+		// if there is an array.o inside the array.a library, we
+		// want to find all of array.a, not just array.o.
+		if file, err := os.Open(fmt.Sprintf("%s.a", path)); err == nil {
+			return file, nil
+		}
+		if file, err := os.Open(fmt.Sprintf("%s.o", path)); err == nil {
+			return file, nil
+		}
+		return nil, errors.New("file not found")
+	}
+
+	// local imports should be canonicalized already.
+	// don't want to see "encoding/../encoding/base64"
+	// as different from "encoding/base64".
+	if q := pathpkg.Clean(path); q != path {
+		return nil, fmt.Errorf("non-canonical import path %q (should be %q)", path, q)
+	}
+
+	if base.Flag.Cfg.PackageFile != nil {
+		return os.Open(base.Flag.Cfg.PackageFile[path])
+	}
+
+	for _, dir := range base.Flag.Cfg.ImportDirs {
+		if file, err := os.Open(fmt.Sprintf("%s/%s.a", dir, path)); err == nil {
+			return file, nil
+		}
+		if file, err := os.Open(fmt.Sprintf("%s/%s.o", dir, path)); err == nil {
+			return file, nil
+		}
+	}
+
+	if objabi.GOROOT != "" {
+		suffix := ""
+		if base.Flag.InstallSuffix != "" {
+			suffix = "_" + base.Flag.InstallSuffix
+		} else if base.Flag.Race {
+			suffix = "_race"
+		} else if base.Flag.MSan {
+			suffix = "_msan"
+		}
+
+		if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffix, path)); err == nil {
+			return file, nil
+		}
+		if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffix, path)); err == nil {
+			return file, nil
+		}
+	}
+	return nil, errors.New("file not found")
+}
+
+// myheight tracks the local package's height based on packages
+// imported so far.
+var myheight int
+
+// resolveImportPath resolves an import path as it appears in a Go
+// source file to the package's full path.
+func resolveImportPath(path string) (string, error) {
+	// The package name main is no longer reserved,
+	// but we reserve the import path "main" to identify
+	// the main package, just as we reserve the import
+	// path "math" to identify the standard math package.
+	if path == "main" {
+		return "", errors.New("cannot import \"main\"")
+	}
+
+	if base.Ctxt.Pkgpath != "" && path == base.Ctxt.Pkgpath {
+		return "", fmt.Errorf("import %q while compiling that package (import cycle)", path)
+	}
+
+	if mapped, ok := base.Flag.Cfg.ImportMap[path]; ok {
+		path = mapped
+	}
+
+	if islocalname(path) {
+		if path[0] == '/' {
+			return "", errors.New("import path cannot be absolute path")
+		}
+
+		prefix := base.Flag.D
+		if prefix == "" {
+			// Questionable, but when -D isn't specified, historically we
+			// resolve local import paths relative to the compiler's
+			// current directory, not the respective source
+			// file's directory.
+			prefix = base.Ctxt.Pathname
+		}
+		path = pathpkg.Join(prefix, path)
+
+		if err := checkImportPath(path, true); err != nil {
+			return "", err
+		}
+	}
+
+	return path, nil
+}
+
+// TODO(mdempsky): Return an error instead.
+func importfile(decl *syntax.ImportDecl) *types.Pkg {
+	if decl.Path.Kind != syntax.StringLit {
+		base.Errorf("import path must be a string")
+		return nil
+	}
+
+	path, err := strconv.Unquote(decl.Path.Value)
+	if err != nil {
+		base.Errorf("import path must be a string")
+		return nil
+	}
+
+	if err := checkImportPath(path, false); err != nil {
+		base.Errorf("%s", err.Error())
+		return nil
+	}
+
+	path, err = resolveImportPath(path)
+	if err != nil {
+		base.Errorf("%s", err)
+		return nil
+	}
+
+	importpkg := types.NewPkg(path, "")
+	if importpkg.Direct {
+		return importpkg // already fully loaded
+	}
+	importpkg.Direct = true
+	typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg)
+
+	if path == "unsafe" {
+		return importpkg // initialized with universe
+	}
+
+	f, err := openPackage(path)
+	if err != nil {
+		base.Errorf("could not import %q: %v", path, err)
+		base.ErrorExit()
+	}
+	imp := bio.NewReader(f)
+	defer imp.Close()
+	file := f.Name()
+
+	// check object header
+	p, err := imp.ReadString('\n')
+	if err != nil {
+		base.Errorf("import %s: reading input: %v", file, err)
+		base.ErrorExit()
+	}
+
+	if p == "!<arch>\n" { // package archive
+		// package export block should be first
+		sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
+		if sz <= 0 {
+			base.Errorf("import %s: not a package file", file)
+			base.ErrorExit()
+		}
+		p, err = imp.ReadString('\n')
+		if err != nil {
+			base.Errorf("import %s: reading input: %v", file, err)
+			base.ErrorExit()
+		}
+	}
+
+	if !strings.HasPrefix(p, "go object ") {
+		base.Errorf("import %s: not a go object file: %s", file, p)
+		base.ErrorExit()
+	}
+	q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
+	if p[10:] != q {
+		base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q)
+		base.ErrorExit()
+	}
+
+	// process header lines
+	for {
+		p, err = imp.ReadString('\n')
+		if err != nil {
+			base.Errorf("import %s: reading input: %v", file, err)
+			base.ErrorExit()
+		}
+		if p == "\n" {
+			break // header ends with blank line
+		}
+	}
+
+	// Expect $$B\n to signal binary import format.
+
+	// look for $$
+	var c byte
+	for {
+		c, err = imp.ReadByte()
+		if err != nil {
+			break
+		}
+		if c == '$' {
+			c, err = imp.ReadByte()
+			if c == '$' || err != nil {
+				break
+			}
+		}
+	}
+
+	// get character after $$
+	if err == nil {
+		c, _ = imp.ReadByte()
+	}
+
+	var fingerprint goobj.FingerprintType
+	switch c {
+	case '\n':
+		base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path)
+		return nil
+
+	case 'B':
+		if base.Debug.Export != 0 {
+			fmt.Printf("importing %s (%s)\n", path, file)
+		}
+		imp.ReadByte() // skip \n after $$B
+
+		c, err = imp.ReadByte()
+		if err != nil {
+			base.Errorf("import %s: reading input: %v", file, err)
+			base.ErrorExit()
+		}
+
+		// Indexed format is distinguished by an 'i' byte,
+		// whereas previous export formats started with 'c', 'd', or 'v'.
+		if c != 'i' {
+			base.Errorf("import %s: unexpected package format byte: %v", file, c)
+			base.ErrorExit()
+		}
+		fingerprint = typecheck.ReadImports(importpkg, imp)
+
+	default:
+		base.Errorf("no import in %q", path)
+		base.ErrorExit()
+	}
+
+	// assume files move (get installed) so don't record the full path
+	if base.Flag.Cfg.PackageFile != nil {
+		// If using a packageFile map, assume path can be recorded directly.
+		base.Ctxt.AddImport(path, fingerprint)
+	} else {
+		// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+		base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
+	}
+
+	if importpkg.Height >= myheight {
+		myheight = importpkg.Height + 1
+	}
+
+	return importpkg
+}
+
+// The linker uses the magic symbol prefixes "go." and "type."
+// Avoid potential confusion between import paths and symbols
+// by rejecting these reserved imports for now. Also, people
+// "can do weird things in GOPATH and we'd prefer they didn't
+// do _that_ weird thing" (per rsc). See also #4257.
+var reservedimports = []string{
+	"go",
+	"type",
+}
+
+func checkImportPath(path string, allowSpace bool) error {
+	if path == "" {
+		return errors.New("import path is empty")
+	}
+
+	if strings.Contains(path, "\x00") {
+		return errors.New("import path contains NUL")
+	}
+
+	for _, ri := range reservedimports {
+		if path == ri {
+			return fmt.Errorf("import path %q is reserved and cannot be used", path)
+		}
+	}
+
+	for _, r := range path {
+		switch {
+		case r == utf8.RuneError:
+			return fmt.Errorf("import path contains invalid UTF-8 sequence: %q", path)
+		case r < 0x20 || r == 0x7f:
+			return fmt.Errorf("import path contains control character: %q", path)
+		case r == '\\':
+			return fmt.Errorf("import path contains backslash; use slash: %q", path)
+		case !allowSpace && unicode.IsSpace(r):
+			return fmt.Errorf("import path contains space character: %q", path)
+		case strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r):
+			return fmt.Errorf("import path contains invalid character '%c': %q", r, path)
+		}
+	}
+
+	return nil
+}
+
+func pkgnotused(lineno src.XPos, path string, name string) {
+	// If the package was imported with a name other than the final
+	// import path element, show it explicitly in the error message.
+	// Note that this handles both renamed imports and imports of
+	// packages containing unconventional package declarations.
+	// Note that this uses / always, even on Windows, because Go import
+	// paths always use forward slashes.
+	elem := path
+	if i := strings.LastIndex(elem, "/"); i >= 0 {
+		elem = elem[i+1:]
+	}
+	if name == "" || elem == name {
+		base.ErrorfAt(lineno, "imported and not used: %q", path)
+	} else {
+		base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
+	}
+}
+
+func mkpackage(pkgname string) {
+	if types.LocalPkg.Name == "" {
+		if pkgname == "_" {
+			base.Errorf("invalid package name _")
+		}
+		types.LocalPkg.Name = pkgname
+	} else {
+		if pkgname != types.LocalPkg.Name {
+			base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
+		}
+	}
+}
+
+func clearImports() {
+	type importedPkg struct {
+		pos  src.XPos
+		path string
+		name string
+	}
+	var unused []importedPkg
+
+	for _, s := range types.LocalPkg.Syms {
+		n := ir.AsNode(s.Def)
+		if n == nil {
+			continue
+		}
+		if n.Op() == ir.OPACK {
+			// throw away top-level package name left over
+			// from previous file.
+			// leave s->block set to cause redeclaration
+			// errors if a conflicting top-level name is
+			// introduced by a different file.
+			p := n.(*ir.PkgName)
+			if !p.Used && base.SyntaxErrors() == 0 {
+				unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
+			}
+			s.Def = nil
+			continue
+		}
+		if types.IsDotAlias(s) {
+			// throw away top-level name left over
+			// from previous import . "x"
+			// We'll report errors after type checking in CheckDotImports.
+			s.Def = nil
+			continue
+		}
+	}
+
+	sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
+	for _, pkg := range unused {
+		pkgnotused(pkg.pos, pkg.path, pkg.name)
+	}
+}
+
+// CheckDotImports reports errors for any unused dot imports.
+func CheckDotImports() {
+	for _, pack := range dotImports {
+		if !pack.Used {
+			base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
+		}
+	}
+
+	// No longer needed; release memory.
+	dotImports = nil
+	typecheck.DotImportRefs = nil
+}
+
+// dotImports tracks all PkgNames that have been dot-imported.
+var dotImports []*ir.PkgName
+
+// importDot finds all the exported symbols in the package referenced
+// by pack and makes them available in the current package.
+func importDot(pack *ir.PkgName) {
+	if typecheck.DotImportRefs == nil {
+		typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName)
+	}
+
+	opkg := pack.Pkg
+	for _, s := range opkg.Syms {
+		if s.Def == nil {
+			if _, ok := typecheck.DeclImporter[s]; !ok {
+				continue
+			}
+		}
+		if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+			continue
+		}
+		s1 := typecheck.Lookup(s.Name)
+		if s1.Def != nil {
+			pkgerror := fmt.Sprintf("during import %q", opkg.Path)
+			typecheck.Redeclared(base.Pos, s1, pkgerror)
+			continue
+		}
+
+		id := ir.NewIdent(src.NoXPos, s)
+		typecheck.DotImportRefs[id] = pack
+		s1.Def = id
+		s1.Block = 1
+	}
+
+	dotImports = append(dotImports, pack)
+}
+
+// importName is like oldname,
+// but it reports an error if sym is from another package and not exported.
+func importName(sym *types.Sym) ir.Node {
+	n := oldname(sym)
+	if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
+		n.SetDiag(true)
+		base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
+	}
+	return n
+}
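
For reference, islocalname near the top of this file classifies an import
path as local purely by prefix. A standalone copy of that rule (dropping
only the Windows drive-letter clause) with a few example paths:

	package main

	import (
		"fmt"
		"strings"
	)

	// islocalname is copied from noder/import.go above, minus the
	// Windows drive-letter case.
	func islocalname(name string) bool {
		return strings.HasPrefix(name, "/") ||
			strings.HasPrefix(name, "./") || name == "." ||
			strings.HasPrefix(name, "../") || name == ".."
	}

	func main() {
		for _, p := range []string{"./util", "../x", ".", "fmt", "/abs/pkg"} {
			fmt.Printf("%-12q local=%v\n", p, islocalname(p))
		}
	}
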
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/noder/lex.go
similarity index 67%
rename from src/cmd/compile/internal/gc/lex.go
rename to src/cmd/compile/internal/noder/lex.go
index 7cce371..cdca9e5 100644
--- a/src/cmd/compile/internal/gc/lex.go
+++ b/src/cmd/compile/internal/noder/lex.go
@@ -2,24 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package noder
 
 import (
-	"cmd/compile/internal/syntax"
-	"cmd/internal/objabi"
-	"cmd/internal/src"
 	"fmt"
 	"strings"
+
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/syntax"
+	"cmd/internal/objabi"
 )
 
-// lineno is the source position at the start of the most recently lexed token.
-// TODO(gri) rename and eventually remove
-var lineno src.XPos
-
-func makePos(base *src.PosBase, line, col uint) src.XPos {
-	return Ctxt.PosTable.XPos(src.MakePos(base, line, col))
-}
-
 func isSpace(c rune) bool {
 	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
 }
@@ -28,78 +21,52 @@
 	return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
 }
 
-type PragmaFlag int16
-
 const (
-	// Func pragmas.
-	Nointerface    PragmaFlag = 1 << iota
-	Noescape                  // func parameters don't escape
-	Norace                    // func must not have race detector annotations
-	Nosplit                   // func should not execute on separate stack
-	Noinline                  // func should not be inlined
-	NoCheckPtr                // func should not be instrumented by checkptr
-	CgoUnsafeArgs             // treat a pointer to one arg as a pointer to them all
-	UintptrEscapes            // pointers converted to uintptr escape
+	funcPragmas = ir.Nointerface |
+		ir.Noescape |
+		ir.Norace |
+		ir.Nosplit |
+		ir.Noinline |
+		ir.NoCheckPtr |
+		ir.RegisterParams | // TODO remove after register abi is working
+		ir.CgoUnsafeArgs |
+		ir.UintptrEscapes |
+		ir.Systemstack |
+		ir.Nowritebarrier |
+		ir.Nowritebarrierrec |
+		ir.Yeswritebarrierrec
 
-	// Runtime-only func pragmas.
-	// See ../../../../runtime/README.md for detailed descriptions.
-	Systemstack        // func must run on system stack
-	Nowritebarrier     // emit compiler error instead of write barrier
-	Nowritebarrierrec  // error on write barrier in this or recursive callees
-	Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
-
-	// Runtime and cgo type pragmas
-	NotInHeap // values of this type must not be heap allocated
-
-	// Go command pragmas
-	GoBuildPragma
+	typePragmas = ir.NotInHeap
 )
 
-const (
-	FuncPragmas = Nointerface |
-		Noescape |
-		Norace |
-		Nosplit |
-		Noinline |
-		NoCheckPtr |
-		CgoUnsafeArgs |
-		UintptrEscapes |
-		Systemstack |
-		Nowritebarrier |
-		Nowritebarrierrec |
-		Yeswritebarrierrec
-
-	TypePragmas = NotInHeap
-)
-
-func pragmaFlag(verb string) PragmaFlag {
+func pragmaFlag(verb string) ir.PragmaFlag {
 	switch verb {
 	case "go:build":
-		return GoBuildPragma
+		return ir.GoBuildPragma
 	case "go:nointerface":
 		if objabi.Fieldtrack_enabled != 0 {
-			return Nointerface
+			return ir.Nointerface
 		}
 	case "go:noescape":
-		return Noescape
+		return ir.Noescape
 	case "go:norace":
-		return Norace
+		return ir.Norace
 	case "go:nosplit":
-		return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
+		return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
 	case "go:noinline":
-		return Noinline
+		return ir.Noinline
 	case "go:nocheckptr":
-		return NoCheckPtr
+		return ir.NoCheckPtr
 	case "go:systemstack":
-		return Systemstack
+		return ir.Systemstack
 	case "go:nowritebarrier":
-		return Nowritebarrier
+		return ir.Nowritebarrier
 	case "go:nowritebarrierrec":
-		return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
+		return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
 	case "go:yeswritebarrierrec":
-		return Yeswritebarrierrec
+		return ir.Yeswritebarrierrec
 	case "go:cgo_unsafe_args":
-		return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
+		return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
 	case "go:uintptrescapes":
 		// For the next function declared in the file
 		// any uintptr arguments may be pointer values
@@ -112,9 +79,11 @@
 		// call. The conversion to uintptr must appear
 		// in the argument list.
 		// Used in syscall/dll_windows.go.
-		return UintptrEscapes
+		return ir.UintptrEscapes
+	case "go:registerparams": // TODO remove after register abi is working
+		return ir.RegisterParams
 	case "go:notinheap":
-		return NotInHeap
+		return ir.NotInHeap
 	}
 	return 0
 }
diff --git a/src/cmd/compile/internal/gc/lex_test.go b/src/cmd/compile/internal/noder/lex_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/lex_test.go
rename to src/cmd/compile/internal/noder/lex_test.go
index b2081a1..85a3f06 100644
--- a/src/cmd/compile/internal/gc/lex_test.go
+++ b/src/cmd/compile/internal/noder/lex_test.go
@@ -2,13 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package noder
 
 import (
-	"cmd/compile/internal/syntax"
 	"reflect"
 	"runtime"
 	"testing"
+
+	"cmd/compile/internal/syntax"
 )
 
 func eq(a, b []string) bool {
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
new file mode 100644
index 0000000..5b5b09c
--- /dev/null
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -0,0 +1,1858 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+	"fmt"
+	"go/constant"
+	"go/token"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/dwarfgen"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/syntax"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+func LoadPackage(filenames []string) {
+	base.Timer.Start("fe", "parse")
+
+	mode := syntax.CheckBranches
+
+	// Limit the number of simultaneously open files.
+	sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
+
+	noders := make([]*noder, len(filenames))
+	for i, filename := range filenames {
+		p := noder{
+			err:         make(chan syntax.Error),
+			trackScopes: base.Flag.Dwarf,
+		}
+		noders[i] = &p
+
+		filename := filename
+		go func() {
+			sem <- struct{}{}
+			defer func() { <-sem }()
+			defer close(p.err)
+			fbase := syntax.NewFileBase(filename)
+
+			f, err := os.Open(filename)
+			if err != nil {
+				p.error(syntax.Error{Msg: err.Error()})
+				return
+			}
+			defer f.Close()
+
+			p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, mode) // errors are tracked via p.error
+		}()
+	}
+
+	var lines uint
+	for _, p := range noders {
+		for e := range p.err {
+			p.errorAt(e.Pos, "%s", e.Msg)
+		}
+		lines += p.file.Lines
+	}
+	base.Timer.AddEvent(int64(lines), "lines")
+
+	for _, p := range noders {
+		p.node()
+		p.file = nil // release memory
+	}
+
+	if base.SyntaxErrors() != 0 {
+		base.ErrorExit()
+	}
+	types.CheckDclstack()
+
+	for _, p := range noders {
+		p.processPragmas()
+	}
+
+	// Typecheck.
+	types.LocalPkg.Height = myheight
+	typecheck.DeclareUniverse()
+	typecheck.TypecheckAllowed = true
+
+	// Process top-level declarations in phases.
+
+	// Phase 1: const, type, and names and types of funcs.
+	//   This will gather all the information about types
+	//   and methods but doesn't depend on any of it.
+	//
+	//   We also defer type alias declarations until phase 2
+	//   to avoid cycles like #18640.
+	//   TODO(gri) Remove this again once we have a fix for #25838.
+
+	// Don't use range--typecheck can add closures to Target.Decls.
+	base.Timer.Start("fe", "typecheck", "top1")
+	for i := 0; i < len(typecheck.Target.Decls); i++ {
+		n := typecheck.Target.Decls[i]
+		if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) {
+			typecheck.Target.Decls[i] = typecheck.Stmt(n)
+		}
+	}
+
+	// Phase 2: Variable assignments.
+	//   To check interface assignments, depends on phase 1.
+
+	// Don't use range--typecheck can add closures to Target.Decls.
+	base.Timer.Start("fe", "typecheck", "top2")
+	for i := 0; i < len(typecheck.Target.Decls); i++ {
+		n := typecheck.Target.Decls[i]
+		if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() {
+			typecheck.Target.Decls[i] = typecheck.Stmt(n)
+		}
+	}
+
+	// Phase 3: Type check function bodies.
+	// Don't use range--typecheck can add closures to Target.Decls.
+	base.Timer.Start("fe", "typecheck", "func")
+	var fcount int64
+	for i := 0; i < len(typecheck.Target.Decls); i++ {
+		n := typecheck.Target.Decls[i]
+		if n.Op() == ir.ODCLFUNC {
+			if base.Flag.W > 1 {
+				s := fmt.Sprintf("\nbefore typecheck %v", n)
+				ir.Dump(s, n)
+			}
+			typecheck.FuncBody(n.(*ir.Func))
+			if base.Flag.W > 1 {
+				s := fmt.Sprintf("\nafter typecheck %v", n)
+				ir.Dump(s, n)
+			}
+			fcount++
+		}
+	}
+
+	// Phase 4: Check external declarations.
+	// TODO(mdempsky): This should be handled when type checking their
+	// corresponding ODCL nodes.
+	base.Timer.Start("fe", "typecheck", "externdcls")
+	for i, n := range typecheck.Target.Externs {
+		if n.Op() == ir.ONAME {
+			typecheck.Target.Externs[i] = typecheck.Expr(typecheck.Target.Externs[i])
+		}
+	}
+
+	// Phase 5: With all user code type-checked, it's now safe to
+	// verify map keys and unused dot imports.
+	typecheck.CheckMapKeys()
+	CheckDotImports()
+	base.ExitIfErrors()
+}
+
+func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) {
+	base.ErrorfAt(p.makeXPos(pos), format, args...)
+}
+
+// TODO(gri) Can we eliminate fileh in favor of absFilename?
+func fileh(name string) string {
+	return objabi.AbsFile("", name, base.Flag.TrimPath)
+}
+
+func absFilename(name string) string {
+	return objabi.AbsFile(base.Ctxt.Pathname, name, base.Flag.TrimPath)
+}
+
+// noder transforms package syntax's AST into a Node tree.
+type noder struct {
+	posMap
+
+	file           *syntax.File
+	linknames      []linkname
+	pragcgobuf     [][]string
+	err            chan syntax.Error
+	importedUnsafe bool
+	importedEmbed  bool
+	trackScopes    bool
+
+	funcState *funcState
+}
+
+// funcState tracks all per-function state to make handling nested
+// functions easier.
+type funcState struct {
+	// scopeVars is a stack tracking the number of variables declared in
+	// the current function at the moment each open scope was opened.
+	scopeVars []int
+	marker    dwarfgen.ScopeMarker
+
+	lastCloseScopePos syntax.Pos
+}
+
+func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
+	outerFuncState := p.funcState
+	p.funcState = new(funcState)
+	typecheck.StartFuncBody(fn)
+
+	if block != nil {
+		body := p.stmts(block.List)
+		if body == nil {
+			body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
+		}
+		fn.Body = body
+
+		base.Pos = p.makeXPos(block.Rbrace)
+		fn.Endlineno = base.Pos
+	}
+
+	typecheck.FinishFuncBody()
+	p.funcState.marker.WriteTo(fn)
+	p.funcState = outerFuncState
+}
+
+func (p *noder) openScope(pos syntax.Pos) {
+	fs := p.funcState
+	types.Markdcl()
+
+	if p.trackScopes {
+		fs.scopeVars = append(fs.scopeVars, len(ir.CurFunc.Dcl))
+		fs.marker.Push(p.makeXPos(pos))
+	}
+}
+
+func (p *noder) closeScope(pos syntax.Pos) {
+	fs := p.funcState
+	fs.lastCloseScopePos = pos
+	types.Popdcl()
+
+	if p.trackScopes {
+		scopeVars := fs.scopeVars[len(fs.scopeVars)-1]
+		fs.scopeVars = fs.scopeVars[:len(fs.scopeVars)-1]
+		if scopeVars == len(ir.CurFunc.Dcl) {
+			// no variables were declared in this scope, so we can retract it.
+			fs.marker.Unpush()
+		} else {
+			fs.marker.Pop(p.makeXPos(pos))
+		}
+	}
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
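+// For example, in
+//
+//	if x := f(); x > 0 { y() }
+//
+// the implicit scope introduced for x closes at the same "}" as the
+// explicit block around y().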
+func (p *noder) closeAnotherScope() {
+	p.closeScope(p.funcState.lastCloseScopePos)
+}
+
+// linkname records a //go:linkname directive.
+type linkname struct {
+	pos    syntax.Pos
+	local  string
+	remote string
+}
+
+func (p *noder) node() {
+	p.importedUnsafe = false
+	p.importedEmbed = false
+
+	p.setlineno(p.file.PkgName)
+	mkpackage(p.file.PkgName.Value)
+
+	if pragma, ok := p.file.Pragma.(*pragmas); ok {
+		pragma.Flag &^= ir.GoBuildPragma
+		p.checkUnused(pragma)
+	}
+
+	typecheck.Target.Decls = append(typecheck.Target.Decls, p.decls(p.file.DeclList)...)
+
+	base.Pos = src.NoXPos
+	clearImports()
+}
+
+func (p *noder) processPragmas() {
+	for _, l := range p.linknames {
+		if !p.importedUnsafe {
+			p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+			continue
+		}
+		n := ir.AsNode(typecheck.Lookup(l.local).Def)
+		if n == nil || n.Op() != ir.ONAME {
+			// TODO(mdempsky): Change to p.errorAt before Go 1.17 release.
+			// base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)")
+			continue
+		}
+		if n.Sym().Linkname != "" {
+			p.errorAt(l.pos, "duplicate //go:linkname for %s", l.local)
+			continue
+		}
+		n.Sym().Linkname = l.remote
+	}
+	typecheck.Target.CgoPragmas = append(typecheck.Target.CgoPragmas, p.pragcgobuf...)
+}
+
+func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
+	var cs constState
+
+	for _, decl := range decls {
+		p.setlineno(decl)
+		switch decl := decl.(type) {
+		case *syntax.ImportDecl:
+			p.importDecl(decl)
+
+		case *syntax.VarDecl:
+			l = append(l, p.varDecl(decl)...)
+
+		case *syntax.ConstDecl:
+			l = append(l, p.constDecl(decl, &cs)...)
+
+		case *syntax.TypeDecl:
+			l = append(l, p.typeDecl(decl))
+
+		case *syntax.FuncDecl:
+			l = append(l, p.funcDecl(decl))
+
+		default:
+			panic("unhandled Decl")
+		}
+	}
+
+	return
+}
+
+func (p *noder) importDecl(imp *syntax.ImportDecl) {
+	if imp.Path == nil || imp.Path.Bad {
+		return // avoid follow-on errors if there was a syntax error
+	}
+
+	if pragma, ok := imp.Pragma.(*pragmas); ok {
+		p.checkUnused(pragma)
+	}
+
+	ipkg := importfile(imp)
+	if ipkg == nil {
+		if base.Errors() == 0 {
+			base.Fatalf("phase error in import")
+		}
+		return
+	}
+
+	if ipkg == ir.Pkgs.Unsafe {
+		p.importedUnsafe = true
+	}
+	if ipkg.Path == "embed" {
+		p.importedEmbed = true
+	}
+
+	var my *types.Sym
+	if imp.LocalPkgName != nil {
+		my = p.name(imp.LocalPkgName)
+	} else {
+		my = typecheck.Lookup(ipkg.Name)
+	}
+
+	pack := ir.NewPkgName(p.pos(imp), my, ipkg)
+
+	switch my.Name {
+	case ".":
+		importDot(pack)
+		return
+	case "init":
+		base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
+		return
+	case "_":
+		return
+	}
+	if my.Def != nil {
+		typecheck.Redeclared(pack.Pos(), my, "as imported package name")
+	}
+	my.Def = pack
+	my.Lastlineno = pack.Pos()
+	my.Block = 1 // at top level
+}
+
+func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
+	names := p.declNames(ir.ONAME, decl.NameList)
+	typ := p.typeExprOrNil(decl.Type)
+	exprs := p.exprList(decl.Values)
+
+	if pragma, ok := decl.Pragma.(*pragmas); ok {
+		varEmbed(p.makeXPos, names[0], decl, pragma, p.importedEmbed)
+		p.checkUnused(pragma)
+	}
+
+	var init []ir.Node
+	p.setlineno(decl)
+
+	if len(names) > 1 && len(exprs) == 1 {
+		as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, exprs)
+		for _, v := range names {
+			as2.Lhs.Append(v)
+			typecheck.Declare(v, typecheck.DeclContext)
+			v.Ntype = typ
+			v.Defn = as2
+			if ir.CurFunc != nil {
+				init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
+			}
+		}
+
+		return append(init, as2)
+	}
+
+	for i, v := range names {
+		var e ir.Node
+		if i < len(exprs) {
+			e = exprs[i]
+		}
+
+		typecheck.Declare(v, typecheck.DeclContext)
+		v.Ntype = typ
+
+		if ir.CurFunc != nil {
+			init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
+		}
+		as := ir.NewAssignStmt(base.Pos, v, e)
+		init = append(init, as)
+		if e != nil || ir.CurFunc == nil {
+			v.Defn = as
+		}
+	}
+
+	if len(exprs) != 0 && len(names) != len(exprs) {
+		base.Errorf("assignment mismatch: %d variables but %d values", len(names), len(exprs))
+	}
+
+	return init
+}
+
+// constState tracks state between constant specifiers within a
+// declaration group. This state is kept separate from noder so nested
+// constant declarations are handled correctly (e.g., issue 15550).
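+// A nested declaration arises, for instance, from a const group inside
+// a function literal that itself appears in another constant
+// declaration's initializer.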
+type constState struct {
+	group  *syntax.Group
+	typ    ir.Ntype
+	values []ir.Node
+	iota   int64
+}
+
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
+	if decl.Group == nil || decl.Group != cs.group {
+		*cs = constState{
+			group: decl.Group,
+		}
+	}
+
+	if pragma, ok := decl.Pragma.(*pragmas); ok {
+		p.checkUnused(pragma)
+	}
+
+	names := p.declNames(ir.OLITERAL, decl.NameList)
+	typ := p.typeExprOrNil(decl.Type)
+
+	var values []ir.Node
+	if decl.Values != nil {
+		values = p.exprList(decl.Values)
+		cs.typ, cs.values = typ, values
+	} else {
+		if typ != nil {
+			base.Errorf("const declaration cannot have type without expression")
+		}
+		typ, values = cs.typ, cs.values
+	}
+
+	nn := make([]ir.Node, 0, len(names))
+	for i, n := range names {
+		if i >= len(values) {
+			base.Errorf("missing value in const declaration")
+			break
+		}
+		v := values[i]
+		if decl.Values == nil {
+			v = ir.DeepCopy(n.Pos(), v)
+		}
+		typecheck.Declare(n, typecheck.DeclContext)
+
+		n.Ntype = typ
+		n.Defn = v
+		n.SetIota(cs.iota)
+
+		nn = append(nn, ir.NewDecl(p.pos(decl), ir.ODCLCONST, n))
+	}
+
+	if len(values) > len(names) {
+		base.Errorf("extra expression in const declaration")
+	}
+
+	cs.iota++
+
+	return nn
+}
+
+func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
+	n := p.declName(ir.OTYPE, decl.Name)
+	typecheck.Declare(n, typecheck.DeclContext)
+
+	// decl.Type may be nil but in that case we got a syntax error during parsing
+	typ := p.typeExprOrNil(decl.Type)
+
+	n.Ntype = typ
+	n.SetAlias(decl.Alias)
+	if pragma, ok := decl.Pragma.(*pragmas); ok {
+		if !decl.Alias {
+			n.SetPragma(pragma.Flag & typePragmas)
+			pragma.Flag &^= typePragmas
+		}
+		p.checkUnused(pragma)
+	}
+
+	nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n)
+	if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) {
+		base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
+	}
+	return nod
+}
+
+func (p *noder) declNames(op ir.Op, names []*syntax.Name) []*ir.Name {
+	nodes := make([]*ir.Name, 0, len(names))
+	for _, name := range names {
+		nodes = append(nodes, p.declName(op, name))
+	}
+	return nodes
+}
+
+func (p *noder) declName(op ir.Op, name *syntax.Name) *ir.Name {
+	return ir.NewDeclNameAt(p.pos(name), op, p.name(name))
+}
+
+func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
+	name := p.name(fun.Name)
+	t := p.signature(fun.Recv, fun.Type)
+	f := ir.NewFunc(p.pos(fun))
+
+	if fun.Recv == nil {
+		if name.Name == "init" {
+			name = renameinit()
+			if len(t.Params) > 0 || len(t.Results) > 0 {
+				base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
+			}
+			typecheck.Target.Inits = append(typecheck.Target.Inits, f)
+		}
+
+		if types.LocalPkg.Name == "main" && name.Name == "main" {
+			if len(t.Params) > 0 || len(t.Results) > 0 {
+				base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
+			}
+		}
+	} else {
+		f.Shortname = name
+		name = ir.BlankNode.Sym() // filled in by tcFunc
+	}
+
+	f.Nname = ir.NewNameAt(p.pos(fun.Name), name)
+	f.Nname.Func = f
+	f.Nname.Defn = f
+	f.Nname.Ntype = t
+
+	if pragma, ok := fun.Pragma.(*pragmas); ok {
+		f.Pragma = pragma.Flag & funcPragmas
+		if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
+			base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
+		}
+		pragma.Flag &^= funcPragmas
+		p.checkUnused(pragma)
+	}
+
+	if fun.Recv == nil {
+		typecheck.Declare(f.Nname, ir.PFUNC)
+	}
+
+	p.funcBody(f, fun.Body)
+
+	if fun.Body != nil {
+		if f.Pragma&ir.Noescape != 0 {
+			base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
+		}
+	} else {
+		if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") {
+			// Linknamed functions are allowed to have no body. Hopefully
+			// the linkname target has a body. See issue 23311.
+			isLinknamed := false
+			for _, n := range p.linknames {
+				if ir.FuncName(f) == n.local {
+					isLinknamed = true
+					break
+				}
+			}
+			if !isLinknamed {
+				base.ErrorfAt(f.Pos(), "missing function body")
+			}
+		}
+	}
+
+	return f
+}
+
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.FuncType {
+	var rcvr *ir.Field
+	if recv != nil {
+		rcvr = p.param(recv, false, false)
+	}
+	return ir.NewFuncType(p.pos(typ), rcvr,
+		p.params(typ.ParamList, true),
+		p.params(typ.ResultList, false))
+}
+
+func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
+	nodes := make([]*ir.Field, 0, len(params))
+	for i, param := range params {
+		p.setlineno(param)
+		nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
+	}
+	return nodes
+}
+
+func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Field {
+	var name *types.Sym
+	if param.Name != nil {
+		name = p.name(param.Name)
+	}
+
+	typ := p.typeExpr(param.Type)
+	n := ir.NewField(p.pos(param), name, typ, nil)
+
+	// rewrite ...T parameter
+	if typ, ok := typ.(*ir.SliceType); ok && typ.DDD {
+		if !dddOk {
+			// We mark these as syntax errors to get automatic elimination
+			// of multiple such errors per line (see ErrorfAt in subr.go).
+			base.Errorf("syntax error: cannot use ... in receiver or result parameter list")
+		} else if !final {
+			if param.Name == nil {
+				base.Errorf("syntax error: cannot use ... with non-final parameter")
+			} else {
+				p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
+			}
+		}
+		typ.DDD = false
+		n.IsDDD = true
+	}
+
+	return n
+}
+
+func (p *noder) exprList(expr syntax.Expr) []ir.Node {
+	switch expr := expr.(type) {
+	case nil:
+		return nil
+	case *syntax.ListExpr:
+		return p.exprs(expr.ElemList)
+	default:
+		return []ir.Node{p.expr(expr)}
+	}
+}
+
+func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
+	nodes := make([]ir.Node, 0, len(exprs))
+	for _, expr := range exprs {
+		nodes = append(nodes, p.expr(expr))
+	}
+	return nodes
+}
+
+func (p *noder) expr(expr syntax.Expr) ir.Node {
+	p.setlineno(expr)
+	switch expr := expr.(type) {
+	case nil, *syntax.BadExpr:
+		return nil
+	case *syntax.Name:
+		return p.mkname(expr)
+	case *syntax.BasicLit:
+		n := ir.NewBasicLit(p.pos(expr), p.basicLit(expr))
+		if expr.Kind == syntax.RuneLit {
+			n.SetType(types.UntypedRune)
+		}
+		n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
+		return n
+	case *syntax.CompositeLit:
+		n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, p.typeExpr(expr.Type), nil)
+		l := p.exprs(expr.ElemList)
+		for i, e := range l {
+			l[i] = p.wrapname(expr.ElemList[i], e)
+		}
+		n.List = l
+		base.Pos = p.makeXPos(expr.Rbrace)
+		return n
+	case *syntax.KeyValueExpr:
+		// use position of expr.Key rather than of expr (which has position of ':')
+		return ir.NewKeyExpr(p.pos(expr.Key), p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+	case *syntax.FuncLit:
+		return p.funcLit(expr)
+	case *syntax.ParenExpr:
+		return ir.NewParenExpr(p.pos(expr), p.expr(expr.X))
+	case *syntax.SelectorExpr:
+		// parser.new_dotname
+		obj := p.expr(expr.X)
+		if obj.Op() == ir.OPACK {
+			pack := obj.(*ir.PkgName)
+			pack.Used = true
+			return importName(pack.Pkg.Lookup(expr.Sel.Value))
+		}
+		n := ir.NewSelectorExpr(base.Pos, ir.OXDOT, obj, p.name(expr.Sel))
+		n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
+		return n
+	case *syntax.IndexExpr:
+		return ir.NewIndexExpr(p.pos(expr), p.expr(expr.X), p.expr(expr.Index))
+	case *syntax.SliceExpr:
+		op := ir.OSLICE
+		if expr.Full {
+			op = ir.OSLICE3
+		}
+		x := p.expr(expr.X)
+		var index [3]ir.Node
+		for i, n := range &expr.Index {
+			if n != nil {
+				index[i] = p.expr(n)
+			}
+		}
+		return ir.NewSliceExpr(p.pos(expr), op, x, index[0], index[1], index[2])
+	case *syntax.AssertExpr:
+		return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type))
+	case *syntax.Operation:
+		if expr.Op == syntax.Add && expr.Y != nil {
+			return p.sum(expr)
+		}
+		x := p.expr(expr.X)
+		if expr.Y == nil {
+			pos, op := p.pos(expr), p.unOp(expr.Op)
+			switch op {
+			case ir.OADDR:
+				return typecheck.NodAddrAt(pos, x)
+			case ir.ODEREF:
+				return ir.NewStarExpr(pos, x)
+			}
+			return ir.NewUnaryExpr(pos, op, x)
+		}
+
+		pos, op, y := p.pos(expr), p.binOp(expr.Op), p.expr(expr.Y)
+		switch op {
+		case ir.OANDAND, ir.OOROR:
+			return ir.NewLogicalExpr(pos, op, x, y)
+		}
+		return ir.NewBinaryExpr(pos, op, x, y)
+	case *syntax.CallExpr:
+		n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), p.exprs(expr.ArgList))
+		n.IsDDD = expr.HasDots
+		return n
+
+	case *syntax.ArrayType:
+		var len ir.Node
+		if expr.Len != nil {
+			len = p.expr(expr.Len)
+		}
+		return ir.NewArrayType(p.pos(expr), len, p.typeExpr(expr.Elem))
+	case *syntax.SliceType:
+		return ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
+	case *syntax.DotsType:
+		t := ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
+		t.DDD = true
+		return t
+	case *syntax.StructType:
+		return p.structType(expr)
+	case *syntax.InterfaceType:
+		return p.interfaceType(expr)
+	case *syntax.FuncType:
+		return p.signature(nil, expr)
+	case *syntax.MapType:
+		return ir.NewMapType(p.pos(expr),
+			p.typeExpr(expr.Key), p.typeExpr(expr.Value))
+	case *syntax.ChanType:
+		return ir.NewChanType(p.pos(expr),
+			p.typeExpr(expr.Elem), p.chanDir(expr.Dir))
+
+	case *syntax.TypeSwitchGuard:
+		var tag *ir.Ident
+		if expr.Lhs != nil {
+			tag = ir.NewIdent(p.pos(expr.Lhs), p.name(expr.Lhs))
+			if ir.IsBlank(tag) {
+				base.Errorf("invalid variable name %v in type switch", tag)
+			}
+		}
+		return ir.NewTypeSwitchGuard(p.pos(expr), tag, p.expr(expr.X))
+	}
+	panic("unhandled Expr")
+}
+
+// sum efficiently handles very large summation expressions (such as
+// in issue #16394). In particular, it avoids left recursion and
+// collapses string literals.
+func (p *noder) sum(x syntax.Expr) ir.Node {
+	// While we need to handle long sums with asymptotic
+	// efficiency, the vast majority of sums are very small: ~95%
+	// have only 2 or 3 operands, and ~99% of string literals are
+	// never concatenated.
+
+	adds := make([]*syntax.Operation, 0, 2)
+	for {
+		add, ok := x.(*syntax.Operation)
+		if !ok || add.Op != syntax.Add || add.Y == nil {
+			break
+		}
+		adds = append(adds, add)
+		x = add.X
+	}
+
+	// nstr is the current rightmost string literal in the
+	// summation (if any), and chunks holds its accumulated
+	// substrings.
+	//
+	// Consider the expression x + "a" + "b" + "c" + y. When we
+	// reach the string literal "a", we assign nstr to point to
+	// its corresponding Node and initialize chunks to {"a"}.
+	// Visiting the subsequent string literals "b" and "c", we
+	// simply append their values to chunks. Finally, when we
+	// reach the non-constant operand y, we'll join chunks to form
+	// "abc" and reassign the "a" string literal's value.
+	//
+	// N.B., we need to be careful about named string constants
+	// (indicated by Sym != nil) because 1) we can't modify their
+	// value, as doing so would affect other uses of the string
+	// constant, and 2) they may have types, which we need to
+	// handle correctly. For now, we avoid these problems by
+	// treating named string constants the same as non-constant
+	// operands.
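+	//
+	// For instance (hypothetical), in x + a + "b" where a is a named
+	// string constant, a's Sym is non-nil, so a is treated like a
+	// non-constant operand and "b" starts a fresh chunk.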
+	var nstr ir.Node
+	chunks := make([]string, 0, 1)
+
+	n := p.expr(x)
+	if ir.IsConst(n, constant.String) && n.Sym() == nil {
+		nstr = n
+		chunks = append(chunks, ir.StringVal(nstr))
+	}
+
+	for i := len(adds) - 1; i >= 0; i-- {
+		add := adds[i]
+
+		r := p.expr(add.Y)
+		if ir.IsConst(r, constant.String) && r.Sym() == nil {
+			if nstr != nil {
+				// Collapse r into nstr instead of adding to n.
+				chunks = append(chunks, ir.StringVal(r))
+				continue
+			}
+
+			nstr = r
+			chunks = append(chunks, ir.StringVal(nstr))
+		} else {
+			if len(chunks) > 1 {
+				nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
+			}
+			nstr = nil
+			chunks = chunks[:0]
+		}
+		n = ir.NewBinaryExpr(p.pos(add), ir.OADD, n, r)
+	}
+	if len(chunks) > 1 {
+		nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
+	}
+
+	return n
+}
+
+func (p *noder) typeExpr(typ syntax.Expr) ir.Ntype {
+	// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
+	n := p.expr(typ)
+	if n == nil {
+		return nil
+	}
+	if _, ok := n.(ir.Ntype); !ok {
+		ir.Dump("NOT NTYPE", n)
+	}
+	return n.(ir.Ntype)
+}
+
+func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Ntype {
+	if typ != nil {
+		return p.typeExpr(typ)
+	}
+	return nil
+}
+
+func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
+	switch dir {
+	case 0:
+		return types.Cboth
+	case syntax.SendOnly:
+		return types.Csend
+	case syntax.RecvOnly:
+		return types.Crecv
+	}
+	panic("unhandled ChanDir")
+}
+
+func (p *noder) structType(expr *syntax.StructType) ir.Node {
+	l := make([]*ir.Field, 0, len(expr.FieldList))
+	for i, field := range expr.FieldList {
+		p.setlineno(field)
+		var n *ir.Field
+		if field.Name == nil {
+			n = p.embedded(field.Type)
+		} else {
+			n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
+		}
+		if i < len(expr.TagList) && expr.TagList[i] != nil {
+			n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
+		}
+		l = append(l, n)
+	}
+
+	p.setlineno(expr)
+	return ir.NewStructType(p.pos(expr), l)
+}
+
+func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
+	l := make([]*ir.Field, 0, len(expr.MethodList))
+	for _, method := range expr.MethodList {
+		p.setlineno(method)
+		var n *ir.Field
+		if method.Name == nil {
+			n = ir.NewField(p.pos(method), nil, importName(p.packname(method.Type)).(ir.Ntype), nil)
+		} else {
+			mname := p.name(method.Name)
+			if mname.IsBlank() {
+				base.Errorf("methods must have a unique non-blank name")
+				continue
+			}
+			sig := p.typeExpr(method.Type).(*ir.FuncType)
+			sig.Recv = fakeRecv()
+			n = ir.NewField(p.pos(method), mname, sig, nil)
+		}
+		l = append(l, n)
+	}
+
+	return ir.NewInterfaceType(p.pos(expr), l)
+}
+
+func (p *noder) packname(expr syntax.Expr) *types.Sym {
+	switch expr := expr.(type) {
+	case *syntax.Name:
+		name := p.name(expr)
+		if n := oldname(name); n.Name() != nil && n.Name().PkgName != nil {
+			n.Name().PkgName.Used = true
+		}
+		return name
+	case *syntax.SelectorExpr:
+		name := p.name(expr.X.(*syntax.Name))
+		def := ir.AsNode(name.Def)
+		if def == nil {
+			base.Errorf("undefined: %v", name)
+			return name
+		}
+		var pkg *types.Pkg
+		if def.Op() != ir.OPACK {
+			base.Errorf("%v is not a package", name)
+			pkg = types.LocalPkg
+		} else {
+			def := def.(*ir.PkgName)
+			def.Used = true
+			pkg = def.Pkg
+		}
+		return pkg.Lookup(expr.Sel.Value)
+	}
+	panic(fmt.Sprintf("unexpected packname: %#v", expr))
+}
+
+func (p *noder) embedded(typ syntax.Expr) *ir.Field {
+	op, isStar := typ.(*syntax.Operation)
+	if isStar {
+		if op.Op != syntax.Mul || op.Y != nil {
+			panic("unexpected Operation")
+		}
+		typ = op.X
+	}
+
+	sym := p.packname(typ)
+	n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+	n.Embedded = true
+
+	if isStar {
+		n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype)
+	}
+	return n
+}
+
+func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
+	return p.stmtsFall(stmts, false)
+}
+
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
+	var nodes []ir.Node
+	for i, stmt := range stmts {
+		s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
+		if s == nil {
+		} else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
+			// Inline non-empty block.
+			// Empty blocks must be preserved for CheckReturn.
+			nodes = append(nodes, s.(*ir.BlockStmt).List...)
+		} else {
+			nodes = append(nodes, s)
+		}
+	}
+	return nodes
+}
+
+func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
+	return p.stmtFall(stmt, false)
+}
+
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
+	p.setlineno(stmt)
+	switch stmt := stmt.(type) {
+	case nil, *syntax.EmptyStmt:
+		return nil
+	case *syntax.LabeledStmt:
+		return p.labeledStmt(stmt, fallOK)
+	case *syntax.BlockStmt:
+		l := p.blockStmt(stmt)
+		if len(l) == 0 {
+			// TODO(mdempsky): Line number?
+			return ir.NewBlockStmt(base.Pos, nil)
+		}
+		return ir.NewBlockStmt(src.NoXPos, l)
+	case *syntax.ExprStmt:
+		return p.wrapname(stmt, p.expr(stmt.X))
+	case *syntax.SendStmt:
+		return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value))
+	case *syntax.DeclStmt:
+		return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList))
+	case *syntax.AssignStmt:
+		if stmt.Rhs == syntax.ImplicitOne {
+			one := constant.MakeInt64(1)
+			pos := p.pos(stmt)
+			n := ir.NewAssignOpStmt(pos, p.binOp(stmt.Op), p.expr(stmt.Lhs), ir.NewBasicLit(pos, one))
+			n.IncDec = true
+			return n
+		}
+
+		if stmt.Op != 0 && stmt.Op != syntax.Def {
+			n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
+			return n
+		}
+
+		rhs := p.exprList(stmt.Rhs)
+		if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
+			n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil)
+			n.Def = stmt.Op == syntax.Def
+			n.Lhs = p.assignList(stmt.Lhs, n, n.Def)
+			n.Rhs = rhs
+			return n
+		}
+
+		n := ir.NewAssignStmt(p.pos(stmt), nil, nil)
+		n.Def = stmt.Op == syntax.Def
+		n.X = p.assignList(stmt.Lhs, n, n.Def)[0]
+		n.Y = rhs[0]
+		return n
+
+	case *syntax.BranchStmt:
+		var op ir.Op
+		switch stmt.Tok {
+		case syntax.Break:
+			op = ir.OBREAK
+		case syntax.Continue:
+			op = ir.OCONTINUE
+		case syntax.Fallthrough:
+			if !fallOK {
+				base.Errorf("fallthrough statement out of place")
+			}
+			op = ir.OFALL
+		case syntax.Goto:
+			op = ir.OGOTO
+		default:
+			panic("unhandled BranchStmt")
+		}
+		var sym *types.Sym
+		if stmt.Label != nil {
+			sym = p.name(stmt.Label)
+		}
+		return ir.NewBranchStmt(p.pos(stmt), op, sym)
+	case *syntax.CallStmt:
+		var op ir.Op
+		switch stmt.Tok {
+		case syntax.Defer:
+			op = ir.ODEFER
+		case syntax.Go:
+			op = ir.OGO
+		default:
+			panic("unhandled CallStmt")
+		}
+		return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call))
+	case *syntax.ReturnStmt:
+		n := ir.NewReturnStmt(p.pos(stmt), p.exprList(stmt.Results))
+		if len(n.Results) == 0 && ir.CurFunc != nil {
+			for _, ln := range ir.CurFunc.Dcl {
+				if ln.Class == ir.PPARAM {
+					continue
+				}
+				if ln.Class != ir.PPARAMOUT {
+					break
+				}
+				if ln.Sym().Def != ln {
+					base.Errorf("%s is shadowed during return", ln.Sym().Name)
+				}
+			}
+		}
+		return n
+	case *syntax.IfStmt:
+		return p.ifStmt(stmt)
+	case *syntax.ForStmt:
+		return p.forStmt(stmt)
+	case *syntax.SwitchStmt:
+		return p.switchStmt(stmt)
+	case *syntax.SelectStmt:
+		return p.selectStmt(stmt)
+	}
+	panic("unhandled Stmt")
+}
+
+func (p *noder) assignList(expr syntax.Expr, defn ir.InitNode, colas bool) []ir.Node {
+	if !colas {
+		return p.exprList(expr)
+	}
+
+	var exprs []syntax.Expr
+	if list, ok := expr.(*syntax.ListExpr); ok {
+		exprs = list.ElemList
+	} else {
+		exprs = []syntax.Expr{expr}
+	}
+
+	res := make([]ir.Node, len(exprs))
+	seen := make(map[*types.Sym]bool, len(exprs))
+
+	newOrErr := false
+	for i, expr := range exprs {
+		p.setlineno(expr)
+		res[i] = ir.BlankNode
+
+		name, ok := expr.(*syntax.Name)
+		if !ok {
+			p.errorAt(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
+			newOrErr = true
+			continue
+		}
+
+		sym := p.name(name)
+		if sym.IsBlank() {
+			continue
+		}
+
+		if seen[sym] {
+			p.errorAt(expr.Pos(), "%v repeated on left side of :=", sym)
+			newOrErr = true
+			continue
+		}
+		seen[sym] = true
+
+		if sym.Block == types.Block {
+			res[i] = oldname(sym)
+			continue
+		}
+
+		newOrErr = true
+		n := typecheck.NewName(sym)
+		typecheck.Declare(n, typecheck.DeclContext)
+		n.Defn = defn
+		defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
+		res[i] = n
+	}
+
+	if !newOrErr {
+		base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
+	}
+	return res
+}
+
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
+	p.openScope(stmt.Pos())
+	nodes := p.stmts(stmt.List)
+	p.closeScope(stmt.Rbrace)
+	return nodes
+}
+
+func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
+	p.openScope(stmt.Pos())
+	init := p.stmt(stmt.Init)
+	n := ir.NewIfStmt(p.pos(stmt), p.expr(stmt.Cond), p.blockStmt(stmt.Then), nil)
+	if init != nil {
+		*n.PtrInit() = []ir.Node{init}
+	}
+	if stmt.Else != nil {
+		e := p.stmt(stmt.Else)
+		if e.Op() == ir.OBLOCK {
+			e := e.(*ir.BlockStmt)
+			n.Else = e.List
+		} else {
+			n.Else = []ir.Node{e}
+		}
+	}
+	p.closeAnotherScope()
+	return n
+}
+
+func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
+	p.openScope(stmt.Pos())
+	if r, ok := stmt.Init.(*syntax.RangeClause); ok {
+		if stmt.Cond != nil || stmt.Post != nil {
+			panic("unexpected RangeClause")
+		}
+
+		n := ir.NewRangeStmt(p.pos(r), nil, nil, p.expr(r.X), nil)
+		if r.Lhs != nil {
+			n.Def = r.Def
+			lhs := p.assignList(r.Lhs, n, n.Def)
+			n.Key = lhs[0]
+			if len(lhs) > 1 {
+				n.Value = lhs[1]
+			}
+		}
+		n.Body = p.blockStmt(stmt.Body)
+		p.closeAnotherScope()
+		return n
+	}
+
+	n := ir.NewForStmt(p.pos(stmt), p.stmt(stmt.Init), p.expr(stmt.Cond), p.stmt(stmt.Post), p.blockStmt(stmt.Body))
+	p.closeAnotherScope()
+	return n
+}
+
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
+	p.openScope(stmt.Pos())
+
+	init := p.stmt(stmt.Init)
+	n := ir.NewSwitchStmt(p.pos(stmt), p.expr(stmt.Tag), nil)
+	if init != nil {
+		*n.PtrInit() = []ir.Node{init}
+	}
+
+	var tswitch *ir.TypeSwitchGuard
+	if l := n.Tag; l != nil && l.Op() == ir.OTYPESW {
+		tswitch = l.(*ir.TypeSwitchGuard)
+	}
+	n.Cases = p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)
+
+	p.closeScope(stmt.Rbrace)
+	return n
+}
+
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []*ir.CaseClause {
+	nodes := make([]*ir.CaseClause, 0, len(clauses))
+	for i, clause := range clauses {
+		p.setlineno(clause)
+		if i > 0 {
+			p.closeScope(clause.Pos())
+		}
+		p.openScope(clause.Pos())
+
+		n := ir.NewCaseStmt(p.pos(clause), p.exprList(clause.Cases), nil)
+		if tswitch != nil && tswitch.Tag != nil {
+			nn := typecheck.NewName(tswitch.Tag.Sym())
+			typecheck.Declare(nn, typecheck.DeclContext)
+			n.Var = nn
+			// keep track of the instances for reporting unused
+			nn.Defn = tswitch
+		}
+
+		// Trim trailing empty statements. We omit them from
+		// the Node AST anyway, and it's easier to identify
+		// out-of-place fallthrough statements without them.
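+		// For example, a case body ending in "fallthrough ;" keeps
+		// fallthrough as its final statement once the trailing empty
+		// statement is dropped.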
+		body := clause.Body
+		for len(body) > 0 {
+			if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok {
+				break
+			}
+			body = body[:len(body)-1]
+		}
+
+		n.Body = p.stmtsFall(body, true)
+		if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL {
+			if tswitch != nil {
+				base.Errorf("cannot fallthrough in type switch")
+			}
+			if i+1 == len(clauses) {
+				base.Errorf("cannot fallthrough final case in switch")
+			}
+		}
+
+		nodes = append(nodes, n)
+	}
+	if len(clauses) > 0 {
+		p.closeScope(rbrace)
+	}
+	return nodes
+}
+
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
+	return ir.NewSelectStmt(p.pos(stmt), p.commClauses(stmt.Body, stmt.Rbrace))
+}
+
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommClause {
+	nodes := make([]*ir.CommClause, len(clauses))
+	for i, clause := range clauses {
+		p.setlineno(clause)
+		if i > 0 {
+			p.closeScope(clause.Pos())
+		}
+		p.openScope(clause.Pos())
+
+		nodes[i] = ir.NewCommStmt(p.pos(clause), p.stmt(clause.Comm), p.stmts(clause.Body))
+	}
+	if len(clauses) > 0 {
+		p.closeScope(rbrace)
+	}
+	return nodes
+}
+
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
+	sym := p.name(label.Label)
+	lhs := ir.NewLabelStmt(p.pos(label), sym)
+
+	var ls ir.Node
+	if label.Stmt != nil { // TODO(mdempsky): Should always be present.
+		ls = p.stmtFall(label.Stmt, fallOK)
+		// Attach label directly to control statement too.
+		if ls != nil {
+			switch ls.Op() {
+			case ir.OFOR:
+				ls := ls.(*ir.ForStmt)
+				ls.Label = sym
+			case ir.ORANGE:
+				ls := ls.(*ir.RangeStmt)
+				ls.Label = sym
+			case ir.OSWITCH:
+				ls := ls.(*ir.SwitchStmt)
+				ls.Label = sym
+			case ir.OSELECT:
+				ls := ls.(*ir.SelectStmt)
+				ls.Label = sym
+			}
+		}
+	}
+
+	l := []ir.Node{lhs}
+	if ls != nil {
+		if ls.Op() == ir.OBLOCK {
+			ls := ls.(*ir.BlockStmt)
+			l = append(l, ls.List...)
+		} else {
+			l = append(l, ls)
+		}
+	}
+	return ir.NewBlockStmt(src.NoXPos, l)
+}
+
+var unOps = [...]ir.Op{
+	syntax.Recv: ir.ORECV,
+	syntax.Mul:  ir.ODEREF,
+	syntax.And:  ir.OADDR,
+
+	syntax.Not: ir.ONOT,
+	syntax.Xor: ir.OBITNOT,
+	syntax.Add: ir.OPLUS,
+	syntax.Sub: ir.ONEG,
+}
+
+func (p *noder) unOp(op syntax.Operator) ir.Op {
+	if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
+		panic("invalid Operator")
+	}
+	return unOps[op]
+}
+
+var binOps = [...]ir.Op{
+	syntax.OrOr:   ir.OOROR,
+	syntax.AndAnd: ir.OANDAND,
+
+	syntax.Eql: ir.OEQ,
+	syntax.Neq: ir.ONE,
+	syntax.Lss: ir.OLT,
+	syntax.Leq: ir.OLE,
+	syntax.Gtr: ir.OGT,
+	syntax.Geq: ir.OGE,
+
+	syntax.Add: ir.OADD,
+	syntax.Sub: ir.OSUB,
+	syntax.Or:  ir.OOR,
+	syntax.Xor: ir.OXOR,
+
+	syntax.Mul:    ir.OMUL,
+	syntax.Div:    ir.ODIV,
+	syntax.Rem:    ir.OMOD,
+	syntax.And:    ir.OAND,
+	syntax.AndNot: ir.OANDNOT,
+	syntax.Shl:    ir.OLSH,
+	syntax.Shr:    ir.ORSH,
+}
+
+func (p *noder) binOp(op syntax.Operator) ir.Op {
+	if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
+		panic("invalid Operator")
+	}
+	return binOps[op]
+}
+
+// checkLangCompat reports an error if the representation of a numeric
+// literal is not compatible with the current language version.
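+// For example, literals such as 1_000, 0b101, 0o17, and 0x1p-1 are
+// only accepted with -lang=go1.13 or later.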
+func checkLangCompat(lit *syntax.BasicLit) {
+	s := lit.Value
+	if len(s) <= 2 || types.AllowsGoVersion(types.LocalPkg, 1, 13) {
+		return
+	}
+	// len(s) > 2
+	if strings.Contains(s, "_") {
+		base.ErrorfVers("go1.13", "underscores in numeric literals")
+		return
+	}
+	if s[0] != '0' {
+		return
+	}
+	radix := s[1]
+	if radix == 'b' || radix == 'B' {
+		base.ErrorfVers("go1.13", "binary literals")
+		return
+	}
+	if radix == 'o' || radix == 'O' {
+		base.ErrorfVers("go1.13", "0o/0O-style octal literals")
+		return
+	}
+	if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
+		base.ErrorfVers("go1.13", "hexadecimal floating-point literals")
+	}
+}
+
+func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value {
+	// We don't use the errors of the conversion routines to determine
+	// if a literal string is valid because the conversion routines may
+	// accept a wider syntax than the language permits. Rely on lit.Bad
+	// instead.
+	if lit.Bad {
+		return constant.MakeUnknown()
+	}
+
+	switch lit.Kind {
+	case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
+		checkLangCompat(lit)
+	}
+
+	v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0)
+	if v.Kind() == constant.Unknown {
+		// TODO(mdempsky): Better error message?
+		p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value)
+	}
+
+	return v
+}
+
+var tokenForLitKind = [...]token.Token{
+	syntax.IntLit:    token.INT,
+	syntax.RuneLit:   token.CHAR,
+	syntax.FloatLit:  token.FLOAT,
+	syntax.ImagLit:   token.IMAG,
+	syntax.StringLit: token.STRING,
+}
+
+func (p *noder) name(name *syntax.Name) *types.Sym {
+	return typecheck.Lookup(name.Value)
+}
+
+func (p *noder) mkname(name *syntax.Name) ir.Node {
+	// TODO(mdempsky): Set line number?
+	return mkname(p.name(name))
+}
+
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
+	// These nodes do not carry line numbers.
+	// Introduce a wrapper node to give them the correct line.
+	switch x.Op() {
+	case ir.OTYPE, ir.OLITERAL:
+		if x.Sym() == nil {
+			break
+		}
+		fallthrough
+	case ir.ONAME, ir.ONONAME, ir.OPACK:
+		p := ir.NewParenExpr(p.pos(n), x)
+		p.SetImplicit(true)
+		return p
+	}
+	return x
+}
+
+func (p *noder) setlineno(n syntax.Node) {
+	if n != nil {
+		base.Pos = p.pos(n)
+	}
+}
+
+// error is called concurrently if files are parsed concurrently.
+func (p *noder) error(err error) {
+	p.err <- err.(syntax.Error)
+}
+
+// pragmas that are allowed in the std lib, but don't have
+// a syntax.Pragma value (see lex.go) associated with them.
+var allowedStdPragmas = map[string]bool{
+	"go:cgo_export_static":  true,
+	"go:cgo_export_dynamic": true,
+	"go:cgo_import_static":  true,
+	"go:cgo_import_dynamic": true,
+	"go:cgo_ldflag":         true,
+	"go:cgo_dynamic_linker": true,
+	"go:embed":              true,
+	"go:generate":           true,
+}
+
+// *pragmas is the value stored in a syntax.Pragma during parsing.
+type pragmas struct {
+	Flag   ir.PragmaFlag // collected bits
+	Pos    []pragmaPos   // position of each individual flag
+	Embeds []pragmaEmbed
+}
+
+type pragmaPos struct {
+	Flag ir.PragmaFlag
+	Pos  syntax.Pos
+}
+
+type pragmaEmbed struct {
+	Pos      syntax.Pos
+	Patterns []string
+}
+
+func (p *noder) checkUnused(pragma *pragmas) {
+	for _, pos := range pragma.Pos {
+		if pos.Flag&pragma.Flag != 0 {
+			p.errorAt(pos.Pos, "misplaced compiler directive")
+		}
+	}
+	if len(pragma.Embeds) > 0 {
+		for _, e := range pragma.Embeds {
+			p.errorAt(e.Pos, "misplaced go:embed directive")
+		}
+	}
+}
+
+func (p *noder) checkUnusedDuringParse(pragma *pragmas) {
+	for _, pos := range pragma.Pos {
+		if pos.Flag&pragma.Flag != 0 {
+			p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
+		}
+	}
+	if len(pragma.Embeds) > 0 {
+		for _, e := range pragma.Embeds {
+			p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
+		}
+	}
+}
+
+// pragma is called concurrently if files are parsed concurrently.
+func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
+	pragma, _ := old.(*pragmas)
+	if pragma == nil {
+		pragma = new(pragmas)
+	}
+
+	if text == "" {
+		// unused pragma; only called with old != nil.
+		p.checkUnusedDuringParse(pragma)
+		return nil
+	}
+
+	if strings.HasPrefix(text, "line ") {
+		// line directives are handled by syntax package
+		panic("unreachable")
+	}
+
+	if !blankLine {
+		// directive must be on line by itself
+		p.error(syntax.Error{Pos: pos, Msg: "misplaced compiler directive"})
+		return pragma
+	}
+
+	switch {
+	case strings.HasPrefix(text, "go:linkname "):
+		f := strings.Fields(text)
+		if !(2 <= len(f) && len(f) <= 3) {
+			p.error(syntax.Error{Pos: pos, Msg: "usage: //go:linkname localname [linkname]"})
+			break
+		}
+		// The second argument is optional. If omitted, we use
+		// the default object symbol name for this and
+		// linkname only serves to mark this symbol as
+		// something that may be referenced via the object
+		// symbol name from another package.
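+		// For example (hypothetical), with -p mypkg and
+		// "//go:linkname f", the default target below is "mypkg.f".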
+		var target string
+		if len(f) == 3 {
+			target = f[2]
+		} else if base.Ctxt.Pkgpath != "" {
+			// Use the default object symbol name if the
+			// user didn't provide one.
+			target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + f[1]
+		} else {
+			p.error(syntax.Error{Pos: pos, Msg: "//go:linkname requires linkname argument or -p compiler flag"})
+			break
+		}
+		p.linknames = append(p.linknames, linkname{pos, f[1], target})
+
+	case text == "go:embed", strings.HasPrefix(text, "go:embed "):
+		args, err := parseGoEmbed(text[len("go:embed"):])
+		if err != nil {
+			p.error(syntax.Error{Pos: pos, Msg: err.Error()})
+		}
+		if len(args) == 0 {
+			p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
+			break
+		}
+		pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args})
+
+	case strings.HasPrefix(text, "go:cgo_import_dynamic "):
+		// This is permitted for general use because Solaris
+		// code relies on it in golang.org/x/sys/unix and others.
+		fields := pragmaFields(text)
+		if len(fields) >= 4 {
+			lib := strings.Trim(fields[3], `"`)
+			if lib != "" && !safeArg(lib) && !isCgoGeneratedFile(pos) {
+				p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("invalid library name %q in cgo_import_dynamic directive", lib)})
+			}
+			p.pragcgo(pos, text)
+			pragma.Flag |= pragmaFlag("go:cgo_import_dynamic")
+			break
+		}
+		fallthrough
+	case strings.HasPrefix(text, "go:cgo_"):
+		// For security, we disallow //go:cgo_* directives other
+		// than cgo_import_dynamic outside cgo-generated files.
+		// Exception: they are allowed in the standard library, for runtime and syscall.
+		if !isCgoGeneratedFile(pos) && !base.Flag.Std {
+			p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
+		}
+		p.pragcgo(pos, text)
+		fallthrough // because of //go:cgo_unsafe_args
+	default:
+		verb := text
+		if i := strings.Index(text, " "); i >= 0 {
+			verb = verb[:i]
+		}
+		flag := pragmaFlag(verb)
+		const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec
+		if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 {
+			p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
+		}
+		if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std {
+			p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
+		}
+		pragma.Flag |= flag
+		pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos})
+	}
+
+	return pragma
+}
+
+// isCgoGeneratedFile reports whether pos is in a file
+// generated by cgo, which is to say a file with name
+// beginning with "_cgo_". Such files are allowed to
+// contain cgo directives, and for security reasons
+// (primarily misuse of linker flags), other files are not.
+// See golang.org/issue/23672.
+func isCgoGeneratedFile(pos syntax.Pos) bool {
+	return strings.HasPrefix(filepath.Base(filepath.Clean(fileh(pos.Base().Filename()))), "_cgo_")
+}
+
+// safeArg reports whether arg is a "safe" command-line argument,
+// meaning that when it appears in a command-line, it probably
+// doesn't have some special meaning other than its own name.
+// This is copied from SafeArg in cmd/go/internal/load/pkg.go.
+func safeArg(name string) bool {
+	if name == "" {
+		return false
+	}
+	c := name[0]
+	return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+}
+
+func mkname(sym *types.Sym) ir.Node {
+	n := oldname(sym)
+	if n.Name() != nil && n.Name().PkgName != nil {
+		n.Name().PkgName.Used = true
+	}
+	return n
+}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// go/build/read.go also processes these strings and contains similar logic.
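+// For example, a hypothetical directive
+//
+//	//go:embed a.txt "b c.txt" `d.txt`
+//
+// yields the patterns a.txt, b c.txt, and d.txt.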
+func parseGoEmbed(args string) ([]string, error) {
+	var list []string
+	for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
+		var path string
+	Switch:
+		switch args[0] {
+		default:
+			i := len(args)
+			for j, c := range args {
+				if unicode.IsSpace(c) {
+					i = j
+					break
+				}
+			}
+			path = args[:i]
+			args = args[i:]
+
+		case '`':
+			i := strings.Index(args[1:], "`")
+			if i < 0 {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+			path = args[1 : 1+i]
+			args = args[1+i+1:]
+
+		case '"':
+			i := 1
+			for ; i < len(args); i++ {
+				if args[i] == '\\' {
+					i++
+					continue
+				}
+				if args[i] == '"' {
+					q, err := strconv.Unquote(args[:i+1])
+					if err != nil {
+						return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+					}
+					path = q
+					args = args[i+1:]
+					break Switch
+				}
+			}
+			if i >= len(args) {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+		}
+
+		if args != "" {
+			r, _ := utf8.DecodeRuneInString(args)
+			if !unicode.IsSpace(r) {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+		}
+		list = append(list, path)
+	}
+	return list, nil
+}
+
+func fakeRecv() *ir.Field {
+	return ir.NewField(base.Pos, nil, nil, types.FakeRecvType())
+}
+
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
+	xtype := p.typeExpr(expr.Type)
+
+	fn := ir.NewFunc(p.pos(expr))
+	fn.SetIsHiddenClosure(ir.CurFunc != nil)
+
+	fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
+	fn.Nname.Func = fn
+	fn.Nname.Ntype = xtype
+	fn.Nname.Defn = fn
+
+	clo := ir.NewClosureExpr(p.pos(expr), fn)
+	fn.OClosure = clo
+
+	p.funcBody(fn, expr.Body)
+
+	ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
+
+	return clo
+}
+
+// A function named init is a special case.
+// It is called by the initialization before main is run.
+// To make it unique within a package and also uncallable,
+// the name, normally "pkg.init", is altered to "pkg.init.0".
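+// For example, the first init func in a package becomes "pkg.init.0",
+// the next "pkg.init.1", and so on.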
+var renameinitgen int
+
+func renameinit() *types.Sym {
+	s := typecheck.LookupNum("init.", renameinitgen)
+	renameinitgen++
+	return s
+}
+
+// oldname returns the Node that declares symbol s in the current scope.
+// If no such Node currently exists, an ONONAME Node is returned instead.
+// Automatically creates a new closure variable if the referenced symbol was
+// declared in a different (containing) function.
+func oldname(s *types.Sym) ir.Node {
+	if s.Pkg != types.LocalPkg {
+		return ir.NewIdent(base.Pos, s)
+	}
+
+	n := ir.AsNode(s.Def)
+	if n == nil {
+		// Maybe a top-level declaration will come along later to
+		// define s. resolve will check s.Def again once all input
+		// source has been processed.
+		return ir.NewIdent(base.Pos, s)
+	}
+
+	if n, ok := n.(*ir.Name); ok {
+		// TODO(rsc): If there is an outer variable x and we
+		// are parsing x := 5 inside the closure, until we get to
+		// the := it looks like a reference to the outer x so we'll
+		// make x a closure variable unnecessarily.
+		return ir.CaptureName(base.Pos, ir.CurFunc, n)
+	}
+
+	return n
+}
+
+func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
+	if pragma.Embeds == nil {
+		return
+	}
+
+	pragmaEmbeds := pragma.Embeds
+	pragma.Embeds = nil
+	pos := makeXPos(pragmaEmbeds[0].Pos)
+
+	if !haveEmbed {
+		base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
+		return
+	}
+	if len(decl.NameList) > 1 {
+		base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
+		return
+	}
+	if decl.Values != nil {
+		base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
+		return
+	}
+	if decl.Type == nil {
+		// Should not happen, since Values == nil now.
+		base.ErrorfAt(pos, "go:embed cannot apply to var without type")
+		return
+	}
+	if typecheck.DeclContext != ir.PEXTERN {
+		base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
+		return
+	}
+
+	var embeds []ir.Embed
+	for _, e := range pragmaEmbeds {
+		embeds = append(embeds, ir.Embed{Pos: makeXPos(e.Pos), Patterns: e.Patterns})
+	}
+	typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
+	name.Embed = &embeds
+}
diff --git a/src/cmd/compile/internal/noder/posmap.go b/src/cmd/compile/internal/noder/posmap.go
new file mode 100644
index 0000000..a6d3e2d
--- /dev/null
+++ b/src/cmd/compile/internal/noder/posmap.go
@@ -0,0 +1,83 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/syntax"
+	"cmd/internal/src"
+)
+
+// A posMap handles mapping from syntax.Pos to src.XPos.
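+// It memoizes each translated *syntax.PosBase and keeps a one-entry
+// cache for the common case of consecutive positions sharing a base.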
+type posMap struct {
+	bases map[*syntax.PosBase]*src.PosBase
+	cache struct {
+		last *syntax.PosBase
+		base *src.PosBase
+	}
+}
+
+type poser interface{ Pos() syntax.Pos }
+type ender interface{ End() syntax.Pos }
+
+func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) }
+func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) }
+
+func (m *posMap) makeXPos(pos syntax.Pos) src.XPos {
+	if !pos.IsKnown() {
+		// TODO(mdempsky): Investigate restoring base.Fatalf.
+		return src.NoXPos
+	}
+
+	posBase := m.makeSrcPosBase(pos.Base())
+	return base.Ctxt.PosTable.XPos(src.MakePos(posBase, pos.Line(), pos.Col()))
+}
+
+// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase.
+func (m *posMap) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
+	// fast path: most likely PosBase hasn't changed
+	if m.cache.last == b0 {
+		return m.cache.base
+	}
+
+	b1, ok := m.bases[b0]
+	if !ok {
+		fn := b0.Filename()
+		if b0.IsFileBase() {
+			b1 = src.NewFileBase(fn, absFilename(fn))
+		} else {
+			// line directive base
+			p0 := b0.Pos()
+			p0b := p0.Base()
+			if p0b == b0 {
+				panic("infinite recursion in makeSrcPosBase")
+			}
+			p1 := src.MakePos(m.makeSrcPosBase(p0b), p0.Line(), p0.Col())
+			b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
+		}
+		if m.bases == nil {
+			m.bases = make(map[*syntax.PosBase]*src.PosBase)
+		}
+		m.bases[b0] = b1
+	}
+
+	// update cache
+	m.cache.last = b0
+	m.cache.base = b1
+
+	return b1
+}
+
+func (m *posMap) join(other *posMap) {
+	if m.bases == nil {
+		m.bases = make(map[*syntax.PosBase]*src.PosBase)
+	}
+	for k, v := range other.bases {
+		if m.bases[k] != nil {
+			base.Fatalf("duplicate posmap bases")
+		}
+		m.bases[k] = v
+	}
+}
diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go
new file mode 100644
index 0000000..dfbcf515
--- /dev/null
+++ b/src/cmd/compile/internal/objw/objw.go
@@ -0,0 +1,72 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objw
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/bitvec"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
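+// Each writer below stores v at byte offset off in symbol s and returns
+// the offset just past the written value, so calls chain. A minimal
+// sketch (lsym is a hypothetical *obj.LSym):
+//
+//	off := 0
+//	off = objw.Uintptr(lsym, off, 0) // pointer-sized word at offset 0
+//	off = objw.Uint32(lsym, off, 1)  // next 4 bytes
+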
+func Uint8(s *obj.LSym, off int, v uint8) int {
+	return UintN(s, off, uint64(v), 1)
+}
+
+func Uint16(s *obj.LSym, off int, v uint16) int {
+	return UintN(s, off, uint64(v), 2)
+}
+
+func Uint32(s *obj.LSym, off int, v uint32) int {
+	return UintN(s, off, uint64(v), 4)
+}
+
+func Uintptr(s *obj.LSym, off int, v uint64) int {
+	return UintN(s, off, v, types.PtrSize)
+}
+
+func UintN(s *obj.LSym, off int, v uint64, wid int) int {
+	if off&(wid-1) != 0 {
+		base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+	}
+	s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
+	return off + wid
+}
+
+func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+	off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+	s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+	off += types.PtrSize
+	return off
+}
+
+func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
+	s.WriteOff(base.Ctxt, int64(off), x, 0)
+	off += 4
+	return off
+}
+
+func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+	s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
+	off += 4
+	return off
+}
+
+func Global(s *obj.LSym, width int32, flags int16) {
+	if flags&obj.LOCAL != 0 {
+		s.Set(obj.AttrLocal, true)
+		flags &^= obj.LOCAL
+	}
+	base.Ctxt.Globl(s, int64(width), int(flags))
+}
+
+func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
+	// Runtime reads the bitmaps as byte arrays. Oblige.
+	for j := 0; int32(j) < bv.N; j += 8 {
+		word := bv.B[j/32]
+		off = Uint8(s, off, uint8(word>>(uint(j)%32)))
+	}
+	return off
+}
diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go
new file mode 100644
index 0000000..b5ac4dd
--- /dev/null
+++ b/src/cmd/compile/internal/objw/prog.go
@@ -0,0 +1,226 @@
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package objw
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// NewProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
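+// Assuming the backend runs with base.Flag.LowerC workers, each worker
+// receives a disjoint len(sharedProgArray)/c slice of the shared cache,
+// so workers never hand out the same Prog.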
+func NewProgs(fn *ir.Func, worker int) *Progs {
+	pp := new(Progs)
+	if base.Ctxt.CanReuseProgs() {
+		sz := len(sharedProgArray) / base.Flag.LowerC
+		pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
+	}
+	pp.CurFunc = fn
+
+	// prime the pump
+	pp.Next = pp.NewProg()
+	pp.Clear(pp.Next)
+
+	pp.Pos = fn.Pos()
+	pp.SetText(fn)
+	// PCDATA tables implicitly start with index -1.
+	pp.PrevLive = LivenessIndex{-1, false}
+	pp.NextLive = pp.PrevLive
+	return pp
+}
+
+// Progs accumulates Progs for a function and converts them into machine code.
+type Progs struct {
+	Text       *obj.Prog  // ATEXT Prog for this function
+	Next       *obj.Prog  // next Prog
+	PC         int64      // virtual PC; count of Progs
+	Pos        src.XPos   // position to use for new Progs
+	CurFunc    *ir.Func   // fn these Progs are for
+	Cache      []obj.Prog // local progcache
+	CacheIndex int        // first free element of progcache
+
+	NextLive LivenessIndex // liveness index for the next Prog
+	PrevLive LivenessIndex // last emitted liveness index
+}
+
+// LivenessIndex stores the liveness map information for a Value.
+type LivenessIndex struct {
+	StackMapIndex int
+
+	// IsUnsafePoint indicates that this is an unsafe-point.
+	//
+	// Note that it's possible for a call Value to have a stack
+	// map while also being an unsafe-point. This means it cannot
+	// be preempted at this instruction, but that a preemption or
+	// stack growth may happen in the called function.
+	IsUnsafePoint bool
+}
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare = -1000
+
+// LivenessDontCare indicates that the liveness information doesn't
+// matter. Currently it is used in deferreturn liveness when we don't
+// actually need it. It should never be emitted to the PCDATA stream.
+var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+
+func (idx LivenessIndex) StackMapValid() bool {
+	return idx.StackMapIndex != StackMapDontCare
+}
+
+func (pp *Progs) NewProg() *obj.Prog {
+	var p *obj.Prog
+	if pp.CacheIndex < len(pp.Cache) {
+		p = &pp.Cache[pp.CacheIndex]
+		pp.CacheIndex++
+	} else {
+		p = new(obj.Prog)
+	}
+	p.Ctxt = base.Ctxt
+	return p
+}
+
+// Flush converts from pp to machine code.
+func (pp *Progs) Flush() {
+	plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
+	obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+}
+
+// Free clears pp and any associated resources.
+func (pp *Progs) Free() {
+	if base.Ctxt.CanReuseProgs() {
+		// Clear progs to enable GC and avoid abuse.
+		s := pp.Cache[:pp.CacheIndex]
+		for i := range s {
+			s[i] = obj.Prog{}
+		}
+	}
+	// Clear pp to avoid abuse.
+	*pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+	if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+		// Emit stack map index change.
+		idx := pp.NextLive.StackMapIndex
+		pp.PrevLive.StackMapIndex = idx
+		p := pp.Prog(obj.APCDATA)
+		p.From.SetConst(objabi.PCDATA_StackMapIndex)
+		p.To.SetConst(int64(idx))
+	}
+	if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+		// Emit unsafe-point marker.
+		pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+		p := pp.Prog(obj.APCDATA)
+		p.From.SetConst(objabi.PCDATA_UnsafePoint)
+		if pp.NextLive.IsUnsafePoint {
+			p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe)
+		} else {
+			p.To.SetConst(objabi.PCDATA_UnsafePointSafe)
+		}
+	}
+
+	p := pp.Next
+	pp.Next = pp.NewProg()
+	pp.Clear(pp.Next)
+	p.Link = pp.Next
+
+	if !pp.Pos.IsKnown() && base.Flag.K != 0 {
+		base.Warn("prog: unknown position (line 0)")
+	}
+
+	p.As = as
+	p.Pos = pp.Pos
+	if pp.Pos.IsStmt() == src.PosIsStmt {
+		// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
+		if LosesStmtMark(as) {
+			return p
+		}
+		pp.Pos = pp.Pos.WithNotStmt()
+	}
+	return p
+}
+
+func (pp *Progs) Clear(p *obj.Prog) {
+	obj.Nopout(p)
+	p.As = obj.AEND
+	p.Pc = pp.PC
+	pp.PC++
+}
+
+func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+	q := pp.NewProg()
+	pp.Clear(q)
+	q.As = as
+	q.Pos = p.Pos
+	q.From.Type = ftype
+	q.From.Reg = freg
+	q.From.Offset = foffset
+	q.To.Type = ttype
+	q.To.Reg = treg
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+func (pp *Progs) SetText(fn *ir.Func) {
+	if pp.Text != nil {
+		base.Fatalf("Progs.SetText called twice")
+	}
+	ptxt := pp.Prog(obj.ATEXT)
+	pp.Text = ptxt
+
+	fn.LSym.Func().Text = ptxt
+	ptxt.From.Type = obj.TYPE_MEM
+	ptxt.From.Name = obj.NAME_EXTERN
+	ptxt.From.Sym = fn.LSym
+}
+
+// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF.
+// The attributes from some opcodes are lost in translation.
+// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC.
+// Should try to fix it there.
+func LosesStmtMark(as obj.As) bool {
+	// is_stmt does not work for these; it DOES for ANOP even though that generates no code.
+	return as == obj.APCDATA || as == obj.AFUNCDATA
+}
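A note on the allocation pattern above: NewProg hands out entries from the preallocated Cache slice and only falls back to the heap once the cache is exhausted, which keeps per-function Prog allocation cheap and reusable across compiles. A minimal, self-contained Go sketch of the same slab-style pattern (the names here are illustrative, not the compiler's):

	package main

	import "fmt"

	type Prog struct{ As int }

	type Progs struct {
		Cache      []Prog
		CacheIndex int
	}

	// NewProg returns the next entry from the preallocated cache when
	// one is available, and falls back to a fresh heap allocation
	// otherwise.
	func (pp *Progs) NewProg() *Prog {
		if pp.CacheIndex < len(pp.Cache) {
			p := &pp.Cache[pp.CacheIndex]
			pp.CacheIndex++
			return p
		}
		return new(Prog)
	}

	func main() {
		pp := &Progs{Cache: make([]Prog, 2)}
		for i := 0; i < 3; i++ {
			fmt.Printf("prog %d: %p\n", i, pp.NewProg()) // third is heap-allocated
		}
	}
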
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
new file mode 100644
index 0000000..7cad262
--- /dev/null
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -0,0 +1,109 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkginit
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/deadcode"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+// Task makes and returns an initialization record for the package.
+// See runtime/proc.go:initTask for its layout.
+// The 3 tasks for initialization are:
+//   1) Initialize all of the packages the current package depends on.
+//   2) Initialize all the variables that have initializers.
+//   3) Run any init functions.
+func Task() *ir.Name {
+	nf := initOrder(typecheck.Target.Decls)
+
+	var deps []*obj.LSym // initTask records for packages the current package depends on
+	var fns []*obj.LSym  // functions to call for package initialization
+
+	// Find imported packages with init tasks.
+	for _, pkg := range typecheck.Target.Imports {
+		n := typecheck.Resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
+		if n.Op() == ir.ONONAME {
+			continue
+		}
+		if n.Op() != ir.ONAME || n.(*ir.Name).Class != ir.PEXTERN {
+			base.Fatalf("bad inittask: %v", n)
+		}
+		deps = append(deps, n.(*ir.Name).Linksym())
+	}
+
+	// Make a function that contains all the initialization statements.
+	if len(nf) > 0 {
+		base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
+		initializers := typecheck.Lookup("init")
+		fn := typecheck.DeclFunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
+		for _, dcl := range typecheck.InitTodoFunc.Dcl {
+			dcl.Curfn = fn
+		}
+		fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...)
+		typecheck.InitTodoFunc.Dcl = nil
+
+		fn.Body = nf
+		typecheck.FinishFuncBody()
+
+		typecheck.Func(fn)
+		ir.CurFunc = fn
+		typecheck.Stmts(nf)
+		ir.CurFunc = nil
+		typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+		fns = append(fns, fn.Linksym())
+	}
+	if typecheck.InitTodoFunc.Dcl != nil {
+		// We only generate temps using InitTodoFunc if there
+		// are package-scope initialization statements, so
+		// something's weird if we get here.
+		base.Fatalf("InitTodoFunc still has declarations")
+	}
+	typecheck.InitTodoFunc = nil
+
+	// Record user init functions.
+	for _, fn := range typecheck.Target.Inits {
+		// Must happen after initOrder; see #43444.
+		deadcode.Func(fn)
+
+		// Skip init functions with empty bodies.
+		if len(fn.Body) == 1 {
+			if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 {
+				continue
+			}
+		}
+		fns = append(fns, fn.Nname.Linksym())
+	}
+
+	if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" {
+		return nil // nothing to initialize
+	}
+
+	// Make an .inittask structure.
+	sym := typecheck.Lookup(".inittask")
+	task := typecheck.NewName(sym)
+	task.SetType(types.Types[types.TUINT8]) // fake type
+	task.Class = ir.PEXTERN
+	sym.Def = task
+	lsym := task.Linksym()
+	ot := 0
+	ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet
+	ot = objw.Uintptr(lsym, ot, uint64(len(deps)))
+	ot = objw.Uintptr(lsym, ot, uint64(len(fns)))
+	for _, d := range deps {
+		ot = objw.SymPtr(lsym, ot, d, 0)
+	}
+	for _, f := range fns {
+		ot = objw.SymPtr(lsym, ot, f, 0)
+	}
+	// An initTask has pointers, but none into the Go heap.
+	// It's not quite read only, the state field must be modifiable.
+	objw.Global(lsym, int32(ot), obj.NOPTR)
+	return task
+}
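For readers unfamiliar with the .inittask layout, the words written above with objw.Uintptr and objw.SymPtr correspond to a record of roughly the shape below. The field names are guesses for illustration only; runtime/proc.go:initTask remains the authoritative definition.

	package sketch

	// initTask mirrors, word for word, what Task emits: a state word,
	// two counts, then the dependency and function pointers inline.
	// Field names are illustrative guesses.
	type initTask struct {
		state uintptr // 0 = not initialized yet; updated at run time
		ndeps uintptr // number of dependency pointers that follow
		nfns  uintptr // number of init-function pointers that follow
		// ndeps pointers to imported packages' initTasks, then nfns
		// pointers to init functions, follow inline in memory.
	}
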
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go
similarity index 64%
rename from src/cmd/compile/internal/gc/initorder.go
rename to src/cmd/compile/internal/pkginit/initorder.go
index e2084fd..97d6962 100644
--- a/src/cmd/compile/internal/gc/initorder.go
+++ b/src/cmd/compile/internal/pkginit/initorder.go
@@ -2,12 +2,16 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package pkginit
 
 import (
 	"bytes"
 	"container/heap"
 	"fmt"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/staticinit"
 )
 
 // Package initialization
@@ -60,56 +64,57 @@
 type InitOrder struct {
 	// blocking maps initialization assignments to the assignments
 	// that depend on it.
-	blocking map[*Node][]*Node
+	blocking map[ir.Node][]ir.Node
 
 	// ready is the queue of Pending initialization assignments
 	// that are ready for initialization.
 	ready declOrder
+
+	order map[ir.Node]int
 }
 
 // initOrder computes initialization order for a list l of
 // package-level declarations (in declaration order) and outputs the
 // corresponding list of statements to include in the init() function
 // body.
-func initOrder(l []*Node) []*Node {
-	s := InitSchedule{
-		initplans: make(map[*Node]*InitPlan),
-		inittemps: make(map[*Node]*Node),
+func initOrder(l []ir.Node) []ir.Node {
+	s := staticinit.Schedule{
+		Plans: make(map[ir.Node]*staticinit.Plan),
+		Temps: make(map[ir.Node]*ir.Name),
 	}
 	o := InitOrder{
-		blocking: make(map[*Node][]*Node),
+		blocking: make(map[ir.Node][]ir.Node),
+		order:    make(map[ir.Node]int),
 	}
 
 	// Process all package-level assignments in declaration order.
 	for _, n := range l {
-		switch n.Op {
-		case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+		switch n.Op() {
+		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
 			o.processAssign(n)
-			o.flushReady(s.staticInit)
-		case ODCLCONST, ODCLFUNC, ODCLTYPE:
+			o.flushReady(s.StaticInit)
+		case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
 			// nop
 		default:
-			Fatalf("unexpected package-level statement: %v", n)
+			base.Fatalf("unexpected package-level statement: %v", n)
 		}
 	}
 
 	// Check that all assignments are now Done; if not, there must
 	// have been a dependency cycle.
 	for _, n := range l {
-		switch n.Op {
-		case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-			if n.Initorder() != InitDone {
+		switch n.Op() {
+		case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+			if o.order[n] != orderDone {
 				// If there have already been errors
 				// printed, those errors may have
 				// confused us and there might not be
 				// a loop. Let the user fix those
 				// first.
-				if nerrors > 0 {
-					errorexit()
-				}
+				base.ExitIfErrors()
 
-				findInitLoopAndExit(firstLHS(n), new([]*Node), make(map[*Node]bool))
-				Fatalf("initialization unfinished, but failed to identify loop")
+				o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
+				base.Fatalf("initialization unfinished, but failed to identify loop")
 			}
 		}
 	}
@@ -117,58 +122,56 @@
 	// Invariant consistency check. If this is non-zero, then we
 	// should have found a cycle above.
 	if len(o.blocking) != 0 {
-		Fatalf("expected empty map: %v", o.blocking)
+		base.Fatalf("expected empty map: %v", o.blocking)
 	}
 
-	return s.out
+	return s.Out
 }
 
-func (o *InitOrder) processAssign(n *Node) {
-	if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH {
-		Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+func (o *InitOrder) processAssign(n ir.Node) {
+	if _, ok := o.order[n]; ok {
+		base.Fatalf("unexpected state: %v, %v", n, o.order[n])
 	}
-
-	n.SetInitorder(InitPending)
-	n.Xoffset = 0
+	o.order[n] = 0
 
 	// Compute number of variable dependencies and build the
 	// inverse dependency ("blocking") graph.
 	for dep := range collectDeps(n, true) {
-		defn := dep.Name.Defn
+		defn := dep.Defn
 		// Skip dependencies on functions (PFUNC) and
 		// variables already initialized (InitDone).
-		if dep.Class() != PEXTERN || defn.Initorder() == InitDone {
+		if dep.Class != ir.PEXTERN || o.order[defn] == orderDone {
 			continue
 		}
-		n.Xoffset++
+		o.order[n]++
 		o.blocking[defn] = append(o.blocking[defn], n)
 	}
 
-	if n.Xoffset == 0 {
+	if o.order[n] == 0 {
 		heap.Push(&o.ready, n)
 	}
 }
 
+const orderDone = -1000
+
 // flushReady repeatedly applies initialize to the earliest (in
 // declaration order) assignment ready for initialization and updates
 // the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(*Node)) {
+func (o *InitOrder) flushReady(initialize func(ir.Node)) {
 	for o.ready.Len() != 0 {
-		n := heap.Pop(&o.ready).(*Node)
-		if n.Initorder() != InitPending || n.Xoffset != 0 {
-			Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+		n := heap.Pop(&o.ready).(ir.Node)
+		if order, ok := o.order[n]; !ok || order != 0 {
+			base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
 		}
 
 		initialize(n)
-		n.SetInitorder(InitDone)
-		n.Xoffset = BADWIDTH
+		o.order[n] = orderDone
 
 		blocked := o.blocking[n]
 		delete(o.blocking, n)
 
 		for _, m := range blocked {
-			m.Xoffset--
-			if m.Xoffset == 0 {
+			if o.order[m]--; o.order[m] == 0 {
 				heap.Push(&o.ready, m)
 			}
 		}
@@ -181,7 +184,7 @@
 // path points to a slice used for tracking the sequence of
 // variables/functions visited. Using a pointer to a slice allows the
 // slice capacity to grow and limit reallocations.
-func findInitLoopAndExit(n *Node, path *[]*Node, ok map[*Node]bool) {
+func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
 	for i, x := range *path {
 		if x == n {
 			reportInitLoopAndExit((*path)[i:])
@@ -191,24 +194,25 @@
 
 	// There might be multiple loops involving n; by sorting
 	// references, we deterministically pick the one reported.
-	refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
-		return ni.Pos.Before(nj.Pos)
+	refers := collectDeps(n.Defn, false).Sorted(func(ni, nj *ir.Name) bool {
+		return ni.Pos().Before(nj.Pos())
 	})
 
 	*path = append(*path, n)
 	for _, ref := range refers {
 		// Short-circuit variables that were initialized.
-		if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone || ok[ref] {
+		if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
 			continue
 		}
-		findInitLoopAndExit(ref, path, ok)
+
+		o.findInitLoopAndExit(ref, path, ok)
 	}
 
 	// n is not involved in a cycle.
 	// Record that fact to avoid checking it again when reached another way,
 	// or else this traversal will take exponential time traversing all paths
 	// through the part of the package's call graph implicated in the cycle.
-	ok[n] = true
+	ok.Add(n)
 
 	*path = (*path)[:len(*path)-1]
 }
@@ -216,12 +220,12 @@
 // reportInitLoopAndExit reports an initialization loop as an error
 // and exits. However, if l is not actually an initialization loop, it
 // simply returns instead.
-func reportInitLoopAndExit(l []*Node) {
+func reportInitLoopAndExit(l []*ir.Name) {
 	// Rotate loop so that the earliest variable declaration is at
 	// the start.
 	i := -1
 	for j, n := range l {
-		if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
+		if n.Class == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
 			i = j
 		}
 	}
@@ -239,69 +243,75 @@
 	var msg bytes.Buffer
 	fmt.Fprintf(&msg, "initialization loop:\n")
 	for _, n := range l {
-		fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
+		fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
 	}
-	fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])
+	fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
 
-	yyerrorl(l[0].Pos, msg.String())
-	errorexit()
+	base.ErrorfAt(l[0].Pos(), msg.String())
+	base.ErrorExit()
 }
 
 // collectDeps returns all of the package-level functions and
 // variables that declaration n depends on. If transitive is true,
 // then it also includes the transitive dependencies of any depended
 // upon functions (but not variables).
-func collectDeps(n *Node, transitive bool) NodeSet {
+func collectDeps(n ir.Node, transitive bool) ir.NameSet {
 	d := initDeps{transitive: transitive}
-	switch n.Op {
-	case OAS:
-		d.inspect(n.Right)
-	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-		d.inspect(n.Right)
-	case ODCLFUNC:
-		d.inspectList(n.Nbody)
+	switch n.Op() {
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		d.inspect(n.Y)
+	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+		n := n.(*ir.AssignListStmt)
+		d.inspect(n.Rhs[0])
+	case ir.ODCLFUNC:
+		n := n.(*ir.Func)
+		d.inspectList(n.Body)
 	default:
-		Fatalf("unexpected Op: %v", n.Op)
+		base.Fatalf("unexpected Op: %v", n.Op())
 	}
 	return d.seen
 }
 
 type initDeps struct {
 	transitive bool
-	seen       NodeSet
+	seen       ir.NameSet
+	cvisit     func(ir.Node)
 }
 
-func (d *initDeps) inspect(n *Node)     { inspect(n, d.visit) }
-func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) }
+func (d *initDeps) cachedVisit() func(ir.Node) {
+	if d.cvisit == nil {
+		d.cvisit = d.visit // cache closure
+	}
+	return d.cvisit
+}
+
+func (d *initDeps) inspect(n ir.Node)      { ir.Visit(n, d.cachedVisit()) }
+func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
 
 // visit calls foundDep on any package-level functions or variables
 // referenced by n, if any.
-func (d *initDeps) visit(n *Node) bool {
-	switch n.Op {
-	case ONAME:
-		if n.isMethodExpression() {
-			d.foundDep(asNode(n.Type.FuncType().Nname))
-			return false
-		}
-
-		switch n.Class() {
-		case PEXTERN, PFUNC:
+func (d *initDeps) visit(n ir.Node) {
+	switch n.Op() {
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		switch n.Class {
+		case ir.PEXTERN, ir.PFUNC:
 			d.foundDep(n)
 		}
 
-	case OCLOSURE:
-		d.inspectList(n.Func.Closure.Nbody)
+	case ir.OCLOSURE:
+		n := n.(*ir.ClosureExpr)
+		d.inspectList(n.Func.Body)
 
-	case ODOTMETH, OCALLPART:
-		d.foundDep(asNode(n.Type.FuncType().Nname))
+	case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+		d.foundDep(ir.MethodExprName(n))
 	}
-
-	return true
 }
 
 // foundDep records that we've found a dependency on n by adding it to
 // seen.
-func (d *initDeps) foundDep(n *Node) {
+func (d *initDeps) foundDep(n *ir.Name) {
 	// Can happen with method expressions involving interface
 	// types; e.g., fixedbugs/issue4495.go.
 	if n == nil {
@@ -310,7 +320,7 @@
 
 	// Names without definitions aren't interesting as far as
 	// initialization ordering goes.
-	if n.Name.Defn == nil {
+	if n.Defn == nil {
 		return
 	}
 
@@ -318,8 +328,8 @@
 		return
 	}
 	d.seen.Add(n)
-	if d.transitive && n.Class() == PFUNC {
-		d.inspectList(n.Name.Defn.Nbody)
+	if d.transitive && n.Class == ir.PFUNC {
+		d.inspectList(n.Defn.(*ir.Func).Body)
 	}
 }
 
@@ -330,13 +340,15 @@
 // an OAS node's Pos may not be unique. For example, given the
 // declaration "var a, b = f(), g()", "a" must be ordered before "b",
 // but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []*Node
+type declOrder []ir.Node
 
-func (s declOrder) Len() int           { return len(s) }
-func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
-func (s declOrder) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s declOrder) Len() int { return len(s) }
+func (s declOrder) Less(i, j int) bool {
+	return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
+}
+func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) }
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
 func (s *declOrder) Pop() interface{} {
 	n := (*s)[len(*s)-1]
 	*s = (*s)[:len(*s)-1]
@@ -345,14 +357,16 @@
 
 // firstLHS returns the first expression on the left-hand side of
 // assignment n.
-func firstLHS(n *Node) *Node {
-	switch n.Op {
-	case OAS:
-		return n.Left
-	case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR:
-		return n.List.First()
+func firstLHS(n ir.Node) *ir.Name {
+	switch n.Op() {
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		return n.X.Name()
+	case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
+		n := n.(*ir.AssignListStmt)
+		return n.Lhs[0].Name()
 	}
 
-	Fatalf("unexpected Op: %v", n.Op)
+	base.Fatalf("unexpected Op: %v", n.Op())
 	return nil
 }
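The scheduling in initOrder and flushReady is essentially Kahn's algorithm with a heap keyed by declaration order: each assignment carries a count of not-yet-initialized dependencies, and it becomes ready once that count reaches zero. A self-contained sketch of the same loop over toy data (strings and indices, not compiler IR):

	package main

	import (
		"container/heap"
		"fmt"
	)

	// readyQueue yields unblocked declarations in declaration order.
	type readyQueue []int // indices into decls

	func (q readyQueue) Len() int            { return len(q) }
	func (q readyQueue) Less(i, j int) bool  { return q[i] < q[j] }
	func (q readyQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
	func (q *readyQueue) Push(x interface{}) { *q = append(*q, x.(int)) }
	func (q *readyQueue) Pop() interface{} {
		n := (*q)[len(*q)-1]
		*q = (*q)[:len(*q)-1]
		return n
	}

	func main() {
		decls := []string{"a = b + 1", "b = 2", "c = a + b"}
		deps := map[int][]int{0: {1}, 2: {0, 1}} // decl -> decls it waits on

		// blocking is the inverse dependency graph; order counts
		// pending dependencies, as in InitOrder.
		blocking := map[int][]int{}
		order := make([]int, len(decls))
		for n, ds := range deps {
			order[n] = len(ds)
			for _, d := range ds {
				blocking[d] = append(blocking[d], n)
			}
		}

		var ready readyQueue
		for n := range decls {
			if order[n] == 0 {
				heap.Push(&ready, n)
			}
		}
		for ready.Len() > 0 {
			n := heap.Pop(&ready).(int)
			fmt.Println("init:", decls[n]) // prints b, then a, then c
			for _, m := range blocking[n] {
				if order[m]--; order[m] == 0 {
					heap.Push(&ready, m)
				}
			}
		}
	}
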
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index c8ef567..c72d1aa 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -5,12 +5,12 @@
 package ppc64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/ppc64"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &ppc64.Linkppc64
 	if objabi.GOARCH == "ppc64le" {
 		arch.LinkArch = &ppc64.Linkppc64le
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index a5a772b..c76962c 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -5,44 +5,47 @@
 package ppc64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/ppc64"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthptr) {
-		p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+	} else if cnt <= int64(128*types.PtrSize) {
+		p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
 		p.Reg = ppc64.REGSP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
 	} else {
-		p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
 		p.Reg = ppc64.REGSP
-		p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
 		p.Reg = ppc64.REGRT1
-		p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+		p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
 		p1 := p
-		p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
-		p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(ppc64.AOR)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = ppc64.REG_R0
@@ -51,7 +54,7 @@
 	return p
 }
 
-func ginsnopdefer(pp *gc.Progs) *obj.Prog {
+func ginsnopdefer(pp *objw.Progs) *obj.Prog {
 	// On PPC64 two nops are required in the defer case.
 	//
 	// (see gc/cgen.go, gc/plive.go -- copy of comment below)
@@ -66,7 +69,7 @@
 	// on ppc64 in both shared and non-shared modes.
 
 	ginsnop(pp)
-	if gc.Ctxt.Flag_shared {
+	if base.Ctxt.Flag_shared {
 		p := pp.Prog(ppc64.AMOVD)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Offset = 24
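zerorange above picks among three strategies by size: fully unrolled MOVD stores for ranges under 4 pointer words, a DUFFZERO call for ranges up to 128 pointer words, and a counted MOVDU/CMP/BNE loop beyond that. A sketch of the same tiering decision in plain Go (the cutoffs mirror the ppc64 code; the function is hypothetical):

	package main

	import "fmt"

	const ptrSize = 8 // pointer size on ppc64

	// zeroStrategy mirrors zerorange's size classes: unroll small
	// ranges, use the Duff's-device helper for medium ones, and emit
	// a store loop for large ones.
	func zeroStrategy(cnt int64) string {
		switch {
		case cnt < 4*ptrSize:
			return "unrolled stores"
		case cnt <= 128*ptrSize:
			return "DUFFZERO"
		default:
			return "store loop"
		}
	}

	func main() {
		for _, cnt := range []int64{16, 512, 4096} {
			fmt.Printf("%4d bytes: %s\n", cnt, zeroStrategy(cnt))
		}
	}
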
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 3e20c44..c85e110 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -5,9 +5,11 @@
 package ppc64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/ppc64"
@@ -17,7 +19,7 @@
 )
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 	//	flive := b.FlagsLiveAtEnd
 	//	if b.Control != nil && b.Control.Type.IsFlags() {
 	//		flive = true
@@ -99,7 +101,7 @@
 	panic("bad store type")
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy:
 		t := v.Type
@@ -208,7 +210,7 @@
 		// BNE retry
 		p3 := s.Prog(ppc64.ABNE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 	case ssa.OpPPC64LoweredAtomicAdd32,
 		ssa.OpPPC64LoweredAtomicAdd64:
@@ -252,7 +254,7 @@
 		// BNE retry
 		p4 := s.Prog(ppc64.ABNE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 
 		// Ensure a 32 bit result
 		if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
@@ -298,7 +300,7 @@
 		// BNE retry
 		p2 := s.Prog(ppc64.ABNE)
 		p2.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p2, p)
+		p2.To.SetTarget(p)
 		// ISYNC
 		pisync := s.Prog(ppc64.AISYNC)
 		pisync.To.Type = obj.TYPE_NONE
@@ -346,7 +348,7 @@
 		// ISYNC
 		pisync := s.Prog(ppc64.AISYNC)
 		pisync.To.Type = obj.TYPE_NONE
-		gc.Patch(p2, pisync)
+		p2.To.SetTarget(pisync)
 
 	case ssa.OpPPC64LoweredAtomicStore8,
 		ssa.OpPPC64LoweredAtomicStore32,
@@ -437,7 +439,7 @@
 		// BNE retry
 		p4 := s.Prog(ppc64.ABNE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 		// LWSYNC - Assuming shared data not write-through-required nor
 		// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
 		// If the operation is a CAS-Release, then synchronization is not necessary.
@@ -460,20 +462,20 @@
 		p7.From.Offset = 0
 		p7.To.Type = obj.TYPE_REG
 		p7.To.Reg = out
-		gc.Patch(p2, p7)
+		p2.To.SetTarget(p7)
 		// done (label)
 		p8 := s.Prog(obj.ANOP)
-		gc.Patch(p6, p8)
+		p6.To.SetTarget(p8)
 
 	case ssa.OpPPC64LoweredGetClosurePtr:
 		// Closure pointer is R11 (already)
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 
 	case ssa.OpPPC64LoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(ppc64.AMOVD)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -489,7 +491,7 @@
 	case ssa.OpLoadReg:
 		loadOp := loadByType(v.Type)
 		p := s.Prog(loadOp)
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
@@ -498,7 +500,7 @@
 		p := s.Prog(storeOp)
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 
 	case ssa.OpPPC64DIVD:
 		// For now,
@@ -537,10 +539,10 @@
 		p.To.Reg = r
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r0
-		gc.Patch(pbahead, p)
+		pbahead.To.SetTarget(p)
 
 		p = s.Prog(obj.ANOP)
-		gc.Patch(pbover, p)
+		pbover.To.SetTarget(p)
 
 	case ssa.OpPPC64DIVW:
 		// word-width version of above
@@ -572,10 +574,10 @@
 		p.To.Reg = r
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r0
-		gc.Patch(pbahead, p)
+		pbahead.To.SetTarget(p)
 
 		p = s.Prog(obj.ANOP)
-		gc.Patch(pbover, p)
+		pbover.To.SetTarget(p)
 
 	case ssa.OpPPC64CLRLSLWI:
 		r := v.Reg()
@@ -750,13 +752,13 @@
 				p.To.Reg = v.Reg()
 			}
 
-		case *obj.LSym, *gc.Node:
+		case *obj.LSym, ir.Node:
 			p := s.Prog(ppc64.AMOVD)
 			p.From.Type = obj.TYPE_ADDR
 			p.From.Reg = v.Args[0].Reg()
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = v.Reg()
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 
 		}
 
@@ -817,7 +819,7 @@
 			p := s.Prog(ppc64.AMOVD)
 			p.From.Type = obj.TYPE_ADDR
 			p.From.Reg = v.Args[0].Reg()
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = v.Reg()
 			// Load go.string using 0 offset
@@ -835,7 +837,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
@@ -869,7 +871,7 @@
 		p.From.Reg = ppc64.REGZERO
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 
 	case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
 		p := s.Prog(v.Op.Asm())
@@ -877,7 +879,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 
 	case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
 		ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
@@ -1026,7 +1028,7 @@
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 		}
 		// When ctr == 1 the loop was not generated but
 		// there are at least 64 bytes to clear, so add
@@ -1226,7 +1228,7 @@
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 		}
 
 		// when ctr == 1 the loop was not generated but
@@ -1405,7 +1407,7 @@
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 
 			// srcReg and dstReg were incremented in the loop, so
 			// later instructions start with offset 0.
@@ -1652,7 +1654,7 @@
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 
 			// srcReg and dstReg were incremented in the loop, so
 			// later instructions start with offset 0.
@@ -1784,7 +1786,7 @@
 		// Insert a hint this is not a subroutine return.
 		pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1})
 
-		if gc.Ctxt.Flag_shared {
+		if base.Ctxt.Flag_shared {
 			// When compiling Go into PIC, the function we just
 			// called via pointer might have been implemented in
 			// a separate module and so overwritten the TOC
@@ -1807,7 +1809,7 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(16) // space used in callee args area by assembly stubs
 
 	case ssa.OpPPC64LoweredNilCheck:
@@ -1838,22 +1840,22 @@
 
 			// NOP (so the BNE has somewhere to land)
 			nop := s.Prog(obj.ANOP)
-			gc.Patch(p2, nop)
+			p2.To.SetTarget(nop)
 
 		} else {
 			// Issue a load which will fault if arg is nil.
 			p := s.Prog(ppc64.AMOVBZ)
 			p.From.Type = obj.TYPE_MEM
 			p.From.Reg = v.Args[0].Reg()
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = ppc64.REGTMP
 		}
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 
 	// These should be resolved by rules and not make it here.
@@ -1891,7 +1893,7 @@
 	ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockDefer:
 		// defer returns in R3:
@@ -1905,18 +1907,18 @@
 
 		p = s.Prog(ppc64.ABNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
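A recurring mechanical change in this file is gc.Patch(p, q) becoming p.To.SetTarget(q): a forward branch is emitted with TYPE_BRANCH and then pointed at its destination once that instruction exists (as in p2.To.SetTarget(p7) above). A toy sketch of the pattern, using stand-in types rather than cmd/internal/obj:

	package main

	import "fmt"

	// Prog is a stand-in for an instruction with a branch target.
	type Prog struct {
		Name   string
		Target *Prog
	}

	func (p *Prog) SetTarget(q *Prog) { p.Target = q }

	func main() {
		bne := &Prog{Name: "BNE"} // forward branch; target unknown yet
		// ...emit the fallthrough instructions here...
		done := &Prog{Name: "NOP"} // the eventual landing point
		bne.SetTarget(done)        // patch the branch once it exists
		fmt.Printf("%s -> %s\n", bne.Name, bne.Target.Name)
	}
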
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
new file mode 100644
index 0000000..fcd824f
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -0,0 +1,788 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+	"fmt"
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+// isRegularMemory reports whether t can be compared/hashed as regular memory.
+func isRegularMemory(t *types.Type) bool {
+	a, _ := types.AlgType(t)
+	return a == types.AMEM
+}
+
+// eqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// t must be comparable.
+func eqCanPanic(t *types.Type) bool {
+	switch t.Kind() {
+	default:
+		return false
+	case types.TINTER:
+		return true
+	case types.TARRAY:
+		return eqCanPanic(t.Elem())
+	case types.TSTRUCT:
+		for _, f := range t.FieldSlice() {
+			if !f.Sym.IsBlank() && eqCanPanic(f.Type) {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// AlgType returns the fixed-width AMEMxx variants instead of the general
+// AMEM kind when possible.
+func AlgType(t *types.Type) types.AlgKind {
+	a, _ := types.AlgType(t)
+	if a == types.AMEM {
+		switch t.Width {
+		case 0:
+			return types.AMEM0
+		case 1:
+			return types.AMEM8
+		case 2:
+			return types.AMEM16
+		case 4:
+			return types.AMEM32
+		case 8:
+			return types.AMEM64
+		case 16:
+			return types.AMEM128
+		}
+	}
+
+	return a
+}
+
+// genhash returns a symbol which is the closure used to compute
+// the hash of a value of type t.
+// Note: the generated function must match runtime.typehash exactly.
+func genhash(t *types.Type) *obj.LSym {
+	switch AlgType(t) {
+	default:
+		// genhash is only called for types that have equality
+		base.Fatalf("genhash %v", t)
+	case types.AMEM0:
+		return sysClosure("memhash0")
+	case types.AMEM8:
+		return sysClosure("memhash8")
+	case types.AMEM16:
+		return sysClosure("memhash16")
+	case types.AMEM32:
+		return sysClosure("memhash32")
+	case types.AMEM64:
+		return sysClosure("memhash64")
+	case types.AMEM128:
+		return sysClosure("memhash128")
+	case types.ASTRING:
+		return sysClosure("strhash")
+	case types.AINTER:
+		return sysClosure("interhash")
+	case types.ANILINTER:
+		return sysClosure("nilinterhash")
+	case types.AFLOAT32:
+		return sysClosure("f32hash")
+	case types.AFLOAT64:
+		return sysClosure("f64hash")
+	case types.ACPLX64:
+		return sysClosure("c64hash")
+	case types.ACPLX128:
+		return sysClosure("c128hash")
+	case types.AMEM:
+		// For other sizes of plain memory, we build a closure
+		// that calls memhash_varlen. The size of the memory is
+		// encoded in the first slot of the closure.
+		closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Width))
+		if len(closure.P) > 0 { // already generated
+			return closure
+		}
+		if memhashvarlen == nil {
+			memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
+		}
+		ot := 0
+		ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
+		ot = objw.Uintptr(closure, ot, uint64(t.Width)) // size encoded in closure
+		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
+		return closure
+	case types.ASPECIAL:
+		break
+	}
+
+	closure := TypeLinksymPrefix(".hashfunc", t)
+	if len(closure.P) > 0 { // already generated
+		return closure
+	}
+
+	// Generate hash functions for subtypes.
+	// There are cases where we might not use these hashes,
+	// but in that case they will get dead-code eliminated.
+	// (And the closure generated by genhash will also get
+	// dead-code eliminated, as we call the subtype hashers
+	// directly.)
+	switch t.Kind() {
+	case types.TARRAY:
+		genhash(t.Elem())
+	case types.TSTRUCT:
+		for _, f := range t.FieldSlice() {
+			genhash(f.Type)
+		}
+	}
+
+	sym := TypeSymPrefix(".hash", t)
+	if base.Flag.LowerR != 0 {
+		fmt.Printf("genhash %v %v %v\n", closure, sym, t)
+	}
+
+	base.Pos = base.AutogeneratedPos // less confusing than end of input
+	typecheck.DeclContext = ir.PEXTERN
+
+	// func sym(p *T, h uintptr) uintptr
+	args := []*ir.Field{
+		ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)),
+		ir.NewField(base.Pos, typecheck.Lookup("h"), nil, types.Types[types.TUINTPTR]),
+	}
+	results := []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR])}
+	tfn := ir.NewFuncType(base.Pos, nil, args, results)
+
+	fn := typecheck.DeclFunc(sym, tfn)
+	np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+	nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+
+	switch t.Kind() {
+	case types.TARRAY:
+		// An array of pure memory would be handled by the
+		// standard algorithm, so the element type must not be
+		// pure memory.
+		hashel := hashfor(t.Elem())
+
+		// for i := 0; i < nelem; i++
+		ni := typecheck.Temp(types.Types[types.TINT])
+		init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(0))
+		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(t.NumElem()))
+		post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(1)))
+		loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
+		loop.PtrInit().Append(init)
+
+		// h = hashel(&p[i], h)
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+
+		nx := ir.NewIndexExpr(base.Pos, np, ni)
+		nx.SetBounded(true)
+		na := typecheck.NodAddr(nx)
+		call.Args.Append(na)
+		call.Args.Append(nh)
+		loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+
+		fn.Body.Append(loop)
+
+	case types.TSTRUCT:
+		// Walk the struct using memhash for runs of AMEM
+		// and calling specific hash functions for the others.
+		for i, fields := 0, t.FieldSlice(); i < len(fields); {
+			f := fields[i]
+
+			// Skip blank fields.
+			if f.Sym.IsBlank() {
+				i++
+				continue
+			}
+
+			// Hash non-memory fields with appropriate hash function.
+			if !isRegularMemory(f.Type) {
+				hashel := hashfor(f.Type)
+				call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+				nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+				na := typecheck.NodAddr(nx)
+				call.Args.Append(na)
+				call.Args.Append(nh)
+				fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+				i++
+				continue
+			}
+
+			// Otherwise, hash a maximal length run of raw memory.
+			size, next := memrun(t, i)
+
+			// h = hashel(&p.first, size, h)
+			hashel := hashmem(f.Type)
+			call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+			nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+			na := typecheck.NodAddr(nx)
+			call.Args.Append(na)
+			call.Args.Append(nh)
+			call.Args.Append(ir.NewInt(size))
+			fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+
+			i = next
+		}
+	}
+
+	r := ir.NewReturnStmt(base.Pos, nil)
+	r.Results.Append(nh)
+	fn.Body.Append(r)
+
+	if base.Flag.LowerR != 0 {
+		ir.DumpList("genhash body", fn.Body)
+	}
+
+	typecheck.FinishFuncBody()
+
+	fn.SetDupok(true)
+	typecheck.Func(fn)
+
+	ir.CurFunc = fn
+	typecheck.Stmts(fn.Body)
+	ir.CurFunc = nil
+
+	if base.Debug.DclStack != 0 {
+		types.CheckDclstack()
+	}
+
+	fn.SetNilCheckDisabled(true)
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	// Build closure. It doesn't close over any variables, so
+	// it contains just the function pointer.
+	objw.SymPtr(closure, 0, fn.Linksym(), 0)
+	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+
+	return closure
+}
+
+func hashfor(t *types.Type) ir.Node {
+	var sym *types.Sym
+
+	switch a, _ := types.AlgType(t); a {
+	case types.AMEM:
+		base.Fatalf("hashfor with AMEM type")
+	case types.AINTER:
+		sym = ir.Pkgs.Runtime.Lookup("interhash")
+	case types.ANILINTER:
+		sym = ir.Pkgs.Runtime.Lookup("nilinterhash")
+	case types.ASTRING:
+		sym = ir.Pkgs.Runtime.Lookup("strhash")
+	case types.AFLOAT32:
+		sym = ir.Pkgs.Runtime.Lookup("f32hash")
+	case types.AFLOAT64:
+		sym = ir.Pkgs.Runtime.Lookup("f64hash")
+	case types.ACPLX64:
+		sym = ir.Pkgs.Runtime.Lookup("c64hash")
+	case types.ACPLX128:
+		sym = ir.Pkgs.Runtime.Lookup("c128hash")
+	default:
+		// Note: the caller of hashfor ensured that this symbol
+		// exists and has a body by calling genhash for t.
+		sym = TypeSymPrefix(".hash", t)
+	}
+
+	n := typecheck.NewName(sym)
+	ir.MarkFunc(n)
+	n.SetType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+		types.NewField(base.Pos, nil, types.NewPtr(t)),
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+	}, []*types.Field{
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+	}))
+	return n
+}
+
+// sysClosure returns a closure which will call the
+// given runtime function (with no closed-over variables).
+func sysClosure(name string) *obj.LSym {
+	s := typecheck.LookupRuntimeVar(name + "·f")
+	if len(s.P) == 0 {
+		f := typecheck.LookupRuntimeFunc(name)
+		objw.SymPtr(s, 0, f, 0)
+		objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+	}
+	return s
+}
+
+// geneq returns a symbol which is the closure used to compute
+// equality for two objects of type t.
+func geneq(t *types.Type) *obj.LSym {
+	switch AlgType(t) {
+	case types.ANOEQ:
+		// The runtime will panic if it tries to compare
+		// a type with a nil equality function.
+		return nil
+	case types.AMEM0:
+		return sysClosure("memequal0")
+	case types.AMEM8:
+		return sysClosure("memequal8")
+	case types.AMEM16:
+		return sysClosure("memequal16")
+	case types.AMEM32:
+		return sysClosure("memequal32")
+	case types.AMEM64:
+		return sysClosure("memequal64")
+	case types.AMEM128:
+		return sysClosure("memequal128")
+	case types.ASTRING:
+		return sysClosure("strequal")
+	case types.AINTER:
+		return sysClosure("interequal")
+	case types.ANILINTER:
+		return sysClosure("nilinterequal")
+	case types.AFLOAT32:
+		return sysClosure("f32equal")
+	case types.AFLOAT64:
+		return sysClosure("f64equal")
+	case types.ACPLX64:
+		return sysClosure("c64equal")
+	case types.ACPLX128:
+		return sysClosure("c128equal")
+	case types.AMEM:
+		// make equality closure. The size of the type
+		// is encoded in the closure.
+		closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Width))
+		if len(closure.P) != 0 {
+			return closure
+		}
+		if memequalvarlen == nil {
+			memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func
+		}
+		ot := 0
+		ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
+		ot = objw.Uintptr(closure, ot, uint64(t.Width))
+		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
+		return closure
+	case types.ASPECIAL:
+		break
+	}
+
+	closure := TypeLinksymPrefix(".eqfunc", t)
+	if len(closure.P) > 0 { // already generated
+		return closure
+	}
+	sym := TypeSymPrefix(".eq", t)
+	if base.Flag.LowerR != 0 {
+		fmt.Printf("geneq %v\n", t)
+	}
+
+	// Autogenerate code for equality of structs and arrays.
+
+	base.Pos = base.AutogeneratedPos // less confusing than end of input
+	typecheck.DeclContext = ir.PEXTERN
+
+	// func sym(p, q *T) bool
+	tfn := ir.NewFuncType(base.Pos, nil,
+		[]*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)), ir.NewField(base.Pos, typecheck.Lookup("q"), nil, types.NewPtr(t))},
+		[]*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("r"), nil, types.Types[types.TBOOL])})
+
+	fn := typecheck.DeclFunc(sym, tfn)
+	np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+	nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+	nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
+
+	// Label to jump to if an equality test fails.
+	neq := typecheck.AutoLabel(".neq")
+
+	// We reach here only for types that have equality but
+	// cannot be handled by the standard algorithms,
+	// so t must be either an array or a struct.
+	switch t.Kind() {
+	default:
+		base.Fatalf("geneq %v", t)
+
+	case types.TARRAY:
+		nelem := t.NumElem()
+
+		// checkAll generates code to check the equality of all array elements.
+		// If unroll is at least nelem, checkAll generates:
+		//
+		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
+		// } else {
+		//   return
+		// }
+		//
+		// And so on.
+		//
+		// Otherwise it generates:
+		//
+		// for i := 0; i < nelem; i++ {
+		//   if eq(p[i], q[i]) {
+		//   } else {
+		//     goto neq
+		//   }
+		// }
+		//
+		// TODO(josharian): consider doing some loop unrolling
+		// for larger nelem as well, processing a few elements at a time in a loop.
+		checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
+			// checkIdx generates a node to check for equality at index i.
+			checkIdx := func(i ir.Node) ir.Node {
+				// pi := p[i]
+				pi := ir.NewIndexExpr(base.Pos, np, i)
+				pi.SetBounded(true)
+				pi.SetType(t.Elem())
+				// qi := q[i]
+				qi := ir.NewIndexExpr(base.Pos, nq, i)
+				qi.SetBounded(true)
+				qi.SetType(t.Elem())
+				return eq(pi, qi)
+			}
+
+			if nelem <= unroll {
+				if last {
+					// Do last comparison in a different manner.
+					nelem--
+				}
+				// Generate a series of checks.
+				for i := int64(0); i < nelem; i++ {
+					// if check {} else { goto neq }
+					nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(i)), nil, nil)
+					nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+					fn.Body.Append(nif)
+				}
+				if last {
+					fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(nelem))))
+				}
+			} else {
+				// Generate a for loop.
+				// for i := 0; i < nelem; i++
+				i := typecheck.Temp(types.Types[types.TINT])
+				init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
+				cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(nelem))
+				post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+				loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
+				loop.PtrInit().Append(init)
+				// if eq(pi, qi) {} else { goto neq }
+				nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
+				nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+				loop.Body.Append(nif)
+				fn.Body.Append(loop)
+				if last {
+					fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
+				}
+			}
+		}
+
+		switch t.Elem().Kind() {
+		case types.TSTRING:
+			// Do two loops. First, check that all the lengths match (cheap).
+			// Second, check that all the contents match (expensive).
+			// TODO: when the array size is small, unroll the length match checks.
+			checkAll(3, false, func(pi, qi ir.Node) ir.Node {
+				// Compare lengths.
+				eqlen, _ := EqString(pi, qi)
+				return eqlen
+			})
+			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+				// Compare contents.
+				_, eqmem := EqString(pi, qi)
+				return eqmem
+			})
+		case types.TFLOAT32, types.TFLOAT64:
+			checkAll(2, true, func(pi, qi ir.Node) ir.Node {
+				// p[i] == q[i]
+				return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+			})
+		// TODO: pick apart structs, do them piecemeal too
+		default:
+			checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+				// p[i] == q[i]
+				return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+			})
+		}
+
+	case types.TSTRUCT:
+		// Build a list of conditions to satisfy.
+		// The conditions are a list-of-lists. Conditions are reorderable
+		// within each inner list. The outer lists must be evaluated in order.
+		var conds [][]ir.Node
+		conds = append(conds, []ir.Node{})
+		and := func(n ir.Node) {
+			i := len(conds) - 1
+			conds[i] = append(conds[i], n)
+		}
+
+		// Walk the struct using memequal for runs of AMEM
+		// and calling specific equality tests for the others.
+		for i, fields := 0, t.FieldSlice(); i < len(fields); {
+			f := fields[i]
+
+			// Skip blank-named fields.
+			if f.Sym.IsBlank() {
+				i++
+				continue
+			}
+
+			// Compare non-memory fields with field equality.
+			if !isRegularMemory(f.Type) {
+				if eqCanPanic(f.Type) {
+					// Enforce ordering by starting a new set of reorderable conditions.
+					conds = append(conds, []ir.Node{})
+				}
+				p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
+				q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
+				switch {
+				case f.Type.IsString():
+					eqlen, eqmem := EqString(p, q)
+					and(eqlen)
+					and(eqmem)
+				default:
+					and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
+				}
+				if eqCanPanic(f.Type) {
+					// Also enforce ordering after something that can panic.
+					conds = append(conds, []ir.Node{})
+				}
+				i++
+				continue
+			}
+
+			// Find maximal length run of memory-only fields.
+			size, next := memrun(t, i)
+
+			// TODO(rsc): All the calls to newname are wrong for
+			// cross-package unexported fields.
+			if s := fields[i:next]; len(s) <= 2 {
+				// Two or fewer fields: use plain field equality.
+				for _, f := range s {
+					and(eqfield(np, nq, f.Sym))
+				}
+			} else {
+				// More than two fields: use memequal.
+				and(eqmem(np, nq, f.Sym, size))
+			}
+			i = next
+		}
+
+		// Sort conditions to put runtime calls last.
+		// Preserve the rest of the ordering.
+		var flatConds []ir.Node
+		for _, c := range conds {
+			isCall := func(n ir.Node) bool {
+				return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
+			}
+			sort.SliceStable(c, func(i, j int) bool {
+				return !isCall(c[i]) && isCall(c[j])
+			})
+			flatConds = append(flatConds, c...)
+		}
+
+		if len(flatConds) == 0 {
+			fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
+		} else {
+			for _, c := range flatConds[:len(flatConds)-1] {
+				// if cond {} else { goto neq }
+				n := ir.NewIfStmt(base.Pos, c, nil, nil)
+				n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+				fn.Body.Append(n)
+			}
+			fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
+		}
+	}
+
+	// ret:
+	//   return
+	ret := typecheck.AutoLabel(".ret")
+	fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
+	fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+
+	// neq:
+	//   r = false
+	//   return (or goto ret)
+	fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
+	fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false)))
+	if eqCanPanic(t) || anyCall(fn) {
+		// Epilogue is large, so share it with the equal case.
+		fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
+	} else {
+		// Epilogue is small, so don't bother sharing.
+		fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+	}
+	// TODO(khr): the epilogue size detection condition above isn't perfect.
+	// We should really do a generic CL that shares epilogues across
+	// the board. See #24936.
+
+	if base.Flag.LowerR != 0 {
+		ir.DumpList("geneq body", fn.Body)
+	}
+
+	typecheck.FinishFuncBody()
+
+	fn.SetDupok(true)
+	typecheck.Func(fn)
+
+	ir.CurFunc = fn
+	typecheck.Stmts(fn.Body)
+	ir.CurFunc = nil
+
+	if base.Debug.DclStack != 0 {
+		types.CheckDclstack()
+	}
+
+	// Disable checknils while compiling this code.
+	// We are comparing a struct or an array,
+	// neither of which can be nil, and our comparisons
+	// are shallow.
+	fn.SetNilCheckDisabled(true)
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	// Generate a closure which points at the function we just generated.
+	objw.SymPtr(closure, 0, fn.Linksym(), 0)
+	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+	return closure
+}
+
+func anyCall(fn *ir.Func) bool {
+	return ir.Any(fn, func(n ir.Node) bool {
+		// TODO(rsc): No methods?
+		op := n.Op()
+		return op == ir.OCALL || op == ir.OCALLFUNC
+	})
+}
+
+// eqfield returns the node
+// 	p.field == q.field
+func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
+	nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
+	ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
+	ne := ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)
+	return ne
+}
+
+// EqString returns the nodes
+//   len(s) == len(t)
+// and
+//   memequal(s.ptr, t.ptr, len(s))
+// which can be used to construct a string equality comparison.
+// eqlen must be evaluated before eqmem, and short-circuiting is required.
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+	s = typecheck.Conv(s, types.Types[types.TSTRING])
+	t = typecheck.Conv(t, types.Types[types.TSTRING])
+	sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+	tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
+	slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
+	tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
+
+	fn := typecheck.LookupRuntime("memequal")
+	fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
+	typecheck.Call(call)
+
+	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
+	cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
+	cmp.SetType(types.Types[types.TBOOL])
+	return cmp, call
+}
+
+// EqInterface returns the nodes
+//   s.tab == t.tab (or s.typ == t.typ, as appropriate)
+// and
+//   ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+// which can be used to construct an interface equality comparison.
+// eqtab must be evaluated before eqdata, and short-circuiting is required.
+func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+	if !types.Identical(s.Type(), t.Type()) {
+		base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
+	}
+	// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+	// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+	var fn ir.Node
+	if s.Type().IsEmptyInterface() {
+		fn = typecheck.LookupRuntime("efaceeq")
+	} else {
+		fn = typecheck.LookupRuntime("ifaceeq")
+	}
+
+	stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
+	ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
+	sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
+	tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
+	sdata.SetType(types.Types[types.TUNSAFEPTR])
+	tdata.SetType(types.Types[types.TUNSAFEPTR])
+	sdata.SetTypecheck(1)
+	tdata.SetTypecheck(1)
+
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
+	typecheck.Call(call)
+
+	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
+	cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
+	cmp.SetType(types.Types[types.TBOOL])
+	return cmp, call
+}
+
+// eqmem returns the node
+// 	memequal(&p.field, &q.field [, size])
+func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
+	nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
+	ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
+
+	fn, needsize := eqmemfunc(size, nx.Type().Elem())
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+	call.Args.Append(nx)
+	call.Args.Append(ny)
+	if needsize {
+		call.Args.Append(ir.NewInt(size))
+	}
+
+	return call
+}
+
+func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
+	switch size {
+	default:
+		fn = typecheck.LookupRuntime("memequal")
+		needsize = true
+	case 1, 2, 4, 8, 16:
+		buf := fmt.Sprintf("memequal%d", int(size)*8)
+		fn = typecheck.LookupRuntime(buf)
+	}
+
+	fn = typecheck.SubstArgTypes(fn, t, t)
+	return fn, needsize
+}
+
+// memrun finds runs of struct fields for which memory-only algs are appropriate.
+// t is the parent struct type, and start is the field index at which to start the run.
+// size is the length in bytes of the memory included in the run.
+// next is the index just after the end of the memory run.
+func memrun(t *types.Type, start int) (size int64, next int) {
+	next = start
+	for {
+		next++
+		if next == t.NumFields() {
+			break
+		}
+		// Stop run after a padded field.
+		if types.IsPaddedField(t, next-1) {
+			break
+		}
+		// Also, stop before a blank or non-memory field.
+		if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) {
+			break
+		}
+	}
+	return t.Field(next-1).End() - t.Field(start).Offset, next
+}
+
+func hashmem(t *types.Type) ir.Node {
+	sym := ir.Pkgs.Runtime.Lookup("memhash")
+
+	n := typecheck.NewName(sym)
+	ir.MarkFunc(n)
+	n.SetType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+		types.NewField(base.Pos, nil, types.NewPtr(t)),
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+	}, []*types.Field{
+		types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+	}))
+	return n
+}
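The contract in EqString and EqInterface (cheap test first, short-circuit before the expensive runtime call) is also what geneq arranges for structs: within each reorderable group, non-call conditions are sorted ahead of runtime calls. A sketch of the shape of code geneq effectively produces for a small struct (the type and fields are hypothetical):

	package main

	import "fmt"

	type T struct {
		Name string
		A, B int64
	}

	// eqT mirrors the shape of a generated .eq function: the string
	// comparison folds a cheap length check ahead of the memequal-style
	// content check, the int64 fields form a contiguous memory run, and
	// && short-circuits so later tests never run once one fails.
	func eqT(p, q *T) bool {
		return p.Name == q.Name && // length check, then contents
			p.A == q.A && p.B == q.B // memory-only field run
	}

	func main() {
		x := T{"go", 1, 2}
		y := T{"go", 1, 2}
		fmt.Println(eqT(&x, &y)) // true
	}
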
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
new file mode 100644
index 0000000..3ff14c8
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -0,0 +1,1836 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"strings"
+	"sync"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/bitvec"
+	"cmd/compile/internal/escape"
+	"cmd/compile/internal/inline"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/typebits"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/gcprog"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+type itabEntry struct {
+	t, itype *types.Type
+	lsym     *obj.LSym // symbol of the itab itself
+
+	// symbols of each method in
+	// the itab, sorted by byte offset;
+	// filled in by CompileITabs
+	entries []*obj.LSym
+}
+
+type ptabEntry struct {
+	s *types.Sym
+	t *types.Type
+}
+
+func CountTabs() (numPTabs, numITabs int) {
+	return len(ptabs), len(itabs)
+}
+
+// runtime interface and reflection data structures
+var (
+	signatmu    sync.Mutex // protects signatset and signatslice
+	signatset   = make(map[*types.Type]struct{})
+	signatslice []*types.Type
+
+	itabs []itabEntry
+	ptabs []*ir.Name
+)
+
+type typeSig struct {
+	name  *types.Sym
+	isym  *obj.LSym
+	tsym  *obj.LSym
+	type_ *types.Type
+	mtype *types.Type
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type. This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with runtime/map.go.
+const (
+	BUCKETSIZE  = 8
+	MAXKEYSIZE  = 128
+	MAXELEMSIZE = 128
+)
+
+func structfieldSize() int { return 3 * types.PtrSize }       // Sizeof(runtime.structfield{})
+func imethodSize() int     { return 4 + 4 }                   // Sizeof(runtime.imethod{})
+func commonSize() int      { return 4*types.PtrSize + 8 + 8 } // Sizeof(runtime._type{})
+
+func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
+	if t.Sym() == nil && len(methods(t)) == 0 {
+		return 0
+	}
+	return 4 + 2 + 2 + 4 + 4
+}
+
+func makefield(name string, t *types.Type) *types.Field {
+	sym := (*types.Pkg)(nil).Lookup(name)
+	return types.NewField(src.NoXPos, sym, t)
+}
+
+// MapBucketType makes the map bucket type given the type of the map.
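+// For example, for map[int64]string on a 64-bit system, the bucket built
+// below is conceptually (a sketch; neither key nor elem exceeds the
+// MAXKEYSIZE/MAXELEMSIZE limits, so there is no indirection):
+//
+//	type bmap struct {
+//		topbits  [8]uint8       // top bits of each entry's hash
+//		keys     [8]int64
+//		elems    [8]string
+//		overflow unsafe.Pointer // elems contain pointers, so unsafe.Pointer
+//	}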
+func MapBucketType(t *types.Type) *types.Type {
+	if t.MapType().Bucket != nil {
+		return t.MapType().Bucket
+	}
+
+	keytype := t.Key()
+	elemtype := t.Elem()
+	types.CalcSize(keytype)
+	types.CalcSize(elemtype)
+	if keytype.Width > MAXKEYSIZE {
+		keytype = types.NewPtr(keytype)
+	}
+	if elemtype.Width > MAXELEMSIZE {
+		elemtype = types.NewPtr(elemtype)
+	}
+
+	field := make([]*types.Field, 0, 5)
+
+	// The first field is: uint8 topbits[BUCKETSIZE].
+	arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE)
+	field = append(field, makefield("topbits", arr))
+
+	arr = types.NewArray(keytype, BUCKETSIZE)
+	arr.SetNoalg(true)
+	keys := makefield("keys", arr)
+	field = append(field, keys)
+
+	arr = types.NewArray(elemtype, BUCKETSIZE)
+	arr.SetNoalg(true)
+	elems := makefield("elems", arr)
+	field = append(field, elems)
+
+	// If keys and elems have no pointers, the map implementation
+	// can keep a list of overflow pointers on the side so that
+	// buckets can be marked as having no pointers.
+	// Arrange for the bucket to have no pointers by changing
+	// the type of the overflow field to uintptr in this case.
+	// See comment on hmap.overflow in runtime/map.go.
+	otyp := types.Types[types.TUNSAFEPTR]
+	if !elemtype.HasPointers() && !keytype.HasPointers() {
+		otyp = types.Types[types.TUINTPTR]
+	}
+	overflow := makefield("overflow", otyp)
+	field = append(field, overflow)
+
+	// link up fields
+	bucket := types.NewStruct(types.NoPkg, field[:])
+	bucket.SetNoalg(true)
+	types.CalcSize(bucket)
+
+	// Check invariants that map code depends on.
+	if !types.IsComparable(t.Key()) {
+		base.Fatalf("unsupported map key type for %v", t)
+	}
+	if BUCKETSIZE < 8 {
+		base.Fatalf("bucket size too small for proper alignment")
+	}
+	if keytype.Align > BUCKETSIZE {
+		base.Fatalf("key align too big for %v", t)
+	}
+	if elemtype.Align > BUCKETSIZE {
+		base.Fatalf("elem align too big for %v", t)
+	}
+	if keytype.Width > MAXKEYSIZE {
+		base.Fatalf("key size too large for %v", t)
+	}
+	if elemtype.Width > MAXELEMSIZE {
+		base.Fatalf("elem size too large for %v", t)
+	}
+	if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
+		base.Fatalf("key indirect incorrect for %v", t)
+	}
+	if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
+		base.Fatalf("elem indirect incorrect for %v", t)
+	}
+	if keytype.Width%int64(keytype.Align) != 0 {
+		base.Fatalf("key size not a multiple of key align for %v", t)
+	}
+	if elemtype.Width%int64(elemtype.Align) != 0 {
+		base.Fatalf("elem size not a multiple of elem align for %v", t)
+	}
+	if bucket.Align%keytype.Align != 0 {
+		base.Fatalf("bucket align not multiple of key align %v", t)
+	}
+	if bucket.Align%elemtype.Align != 0 {
+		base.Fatalf("bucket align not multiple of elem align %v", t)
+	}
+	if keys.Offset%int64(keytype.Align) != 0 {
+		base.Fatalf("bad alignment of keys in bmap for %v", t)
+	}
+	if elems.Offset%int64(elemtype.Align) != 0 {
+		base.Fatalf("bad alignment of elems in bmap for %v", t)
+	}
+
+	// Double-check that overflow field is final memory in struct,
+	// with no padding at end.
+	if overflow.Offset != bucket.Width-int64(types.PtrSize) {
+		base.Fatalf("bad offset of overflow in bmap for %v", t)
+	}
+
+	t.MapType().Bucket = bucket
+
+	bucket.StructType().Map = t
+	return bucket
+}
+
+// MapType builds a type representing a Hmap structure for the given map type.
+// Make sure this stays in sync with runtime/map.go.
+func MapType(t *types.Type) *types.Type {
+	if t.MapType().Hmap != nil {
+		return t.MapType().Hmap
+	}
+
+	bmap := MapBucketType(t)
+
+	// build a struct:
+	// type hmap struct {
+	//    count      int
+	//    flags      uint8
+	//    B          uint8
+	//    noverflow  uint16
+	//    hash0      uint32
+	//    buckets    *bmap
+	//    oldbuckets *bmap
+	//    nevacuate  uintptr
+	//    extra      unsafe.Pointer // *mapextra
+	// }
+	// must match runtime/map.go:hmap.
+	fields := []*types.Field{
+		makefield("count", types.Types[types.TINT]),
+		makefield("flags", types.Types[types.TUINT8]),
+		makefield("B", types.Types[types.TUINT8]),
+		makefield("noverflow", types.Types[types.TUINT16]),
+		makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
+		makefield("buckets", types.NewPtr(bmap)),       // Used in walk.go for OMAKEMAP.
+		makefield("oldbuckets", types.NewPtr(bmap)),
+		makefield("nevacuate", types.Types[types.TUINTPTR]),
+		makefield("extra", types.Types[types.TUNSAFEPTR]),
+	}
+
+	hmap := types.NewStruct(types.NoPkg, fields)
+	hmap.SetNoalg(true)
+	types.CalcSize(hmap)
+
+	// The size of hmap should be 48 bytes on 64-bit
+	// and 28 bytes on 32-bit platforms: one word for count,
+	// 8 bytes for flags/B/noverflow/hash0, and four more
+	// words for buckets/oldbuckets/nevacuate/extra.
+	if size := int64(8 + 5*types.PtrSize); hmap.Width != size {
+		base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
+	}
+
+	t.MapType().Hmap = hmap
+	hmap.StructType().Map = t
+	return hmap
+}
+
+// MapIterType builds a type representing an Hiter structure for the given map type.
+// Make sure this stays in sync with runtime/map.go.
+func MapIterType(t *types.Type) *types.Type {
+	if t.MapType().Hiter != nil {
+		return t.MapType().Hiter
+	}
+
+	hmap := MapType(t)
+	bmap := MapBucketType(t)
+
+	// build a struct:
+	// type hiter struct {
+	//    key         *Key
+	//    elem        *Elem
+	//    t           unsafe.Pointer // *MapType
+	//    h           *hmap
+	//    buckets     *bmap
+	//    bptr        *bmap
+	//    overflow    unsafe.Pointer // *[]*bmap
+	//    oldoverflow unsafe.Pointer // *[]*bmap
+	//    startBucket uintptr
+	//    offset      uint8
+	//    wrapped     bool
+	//    B           uint8
+	//    i           uint8
+	//    bucket      uintptr
+	//    checkBucket uintptr
+	// }
+	// must match runtime/map.go:hiter.
+	fields := []*types.Field{
+		makefield("key", types.NewPtr(t.Key())),   // Used in range.go for TMAP.
+		makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
+		makefield("t", types.Types[types.TUNSAFEPTR]),
+		makefield("h", types.NewPtr(hmap)),
+		makefield("buckets", types.NewPtr(bmap)),
+		makefield("bptr", types.NewPtr(bmap)),
+		makefield("overflow", types.Types[types.TUNSAFEPTR]),
+		makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
+		makefield("startBucket", types.Types[types.TUINTPTR]),
+		makefield("offset", types.Types[types.TUINT8]),
+		makefield("wrapped", types.Types[types.TBOOL]),
+		makefield("B", types.Types[types.TUINT8]),
+		makefield("i", types.Types[types.TUINT8]),
+		makefield("bucket", types.Types[types.TUINTPTR]),
+		makefield("checkBucket", types.Types[types.TUINTPTR]),
+	}
+
+	// build iterator struct holding the above fields
+	hiter := types.NewStruct(types.NoPkg, fields)
+	hiter.SetNoalg(true)
+	types.CalcSize(hiter)
+	if hiter.Width != int64(12*types.PtrSize) {
+		base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*types.PtrSize)
+	}
+	t.MapType().Hiter = hiter
+	hiter.StructType().Map = t
+	return hiter
+}
+
+// methods returns the methods of the non-interface type t, sorted by name.
+// Generates stub functions as needed.
+func methods(t *types.Type) []*typeSig {
+	// method type
+	mt := types.ReceiverBaseType(t)
+
+	if mt == nil {
+		return nil
+	}
+	typecheck.CalcMethods(mt)
+
+	// type stored in interface word
+	it := t
+
+	if !types.IsDirectIface(it) {
+		it = types.NewPtr(t)
+	}
+
+	// make list of methods for t,
+	// generating code if necessary.
+	var ms []*typeSig
+	for _, f := range mt.AllMethods().Slice() {
+		if f.Sym == nil {
+			base.Fatalf("method with no sym on %v", mt)
+		}
+		if !f.IsMethod() {
+			base.Fatalf("non-method on %v method %v %v", mt, f.Sym, f)
+		}
+		if f.Type.Recv() == nil {
+			base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f)
+		}
+		if f.Nointerface() {
+			continue
+		}
+
+		// get receiver type for this particular method.
+		// if pointer receiver but non-pointer t and
+		// this is not an embedded pointer inside a struct,
+		// method does not apply.
+		if !types.IsMethodApplicable(t, f) {
+			continue
+		}
+
+		sig := &typeSig{
+			name:  f.Sym,
+			isym:  methodWrapper(it, f),
+			tsym:  methodWrapper(t, f),
+			type_: typecheck.NewMethodType(f.Type, t),
+			mtype: typecheck.NewMethodType(f.Type, nil),
+		}
+		ms = append(ms, sig)
+	}
+
+	return ms
+}
+
+// imethods returns the methods of the interface type t, sorted by name.
+func imethods(t *types.Type) []*typeSig {
+	var methods []*typeSig
+	for _, f := range t.Fields().Slice() {
+		if f.Type.Kind() != types.TFUNC || f.Sym == nil {
+			continue
+		}
+		if f.Sym.IsBlank() {
+			base.Fatalf("unexpected blank symbol in interface method set")
+		}
+		if n := len(methods); n > 0 {
+			last := methods[n-1]
+			if !last.name.Less(f.Sym) {
+				base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
+			}
+		}
+
+		sig := &typeSig{
+			name:  f.Sym,
+			mtype: f.Type,
+			type_: typecheck.NewMethodType(f.Type, nil),
+		}
+		methods = append(methods, sig)
+
+		// NOTE(rsc): Perhaps an oversight that
+		// IfaceType.Method is not in the reflect data.
+		// Generate the method body, so that compiled
+		// code can refer to it.
+		methodWrapper(t, f)
+	}
+
+	return methods
+}
+
+func dimportpath(p *types.Pkg) {
+	if p.Pathsym != nil {
+		return
+	}
+
+	// If we are compiling the runtime package, there are two runtime packages around
+	// -- types.LocalPkg and ir.Pkgs.Runtime. We don't want to produce import path
+	// symbols for both of them, so just produce one for types.LocalPkg.
+	if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
+		return
+	}
+
+	str := p.Path
+	if p == types.LocalPkg {
+		// Note: base.Ctxt.Pkgpath != "", or else dgopkgpath won't call dimportpath.
+		str = base.Ctxt.Pkgpath
+	}
+
+	s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
+	ot := dnameData(s, 0, str, "", nil, false)
+	objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
+	s.Set(obj.AttrContentAddressable, true)
+	p.Pathsym = s
+}
+
+func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
+	if pkg == nil {
+		return objw.Uintptr(s, ot, 0)
+	}
+
+	if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+		// If we don't know the full import path of the package being compiled
+		// (i.e. -p was not passed on the compiler command line), emit a reference to
+		// type..importpath.""., which the linker will rewrite using the correct import path.
+		// Every package that imports this one directly defines the symbol.
+		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
+		ns := base.Ctxt.Lookup(`type..importpath."".`)
+		return objw.SymPtr(s, ot, ns, 0)
+	}
+
+	dimportpath(pkg)
+	return objw.SymPtr(s, ot, pkg.Pathsym, 0)
+}
+
+// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
+func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
+	if pkg == nil {
+		return objw.Uint32(s, ot, 0)
+	}
+	if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+		// If we don't know the full import path of the package being compiled
+		// (i.e. -p was not passed on the compiler command line), emit a reference to
+		// type..importpath.""., which the linker will rewrite using the correct import path.
+		// Every package that imports this one directly defines the symbol.
+		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
+		ns := base.Ctxt.Lookup(`type..importpath."".`)
+		return objw.SymPtrOff(s, ot, ns)
+	}
+
+	dimportpath(pkg)
+	return objw.SymPtrOff(s, ot, pkg.Pathsym)
+}
+
+// dnameField dumps a reflect.name for a struct field.
+func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
+	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
+		base.Fatalf("package mismatch for %v", ft.Sym)
+	}
+	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
+	return objw.SymPtr(lsym, ot, nsym, 0)
+}
+
+// dnameData writes the contents of a reflect.name into s at offset ot.
+func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
+	if len(name) > 1<<16-1 {
+		base.Fatalf("name too long: %s", name)
+	}
+	if len(tag) > 1<<16-1 {
+		base.Fatalf("tag too long: %s", tag)
+	}
+
+	// Encode name and tag. See reflect/type.go for details.
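+	// The layout is: one bits byte (1<<0 exported, 1<<1 tagged, 1<<2 has
+	// pkgPath), a 2-byte big-endian name length, the name bytes, and, if
+	// tagged, a 2-byte big-endian tag length followed by the tag bytes.
+	// For example, an exported, untagged name "Foo" encodes as
+	// 01 00 03 'F' 'o' 'o'.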
+	var bits byte
+	l := 1 + 2 + len(name)
+	if exported {
+		bits |= 1 << 0
+	}
+	if len(tag) > 0 {
+		l += 2 + len(tag)
+		bits |= 1 << 1
+	}
+	if pkg != nil {
+		bits |= 1 << 2
+	}
+	b := make([]byte, l)
+	b[0] = bits
+	b[1] = uint8(len(name) >> 8)
+	b[2] = uint8(len(name))
+	copy(b[3:], name)
+	if len(tag) > 0 {
+		tb := b[3+len(name):]
+		tb[0] = uint8(len(tag) >> 8)
+		tb[1] = uint8(len(tag))
+		copy(tb[2:], tag)
+	}
+
+	ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
+
+	if pkg != nil {
+		ot = dgopkgpathOff(s, ot, pkg)
+	}
+
+	return ot
+}
+
+var dnameCount int
+
+// dname creates a reflect.name for a struct field or method.
+func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
+	// Write out data as "type.." to signal two things to the
+	// linker: first, that when dynamically linking, the symbol
+	// should be moved to a relro section, and second, that the
+	// contents should not be decoded as a type.
+	sname := "type..namedata."
+	if pkg == nil {
+		// In the common case, share data with other packages.
+		if name == "" {
+			if exported {
+				sname += "-noname-exported." + tag
+			} else {
+				sname += "-noname-unexported." + tag
+			}
+		} else {
+			if exported {
+				sname += name + "." + tag
+			} else {
+				sname += name + "-" + tag
+			}
+		}
+	} else {
+		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
+		dnameCount++
+	}
+	s := base.Ctxt.Lookup(sname)
+	if len(s.P) > 0 {
+		return s
+	}
+	ot := dnameData(s, 0, name, tag, pkg, exported)
+	objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
+	s.Set(obj.AttrContentAddressable, true)
+	return s
+}
+
+// dextratype dumps the fields of a runtime.uncommontype.
+// dataAdd is the offset in bytes after the header where the
+// backing array of the []method field is written (by dextratypeData).
+func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
+	m := methods(t)
+	if t.Sym() == nil && len(m) == 0 {
+		return ot
+	}
+	noff := int(types.Rnd(int64(ot), int64(types.PtrSize)))
+	if noff != ot {
+		base.Fatalf("unexpected alignment in dextratype for %v", t)
+	}
+
+	for _, a := range m {
+		writeType(a.type_)
+	}
+
+	ot = dgopkgpathOff(lsym, ot, typePkg(t))
+
+	dataAdd += uncommonSize(t)
+	mcount := len(m)
+	if mcount != int(uint16(mcount)) {
+		base.Fatalf("too many methods on %v: %d", t, mcount)
+	}
+	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
+	if dataAdd != int(uint32(dataAdd)) {
+		base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
+	}
+
+	ot = objw.Uint16(lsym, ot, uint16(mcount))
+	ot = objw.Uint16(lsym, ot, uint16(xcount))
+	ot = objw.Uint32(lsym, ot, uint32(dataAdd))
+	ot = objw.Uint32(lsym, ot, 0)
+	return ot
+}
+
+func typePkg(t *types.Type) *types.Pkg {
+	tsym := t.Sym()
+	if tsym == nil {
+		switch t.Kind() {
+		case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
+			if t.Elem() != nil {
+				tsym = t.Elem().Sym()
+			}
+		}
+	}
+	if tsym != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
+		return tsym.Pkg
+	}
+	return nil
+}
+
+// dextratypeData dumps the backing array for the []method field of
+// runtime.uncommontype.
+func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
+	for _, a := range methods(t) {
+		// ../../../../runtime/type.go:/method
+		exported := types.IsExported(a.name.Name)
+		var pkg *types.Pkg
+		if !exported && a.name.Pkg != typePkg(t) {
+			pkg = a.name.Pkg
+		}
+		nsym := dname(a.name.Name, "", pkg, exported)
+
+		ot = objw.SymPtrOff(lsym, ot, nsym)
+		ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
+		ot = dmethodptrOff(lsym, ot, a.isym)
+		ot = dmethodptrOff(lsym, ot, a.tsym)
+	}
+	return ot
+}
+
+func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
+	objw.Uint32(s, ot, 0)
+	r := obj.Addrel(s)
+	r.Off = int32(ot)
+	r.Siz = 4
+	r.Sym = x
+	r.Type = objabi.R_METHODOFF
+	return ot + 4
+}
+
+var kinds = []int{
+	types.TINT:        objabi.KindInt,
+	types.TUINT:       objabi.KindUint,
+	types.TINT8:       objabi.KindInt8,
+	types.TUINT8:      objabi.KindUint8,
+	types.TINT16:      objabi.KindInt16,
+	types.TUINT16:     objabi.KindUint16,
+	types.TINT32:      objabi.KindInt32,
+	types.TUINT32:     objabi.KindUint32,
+	types.TINT64:      objabi.KindInt64,
+	types.TUINT64:     objabi.KindUint64,
+	types.TUINTPTR:    objabi.KindUintptr,
+	types.TFLOAT32:    objabi.KindFloat32,
+	types.TFLOAT64:    objabi.KindFloat64,
+	types.TBOOL:       objabi.KindBool,
+	types.TSTRING:     objabi.KindString,
+	types.TPTR:        objabi.KindPtr,
+	types.TSTRUCT:     objabi.KindStruct,
+	types.TINTER:      objabi.KindInterface,
+	types.TCHAN:       objabi.KindChan,
+	types.TMAP:        objabi.KindMap,
+	types.TARRAY:      objabi.KindArray,
+	types.TSLICE:      objabi.KindSlice,
+	types.TFUNC:       objabi.KindFunc,
+	types.TCOMPLEX64:  objabi.KindComplex64,
+	types.TCOMPLEX128: objabi.KindComplex128,
+	types.TUNSAFEPTR:  objabi.KindUnsafePointer,
+}
+
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+//	cmd/compile/internal/reflectdata/reflect.go (this file)
+//	cmd/link/internal/ld/decodesym.go
+//	reflect/type.go
+//	runtime/type.go
+const (
+	tflagUncommon      = 1 << 0 // there is a pointer to uncommontype following the type descriptor
+	tflagExtraStar     = 1 << 1 // the stored name has an extra leading "*"
+	tflagNamed         = 1 << 2 // the type has a name
+	tflagRegularMemory = 1 << 3 // equal and hash can treat values of this type as a single memory region
+)
+
+var (
+	memhashvarlen  *obj.LSym
+	memequalvarlen *obj.LSym
+)
+
+// dcommontype dumps the contents of a reflect.rtype (runtime._type).
+func dcommontype(lsym *obj.LSym, t *types.Type) int {
+	types.CalcSize(t)
+	eqfunc := geneq(t)
+
+	sptrWeak := true
+	var sptr *obj.LSym
+	if !t.IsPtr() || t.IsPtrElem() {
+		tptr := types.NewPtr(t)
+		if t.Sym() != nil || methods(tptr) != nil {
+			sptrWeak = false
+		}
+		sptr = writeType(tptr)
+	}
+
+	gcsym, useGCProg, ptrdata := dgcsym(t)
+
+	// ../../../../reflect/type.go:/^type.rtype
+	// actual type structure
+	//	type rtype struct {
+	//		size          uintptr
+	//		ptrdata       uintptr
+	//		hash          uint32
+	//		tflag         tflag
+	//		align         uint8
+	//		fieldAlign    uint8
+	//		kind          uint8
+	//		equal         func(unsafe.Pointer, unsafe.Pointer) bool
+	//		gcdata        *byte
+	//		str           nameOff
+	//		ptrToThis     typeOff
+	//	}
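+	// On 64-bit systems this header is 48 bytes (commonSize()); on 32-bit, 32 bytes.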
+	ot := 0
+	ot = objw.Uintptr(lsym, ot, uint64(t.Width))
+	ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
+	ot = objw.Uint32(lsym, ot, types.TypeHash(t))
+
+	var tflag uint8
+	if uncommonSize(t) != 0 {
+		tflag |= tflagUncommon
+	}
+	if t.Sym() != nil && t.Sym().Name != "" {
+		tflag |= tflagNamed
+	}
+	if isRegularMemory(t) {
+		tflag |= tflagRegularMemory
+	}
+
+	exported := false
+	p := t.LongString()
+	// If we're writing out type T,
+	// we are very likely to write out type *T as well.
+	// Use the string "*T"[1:] for "T", so that the two
+	// share storage. This is a cheap way to reduce the
+	// amount of space taken up by reflect strings.
+	if !strings.HasPrefix(p, "*") {
+		p = "*" + p
+		tflag |= tflagExtraStar
+		if t.Sym() != nil {
+			exported = types.IsExported(t.Sym().Name)
+		}
+	} else {
+		if t.Elem() != nil && t.Elem().Sym() != nil {
+			exported = types.IsExported(t.Elem().Sym().Name)
+		}
+	}
+
+	ot = objw.Uint8(lsym, ot, tflag)
+
+	// runtime (and common sense) expects alignment to be a power of two.
+	i := int(t.Align)
+
+	if i == 0 {
+		i = 1
+	}
+	if i&(i-1) != 0 {
+		base.Fatalf("invalid alignment %d for %v", t.Align, t)
+	}
+	ot = objw.Uint8(lsym, ot, t.Align) // align
+	ot = objw.Uint8(lsym, ot, t.Align) // fieldAlign
+
+	i = kinds[t.Kind()]
+	if types.IsDirectIface(t) {
+		i |= objabi.KindDirectIface
+	}
+	if useGCProg {
+		i |= objabi.KindGCProg
+	}
+	ot = objw.Uint8(lsym, ot, uint8(i)) // kind
+	if eqfunc != nil {
+		ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
+	} else {
+		ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
+	}
+	ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
+
+	nsym := dname(p, "", nil, exported)
+	ot = objw.SymPtrOff(lsym, ot, nsym) // str
+	// ptrToThis
+	if sptr == nil {
+		ot = objw.Uint32(lsym, ot, 0)
+	} else if sptrWeak {
+		ot = objw.SymPtrWeakOff(lsym, ot, sptr)
+	} else {
+		ot = objw.SymPtrOff(lsym, ot, sptr)
+	}
+
+	return ot
+}
+
+// TrackSym returns the symbol for tracking use of field/method f, assumed
+// to be a member of struct/interface type t.
+func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
+	return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0)
+}
+
+func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
+	p := prefix + "." + t.ShortString()
+	s := types.TypeSymLookup(p)
+
+	// This function is for looking up type-related generated functions
+	// (e.g. eq and hash). Make sure they are indeed generated.
+	signatmu.Lock()
+	NeedRuntimeType(t)
+	signatmu.Unlock()
+
+	return s
+}
+
+func TypeSym(t *types.Type) *types.Sym {
+	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
+		base.Fatalf("TypeSym %v", t)
+	}
+	if t.Kind() == types.TFUNC && t.Recv() != nil {
+		base.Fatalf("misuse of method type: %v", t)
+	}
+	s := types.TypeSym(t)
+	signatmu.Lock()
+	NeedRuntimeType(t)
+	signatmu.Unlock()
+	return s
+}
+
+func TypeLinksymPrefix(prefix string, t *types.Type) *obj.LSym {
+	return TypeSymPrefix(prefix, t).Linksym()
+}
+
+func TypeLinksymLookup(name string) *obj.LSym {
+	return types.TypeSymLookup(name).Linksym()
+}
+
+func TypeLinksym(t *types.Type) *obj.LSym {
+	return TypeSym(t).Linksym()
+}
+
+func TypePtr(t *types.Type) *ir.AddrExpr {
+	n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8])
+	return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
+}
+
+func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
+	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
+		base.Fatalf("ITabAddr(%v, %v)", t, itype)
+	}
+	s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString())
+	if !existed {
+		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
+	}
+
+	lsym := s.Linksym()
+	n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+	return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
+}
+
+// needkeyupdate reports whether map updates with t as a key
+// need the key to be updated.
+func needkeyupdate(t *types.Type) bool {
+	switch t.Kind() {
+	case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
+		types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
+		return false
+
+	case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0
+		types.TINTER,
+		types.TSTRING: // strings might have smaller backing stores
+		return true
+
+	case types.TARRAY:
+		return needkeyupdate(t.Elem())
+
+	case types.TSTRUCT:
+		for _, t1 := range t.Fields().Slice() {
+			if needkeyupdate(t1.Type) {
+				return true
+			}
+		}
+		return false
+
+	default:
+		base.Fatalf("bad type for map key: %v", t)
+		return true
+	}
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *types.Type) bool {
+	switch t.Kind() {
+	case types.TINTER:
+		return true
+
+	case types.TARRAY:
+		return hashMightPanic(t.Elem())
+
+	case types.TSTRUCT:
+		for _, t1 := range t.Fields().Slice() {
+			if hashMightPanic(t1.Type) {
+				return true
+			}
+		}
+		return false
+
+	default:
+		return false
+	}
+}
+
+// formalType replaces byte and rune aliases with real types.
+// They've been separate internally to make error messages
+// better, but we have to merge them in the reflect tables.
+func formalType(t *types.Type) *types.Type {
+	if t == types.ByteType || t == types.RuneType {
+		return types.Types[t.Kind()]
+	}
+	return t
+}
+
+func writeType(t *types.Type) *obj.LSym {
+	t = formalType(t)
+	if t.IsUntyped() {
+		base.Fatalf("writeType %v", t)
+	}
+
+	s := types.TypeSym(t)
+	lsym := s.Linksym()
+	if s.Siggen() {
+		return lsym
+	}
+	s.SetSiggen(true)
+
+	// special case (look for runtime below):
+	// when compiling package runtime,
+	// emit the type structures for int, float, etc.
+	tbase := t
+
+	if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+		tbase = t.Elem()
+	}
+	dupok := 0
+	if tbase.Sym() == nil {
+		dupok = obj.DUPOK
+	}
+
+	if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
+		// named types from other files are defined only by those files
+		if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
+			if i := typecheck.BaseTypeIndex(t); i >= 0 {
+				lsym.Pkg = tbase.Sym().Pkg.Prefix
+				lsym.SymIdx = int32(i)
+				lsym.Set(obj.AttrIndexed, true)
+			}
+			return lsym
+		}
+		// TODO(mdempsky): Investigate whether this can happen.
+		if tbase.Kind() == types.TFORW {
+			return lsym
+		}
+	}
+
+	ot := 0
+	switch t.Kind() {
+	default:
+		ot = dcommontype(lsym, t)
+		ot = dextratype(lsym, ot, t, 0)
+
+	case types.TARRAY:
+		// ../../../../runtime/type.go:/arrayType
+		s1 := writeType(t.Elem())
+		t2 := types.NewSlice(t.Elem())
+		s2 := writeType(t2)
+		ot = dcommontype(lsym, t)
+		ot = objw.SymPtr(lsym, ot, s1, 0)
+		ot = objw.SymPtr(lsym, ot, s2, 0)
+		ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
+		ot = dextratype(lsym, ot, t, 0)
+
+	case types.TSLICE:
+		// ../../../../runtime/type.go:/sliceType
+		s1 := writeType(t.Elem())
+		ot = dcommontype(lsym, t)
+		ot = objw.SymPtr(lsym, ot, s1, 0)
+		ot = dextratype(lsym, ot, t, 0)
+
+	case types.TCHAN:
+		// ../../../../runtime/type.go:/chanType
+		s1 := writeType(t.Elem())
+		ot = dcommontype(lsym, t)
+		ot = objw.SymPtr(lsym, ot, s1, 0)
+		ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
+		ot = dextratype(lsym, ot, t, 0)
+
+	case types.TFUNC:
+		for _, t1 := range t.Recvs().Fields().Slice() {
+			writeType(t1.Type)
+		}
+		isddd := false
+		for _, t1 := range t.Params().Fields().Slice() {
+			isddd = t1.IsDDD()
+			writeType(t1.Type)
+		}
+		for _, t1 := range t.Results().Fields().Slice() {
+			writeType(t1.Type)
+		}
+
+		ot = dcommontype(lsym, t)
+		inCount := t.NumRecvs() + t.NumParams()
+		outCount := t.NumResults()
+		if isddd {
+			outCount |= 1 << 15
+		}
+		ot = objw.Uint16(lsym, ot, uint16(inCount))
+		ot = objw.Uint16(lsym, ot, uint16(outCount))
+		if types.PtrSize == 8 {
+			ot += 4 // align for *rtype
+		}
+
+		dataAdd := (inCount + t.NumResults()) * types.PtrSize
+		ot = dextratype(lsym, ot, t, dataAdd)
+
+		// Array of rtype pointers follows funcType.
+		for _, t1 := range t.Recvs().Fields().Slice() {
+			ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+		}
+		for _, t1 := range t.Params().Fields().Slice() {
+			ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+		}
+		for _, t1 := range t.Results().Fields().Slice() {
+			ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+		}
+
+	case types.TINTER:
+		m := imethods(t)
+		n := len(m)
+		for _, a := range m {
+			writeType(a.type_)
+		}
+
+		// ../../../../runtime/type.go:/interfaceType
+		ot = dcommontype(lsym, t)
+
+		var tpkg *types.Pkg
+		if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
+			tpkg = t.Sym().Pkg
+		}
+		ot = dgopkgpath(lsym, ot, tpkg)
+
+		ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+		ot = objw.Uintptr(lsym, ot, uint64(n))
+		ot = objw.Uintptr(lsym, ot, uint64(n))
+		dataAdd := imethodSize() * n
+		ot = dextratype(lsym, ot, t, dataAdd)
+
+		for _, a := range m {
+			// ../../../../runtime/type.go:/imethod
+			exported := types.IsExported(a.name.Name)
+			var pkg *types.Pkg
+			if !exported && a.name.Pkg != tpkg {
+				pkg = a.name.Pkg
+			}
+			nsym := dname(a.name.Name, "", pkg, exported)
+
+			ot = objw.SymPtrOff(lsym, ot, nsym)
+			ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
+		}
+
+	// ../../../../runtime/type.go:/mapType
+	case types.TMAP:
+		s1 := writeType(t.Key())
+		s2 := writeType(t.Elem())
+		s3 := writeType(MapBucketType(t))
+		hasher := genhash(t.Key())
+
+		ot = dcommontype(lsym, t)
+		ot = objw.SymPtr(lsym, ot, s1, 0)
+		ot = objw.SymPtr(lsym, ot, s2, 0)
+		ot = objw.SymPtr(lsym, ot, s3, 0)
+		ot = objw.SymPtr(lsym, ot, hasher, 0)
+		var flags uint32
+		// Note: flags must match maptype accessors in ../../../../runtime/type.go
+		// and maptype builder in ../../../../reflect/type.go:MapOf.
+		if t.Key().Width > MAXKEYSIZE {
+			ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+			flags |= 1 // indirect key
+		} else {
+			ot = objw.Uint8(lsym, ot, uint8(t.Key().Width))
+		}
+
+		if t.Elem().Width > MAXELEMSIZE {
+			ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+			flags |= 2 // indirect value
+		} else {
+			ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
+		}
+		ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Width))
+		if types.IsReflexive(t.Key()) {
+			flags |= 4 // reflexive key
+		}
+		if needkeyupdate(t.Key()) {
+			flags |= 8 // need key update
+		}
+		if hashMightPanic(t.Key()) {
+			flags |= 16 // hash might panic
+		}
+		ot = objw.Uint32(lsym, ot, flags)
+		ot = dextratype(lsym, ot, t, 0)
+
+	case types.TPTR:
+		if t.Elem().Kind() == types.TANY {
+			// ../../../../runtime/type.go:/UnsafePointerType
+			ot = dcommontype(lsym, t)
+			ot = dextratype(lsym, ot, t, 0)
+
+			break
+		}
+
+		// ../../../../runtime/type.go:/ptrType
+		s1 := writeType(t.Elem())
+
+		ot = dcommontype(lsym, t)
+		ot = objw.SymPtr(lsym, ot, s1, 0)
+		ot = dextratype(lsym, ot, t, 0)
+
+	// ../../../../runtime/type.go:/structType
+	// for security, only the exported fields.
+	case types.TSTRUCT:
+		fields := t.Fields().Slice()
+		for _, t1 := range fields {
+			writeType(t1.Type)
+		}
+
+		// All non-exported struct field names within a struct
+		// type must originate from a single package. By
+		// identifying and recording that package within the
+		// struct type descriptor, we can omit that
+		// information from the field descriptors.
+		var spkg *types.Pkg
+		for _, f := range fields {
+			if !types.IsExported(f.Sym.Name) {
+				spkg = f.Sym.Pkg
+				break
+			}
+		}
+
+		ot = dcommontype(lsym, t)
+		ot = dgopkgpath(lsym, ot, spkg)
+		ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+		ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+		ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+
+		dataAdd := len(fields) * structfieldSize()
+		ot = dextratype(lsym, ot, t, dataAdd)
+
+		for _, f := range fields {
+			// ../../../../runtime/type.go:/structField
+			ot = dnameField(lsym, ot, spkg, f)
+			ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
+			offsetAnon := uint64(f.Offset) << 1
+			if offsetAnon>>1 != uint64(f.Offset) {
+				base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
+			}
+			if f.Embedded != 0 {
+				offsetAnon |= 1
+			}
+			ot = objw.Uintptr(lsym, ot, offsetAnon)
+		}
+	}
+
+	ot = dextratypeData(lsym, ot, t)
+	objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
+
+	// The linker will leave a table of all the typelinks for
+	// types in the binary, so the runtime can find them.
+	//
+	// When buildmode=shared, all types are in typelinks so the
+	// runtime can deduplicate type pointers.
+	keep := base.Ctxt.Flag_dynlink
+	if !keep && t.Sym() == nil {
+		// For an unnamed type, we only need the link if the type can
+		// be created at run time by reflect.PtrTo and similar
+		// functions. If the type exists in the program, those
+		// functions must return the existing type structure rather
+		// than creating a new one.
+		switch t.Kind() {
+		case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
+			keep = true
+		}
+	}
+	// Do not put Noalg types in typelinks.  See issue #22605.
+	if types.TypeHasNoAlg(t) {
+		keep = false
+	}
+	lsym.Set(obj.AttrMakeTypelink, keep)
+
+	return lsym
+}
+
+// InterfaceMethodOffset returns the offset of the i-th method in the interface
+// type descriptor, ityp.
+func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
+	// interface type descriptor layout is struct {
+	//   _type        // commonSize
+	//   pkgpath      // 1 word
+	//   []imethod    // 3 words (pointing to [...]imethod below)
+	//   uncommontype // uncommonSize
+	//   [...]imethod
+	// }
+	// Each imethod is 8 bytes: two 4-byte offsets (see imethodSize).
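+	// For example, on 64-bit, for a named interface type (uncommonSize = 16),
+	// method i begins at 48 + 4*8 + 16 + 8*i = 96 + 8*i bytes into the descriptor.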
+	return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
+}
+
+// CompileITabs gathers, for each itabEntry, the methods on
+// the concrete type that implement the interface.
+func CompileITabs() {
+	for i := range itabs {
+		tab := &itabs[i]
+		methods := genfun(tab.t, tab.itype)
+		if len(methods) == 0 {
+			continue
+		}
+		tab.entries = methods
+	}
+}
+
+// genfun returns, for the given concrete type and interface
+// type, the (sorted) set of methods on the concrete type
+// that implement the interface.
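+// Both method lists are sorted by name, so a single linear pass suffices;
+// e.g. sigs [A C] matched against methods [A B C D] matches A, skips B,
+// matches C, and returns the two interface-call symbols.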
+func genfun(t, it *types.Type) []*obj.LSym {
+	if t == nil || it == nil {
+		return nil
+	}
+	sigs := imethods(it)
+	methods := methods(t)
+	out := make([]*obj.LSym, 0, len(sigs))
+	// TODO(mdempsky): Short circuit before calling methods(t)?
+	// See discussion on CL 105039.
+	if len(sigs) == 0 {
+		return nil
+	}
+
+	// both sigs and methods are sorted by name,
+	// so we can find the intersect in a single pass
+	for _, m := range methods {
+		if m.name == sigs[0].name {
+			out = append(out, m.isym)
+			sigs = sigs[1:]
+			if len(sigs) == 0 {
+				break
+			}
+		}
+	}
+
+	if len(sigs) != 0 {
+		base.Fatalf("incomplete itab")
+	}
+
+	return out
+}
+
+// ITabSym uses the information gathered in
+// CompileITabs to de-virtualize interface methods.
+// Since this is called by the SSA backend, it shouldn't
+// generate additional Nodes, Syms, etc.
+func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
+	var syms []*obj.LSym
+	if it == nil {
+		return nil
+	}
+
+	for i := range itabs {
+		e := &itabs[i]
+		if e.lsym == it {
+			syms = e.entries
+			break
+		}
+	}
+	if syms == nil {
+		return nil
+	}
+
+	// keep this arithmetic in sync with *itab layout
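+	// fun[0] follows inter and _type (2 words) plus hash and padding (8 bytes);
+	// on 64-bit that is offset 24, so offset 24 selects method 0, 32 method 1, etc.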
+	methodnum := int((offset - 2*int64(types.PtrSize) - 8) / int64(types.PtrSize))
+	if methodnum >= len(syms) {
+		return nil
+	}
+	return syms[methodnum]
+}
+
+// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
+func NeedRuntimeType(t *types.Type) {
+	if _, ok := signatset[t]; !ok {
+		signatset[t] = struct{}{}
+		signatslice = append(signatslice, t)
+	}
+}
+
+func WriteRuntimeTypes() {
+	// Process signatset. Use a loop, as writeType adds
+	// entries to signatset while it is being processed.
+	signats := make([]typeAndStr, len(signatslice))
+	for len(signatslice) > 0 {
+		signats = signats[:0]
+		// Transfer entries to a slice and sort, for reproducible builds.
+		for _, t := range signatslice {
+			signats = append(signats, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
+			delete(signatset, t)
+		}
+		signatslice = signatslice[:0]
+		sort.Sort(typesByString(signats))
+		for _, ts := range signats {
+			t := ts.t
+			writeType(t)
+			if t.Sym() != nil {
+				writeType(types.NewPtr(t))
+			}
+		}
+	}
+}
+
+func WriteTabs() {
+	// process itabs
+	for _, i := range itabs {
+		// dump empty itab symbol into i.lsym
+		// type itab struct {
+		//   inter  *interfacetype
+		//   _type  *_type
+		//   hash   uint32
+		//   _      [4]byte
+		//   fun    [1]uintptr // variable sized
+		// }
+		o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0)
+		o = objw.SymPtr(i.lsym, o, writeType(i.t), 0)
+		o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
+		o += 4                                          // skip unused field
+		for _, fn := range genfun(i.t, i.itype) {
+			o = objw.SymPtr(i.lsym, o, fn, 0) // method pointer for each method
+		}
+		// Nothing writes static itabs, so they are read only.
+		objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+		i.lsym.Set(obj.AttrContentAddressable, true)
+	}
+
+	// process ptabs
+	if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
+		ot := 0
+		s := base.Ctxt.Lookup("go.plugin.tabs")
+		for _, p := range ptabs {
+			// Dump ptab symbol into go.pluginsym package.
+			//
+			// type ptab struct {
+			//	name nameOff
+			//	typ  typeOff // pointer to symbol
+			// }
+			nsym := dname(p.Sym().Name, "", nil, true)
+			t := p.Type()
+			if p.Class != ir.PFUNC {
+				t = types.NewPtr(t)
+			}
+			tsym := writeType(t)
+			ot = objw.SymPtrOff(s, ot, nsym)
+			ot = objw.SymPtrOff(s, ot, tsym)
+			// Plugin exports symbols as interfaces. Mark their types
+			// as UsedInIface.
+			tsym.Set(obj.AttrUsedInIface, true)
+		}
+		objw.Global(s, int32(ot), int16(obj.RODATA))
+
+		ot = 0
+		s = base.Ctxt.Lookup("go.plugin.exports")
+		for _, p := range ptabs {
+			ot = objw.SymPtr(s, ot, p.Linksym(), 0)
+		}
+		objw.Global(s, int32(ot), int16(obj.RODATA))
+	}
+}
+
+func WriteImportStrings() {
+	// generate import strings for imported packages
+	for _, p := range types.ImportedPkgList() {
+		dimportpath(p)
+	}
+}
+
+func WriteBasicTypes() {
+	// do basic types if compiling package runtime.
+	// they have to be in at least one package,
+	// and runtime is always loaded implicitly,
+	// so this is as good as any.
+	// another possible choice would be package main,
+	// but using runtime means fewer copies in object files.
+	if base.Ctxt.Pkgpath == "runtime" {
+		for i := types.Kind(1); i <= types.TBOOL; i++ {
+			writeType(types.NewPtr(types.Types[i]))
+		}
+		writeType(types.NewPtr(types.Types[types.TSTRING]))
+		writeType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
+
+		// emit type structs for error and func(error) string.
+		// The latter is the type of an auto-generated wrapper.
+		writeType(types.NewPtr(types.ErrorType))
+
+		writeType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+			types.NewField(base.Pos, nil, types.ErrorType),
+		}, []*types.Field{
+			types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
+		}))
+
+		// add paths for runtime and main, which the linker imports implicitly.
+		dimportpath(ir.Pkgs.Runtime)
+
+		if base.Flag.Race {
+			dimportpath(types.NewPkg("runtime/race", ""))
+		}
+		if base.Flag.MSan {
+			dimportpath(types.NewPkg("runtime/msan", ""))
+		}
+
+		dimportpath(types.NewPkg("main", ""))
+	}
+}
+
+type typeAndStr struct {
+	t       *types.Type
+	short   string
+	regular string
+}
+
+type typesByString []typeAndStr
+
+func (a typesByString) Len() int { return len(a) }
+func (a typesByString) Less(i, j int) bool {
+	if a[i].short != a[j].short {
+		return a[i].short < a[j].short
+	}
+	// When the only difference between the types is whether
+	// they refer to byte or uint8, such as **byte vs **uint8,
+	// the types' ShortStrings can be identical.
+	// To preserve deterministic sort ordering, sort these by String().
+	if a[i].regular != a[j].regular {
+		return a[i].regular < a[j].regular
+	}
+	// Identical anonymous interfaces defined in different locations
+	// will be equal for the above checks, but different in DWARF output.
+	// Sort by source position to ensure deterministic order.
+	// See issues 27013 and 30202.
+	if a[i].t.Kind() == types.TINTER && a[i].t.Methods().Len() > 0 {
+		return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos)
+	}
+	return false
+}
+func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
+// which holds 1-bit entries describing where pointers are in a given type.
+// Above this length, the GC information is recorded as a GC program,
+// which can express repetition compactly. In either form, the
+// information is used by the runtime to initialize the heap bitmap,
+// and for large types (like 128 or more words), they are roughly the
+// same speed. GC programs are never much larger and often more
+// compact. (If large arrays are involved, they can be arbitrarily
+// more compact.)
+//
+// The cutoff must be large enough that any allocation large enough to
+// use a GC program is large enough that it does not share heap bitmap
+// bytes with any other objects, allowing the GC program execution to
+// assume an aligned start and not use atomic operations. In the current
+// runtime, this means all malloc size classes larger than the cutoff must
+// be multiples of four words. On 32-bit systems that's 16 bytes, and
+// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
+// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
+// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
+// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
+// must be >= 4.
+//
+// We used to use 16 because the GC programs do have some constant overhead
+// to get started, and processing 128 pointers seems to be enough to
+// amortize that overhead well.
+//
+// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
+// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
+// use bitmaps for objects up to 64 kB in size.
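+// (2048 mask bytes cover 2048*8 = 16384 pointer-sized words: 64 kB of data
+// on 32-bit systems and 128 kB on 64-bit systems.)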
+//
+// Also known to reflect/type.go.
+//
+const maxPtrmaskBytes = 2048
+
+// dgcsym emits and returns a data symbol containing GC information for type t,
+// along with a boolean reporting whether the UseGCProg bit should be set in
+// the type kind, and the ptrdata field to record in the reflect type information.
+func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
+	ptrdata = types.PtrDataSize(t)
+	if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 {
+		lsym = dgcptrmask(t)
+		return
+	}
+
+	useGCProg = true
+	lsym, ptrdata = dgcprog(t)
+	return
+}
+
+// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
+func dgcptrmask(t *types.Type) *obj.LSym {
+	ptrmask := make([]byte, (types.PtrDataSize(t)/int64(types.PtrSize)+7)/8)
+	fillptrmask(t, ptrmask)
+	p := fmt.Sprintf("gcbits.%x", ptrmask)
+
+	sym := ir.Pkgs.Runtime.Lookup(p)
+	lsym := sym.Linksym()
+	if !sym.Uniq() {
+		sym.SetUniq(true)
+		for i, x := range ptrmask {
+			objw.Uint8(lsym, i, x)
+		}
+		objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+		lsym.Set(obj.AttrContentAddressable, true)
+	}
+	return lsym
+}
+
+// fillptrmask fills in ptrmask with 1s corresponding to the
+// word offsets in t that hold pointers.
+// ptrmask is assumed to fit at least types.PtrDataSize(t)/types.PtrSize bits.
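+// For example, for struct { p *int; x int; q *int }, the first three words
+// yield bits 1, 0, 1, packed LSB-first into ptrmask[0] as 0b101.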
+func fillptrmask(t *types.Type, ptrmask []byte) {
+	for i := range ptrmask {
+		ptrmask[i] = 0
+	}
+	if !t.HasPointers() {
+		return
+	}
+
+	vec := bitvec.New(8 * int32(len(ptrmask)))
+	typebits.Set(t, 0, vec)
+
+	nptr := types.PtrDataSize(t) / int64(types.PtrSize)
+	for i := int64(0); i < nptr; i++ {
+		if vec.Get(int32(i)) {
+			ptrmask[i/8] |= 1 << (uint(i) % 8)
+		}
+	}
+}
+
+// dgcprog emits and returns the symbol containing a GC program for type t,
+// along with the size of the data described by the program (in the range
+// [types.PtrDataSize(t), t.Width]).
+// In practice, the size is types.PtrDataSize(t) except for non-trivial arrays.
+// For non-trivial arrays, the program describes the full t.Width size.
+func dgcprog(t *types.Type) (*obj.LSym, int64) {
+	types.CalcSize(t)
+	if t.Width == types.BADWIDTH {
+		base.Fatalf("dgcprog: %v badwidth", t)
+	}
+	lsym := TypeLinksymPrefix(".gcprog", t)
+	var p gcProg
+	p.init(lsym)
+	p.emit(t, 0)
+	offset := p.w.BitIndex() * int64(types.PtrSize)
+	p.end()
+	if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Width {
+		base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+	}
+	return lsym, offset
+}
+
+type gcProg struct {
+	lsym   *obj.LSym
+	symoff int
+	w      gcprog.Writer
+}
+
+func (p *gcProg) init(lsym *obj.LSym) {
+	p.lsym = lsym
+	p.symoff = 4 // first 4 bytes hold program length
+	p.w.Init(p.writeByte)
+	if base.Debug.GCProg > 0 {
+		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
+		p.w.Debug(os.Stderr)
+	}
+}
+
+func (p *gcProg) writeByte(x byte) {
+	p.symoff = objw.Uint8(p.lsym, p.symoff, x)
+}
+
+func (p *gcProg) end() {
+	p.w.End()
+	objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
+	objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+	if base.Debug.GCProg > 0 {
+		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
+	}
+}
+
+func (p *gcProg) emit(t *types.Type, offset int64) {
+	types.CalcSize(t)
+	if !t.HasPointers() {
+		return
+	}
+	if t.Width == int64(types.PtrSize) {
+		p.w.Ptr(offset / int64(types.PtrSize))
+		return
+	}
+	switch t.Kind() {
+	default:
+		base.Fatalf("gcProg.emit: unexpected type %v", t)
+
+	case types.TSTRING:
+		p.w.Ptr(offset / int64(types.PtrSize))
+
+	case types.TINTER:
+		// Note: the first word isn't a pointer. See comment in typebits.Set
+		p.w.Ptr(offset/int64(types.PtrSize) + 1)
+
+	case types.TSLICE:
+		p.w.Ptr(offset / int64(types.PtrSize))
+
+	case types.TARRAY:
+		if t.NumElem() == 0 {
+			// should have been handled by haspointers check above
+			base.Fatalf("gcProg.emit: empty array")
+		}
+
+		// Flatten array-of-array-of-array to just a big array by multiplying counts.
+		count := t.NumElem()
+		elem := t.Elem()
+		for elem.IsArray() {
+			count *= elem.NumElem()
+			elem = elem.Elem()
+		}
+
+		if !p.w.ShouldRepeat(elem.Width/int64(types.PtrSize), count) {
+			// Cheaper to just emit the bits.
+			for i := int64(0); i < count; i++ {
+				p.emit(elem, offset+i*elem.Width)
+			}
+			return
+		}
+		p.emit(elem, offset)
+		p.w.ZeroUntil((offset + elem.Width) / int64(types.PtrSize))
+		p.w.Repeat(elem.Width/int64(types.PtrSize), count-1)
+
+	case types.TSTRUCT:
+		for _, t1 := range t.Fields().Slice() {
+			p.emit(t1.Type, offset+t1.Offset)
+		}
+	}
+}
+
+// ZeroAddr returns the address of a symbol with at least
+// size bytes of zeros.
+func ZeroAddr(size int64) ir.Node {
+	if size >= 1<<31 {
+		base.Fatalf("map elem too big %d", size)
+	}
+	if ZeroSize < size {
+		ZeroSize = size
+	}
+	lsym := base.PkgLinksym("go.map", "zero", obj.ABI0)
+	x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+	return typecheck.Expr(typecheck.NodAddr(x))
+}
+
+func CollectPTabs() {
+	if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
+		return
+	}
+	for _, exportn := range typecheck.Target.Exports {
+		s := exportn.Sym()
+		nn := ir.AsNode(s.Def)
+		if nn == nil {
+			continue
+		}
+		if nn.Op() != ir.ONAME {
+			continue
+		}
+		n := nn.(*ir.Name)
+		if !types.IsExported(s.Name) {
+			continue
+		}
+		if s.Pkg.Name != "main" {
+			continue
+		}
+		ptabs = append(ptabs, n)
+	}
+}
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+//	func (t T) M() {
+//		...
+//	}
+//
+// already exists; this function generates
+//
+//	func (u U) M() {
+//		u.M()
+//	}
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+//	rcvr   - U
+//	method - M func (t T)(), a *types.Field
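+//
+// For example, given
+//
+//	type T struct{}
+//	func (t T) M() {}
+//	type U struct{ T }
+//
+// the wrapper generated for U's method table is conceptually
+//	func (u U) M() { u.T.M() }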
+func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
+	newnam := ir.MethodSym(rcvr, method.Sym)
+	lsym := newnam.Linksym()
+	if newnam.Siggen() {
+		return lsym
+	}
+	newnam.SetSiggen(true)
+
+	if types.Identical(rcvr, method.Type.Recv().Type) {
+		return lsym
+	}
+
+	// Only generate (*T).M wrappers for T.M in T's own package.
+	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
+		rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
+		return lsym
+	}
+
+	// Only generate I.M wrappers for I in I's own package
+	// but keep doing it for error.Error (was issue #29304).
+	if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
+		return lsym
+	}
+
+	base.Pos = base.AutogeneratedPos
+	typecheck.DeclContext = ir.PEXTERN
+
+	tfn := ir.NewFuncType(base.Pos,
+		ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
+		typecheck.NewFuncParams(method.Type.Params(), true),
+		typecheck.NewFuncParams(method.Type.Results(), false))
+
+	fn := typecheck.DeclFunc(newnam, tfn)
+	fn.SetDupok(true)
+
+	nthis := ir.AsNode(tfn.Type().Recv().Nname)
+
+	methodrcvr := method.Type.Recv().Type
+
+	// generate nil pointer check for better error
+	if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+		// generating wrapper from *T to T.
+		n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
+		n.Body = []ir.Node{call}
+		fn.Body.Append(n)
+	}
+
+	dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
+
+	// generate call
+	// It's not possible to use a tail call when dynamic linking on ppc64le. The
+	// bad scenario is when a local call is made to the wrapper: the wrapper will
+	// call the implementation, which might be in a different module and so set
+	// the TOC to the appropriate value for that module. But if it returns
+	// directly to the wrapper's caller, nothing will reset it to the correct
+	// value for that function.
+	if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
+		// generate tail call: adjust pointer receiver and jump to embedded method.
+		left := dot.X // skip final .M
+		if !left.Type().IsPtr() {
+			left = typecheck.NodAddr(left)
+		}
+		as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
+		fn.Body.Append(as)
+		fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
+	} else {
+		fn.SetWrapper(true) // ignore frame for panic+recover matching
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+		call.Args = ir.ParamNames(tfn.Type())
+		call.IsDDD = tfn.Type().IsVariadic()
+		if method.Type.NumResults() > 0 {
+			ret := ir.NewReturnStmt(base.Pos, nil)
+			ret.Results = []ir.Node{call}
+			fn.Body.Append(ret)
+		} else {
+			fn.Body.Append(call)
+		}
+	}
+
+	typecheck.FinishFuncBody()
+	if base.Debug.DclStack != 0 {
+		types.CheckDclstack()
+	}
+
+	typecheck.Func(fn)
+	ir.CurFunc = fn
+	typecheck.Stmts(fn.Body)
+
+	// Inline calls within (*T).M wrappers. This is safe because we only
+	// generate those wrappers within the same compilation unit as (T).M.
+	// TODO(mdempsky): Investigate why we can't enable this more generally.
+	if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
+		inline.InlineCalls(fn)
+	}
+	escape.Batch([]*ir.Func{fn}, false)
+
+	ir.CurFunc = nil
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	return lsym
+}
+
+var ZeroSize int64
+
+// MarkTypeUsedInInterface marks that type t is converted to an interface.
+// This information is used in the linker in dead method elimination.
+func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+	tsym := TypeLinksym(t)
+	// Emit a marker relocation. The linker will know the type is converted
+	// to an interface if "from" is reachable.
+	r := obj.Addrel(from)
+	r.Sym = tsym
+	r.Type = objabi.R_USEIFACE
+}
+
+// MarkUsedIfaceMethod marks that an interface method is used in the current
+// function. n is an OCALLINTER node.
+func MarkUsedIfaceMethod(n *ir.CallExpr) {
+	dot := n.X.(*ir.SelectorExpr)
+	ityp := dot.X.Type()
+	tsym := TypeLinksym(ityp)
+	r := obj.Addrel(ir.CurFunc.LSym)
+	r.Sym = tsym
+	// dot.Offset() is the method index * PtrSize (the offset of the code
+	// pointer in the itab).
+	midx := dot.Offset() / int64(types.PtrSize)
+	r.Add = InterfaceMethodOffset(ityp, midx)
+	r.Type = objabi.R_USEIFACEMETHOD
+}
diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go
index 4db0fac..338248a 100644
--- a/src/cmd/compile/internal/riscv64/galign.go
+++ b/src/cmd/compile/internal/riscv64/galign.go
@@ -5,11 +5,11 @@
 package riscv64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/riscv"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &riscv.LinkRISCV64
 
 	arch.REGSP = riscv.REG_SP
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
index f7c03fe..9df7394 100644
--- a/src/cmd/compile/internal/riscv64/ggen.go
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -5,33 +5,36 @@
 package riscv64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/riscv"
 )
 
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 
 	// Adjust the frame to account for LR.
-	off += gc.Ctxt.FixedFrameSize()
+	off += base.Ctxt.FixedFrameSize()
 
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
 		}
 		return p
 	}
 
-	if cnt <= int64(128*gc.Widthptr) {
-		p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+	if cnt <= int64(128*types.PtrSize) {
+		p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
 		p.Reg = riscv.REG_SP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
 		return p
 	}
 
@@ -42,15 +45,15 @@
 	// 	MOV	ZERO, (T0)
 	// 	ADD	$Widthptr, T0
 	//	BNE	T0, T1, loop
-	p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
 	p.Reg = riscv.REG_SP
-	p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
 	p.Reg = riscv.REG_T0
-	p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+	p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
 	loop := p
-	p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0)
-	p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
+	p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
 	p.Reg = riscv.REG_T1
-	gc.Patch(p, loop)
+	p.To.SetTarget(loop)
 	return p
 }
diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go
index d40bdf7..74bccf8 100644
--- a/src/cmd/compile/internal/riscv64/gsubr.go
+++ b/src/cmd/compile/internal/riscv64/gsubr.go
@@ -5,12 +5,12 @@
 package riscv64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/objw"
 	"cmd/internal/obj"
 	"cmd/internal/obj/riscv"
 )
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// Hardware nop is ADD $0, ZERO
 	p := pp.Prog(riscv.AADD)
 	p.From.Type = obj.TYPE_CONST
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 0beb5b4..70c29a4 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -5,8 +5,10 @@
 package riscv64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/riscv"
@@ -91,7 +93,7 @@
 		case 8:
 			return riscv.AMOVD
 		default:
-			gc.Fatalf("unknown float width for load %d in type %v", width, t)
+			base.Fatalf("unknown float width for load %d in type %v", width, t)
 			return 0
 		}
 	}
@@ -118,7 +120,7 @@
 	case 8:
 		return riscv.AMOV
 	default:
-		gc.Fatalf("unknown width for load %d in type %v", width, t)
+		base.Fatalf("unknown width for load %d in type %v", width, t)
 		return 0
 	}
 }
@@ -134,7 +136,7 @@
 		case 8:
 			return riscv.AMOVD
 		default:
-			gc.Fatalf("unknown float width for store %d in type %v", width, t)
+			base.Fatalf("unknown float width for store %d in type %v", width, t)
 			return 0
 		}
 	}
@@ -149,7 +151,7 @@
 	case 8:
 		return riscv.AMOV
 	default:
-		gc.Fatalf("unknown width for store %d in type %v", width, t)
+		base.Fatalf("unknown width for store %d in type %v", width, t)
 		return 0
 	}
 }
@@ -178,9 +180,9 @@
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
 // RISC-V has no flags, so this is a no-op.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {}
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	s.SetPos(v.Pos)
 
 	switch v.Op {
@@ -189,7 +191,7 @@
 	case ssa.OpArg:
 		// input args need no code
 	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
+		ssagen.CheckLoweredPhi(v)
 	case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
 		if v.Type.IsMemory() {
 			return
@@ -219,7 +221,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpStoreReg:
@@ -230,7 +232,7 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
 		// nothing to do
 	case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
@@ -321,10 +323,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVW $off(SP), R
 			wantreg = "SP"
@@ -340,7 +342,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
@@ -350,14 +352,14 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = riscv.REG_ZERO
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_REG
@@ -375,7 +377,7 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(16) // space used in callee args area by assembly stubs
 
 	case ssa.OpRISCV64LoweredAtomicLoad8:
@@ -500,7 +502,7 @@
 		p4.From.Reg = riscv.REG_TMP
 		p4.Reg = riscv.REG_ZERO
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p1)
+		p4.To.SetTarget(p1)
 
 		p5 := s.Prog(riscv.AMOV)
 		p5.From.Type = obj.TYPE_CONST
@@ -509,7 +511,7 @@
 		p5.To.Reg = out
 
 		p6 := s.Prog(obj.ANOP)
-		gc.Patch(p2, p6)
+		p2.To.SetTarget(p6)
 
 	case ssa.OpRISCV64LoweredZero:
 		mov, sz := largestMove(v.AuxInt)
@@ -535,7 +537,7 @@
 		p3.Reg = v.Args[0].Reg()
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = v.Args[1].Reg()
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 	case ssa.OpRISCV64LoweredMove:
 		mov, sz := largestMove(v.AuxInt)
@@ -575,7 +577,7 @@
 		p5.Reg = v.Args[1].Reg()
 		p5.From.Type = obj.TYPE_REG
 		p5.From.Reg = v.Args[2].Reg()
-		gc.Patch(p5, p)
+		p5.To.SetTarget(p)
 
 	case ssa.OpRISCV64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
@@ -583,22 +585,22 @@
 		p := s.Prog(riscv.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = riscv.REG_ZERO
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 
 	case ssa.OpRISCV64LoweredGetClosurePtr:
 		// Closure pointer is S4 (riscv.REG_CTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 
 	case ssa.OpRISCV64LoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(riscv.AMOV)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -612,14 +614,14 @@
 		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 
 	case ssa.OpRISCV64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 
 	default:
@@ -642,7 +644,7 @@
 	ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	s.SetPos(b.Pos)
 
 	switch b.Kind {
@@ -655,17 +657,17 @@
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = riscv.REG_ZERO
 		p.Reg = riscv.REG_A0
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go
index cb68fd3..b004a2d 100644
--- a/src/cmd/compile/internal/s390x/galign.go
+++ b/src/cmd/compile/internal/s390x/galign.go
@@ -5,11 +5,11 @@
 package s390x
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/s390x"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &s390x.Links390x
 	arch.REGSP = s390x.REGSP
 	arch.MAXWIDTH = 1 << 50
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
index 5a837d8..488a080 100644
--- a/src/cmd/compile/internal/s390x/ggen.go
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -5,7 +5,8 @@
 package s390x
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/objw"
 	"cmd/internal/obj"
 	"cmd/internal/obj/s390x"
 )
@@ -17,20 +18,20 @@
 const clearLoopCutoff = 1024
 
 // zerorange clears the stack in the given range.
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 
 	// Adjust the frame to account for LR.
-	off += gc.Ctxt.FixedFrameSize()
+	off += base.Ctxt.FixedFrameSize()
 	reg := int16(s390x.REGSP)
 
 	// If the off cannot fit in a 12-bit unsigned displacement then we
 	// need to create a copy of the stack pointer that we can adjust.
 	// We also need to do this if we are going to loop.
 	if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
-		p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+		p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
 		p.Reg = int16(s390x.REGSP)
 		reg = s390x.REGRT1
 		off = 0
@@ -39,12 +40,12 @@
 	// Generate a loop of large clears.
 	if cnt > clearLoopCutoff {
 		ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
-		p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
-		p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+		p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+		p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
 		pl := p
-		p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
-		p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, pl)
+		p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+		p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(pl)
 		cnt = cnt % 256
 	}
 
@@ -69,11 +70,11 @@
 			case 2:
 				ins = s390x.AMOVH
 			}
-			p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+			p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
 
 		// Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
 		default:
-			p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+			p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
 		}
 
 		cnt -= n
@@ -83,6 +84,6 @@
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	return pp.Prog(s390x.ANOPH)
 }
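
The s390x variant instead keys off CLEAR (assembled as XC) and its 12-bit unsigned displacement: offsets outside that range, or clears large enough to loop, first copy SP into REGRT1 so the base can be adjusted. A hedged sketch of that test, mirroring the condition in the code above:

	// needsScratchBase reports when zerorange must materialize an
	// adjustable base register; illustrative only.
	const clearLoopCutoff = 1024 // as in the code above

	func needsScratchBase(off, cnt int64) bool {
		// Displacements must fit in 12 unsigned bits, i.e. [0, 4096);
		// the bound is tightened so the whole clear stays addressable,
		// and looping always needs a register it can advance.
		return off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff
	}
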
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index 8037357..d4c7a28 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -7,16 +7,17 @@
 import (
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/s390x"
 )
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 	flive := b.FlagsLiveAtEnd
 	for _, c := range b.ControlValues() {
 		flive = c.Type.IsFlags() || flive
@@ -134,7 +135,7 @@
 //     dest := dest(To) op src(From)
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
 	p := s.Prog(op)
 	p.From.Type = obj.TYPE_REG
 	p.To.Type = obj.TYPE_REG
@@ -147,7 +148,7 @@
 //	dest := src(From) op off
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
+func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
 	p := s.Prog(op)
 	p.From.Type = obj.TYPE_CONST
 	p.From.Offset = off
@@ -157,7 +158,7 @@
 	return p
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpS390XSLD, ssa.OpS390XSLW,
 		ssa.OpS390XSRD, ssa.OpS390XSRW,
@@ -394,14 +395,14 @@
 		p.From.Type = obj.TYPE_ADDR
 		p.From.Reg = r
 		p.From.Index = i
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpS390XMOVDaddr:
 		p := s.Prog(s390x.AMOVD)
 		p.From.Type = obj.TYPE_ADDR
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
@@ -447,7 +448,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 	case ssa.OpS390XMOVDload,
@@ -458,7 +459,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
@@ -475,7 +476,7 @@
 		p.From.Reg = r
 		p.From.Scale = 1
 		p.From.Index = i
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
@@ -486,7 +487,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
 		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
 		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
@@ -502,7 +503,7 @@
 		p.To.Reg = r
 		p.To.Scale = 1
 		p.To.Index = i
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_CONST
@@ -510,7 +511,7 @@
 		p.From.Offset = sc.Val()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
 		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
 		ssa.OpS390XLDGR, ssa.OpS390XLGDR,
@@ -529,7 +530,7 @@
 		p.From.Offset = sc.Val()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpCopy:
 		if v.Type.IsMemory() {
 			return
@@ -545,7 +546,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpStoreReg:
@@ -556,10 +557,10 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpS390XLoweredGetClosurePtr:
 		// Closure pointer is R12 (already)
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
 		// input is already rounded
 	case ssa.OpS390XLoweredGetG:
@@ -573,7 +574,7 @@
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(s390x.AMOVD)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -592,7 +593,7 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(16) // space used in callee args area by assembly stubs
 	case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
 		ssa.OpS390XNEG, ssa.OpS390XNEGW,
@@ -636,14 +637,14 @@
 		p := s.Prog(s390x.AMOVBZ)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = s390x.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpS390XMVC:
 		vo := v.AuxValAndOff()
@@ -671,7 +672,7 @@
 		p.Reg = v.Args[len(v.Args)-2].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpS390XLoweredMove:
 		// Inputs must be valid pointers to memory,
 		// so adjust arg0 and arg1 as part of the expansion.
@@ -708,7 +709,7 @@
 
 		bne := s.Prog(s390x.ABLT)
 		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, mvc)
+		bne.To.SetTarget(mvc)
 
 		if v.AuxInt > 0 {
 			mvc := s.Prog(s390x.AMVC)
@@ -750,7 +751,7 @@
 
 		bne := s.Prog(s390x.ABLT)
 		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, clear)
+		bne.To.SetTarget(clear)
 
 		if v.AuxInt > 0 {
 			clear := s.Prog(s390x.ACLEAR)
@@ -763,7 +764,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 	case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
@@ -772,7 +773,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpS390XLAN, ssa.OpS390XLAO:
 		// LA(N|O) Ry, TMP, 0(Rx)
 		op := s.Prog(v.Op.Asm())
@@ -807,7 +808,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
 		// Convert the flags output of CS{,G} into a bool.
 		//    CS{,G} arg1, arg2, arg0
@@ -823,7 +824,7 @@
 		cs.Reg = v.Args[2].Reg()      // new
 		cs.To.Type = obj.TYPE_MEM
 		cs.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&cs.To, v)
+		ssagen.AddAux(&cs.To, v)
 
 		// MOVD $0, ret
 		movd := s.Prog(s390x.AMOVD)
@@ -845,7 +846,7 @@
 
 		// NOP (so the BNE has somewhere to land)
 		nop := s.Prog(obj.ANOP)
-		gc.Patch(bne, nop)
+		bne.To.SetTarget(nop)
 	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
 		// Loop until the CS{,G} succeeds.
 		//     MOV{WZ,D} arg0, ret
@@ -858,7 +859,7 @@
 		load.From.Reg = v.Args[0].Reg()
 		load.To.Type = obj.TYPE_REG
 		load.To.Reg = v.Reg0()
-		gc.AddAux(&load.From, v)
+		ssagen.AddAux(&load.From, v)
 
 		// CS{,G} ret, arg1, arg0
 		cs := s.Prog(v.Op.Asm())
@@ -867,12 +868,12 @@
 		cs.Reg = v.Args[1].Reg() // new
 		cs.To.Type = obj.TYPE_MEM
 		cs.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&cs.To, v)
+		ssagen.AddAux(&cs.To, v)
 
 		// BNE cs
 		bne := s.Prog(s390x.ABNE)
 		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, cs)
+		bne.To.SetTarget(cs)
 	case ssa.OpS390XSYNC:
 		s.Prog(s390x.ASYNC)
 	case ssa.OpClobber:
@@ -907,14 +908,14 @@
 	panic("unreachable")
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	// Handle generic blocks first.
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(s390x.ABR)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 		return
 	case ssa.BlockDefer:
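
The atomic CAS lowering above uses CS{,G} for the hardware compare-and-swap, with the surrounding MOVD/BNE/NOP sequence turning its condition code into a bool. A plain-Go sketch of what that block computes (semantics only, not the emitted code):

	// casSemantics shows the meaning of the CS/MOVD/BNE/NOP Progs above.
	func casSemantics(addr *uint64, old, new uint64) bool {
		if *addr == old { // CS{,G} performs this compare-and-swap atomically
			*addr = new
			return true // ret ends up 1
		}
		return false // the branch-taken path leaves ret == 0
	}
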
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index 519ac21..937c757 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -52,7 +52,7 @@
 	Controls [2]*Value
 
 	// Auxiliary info for the block. Its value depends on the Kind.
-	Aux    interface{}
+	Aux    Aux
 	AuxInt int64
 
 	// The unordered set of Values that define the operation of this block.
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index 2dade7a..9e4aa6c 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -166,7 +166,7 @@
 					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
 				}
 			case auxString:
-				if _, ok := v.Aux.(string); !ok {
+				if _, ok := v.Aux.(stringAux); !ok {
 					f.Fatalf("value %v has Aux type %T, want string", v, v.Aux)
 				}
 				canHaveAux = true
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 63994d1..c267274 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -431,7 +431,6 @@
 	{name: "early copyelim", fn: copyelim},
 	{name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
 	{name: "short circuit", fn: shortcircuit},
-	{name: "decompose args", fn: decomposeArgs, required: !go116lateCallExpansion, disabled: go116lateCallExpansion}, // handled by late call lowering
 	{name: "decompose user", fn: decomposeUser, required: true},
 	{name: "pre-opt deadcode", fn: deadcode},
 	{name: "opt", fn: opt, required: true},               // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 0fe0337..c29bc8f 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -5,6 +5,8 @@
 package ssa
 
 import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
@@ -138,7 +140,7 @@
 
 	// Auto returns a Node for an auto variable of the given type.
 	// The SSA compiler uses this function to allocate space for spills.
-	Auto(src.XPos, *types.Type) GCNode
+	Auto(src.XPos, *types.Type) *ir.Name
 
 	// Given the name for a compound type, returns the name we should use
 	// for the parts of that compound type.
@@ -178,32 +180,6 @@
 	MyImportPath() string
 }
 
-// interface used to hold a *gc.Node (a stack variable).
-// We'd use *gc.Node directly but that would lead to an import cycle.
-type GCNode interface {
-	Typ() *types.Type
-	String() string
-	IsSynthetic() bool
-	IsAutoTmp() bool
-	StorageClass() StorageClass
-}
-
-type StorageClass uint8
-
-const (
-	ClassAuto     StorageClass = iota // local stack variable
-	ClassParam                        // argument
-	ClassParamOut                     // return value
-)
-
-const go116lateCallExpansion = true
-
-// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
-// within compilation of a function/method.
-func LateCallExpansionEnabledWithin(f *Func) bool {
-	return go116lateCallExpansion
-}
-
 // NewConfig returns a new configuration object for the given architecture.
 func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
 	c := &Config{arch: arch, Types: types}
@@ -219,9 +195,10 @@
 		c.registers = registersAMD64[:]
 		c.gpRegMask = gpRegMaskAMD64
 		c.fpRegMask = fpRegMaskAMD64
+		c.specialRegMask = specialRegMaskAMD64
 		c.FPReg = framepointerRegAMD64
 		c.LinkReg = linkRegAMD64
-		c.hasGReg = false
+		c.hasGReg = base.Flag.ABIWrap
 	case "386":
 		c.PtrSize = 4
 		c.RegSize = 4
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index 3b4f2be..f785274 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -275,7 +275,7 @@
 	return types.CMPgt
 }
 
-type auxmap map[interface{}]int32
+type auxmap map[Aux]int32
 
 func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
 	// Try to order these comparison by cost (cheaper first)
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
index 9e76645..8052016 100644
--- a/src/cmd/compile/internal/ssa/cse_test.go
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -14,6 +14,8 @@
 	s string
 }
 
+func (*tstAux) CanBeAnSSAAux() {}
+
 // This tests for a bug found when partitioning, but not sorting by the Aux value.
 func TestCSEAuxPartitionBug(t *testing.T) {
 	c := testConfig(t)
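
These hunks replace the untyped Aux interface{} with a small marker interface, so only types that deliberately opt in can serve as block or value auxiliary data; plain strings are wrapped as stringAux via StringToAux. A sketch of the pattern, reconstructed from the usage in these diffs (the real declarations live in the ssa package and may differ in detail):

	// Aux as a marker interface: anything used as an Aux must opt in.
	type Aux interface {
		CanBeAnSSAAux()
	}

	// stringAux lets plain strings (e.g. "BlockStart") satisfy Aux.
	type stringAux string

	func (stringAux) CanBeAnSSAAux() {}

	func StringToAux(s string) Aux { return stringAux(s) }
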
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 0664013..31d3f62 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
 )
@@ -136,9 +137,9 @@
 // reaches stores then we delete all the stores. The other operations will then
 // be eliminated by the dead code elimination pass.
 func elimDeadAutosGeneric(f *Func) {
-	addr := make(map[*Value]GCNode) // values that the address of the auto reaches
-	elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is
-	used := make(map[GCNode]bool)   // used autos that must be kept
+	addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
+	elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
+	var used ir.NameSet               // used autos that must be kept
 
 	// visit the value and report whether any of the maps are updated
 	visit := func(v *Value) (changed bool) {
@@ -146,8 +147,8 @@
 		switch v.Op {
 		case OpAddr, OpLocalAddr:
 			// Propagate the address if it points to an auto.
-			n, ok := v.Aux.(GCNode)
-			if !ok || n.StorageClass() != ClassAuto {
+			n, ok := v.Aux.(*ir.Name)
+			if !ok || n.Class != ir.PAUTO {
 				return
 			}
 			if addr[v] == nil {
@@ -157,8 +158,8 @@
 			return
 		case OpVarDef, OpVarKill:
 			// v should be eliminated if we eliminate the auto.
-			n, ok := v.Aux.(GCNode)
-			if !ok || n.StorageClass() != ClassAuto {
+			n, ok := v.Aux.(*ir.Name)
+			if !ok || n.Class != ir.PAUTO {
 				return
 			}
 			if elim[v] == nil {
@@ -173,12 +174,12 @@
 			// for open-coded defers from being removed (since they
 			// may not be used by the inline code, but will be used by
 			// panic processing).
-			n, ok := v.Aux.(GCNode)
-			if !ok || n.StorageClass() != ClassAuto {
+			n, ok := v.Aux.(*ir.Name)
+			if !ok || n.Class != ir.PAUTO {
 				return
 			}
-			if !used[n] {
-				used[n] = true
+			if !used.Has(n) {
+				used.Add(n)
 				changed = true
 			}
 			return
@@ -211,8 +212,8 @@
 		if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
 			for _, a := range args {
 				if n, ok := addr[a]; ok {
-					if !used[n] {
-						used[n] = true
+					if !used.Has(n) {
+						used.Add(n)
 						changed = true
 					}
 				}
@@ -221,9 +222,9 @@
 		}
 
 		// Propagate any auto addresses through v.
-		node := GCNode(nil)
+		var node *ir.Name
 		for _, a := range args {
-			if n, ok := addr[a]; ok && !used[n] {
+			if n, ok := addr[a]; ok && !used.Has(n) {
 				if node == nil {
 					node = n
 				} else if node != n {
@@ -232,7 +233,7 @@
 					// multiple pointers (e.g. NeqPtr, Phi etc.).
 					// This is rare, so just propagate the first
 					// value to keep things simple.
-					used[n] = true
+					used.Add(n)
 					changed = true
 				}
 			}
@@ -248,7 +249,7 @@
 		}
 		if addr[v] != node {
 			// This doesn't happen in practice, but catch it just in case.
-			used[node] = true
+			used.Add(node)
 			changed = true
 		}
 		return
@@ -268,8 +269,8 @@
 			}
 			// keep the auto if its address reaches a control value
 			for _, c := range b.ControlValues() {
-				if n, ok := addr[c]; ok && !used[n] {
-					used[n] = true
+				if n, ok := addr[c]; ok && !used.Has(n) {
+					used.Add(n)
 					changed = true
 				}
 			}
@@ -281,7 +282,7 @@
 
 	// Eliminate stores to unread autos.
 	for v, n := range elim {
-		if used[n] {
+		if used.Has(n) {
 			continue
 		}
 		// replace with OpCopy
@@ -298,15 +299,15 @@
 	// Loop over all ops that affect autos taking note of which
 	// autos we need and also stores that we might be able to
 	// eliminate.
-	seen := make(map[GCNode]bool)
+	var seen ir.NameSet
 	var stores []*Value
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
-			n, ok := v.Aux.(GCNode)
+			n, ok := v.Aux.(*ir.Name)
 			if !ok {
 				continue
 			}
-			if n.StorageClass() != ClassAuto {
+			if n.Class != ir.PAUTO {
 				continue
 			}
 
@@ -316,7 +317,7 @@
 				// If we haven't seen the auto yet
 				// then this might be a store we can
 				// eliminate.
-				if !seen[n] {
+				if !seen.Has(n) {
 					stores = append(stores, v)
 				}
 			default:
@@ -326,7 +327,7 @@
 				// because dead loads haven't been
 				// eliminated yet.
 				if v.Uses > 0 {
-					seen[n] = true
+					seen.Add(n)
 				}
 			}
 		}
@@ -334,8 +335,8 @@
 
 	// Eliminate stores to unread autos.
 	for _, store := range stores {
-		n, _ := store.Aux.(GCNode)
-		if seen[n] {
+		n, _ := store.Aux.(*ir.Name)
+		if seen.Has(n) {
 			continue
 		}
 
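The deadstore rewrites above swap map[GCNode]bool for ir.NameSet, whose zero value is usable directly, so no make call is needed before the first Has. A minimal sketch of the API those hunks rely on, assuming a map-backed set; the real type lives in cmd/compile/internal/ir:

	type Name struct{} // stands in for ir.Name

	// NameSet sketch: Has works on a nil set, Add allocates lazily.
	type NameSet map[*Name]struct{}

	func (s NameSet) Has(n *Name) bool {
		_, found := s[n]
		return found
	}

	func (s *NameSet) Add(n *Name) {
		if *s == nil {
			*s = make(map[*Name]struct{})
		}
		(*s)[n] = struct{}{}
	}
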
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 6353f72..68b6ab5 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/internal/dwarf"
 	"cmd/internal/obj"
 	"encoding/hex"
@@ -24,7 +25,7 @@
 	// Slots is all the slots used in the debug info, indexed by their SlotID.
 	Slots []LocalSlot
 	// The user variables, indexed by VarID.
-	Vars []GCNode
+	Vars []*ir.Name
 	// The slots that make up each variable, indexed by VarID.
 	VarSlots [][]SlotID
 	// The location list data, indexed by VarID. Must be processed by PutLocationList.
@@ -142,13 +143,13 @@
 var BlockStart = &Value{
 	ID:  -10000,
 	Op:  OpInvalid,
-	Aux: "BlockStart",
+	Aux: StringToAux("BlockStart"),
 }
 
 var BlockEnd = &Value{
 	ID:  -20000,
 	Op:  OpInvalid,
-	Aux: "BlockEnd",
+	Aux: StringToAux("BlockEnd"),
 }
 
 // RegisterSet is a bitmap of registers, indexed by Register.num.
@@ -165,7 +166,7 @@
 type debugState struct {
 	// See FuncDebug.
 	slots    []LocalSlot
-	vars     []GCNode
+	vars     []*ir.Name
 	varSlots [][]SlotID
 	lists    [][]byte
 
@@ -189,7 +190,7 @@
 	// The pending location list entry for each user variable, indexed by VarID.
 	pendingEntries []pendingEntry
 
-	varParts           map[GCNode][]SlotID
+	varParts           map[*ir.Name][]SlotID
 	blockDebug         []BlockDebug
 	pendingSlotLocs    []VarLoc
 	liveSlots          []liveSlot
@@ -346,7 +347,7 @@
 	}
 
 	if state.varParts == nil {
-		state.varParts = make(map[GCNode][]SlotID)
+		state.varParts = make(map[*ir.Name][]SlotID)
 	} else {
 		for n := range state.varParts {
 			delete(state.varParts, n)
@@ -360,7 +361,7 @@
 	state.vars = state.vars[:0]
 	for i, slot := range f.Names {
 		state.slots = append(state.slots, slot)
-		if slot.N.IsSynthetic() {
+		if ir.IsSynthetic(slot.N) {
 			continue
 		}
 
@@ -379,8 +380,8 @@
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			if v.Op == OpVarDef || v.Op == OpVarKill {
-				n := v.Aux.(GCNode)
-				if n.IsSynthetic() {
+				n := v.Aux.(*ir.Name)
+				if ir.IsSynthetic(n) {
 					continue
 				}
 
@@ -425,7 +426,7 @@
 	state.initializeCache(f, len(state.varParts), len(state.slots))
 
 	for i, slot := range f.Names {
-		if slot.N.IsSynthetic() {
+		if ir.IsSynthetic(slot.N) {
 			continue
 		}
 		for _, value := range f.NamedValues[slot] {
@@ -717,8 +718,8 @@
 
 	switch {
 	case v.Op == OpVarDef, v.Op == OpVarKill:
-		n := v.Aux.(GCNode)
-		if n.IsSynthetic() {
+		n := v.Aux.(*ir.Name)
+		if ir.IsSynthetic(n) {
 			break
 		}
 
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
index bf7f1e8..ea988e4 100644
--- a/src/cmd/compile/internal/ssa/decompose.go
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -219,10 +219,6 @@
 	v.AddArg(data)
 }
 
-func decomposeArgs(f *Func) {
-	applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues)
-}
-
 func decomposeUser(f *Func) {
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 679ee8a..579818e 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -24,6 +24,632 @@
 	pt     *types.Type
 }
 
+func isBlockMultiValueExit(b *Block) bool {
+	return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult
+}
+
+// removeTrivialWrapperTypes unwraps layers of
+// struct { singleField SomeType } and [1]SomeType
+// until a non-wrapper type is reached.  This is useful
+// for working with assignments to/from interface data
+// fields (either second operand to OpIMake or OpIData)
+// where the wrapping or type conversion can be elided
+// because of type conversions/assertions in source code
+// that do not appear in SSA.
+func removeTrivialWrapperTypes(t *types.Type) *types.Type {
+	for {
+		if t.IsStruct() && t.NumFields() == 1 {
+			t = t.Field(0).Type
+			continue
+		}
+		if t.IsArray() && t.NumElem() == 1 {
+			t = t.Elem()
+			continue
+		}
+		break
+	}
+	return t
+}
+
+type expandState struct {
+	f            *Func
+	debug        bool
+	canSSAType   func(*types.Type) bool
+	regSize      int64
+	sp           *Value
+	typs         *Types
+	ptrSize      int64
+	hiOffset     int64
+	lowOffset    int64
+	namedSelects map[*Value][]namedVal
+	sdom         SparseTree
+	common       map[selKey]*Value
+	offsets      map[offsetKey]*Value
+}
+
+// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+// that has no 64-bit integer registers.
+func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
+	tHi = x.typs.UInt32
+	if et == types.TINT64 {
+		tHi = x.typs.Int32
+	}
+	tLo = x.typs.UInt32
+	return
+}
+
+// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
+// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
+// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
+// integer on 32-bit).
+func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
+	if !x.canSSAType(t) {
+		return false
+	}
+	return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
+		t.Size() > x.regSize && t.IsInteger()
+}
+
+// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+// TODO should also optimize offsets from SB?
+func (x *expandState) offsetFrom(from *Value, offset int64, pt *types.Type) *Value {
+	if offset == 0 && from.Type == pt { // this is not actually likely
+		return from
+	}
+	// Simplify, canonicalize
+	for from.Op == OpOffPtr {
+		offset += from.AuxInt
+		from = from.Args[0]
+	}
+	if from == x.sp {
+		return x.f.ConstOffPtrSP(pt, offset, x.sp)
+	}
+	key := offsetKey{from, offset, pt}
+	v := x.offsets[key]
+	if v != nil {
+		return v
+	}
+	v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
+	x.offsets[key] = v
+	return v
+}
+
+// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
+func (x *expandState) splitSlots(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot {
+	var locs []LocalSlot
+	for i := range ls {
+		locs = append(locs, x.f.fe.SplitSlot(&ls[i], sfx, offset, ty))
+	}
+	return locs
+}
+
+// Calls that need lowering have some number of inputs, including a memory input,
+// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+// With the current ABI those inputs need to be converted into stores to memory,
+// rethreading the call's memory input to the first store, with the new call receiving the memory output of the last.
+
+// With the current ABI, the outputs need to be converted to loads, which will all use the call's
+// memory output as their input.
+
+// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
+// through a chain of Struct/Array/builtin Select operations.  If the chain of selectors does not
+// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
+// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
+// accumulates the offset.
+// It emits the code necessary to implement the leaf select operation that leads to the root.
+//
+// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
+func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) []LocalSlot {
+	if x.debug {
+		fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset)
+	}
+	var locs []LocalSlot
+	leafType := leaf.Type
+	if len(selector.Args) > 0 {
+		w := selector.Args[0]
+		if w.Op == OpCopy {
+			for w.Op == OpCopy {
+				w = w.Args[0]
+			}
+			selector.SetArg(0, w)
+		}
+	}
+	switch selector.Op {
+	case OpArg:
+		if !x.isAlreadyExpandedAggregateType(selector.Type) {
+			if leafType == selector.Type { // OpIData leads us here, sometimes.
+				leaf.copyOf(selector)
+			} else {
+				x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
+			}
+			if x.debug {
+				fmt.Printf("\tOpArg, break\n")
+			}
+			break
+		}
+		switch leaf.Op {
+		case OpIData, OpStructSelect, OpArraySelect:
+			leafType = removeTrivialWrapperTypes(leaf.Type)
+		}
+		aux := selector.Aux
+		auxInt := selector.AuxInt + offset
+		if leaf.Block == selector.Block {
+			leaf.reset(OpArg)
+			leaf.Aux = aux
+			leaf.AuxInt = auxInt
+			leaf.Type = leafType
+		} else {
+			w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux)
+			leaf.copyOf(w)
+			if x.debug {
+				fmt.Printf("\tnew %s\n", w.LongString())
+			}
+		}
+		for _, s := range x.namedSelects[selector] {
+			locs = append(locs, x.f.Names[s.locIndex])
+		}
+
+	case OpLoad: // We end up here because of IData of immediate structures.
+		// Failure case:
+		// (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
+		// the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
+		//
+		// GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
+		// cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
+		// b2: ← b1
+		// v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
+		// v21 (142) = SelectN <mem> [1] v20
+		// v22 (142) = SelectN <interface {}> [0] v20
+		// b15: ← b8
+		// v71 (+143) = IData <Nodes> v22 (v[Nodes])
+		// v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
+		//
+		// translates (w/o the "case OpLoad:" above) to:
+		//
+		// b2: ← b1
+		// v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
+		// v23 (142) = Load <*uintptr> v19 v20
+		// v823 (142) = IsNonNil <bool> v23
+		// v67 (+143) = Load <*[]*Node> v880 v20
+		// b15: ← b8
+		// v827 (146) = StructSelect <*[]*Node> [0] v67
+		// v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
+		// v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
+		// i.e., the struct select is generated and remains in because it is not applied to an actual structure.
+		// The OpLoad was created to load the single field of the IData
+		// This case removes that StructSelect.
+		if leafType != selector.Type {
+			x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
+		}
+		leaf.copyOf(selector)
+		for _, s := range x.namedSelects[selector] {
+			locs = append(locs, x.f.Names[s.locIndex])
+		}
+
+	case OpSelectN:
+		// TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
+		call := selector.Args[0]
+		aux := call.Aux.(*AuxCall)
+		which := selector.AuxInt
+		if which == aux.NResults() { // mem is after the results.
+			// rewrite v as a Copy of call -- the replacement call will produce a mem.
+			leaf.copyOf(call)
+		} else {
+			leafType := removeTrivialWrapperTypes(leaf.Type)
+			if x.canSSAType(leafType) {
+				pt := types.NewPtr(leafType)
+				off := x.offsetFrom(x.sp, offset+aux.OffsetOfResult(which), pt)
+				// Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
+				if leaf.Block == call.Block {
+					leaf.reset(OpLoad)
+					leaf.SetArgs2(off, call)
+					leaf.Type = leafType
+				} else {
+					w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
+					leaf.copyOf(w)
+					if x.debug {
+						fmt.Printf("\tnew %s\n", w.LongString())
+					}
+				}
+				for _, s := range x.namedSelects[selector] {
+					locs = append(locs, x.f.Names[s.locIndex])
+				}
+			} else {
+				x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
+			}
+		}
+
+	case OpStructSelect:
+		w := selector.Args[0]
+		var ls []LocalSlot
+		if w.Type.Kind() != types.TSTRUCT { // IData artifact
+			ls = x.rewriteSelect(leaf, w, offset)
+		} else {
+			ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
+			if w.Op != OpIData {
+				for _, l := range ls {
+					locs = append(locs, x.f.fe.SplitStruct(l, int(selector.AuxInt)))
+				}
+			}
+		}
+
+	case OpArraySelect:
+		w := selector.Args[0]
+		x.rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt)
+
+	case OpInt64Hi:
+		w := selector.Args[0]
+		ls := x.rewriteSelect(leaf, w, offset+x.hiOffset)
+		locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType)
+
+	case OpInt64Lo:
+		w := selector.Args[0]
+		ls := x.rewriteSelect(leaf, w, offset+x.lowOffset)
+		locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType)
+
+	case OpStringPtr:
+		ls := x.rewriteSelect(leaf, selector.Args[0], offset)
+		locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr)
+
+	case OpSlicePtr:
+		w := selector.Args[0]
+		ls := x.rewriteSelect(leaf, w, offset)
+		locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
+
+	case OpITab:
+		w := selector.Args[0]
+		ls := x.rewriteSelect(leaf, w, offset)
+		sfx := ".itab"
+		if w.Type.IsEmptyInterface() {
+			sfx = ".type"
+		}
+		locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr)
+
+	case OpComplexReal:
+		ls := x.rewriteSelect(leaf, selector.Args[0], offset)
+		locs = x.splitSlots(ls, ".real", 0, leafType)
+
+	case OpComplexImag:
+		ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part.
+		locs = x.splitSlots(ls, ".imag", leafType.Width, leafType)
+
+	case OpStringLen, OpSliceLen:
+		ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize)
+		locs = x.splitSlots(ls, ".len", x.ptrSize, leafType)
+
+	case OpIData:
+		ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize)
+		locs = x.splitSlots(ls, ".data", x.ptrSize, leafType)
+
+	case OpSliceCap:
+		ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize)
+		locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType)
+
+	case OpCopy: // If it's an intermediate result, recurse
+		locs = x.rewriteSelect(leaf, selector.Args[0], offset)
+		for _, s := range x.namedSelects[selector] {
+			// this copy may have had its own name, preserve that, too.
+			locs = append(locs, x.f.Names[s.locIndex])
+		}
+
+	default:
+		// Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
+	}
+
+	return locs
+}
+
+func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
+	source := a.Args[0]
+	dst := x.offsetFrom(base, offset, source.Type)
+	if a.Uses == 1 && a.Block == b {
+		a.reset(OpMove)
+		a.Pos = pos
+		a.Type = types.TypeMem
+		a.Aux = typ
+		a.AuxInt = size
+		a.SetArgs3(dst, source, mem)
+		mem = a
+	} else {
+		mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
+		mem.AuxInt = size
+	}
+	return mem
+}
+
+// decomposeArgOrLoad is a helper for storeArgOrLoad.
+// It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions
+// passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead.
+func (x *expandState) decomposeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64,
+	decomposeOne func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
+	decomposeTwo func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
+	u := source.Type
+	switch u.Kind() {
+	case types.TARRAY:
+		elem := u.Elem()
+		for i := int64(0); i < u.NumElem(); i++ {
+			elemOff := i * elem.Size()
+			mem = decomposeOne(x, pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff)
+			pos = pos.WithNotStmt()
+		}
+		return mem
+	case types.TSTRUCT:
+		for i := 0; i < u.NumFields(); i++ {
+			fld := u.Field(i)
+			mem = decomposeOne(x, pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset)
+			pos = pos.WithNotStmt()
+		}
+		return mem
+	case types.TINT64, types.TUINT64:
+		if t.Width == x.regSize {
+			break
+		}
+		tHi, tLo := x.intPairTypes(t.Kind())
+		mem = decomposeOne(x, pos, b, base, source, mem, tHi, source.AuxInt+x.hiOffset, offset+x.hiOffset)
+		pos = pos.WithNotStmt()
+		return decomposeOne(x, pos, b, base, source, mem, tLo, source.AuxInt+x.lowOffset, offset+x.lowOffset)
+	case types.TINTER:
+		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Uintptr, x.typs.BytePtr, source.AuxInt, offset)
+	case types.TSTRING:
+		return decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset)
+	case types.TCOMPLEX64:
+		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float32, x.typs.Float32, source.AuxInt, offset)
+	case types.TCOMPLEX128:
+		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float64, x.typs.Float64, source.AuxInt, offset)
+	case types.TSLICE:
+		mem = decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset)
+		return decomposeOne(x, pos, b, base, source, mem, x.typs.Int, source.AuxInt+2*x.ptrSize, offset+2*x.ptrSize)
+	}
+	return nil
+}
+
+// storeOneArg creates a decomposed (one step) arg that is then stored.
+// pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input,
+// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
+func storeOneArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
+	w := x.common[selKey{source, offArg, t.Width, t}]
+	if w == nil {
+		w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux)
+		x.common[selKey{source, offArg, t.Width, t}] = w
+	}
+	return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore)
+}
+
+// storeOneLoad creates a decomposed (one step) load that is then stored.
+func storeOneLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
+	from := x.offsetFrom(source.Args[0], offArg, types.NewPtr(t))
+	w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
+	return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore)
+}
+
+func storeTwoArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
+	mem = storeOneArg(x, pos, b, base, source, mem, t1, offArg, offStore)
+	pos = pos.WithNotStmt()
+	t1Size := t1.Size()
+	return storeOneArg(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+}
+
+func storeTwoLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
+	mem = storeOneLoad(x, pos, b, base, source, mem, t1, offArg, offStore)
+	pos = pos.WithNotStmt()
+	t1Size := t1.Size()
+	return storeOneLoad(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+}
+
+// storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed
+// stores of non-aggregate types.  It recursively walks up a chain of selectors until it reaches a Load or an Arg.
+// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
+func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value {
+	if x.debug {
+		fmt.Printf("\tstoreArgOrLoad(%s;  %s;  %s;  %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset)
+	}
+
+	switch source.Op {
+	case OpCopy:
+		return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset)
+
+	case OpLoad:
+		ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad)
+		if ret != nil {
+			return ret
+		}
+
+	case OpArg:
+		ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg)
+		if ret != nil {
+			return ret
+		}
+
+	case OpArrayMake0, OpStructMake0:
+		return mem
+
+	case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
+		for i := 0; i < t.NumFields(); i++ {
+			fld := t.Field(i)
+			mem = x.storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset)
+			pos = pos.WithNotStmt()
+		}
+		return mem
+
+	case OpArrayMake1:
+		return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
+
+	case OpInt64Make:
+		tHi, tLo := x.intPairTypes(t.Kind())
+		mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+x.hiOffset)
+		pos = pos.WithNotStmt()
+		return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+x.lowOffset)
+
+	case OpComplexMake:
+		tPart := x.typs.Float32
+		wPart := t.Width / 2
+		if wPart == 8 {
+			tPart = x.typs.Float64
+		}
+		mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset)
+		pos = pos.WithNotStmt()
+		return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart)
+
+	case OpIMake:
+		mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.Uintptr, offset)
+		pos = pos.WithNotStmt()
+		return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.BytePtr, offset+x.ptrSize)
+
+	case OpStringMake:
+		mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset)
+		pos = pos.WithNotStmt()
+		return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize)
+
+	case OpSliceMake:
+		mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset)
+		pos = pos.WithNotStmt()
+		mem = x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize)
+		return x.storeArgOrLoad(pos, b, base, source.Args[2], mem, x.typs.Int, offset+2*x.ptrSize)
+	}
+
+	// For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
+	switch t.Kind() {
+	case types.TARRAY:
+		elt := t.Elem()
+		if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == x.regSize {
+			t = removeTrivialWrapperTypes(t)
+			// it could be a leaf type, but the "leaf" could be complex64 (for example)
+			return x.storeArgOrLoad(pos, b, base, source, mem, t, offset)
+		}
+		for i := int64(0); i < t.NumElem(); i++ {
+			sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
+			mem = x.storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width)
+			pos = pos.WithNotStmt()
+		}
+		return mem
+
+	case types.TSTRUCT:
+		if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == x.regSize {
+			// This peculiar test deals with accesses to immediate interface data.
+			// It works okay because everything is the same size.
+			// Example code that triggers this can be found in go/constant/value.go, function ToComplex
+			// v119 (+881) = IData <intVal> v6
+			// v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
+			// This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
+			// Guard against "struct{struct{*foo}}"
+			// Other rewriting phases create minor glitches when they transform IData, for instance the
+			// interface-typed Arg "x" of ToFloat in go/constant/value.go
+			//   v6 (858) = Arg <Value> {x} (x[Value], x[Value])
+			// is rewritten by decomposeArgs into
+			//   v141 (858) = Arg <uintptr> {x}
+			//   v139 (858) = Arg <*uint8> {x} [8]
+			// because of a type case clause on line 862 of go/constant/value.go
+			//  	case intVal:
+			//		   return itof(x)
+			// v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields
+			// of a *uint8, which does not succeed.
+			t = removeTrivialWrapperTypes(t)
+			// it could be a leaf type, but the "leaf" could be complex64 (for example)
+			return x.storeArgOrLoad(pos, b, base, source, mem, t, offset)
+		}
+
+		for i := 0; i < t.NumFields(); i++ {
+			fld := t.Field(i)
+			sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
+			mem = x.storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset)
+			pos = pos.WithNotStmt()
+		}
+		return mem
+
+	case types.TINT64, types.TUINT64:
+		if t.Width == x.regSize {
+			break
+		}
+		tHi, tLo := x.intPairTypes(t.Kind())
+		sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+x.hiOffset)
+		pos = pos.WithNotStmt()
+		sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
+		return x.storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+x.lowOffset)
+
+	case types.TINTER:
+		sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset)
+		pos = pos.WithNotStmt()
+		sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
+		return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset+x.ptrSize)
+
+	case types.TSTRING:
+		sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset)
+		pos = pos.WithNotStmt()
+		sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
+		return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize)
+
+	case types.TSLICE:
+		et := types.NewPtr(t.Elem())
+		sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, et, offset)
+		pos = pos.WithNotStmt()
+		sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize)
+		sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
+		return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+2*x.ptrSize)
+
+	case types.TCOMPLEX64:
+		sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset)
+		pos = pos.WithNotStmt()
+		sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
+		return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset+4)
+
+	case types.TCOMPLEX128:
+		sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
+		mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset)
+		pos = pos.WithNotStmt()
+		sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
+		return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset+8)
+	}
+
+	dst := x.offsetFrom(base, offset, types.NewPtr(t))
+	s := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
+	if x.debug {
+		fmt.Printf("\t\tstoreArg returns %s\n", s.LongString())
+	}
+	return s
+}
+
+// rewriteArgs removes all the Args from a call and converts the call args into appropriate
+// stores (or later, register movement).  Extra args for interface and closure calls are not converted
+// to stores, but they are still removed.
+func (x *expandState) rewriteArgs(v *Value, firstArg int) *Value {
+	// Thread the stores on the memory arg
+	aux := v.Aux.(*AuxCall)
+	pos := v.Pos.WithNotStmt()
+	m0 := v.MemoryArg()
+	mem := m0
+	for i, a := range v.Args {
+		if i < firstArg {
+			continue
+		}
+		if a == m0 { // mem is last.
+			break
+		}
+		auxI := int64(i - firstArg)
+		if a.Op == OpDereference {
+			if a.MemoryArg() != m0 {
+				x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
+			}
+			// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
+			// TODO this will be more complicated with registers in the picture.
+			mem = x.rewriteDereference(v.Block, x.sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos)
+		} else {
+			if x.debug {
+				fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
+			}
+			mem = x.storeArgOrLoad(pos, v.Block, x.sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
+		}
+	}
+	v.resetArgs()
+	return mem
+}
+
 // expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
 // that is more oriented to a platform's ABI.  The SelectN operations that extract results are rewritten into
 // more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
@@ -38,639 +664,30 @@
 
 	// With the current ABI, the outputs need to be converted to loads, which will all use the call's
 	// memory output as their input.
-	if !LateCallExpansionEnabledWithin(f) {
-		return
-	}
-	debug := f.pass.debug > 0
-
-	if debug {
-		fmt.Printf("\nexpandCalls(%s)\n", f.Name)
-	}
-
-	canSSAType := f.fe.CanSSA
-	regSize := f.Config.RegSize
 	sp, _ := f.spSb()
-	typ := &f.Config.Types
-	ptrSize := f.Config.PtrSize
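+	// The state formerly captured by this function's closures now lives in
+	// a single expandState, shared with the methods defined above.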
+	x := &expandState{
+		f:            f,
+		debug:        f.pass.debug > 0,
+		canSSAType:   f.fe.CanSSA,
+		regSize:      f.Config.RegSize,
+		sp:           sp,
+		typs:         &f.Config.Types,
+		ptrSize:      f.Config.PtrSize,
+		namedSelects: make(map[*Value][]namedVal),
+		sdom:         f.Sdom(),
+		common:       make(map[selKey]*Value),
+		offsets:      make(map[offsetKey]*Value),
+	}
 
 	// For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
-	var hiOffset, lowOffset int64
 	if f.Config.BigEndian {
-		lowOffset = 4
+		x.lowOffset = 4
 	} else {
-		hiOffset = 4
+		x.hiOffset = 4
 	}
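+	// E.g., on a big-endian 32-bit target the high word of a 64-bit integer
+	// is at offset 0 and the low word at offset 4; little-endian reverses this.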
 
-	namedSelects := make(map[*Value][]namedVal)
-
-	sdom := f.Sdom()
-
-	common := make(map[selKey]*Value)
-
-	// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
-	// that has no 64-bit integer registers.
-	intPairTypes := func(et types.EType) (tHi, tLo *types.Type) {
-		tHi = typ.UInt32
-		if et == types.TINT64 {
-			tHi = typ.Int32
-		}
-		tLo = typ.UInt32
-		return
-	}
-
-	// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
-	// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
-	// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
-	// integer on 32-bit).
-	isAlreadyExpandedAggregateType := func(t *types.Type) bool {
-		if !canSSAType(t) {
-			return false
-		}
-		return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
-			t.Size() > regSize && t.IsInteger()
-	}
-
-	offsets := make(map[offsetKey]*Value)
-
-	// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
-	// TODO should also optimize offsets from SB?
-	offsetFrom := func(from *Value, offset int64, pt *types.Type) *Value {
-		if offset == 0 && from.Type == pt { // this is not actually likely
-			return from
-		}
-		// Simplify, canonicalize
-		for from.Op == OpOffPtr {
-			offset += from.AuxInt
-			from = from.Args[0]
-		}
-		if from == sp {
-			return f.ConstOffPtrSP(pt, offset, sp)
-		}
-		key := offsetKey{from, offset, pt}
-		v := offsets[key]
-		if v != nil {
-			return v
-		}
-		v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
-		offsets[key] = v
-		return v
-	}
-
-	// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
-	splitSlots := func(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot {
-		var locs []LocalSlot
-		for i := range ls {
-			locs = append(locs, f.fe.SplitSlot(&ls[i], sfx, offset, ty))
-		}
-		return locs
-	}
-
-	// removeTrivialWrapperTypes unwraps layers of
-	// struct { singleField SomeType } and [1]SomeType
-	// until a non-wrapper type is reached.  This is useful
-	// for working with assignments to/from interface data
-	// fields (either second operand to OpIMake or OpIData)
-	// where the wrapping or type conversion can be elided
-	// because of type conversions/assertions in source code
-	// that do not appear in SSA.
-	removeTrivialWrapperTypes := func(t *types.Type) *types.Type {
-		for {
-			if t.IsStruct() && t.NumFields() == 1 {
-				t = t.Field(0).Type
-				continue
-			}
-			if t.IsArray() && t.NumElem() == 1 {
-				t = t.Elem()
-				continue
-			}
-			break
-		}
-		return t
-	}
-
-	// Calls that need lowering have some number of inputs, including a memory input,
-	// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
-
-	// With the current ABI those inputs need to be converted into stores to memory,
-	// rethreading the call's memory input to the first, and the new call now receiving the last.
-
-	// With the current ABI, the outputs need to be converted to loads, which will all use the call's
-	// memory output as their input.
-
-	// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
-	// through a chain of Struct/Array/builtin Select operations.  If the chain of selectors does not
-	// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
-	// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
-	// accumulates the offset.
-	// It emits the code necessary to implement the leaf select operation that leads to the root.
-	//
-	// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
-	var rewriteSelect func(leaf *Value, selector *Value, offset int64) []LocalSlot
-	rewriteSelect = func(leaf *Value, selector *Value, offset int64) []LocalSlot {
-		if debug {
-			fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset)
-		}
-		var locs []LocalSlot
-		leafType := leaf.Type
-		if len(selector.Args) > 0 {
-			w := selector.Args[0]
-			if w.Op == OpCopy {
-				for w.Op == OpCopy {
-					w = w.Args[0]
-				}
-				selector.SetArg(0, w)
-			}
-		}
-		switch selector.Op {
-		case OpArg:
-			if !isAlreadyExpandedAggregateType(selector.Type) {
-				if leafType == selector.Type { // OpIData leads us here, sometimes.
-					leaf.copyOf(selector)
-				} else {
-					f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
-				}
-				if debug {
-					fmt.Printf("\tOpArg, break\n")
-				}
-				break
-			}
-			switch leaf.Op {
-			case OpIData, OpStructSelect, OpArraySelect:
-				leafType = removeTrivialWrapperTypes(leaf.Type)
-			}
-			aux := selector.Aux
-			auxInt := selector.AuxInt + offset
-			if leaf.Block == selector.Block {
-				leaf.reset(OpArg)
-				leaf.Aux = aux
-				leaf.AuxInt = auxInt
-				leaf.Type = leafType
-			} else {
-				w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux)
-				leaf.copyOf(w)
-				if debug {
-					fmt.Printf("\tnew %s\n", w.LongString())
-				}
-			}
-			for _, s := range namedSelects[selector] {
-				locs = append(locs, f.Names[s.locIndex])
-			}
-
-		case OpLoad: // We end up here because of IData of immediate structures.
-			// Failure case:
-			// (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
-			// the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
-			//
-			// GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
-			// cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
-			// b2: ← b1
-			// v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
-			// v21 (142) = SelectN <mem> [1] v20
-			// v22 (142) = SelectN <interface {}> [0] v20
-			// b15: ← b8
-			// v71 (+143) = IData <Nodes> v22 (v[Nodes])
-			// v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
-			//
-			// translates (w/o the "case OpLoad:" above) to:
-			//
-			// b2: ← b1
-			// v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
-			// v23 (142) = Load <*uintptr> v19 v20
-			// v823 (142) = IsNonNil <bool> v23
-			// v67 (+143) = Load <*[]*Node> v880 v20
-			// b15: ← b8
-			// v827 (146) = StructSelect <*[]*Node> [0] v67
-			// v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
-			// v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
-			// i.e., the struct select is generated and remains in place because it is not applied to an actual structure.
-			// The OpLoad was created to load the single field of the IData
-			// This case removes that StructSelect.
-			if leafType != selector.Type {
-				f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
-			}
-			leaf.copyOf(selector)
-			for _, s := range namedSelects[selector] {
-				locs = append(locs, f.Names[s.locIndex])
-			}
-
-		case OpSelectN:
-			// TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
-			call := selector.Args[0]
-			aux := call.Aux.(*AuxCall)
-			which := selector.AuxInt
-			if which == aux.NResults() { // mem is after the results.
-				// rewrite v as a Copy of call -- the replacement call will produce a mem.
-				leaf.copyOf(call)
-			} else {
-				leafType := removeTrivialWrapperTypes(leaf.Type)
-				if canSSAType(leafType) {
-					pt := types.NewPtr(leafType)
-					off := offsetFrom(sp, offset+aux.OffsetOfResult(which), pt)
-					// Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
-					if leaf.Block == call.Block {
-						leaf.reset(OpLoad)
-						leaf.SetArgs2(off, call)
-						leaf.Type = leafType
-					} else {
-						w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
-						leaf.copyOf(w)
-						if debug {
-							fmt.Printf("\tnew %s\n", w.LongString())
-						}
-					}
-					for _, s := range namedSelects[selector] {
-						locs = append(locs, f.Names[s.locIndex])
-					}
-				} else {
-					f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
-				}
-			}
-
-		case OpStructSelect:
-			w := selector.Args[0]
-			var ls []LocalSlot
-			if w.Type.Etype != types.TSTRUCT { // IData artifact
-				ls = rewriteSelect(leaf, w, offset)
-			} else {
-				ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
-				if w.Op != OpIData {
-					for _, l := range ls {
-						locs = append(locs, f.fe.SplitStruct(l, int(selector.AuxInt)))
-					}
-				}
-			}
-
-		case OpArraySelect:
-			w := selector.Args[0]
-			rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt)
-
-		case OpInt64Hi:
-			w := selector.Args[0]
-			ls := rewriteSelect(leaf, w, offset+hiOffset)
-			locs = splitSlots(ls, ".hi", hiOffset, leafType)
-
-		case OpInt64Lo:
-			w := selector.Args[0]
-			ls := rewriteSelect(leaf, w, offset+lowOffset)
-			locs = splitSlots(ls, ".lo", lowOffset, leafType)
-
-		case OpStringPtr:
-			ls := rewriteSelect(leaf, selector.Args[0], offset)
-			locs = splitSlots(ls, ".ptr", 0, typ.BytePtr)
-
-		case OpSlicePtr:
-			w := selector.Args[0]
-			ls := rewriteSelect(leaf, w, offset)
-			locs = splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
-
-		case OpITab:
-			w := selector.Args[0]
-			ls := rewriteSelect(leaf, w, offset)
-			sfx := ".itab"
-			if w.Type.IsEmptyInterface() {
-				sfx = ".type"
-			}
-			locs = splitSlots(ls, sfx, 0, typ.Uintptr)
-
-		case OpComplexReal:
-			ls := rewriteSelect(leaf, selector.Args[0], offset)
-			locs = splitSlots(ls, ".real", 0, leafType)
-
-		case OpComplexImag:
-			ls := rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part.
-			locs = splitSlots(ls, ".imag", leafType.Width, leafType)
-
-		case OpStringLen, OpSliceLen:
-			ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
-			locs = splitSlots(ls, ".len", ptrSize, leafType)
-
-		case OpIData:
-			ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
-			locs = splitSlots(ls, ".data", ptrSize, leafType)
-
-		case OpSliceCap:
-			ls := rewriteSelect(leaf, selector.Args[0], offset+2*ptrSize)
-			locs = splitSlots(ls, ".cap", 2*ptrSize, leafType)
-
-		case OpCopy: // If it's an intermediate result, recurse
-			locs = rewriteSelect(leaf, selector.Args[0], offset)
-			for _, s := range namedSelects[selector] {
-				// this copy may have had its own name, preserve that, too.
-				locs = append(locs, f.Names[s.locIndex])
-			}
-
-		default:
-			// Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
-		}
-
-		return locs
-	}
-
-	// storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed
-	// stores of non-aggregate types.  It recursively walks up a chain of selectors until it reaches a Load or an Arg.
-	// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
-	var storeArgOrLoad func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value
-
-	// decomposeArgOrLoad is a helper for storeArgOrLoad.
-	// It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions
-	// passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead.
-	decomposeArgOrLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64,
-		decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
-		decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
-		u := source.Type
-		switch u.Etype {
-		case types.TARRAY:
-			elem := u.Elem()
-			for i := int64(0); i < u.NumElem(); i++ {
-				elemOff := i * elem.Size()
-				mem = decomposeOne(pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff)
-				pos = pos.WithNotStmt()
-			}
-			return mem
-		case types.TSTRUCT:
-			for i := 0; i < u.NumFields(); i++ {
-				fld := u.Field(i)
-				mem = decomposeOne(pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset)
-				pos = pos.WithNotStmt()
-			}
-			return mem
-		case types.TINT64, types.TUINT64:
-			if t.Width == regSize {
-				break
-			}
-			tHi, tLo := intPairTypes(t.Etype)
-			mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset)
-			pos = pos.WithNotStmt()
-			return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset)
-		case types.TINTER:
-			return decomposeTwo(pos, b, base, source, mem, typ.Uintptr, typ.BytePtr, source.AuxInt, offset)
-		case types.TSTRING:
-			return decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset)
-		case types.TCOMPLEX64:
-			return decomposeTwo(pos, b, base, source, mem, typ.Float32, typ.Float32, source.AuxInt, offset)
-		case types.TCOMPLEX128:
-			return decomposeTwo(pos, b, base, source, mem, typ.Float64, typ.Float64, source.AuxInt, offset)
-		case types.TSLICE:
-			mem = decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset)
-			return decomposeOne(pos, b, base, source, mem, typ.Int, source.AuxInt+2*ptrSize, offset+2*ptrSize)
-		}
-		return nil
-	}
-
-	// storeOneArg creates a decomposed (one step) arg that is then stored.
-	// pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input,
-	// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
-	storeOneArg := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
-		w := common[selKey{source, offArg, t.Width, t}]
-		if w == nil {
-			w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux)
-			common[selKey{source, offArg, t.Width, t}] = w
-		}
-		return storeArgOrLoad(pos, b, base, w, mem, t, offStore)
-	}
-
-	// storeOneLoad creates a decomposed (one step) load that is then stored.
-	storeOneLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
-		from := offsetFrom(source.Args[0], offArg, types.NewPtr(t))
-		w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
-		return storeArgOrLoad(pos, b, base, w, mem, t, offStore)
-	}
-
-	storeTwoArg := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
-		mem = storeOneArg(pos, b, base, source, mem, t1, offArg, offStore)
-		pos = pos.WithNotStmt()
-		t1Size := t1.Size()
-		return storeOneArg(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
-	}
-
-	storeTwoLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
-		mem = storeOneLoad(pos, b, base, source, mem, t1, offArg, offStore)
-		pos = pos.WithNotStmt()
-		t1Size := t1.Size()
-		return storeOneLoad(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
-	}
-
-	storeArgOrLoad = func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value {
-		if debug {
-			fmt.Printf("\tstoreArgOrLoad(%s;  %s;  %s;  %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset)
-		}
-
-		switch source.Op {
-		case OpCopy:
-			return storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset)
-
-		case OpLoad:
-			ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad)
-			if ret != nil {
-				return ret
-			}
-
-		case OpArg:
-			ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg)
-			if ret != nil {
-				return ret
-			}
-
-		case OpArrayMake0, OpStructMake0:
-			return mem
-
-		case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
-			for i := 0; i < t.NumFields(); i++ {
-				fld := t.Field(i)
-				mem = storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset)
-				pos = pos.WithNotStmt()
-			}
-			return mem
-
-		case OpArrayMake1:
-			return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
-
-		case OpInt64Make:
-			tHi, tLo := intPairTypes(t.Etype)
-			mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset)
-			pos = pos.WithNotStmt()
-			return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset)
-
-		case OpComplexMake:
-			tPart := typ.Float32
-			wPart := t.Width / 2
-			if wPart == 8 {
-				tPart = typ.Float64
-			}
-			mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset)
-			pos = pos.WithNotStmt()
-			return storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart)
-
-		case OpIMake:
-			mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.Uintptr, offset)
-			pos = pos.WithNotStmt()
-			return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.BytePtr, offset+ptrSize)
-
-		case OpStringMake:
-			mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset)
-			pos = pos.WithNotStmt()
-			return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize)
-
-		case OpSliceMake:
-			mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset)
-			pos = pos.WithNotStmt()
-			mem = storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize)
-			return storeArgOrLoad(pos, b, base, source.Args[2], mem, typ.Int, offset+2*ptrSize)
-		}
-
-		// For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
-		switch t.Etype {
-		case types.TARRAY:
-			elt := t.Elem()
-			if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize {
-				t = removeTrivialWrapperTypes(t)
-				// it could be a leaf type, but the "leaf" could be complex64 (for example)
-				return storeArgOrLoad(pos, b, base, source, mem, t, offset)
-			}
-			for i := int64(0); i < t.NumElem(); i++ {
-				sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
-				mem = storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width)
-				pos = pos.WithNotStmt()
-			}
-			return mem
-
-		case types.TSTRUCT:
-			if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize {
-				// This peculiar test deals with accesses to immediate interface data.
-				// It works okay because everything is the same size.
-				// Example code that triggers this can be found in go/constant/value.go, function ToComplex
-				// v119 (+881) = IData <intVal> v6
-				// v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
-				// This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
-				// Guard against "struct{struct{*foo}}"
-				// Other rewriting phases create minor glitches when they transform IData, for instance the
-				// interface-typed Arg "x" of ToFloat in go/constant/value.go
-				//   v6 (858) = Arg <Value> {x} (x[Value], x[Value])
-				// is rewritten by decomposeArgs into
-				//   v141 (858) = Arg <uintptr> {x}
-				//   v139 (858) = Arg <*uint8> {x} [8]
-				// because of a type case clause on line 862 of go/constant/value.go
-				//  	case intVal:
-				//		   return itof(x)
-			// v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields
-			// of a *uint8, which does not succeed.
-				t = removeTrivialWrapperTypes(t)
-				// it could be a leaf type, but the "leaf" could be complex64 (for example)
-				return storeArgOrLoad(pos, b, base, source, mem, t, offset)
-			}
-
-			for i := 0; i < t.NumFields(); i++ {
-				fld := t.Field(i)
-				sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
-				mem = storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset)
-				pos = pos.WithNotStmt()
-			}
-			return mem
-
-		case types.TINT64, types.TUINT64:
-			if t.Width == regSize {
-				break
-			}
-			tHi, tLo := intPairTypes(t.Etype)
-			sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset)
-			pos = pos.WithNotStmt()
-			sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
-			return storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+lowOffset)
-
-		case types.TINTER:
-			sel := source.Block.NewValue1(pos, OpITab, typ.BytePtr, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset)
-			pos = pos.WithNotStmt()
-			sel = source.Block.NewValue1(pos, OpIData, typ.BytePtr, source)
-			return storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset+ptrSize)
-
-		case types.TSTRING:
-			sel := source.Block.NewValue1(pos, OpStringPtr, typ.BytePtr, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset)
-			pos = pos.WithNotStmt()
-			sel = source.Block.NewValue1(pos, OpStringLen, typ.Int, source)
-			return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize)
-
-		case types.TSLICE:
-			et := types.NewPtr(t.Elem())
-			sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, et, offset)
-			pos = pos.WithNotStmt()
-			sel = source.Block.NewValue1(pos, OpSliceLen, typ.Int, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize)
-			sel = source.Block.NewValue1(pos, OpSliceCap, typ.Int, source)
-			return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+2*ptrSize)
-
-		case types.TCOMPLEX64:
-			sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float32, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset)
-			pos = pos.WithNotStmt()
-			sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float32, source)
-			return storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset+4)
-
-		case types.TCOMPLEX128:
-			sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float64, source)
-			mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset)
-			pos = pos.WithNotStmt()
-			sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float64, source)
-			return storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset+8)
-		}
-
-		dst := offsetFrom(base, offset, types.NewPtr(t))
-		x := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
-		if debug {
-			fmt.Printf("\t\tstoreArg returns %s\n", x.LongString())
-		}
-		return x
-	}
-
-	// rewriteArgs removes all the Args from a call and converts the call args into appropriate
-	// stores (or later, register movement).  Extra args for interface and closure calls are ignored,
-	// but removed.
-	rewriteArgs := func(v *Value, firstArg int) *Value {
-		// Thread the stores on the memory arg
-		aux := v.Aux.(*AuxCall)
-		pos := v.Pos.WithNotStmt()
-		m0 := v.Args[len(v.Args)-1]
-		mem := m0
-		for i, a := range v.Args {
-			if i < firstArg {
-				continue
-			}
-			if a == m0 { // mem is last.
-				break
-			}
-			auxI := int64(i - firstArg)
-			if a.Op == OpDereference {
-				if a.MemoryArg() != m0 {
-					f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
-				}
-				// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
-				// TODO this will be more complicated with registers in the picture.
-				source := a.Args[0]
-				dst := f.ConstOffPtrSP(source.Type, aux.OffsetOfArg(auxI), sp)
-				if a.Uses == 1 && a.Block == v.Block {
-					a.reset(OpMove)
-					a.Pos = pos
-					a.Type = types.TypeMem
-					a.Aux = aux.TypeOfArg(auxI)
-					a.AuxInt = aux.SizeOfArg(auxI)
-					a.SetArgs3(dst, source, mem)
-					mem = a
-				} else {
-					mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, source, mem)
-					mem.AuxInt = aux.SizeOfArg(auxI)
-				}
-			} else {
-				if debug {
-					fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
-				}
-				mem = storeArgOrLoad(pos, v.Block, sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
-			}
-		}
-		v.resetArgs()
-		return mem
+	if x.debug {
+		fmt.Printf("\nexpandCalls(%s)\n", f.Name)
 	}
 
 	// TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
@@ -680,28 +697,67 @@
 		for _, v := range b.Values {
 			switch v.Op {
 			case OpStaticLECall:
-				mem := rewriteArgs(v, 0)
+				mem := x.rewriteArgs(v, 0)
 				v.SetArgs1(mem)
 			case OpClosureLECall:
 				code := v.Args[0]
 				context := v.Args[1]
-				mem := rewriteArgs(v, 2)
+				mem := x.rewriteArgs(v, 2)
 				v.SetArgs3(code, context, mem)
 			case OpInterLECall:
 				code := v.Args[0]
-				mem := rewriteArgs(v, 1)
+				mem := x.rewriteArgs(v, 1)
 				v.SetArgs2(code, mem)
 			}
 		}
+		if isBlockMultiValueExit(b) {
+			// Very similar to code in rewriteArgs, but results instead of args.
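+			// Each result feeding the exit is stored back to its named
+			// result slot (aux.results[i]), threading the memory; the
+			// block's control then becomes that final memory.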
+			v := b.Controls[0]
+			m0 := v.MemoryArg()
+			mem := m0
+			aux := f.OwnAux
+			pos := v.Pos.WithNotStmt()
+			for j, a := range v.Args {
+				i := int64(j)
+				if a == m0 {
+					break
+				}
+				auxType := aux.TypeOfResult(i)
+				auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, x.sp, mem)
+				auxOffset := int64(0)
+				auxSize := aux.SizeOfResult(i)
+				if a.Op == OpDereference {
+					// Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
+					if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
+						dAddr.Args[1] == dMem && dAddr.Aux == aux.results[i].Name {
+						if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
+							dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
+						}
+						continue
+					}
+					mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
+				} else {
+					if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
+						addr := a.Args[0]
+						if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.results[i].Name {
+							continue
+						}
+					}
+					mem = x.storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset)
+				}
+			}
+			b.SetControl(mem)
+			v.reset(OpInvalid) // otherwise it can have a mem operand which will fail check(), even though it is dead.
+		}
 	}
 
 	for i, name := range f.Names {
 		t := name.Type
-		if isAlreadyExpandedAggregateType(t) {
+		if x.isAlreadyExpandedAggregateType(t) {
 			for j, v := range f.NamedValues[name] {
-				if v.Op == OpSelectN || v.Op == OpArg && isAlreadyExpandedAggregateType(v.Type) {
-					ns := namedSelects[v]
-					namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
+				if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) {
+					ns := x.namedSelects[v]
+					x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
 				}
 			}
 		}
@@ -715,22 +771,22 @@
 				t := v.Aux.(*types.Type)
 				source := v.Args[1]
 				tSrc := source.Type
-				iAEATt := isAlreadyExpandedAggregateType(t)
+				iAEATt := x.isAlreadyExpandedAggregateType(t)
 
 				if !iAEATt {
 					// guarding against store immediate struct into interface data field -- store type is *uint8
 					// TODO can this happen recursively?
-					iAEATt = isAlreadyExpandedAggregateType(tSrc)
+					iAEATt = x.isAlreadyExpandedAggregateType(tSrc)
 					if iAEATt {
 						t = tSrc
 					}
 				}
 				if iAEATt {
-					if debug {
+					if x.debug {
 						fmt.Printf("Splitting store %s\n", v.LongString())
 					}
 					dst, mem := v.Args[0], v.Args[2]
-					mem = storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0)
+					mem = x.storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0)
 					v.copyOf(mem)
 				}
 			}
@@ -759,7 +815,7 @@
 				switch w.Op {
 				case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
 					val2Preds[w] += 1
-					if debug {
+					if x.debug {
 						fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
 					}
 				}
@@ -768,18 +824,18 @@
 			case OpSelectN:
 				if _, ok := val2Preds[v]; !ok {
 					val2Preds[v] = 0
-					if debug {
+					if x.debug {
 						fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
 					}
 				}
 
 			case OpArg:
-				if !isAlreadyExpandedAggregateType(v.Type) {
+				if !x.isAlreadyExpandedAggregateType(v.Type) {
 					continue
 				}
 				if _, ok := val2Preds[v]; !ok {
 					val2Preds[v] = 0
-					if debug {
+					if x.debug {
 						fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
 					}
 				}
@@ -790,7 +846,7 @@
 				which := v.AuxInt
 				aux := call.Aux.(*AuxCall)
 				pt := v.Type
-				off := offsetFrom(sp, aux.OffsetOfResult(which), pt)
+				off := x.offsetFrom(x.sp, aux.OffsetOfResult(which), pt)
 				v.copyOf(off)
 			}
 		}
@@ -812,7 +868,7 @@
 		if bi == bj {
 			return vi.ID < vj.ID
 		}
-		return sdom.domorder(bi) > sdom.domorder(bj) // reverse the order to put dominators last.
+		return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last.
 	}
 
 	// Accumulate order in allOrdered
@@ -846,7 +902,7 @@
 		}
 	}
 
-	common = make(map[selKey]*Value)
+	x.common = make(map[selKey]*Value)
 	// Rewrite duplicate selectors as copies where possible.
 	for i := len(allOrdered) - 1; i >= 0; i-- {
 		v := allOrdered[i]
@@ -868,7 +924,7 @@
 		offset := int64(0)
 		switch v.Op {
 		case OpStructSelect:
-			if w.Type.Etype == types.TSTRUCT {
+			if w.Type.Kind() == types.TSTRUCT {
 				offset = w.Type.FieldOff(int(v.AuxInt))
 			} else { // Immediate interface data artifact, offset is zero.
 				f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
@@ -878,26 +934,26 @@
 		case OpSelectN:
 			offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt)
 		case OpInt64Hi:
-			offset = hiOffset
+			offset = x.hiOffset
 		case OpInt64Lo:
-			offset = lowOffset
+			offset = x.lowOffset
 		case OpStringLen, OpSliceLen, OpIData:
-			offset = ptrSize
+			offset = x.ptrSize
 		case OpSliceCap:
-			offset = 2 * ptrSize
+			offset = 2 * x.ptrSize
 		case OpComplexImag:
 			offset = size
 		}
 		sk := selKey{from: w, size: size, offset: offset, typ: typ}
-		dupe := common[sk]
+		dupe := x.common[sk]
 		if dupe == nil {
-			common[sk] = v
-		} else if sdom.IsAncestorEq(dupe.Block, v.Block) {
+			x.common[sk] = v
+		} else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
 			v.copyOf(dupe)
 		} else {
 			// Because values are processed in dominator order, the old common[s] will never dominate after a miss is seen.
 			// Installing the new value might match some future values.
-			common[sk] = v
+			x.common[sk] = v
 		}
 	}
 
@@ -906,7 +962,7 @@
 
 	// Rewrite selectors.
 	for i, v := range allOrdered {
-		if debug {
+		if x.debug {
 			b := v.Block
 			fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
 		}
@@ -917,13 +973,13 @@
 		if v.Op == OpCopy {
 			continue
 		}
-		locs := rewriteSelect(v, v, 0)
+		locs := x.rewriteSelect(v, v, 0)
 		// Install new names.
 		if v.Type.IsMemory() {
 			continue
 		}
 		// Leaf types may have debug locations
-		if !isAlreadyExpandedAggregateType(v.Type) {
+		if !x.isAlreadyExpandedAggregateType(v.Type) {
 			for _, l := range locs {
 				f.NamedValues[l] = append(f.NamedValues[l], v)
 			}
@@ -931,7 +987,7 @@
 			continue
 		}
 		// Not-leaf types that had debug locations need to lose them.
-		if ns, ok := namedSelects[v]; ok {
+		if ns, ok := x.namedSelects[v]; ok {
 			toDelete = append(toDelete, ns...)
 		}
 	}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index b4c3e5c..32e6d09 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -5,13 +5,13 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
 	"cmd/internal/obj/s390x"
 	"cmd/internal/obj/x86"
 	"cmd/internal/src"
-	"fmt"
 	"testing"
 )
 
@@ -36,10 +36,10 @@
 		tb.Fatalf("unknown arch %s", arch)
 	}
 	if ctxt.Arch.PtrSize != 8 {
-		tb.Fatal("dummyTypes is 64-bit only")
+		tb.Fatal("testTypes is 64-bit only")
 	}
 	c := &Conf{
-		config: NewConfig(arch, dummyTypes, ctxt, true),
+		config: NewConfig(arch, testTypes, ctxt, true),
 		tb:     tb,
 	}
 	return c
@@ -53,131 +53,94 @@
 
 func (c *Conf) Frontend() Frontend {
 	if c.fe == nil {
-		c.fe = DummyFrontend{t: c.tb, ctxt: c.config.ctxt}
+		c.fe = TestFrontend{t: c.tb, ctxt: c.config.ctxt}
 	}
 	return c.fe
 }
 
-// DummyFrontend is a test-only frontend.
+// TestFrontend is a test-only frontend.
 // It assumes 64 bit integers and pointers.
-type DummyFrontend struct {
+type TestFrontend struct {
 	t    testing.TB
 	ctxt *obj.Link
 }
 
-type DummyAuto struct {
-	t *types.Type
-	s string
-}
-
-func (d *DummyAuto) Typ() *types.Type {
-	return d.t
-}
-
-func (d *DummyAuto) String() string {
-	return d.s
-}
-
-func (d *DummyAuto) StorageClass() StorageClass {
-	return ClassAuto
-}
-
-func (d *DummyAuto) IsSynthetic() bool {
-	return false
-}
-
-func (d *DummyAuto) IsAutoTmp() bool {
-	return true
-}
-
-func (DummyFrontend) StringData(s string) *obj.LSym {
+func (TestFrontend) StringData(s string) *obj.LSym {
 	return nil
 }
-func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
-	return &DummyAuto{t: t, s: "aDummyAuto"}
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name {
+	n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
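+	// PAUTO marks the name as a function-local (stack-allocated) variable,
+	// as a real frontend would produce for an auto.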
+	n.Class = ir.PAUTO
+	return n
 }
-func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}
+func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
+	return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}
 }
-func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8}
+func (d TestFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
+	return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off + 8}
 }
-func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
 	return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off},
-		LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8},
-		LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16}
+		LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8},
+		LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 16}
 }
-func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
 	if s.Type.Size() == 16 {
-		return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8}
+		return LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off + 8}
 	}
-	return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4}
+	return LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off + 4}
 }
-func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
 	if s.Type.IsSigned() {
-		return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+		return LocalSlot{N: s.N, Type: testTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
 	}
-	return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+	return LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
 }
-func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
+func (d TestFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
 	return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)}
 }
-func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
+func (d TestFrontend) SplitArray(s LocalSlot) LocalSlot {
 	return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
 }
 
-func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
 	return LocalSlot{N: parent.N, Type: t, Off: offset}
 }
-func (DummyFrontend) Line(_ src.XPos) string {
+func (TestFrontend) Line(_ src.XPos) string {
 	return "unknown.go:0"
 }
-func (DummyFrontend) AllocFrame(f *Func) {
+func (TestFrontend) AllocFrame(f *Func) {
 }
-func (d DummyFrontend) Syslook(s string) *obj.LSym {
+func (d TestFrontend) Syslook(s string) *obj.LSym {
 	return d.ctxt.Lookup(s)
 }
-func (DummyFrontend) UseWriteBarrier() bool {
+func (TestFrontend) UseWriteBarrier() bool {
 	return true // only writebarrier_test cares
 }
-func (DummyFrontend) SetWBPos(pos src.XPos) {
+func (TestFrontend) SetWBPos(pos src.XPos) {
 }
 
-func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Log() bool                            { return true }
+func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Log() bool                            { return true }
 
-func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
-func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{})  { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Debug_checknil() bool                               { return false }
+func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{})  { d.t.Logf(msg, args...) }
+func (d TestFrontend) Debug_checknil() bool                               { return false }
 
-func (d DummyFrontend) MyImportPath() string {
+func (d TestFrontend) MyImportPath() string {
 	return "my/import/path"
 }
 
-var dummyTypes Types
+var testTypes Types
 
 func init() {
 	// Initialize just enough of the universe and the types package to make our tests function.
 	// TODO(josharian): move universe initialization to the types package,
 	// so this test setup can share it.
 
-	types.Tconv = func(t *types.Type, flag, mode int) string {
-		return t.Etype.String()
-	}
-	types.Sconv = func(s *types.Sym, flag, mode int) string {
-		return "sym"
-	}
-	types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
-		fmt.Fprintf(s, "sym")
-	}
-	types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
-		fmt.Fprintf(s, "%v", t.Etype)
-	}
-	types.Dowidth = func(t *types.Type) {}
-
 	for _, typ := range [...]struct {
 		width int64
-		et    types.EType
+		et    types.Kind
 	}{
 		{1, types.TINT8},
 		{1, types.TUINT8},
@@ -198,12 +161,12 @@
 		t.Align = uint8(typ.width)
 		types.Types[typ.et] = t
 	}
-	dummyTypes.SetTypPtrs()
+	testTypes.SetTypPtrs()
 }
 
-func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
+func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
 
-func (d DummyFrontend) CanSSA(t *types.Type) bool {
-	// There are no un-SSAable types in dummy land.
+func (d TestFrontend) CanSSA(t *types.Type) bool {
+	// There are no un-SSAable types in test land.
 	return true
 }
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index e6f899a..de99a8d 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -58,6 +58,11 @@
 	// of keys to make iteration order deterministic.
 	Names []LocalSlot
 
+	// RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
+	RegArgs []ArgPair
+	// OwnAux is the AuxCall describing the parameters and results of this function.
+	OwnAux *AuxCall
+
 	// WBLoads is a list of Blocks that branch on the write
 	// barrier flag. Safe-points are disabled from the OpLoad that
 	// reads the write-barrier flag until the control flow rejoins
@@ -377,13 +382,7 @@
 }
 
 // NewValue0A returns a new value in the block with no arguments and an aux value.
-func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux interface{}) *Value {
-	if _, ok := aux.(int64); ok {
-		// Disallow int64 aux values. They should be in the auxint field instead.
-		// Maybe we want to allow this at some point, but for now we disallow it
-		// to prevent errors like using NewValue1A instead of NewValue1I.
-		b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)
-	}
+func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux Aux) *Value {
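+	// The typed Aux parameter makes the old dynamic rejection of int64 aux
+	// values unnecessary: a bare int64 (which belongs in AuxInt) no longer
+	// satisfies the parameter type.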
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = 0
 	v.Aux = aux
@@ -392,7 +391,7 @@
 }
 
 // NewValue0IA returns a new value in the block with no arguments and both an auxint and an aux value.
-func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}) *Value {
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux) *Value {
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = auxint
 	v.Aux = aux
@@ -421,7 +420,7 @@
 }
 
 // NewValue1A returns a new value in the block with one argument and an aux value.
-func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg *Value) *Value {
+func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux Aux, arg *Value) *Value {
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = 0
 	v.Aux = aux
@@ -432,7 +431,7 @@
 }
 
 // NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
-func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg *Value) *Value {
+func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg *Value) *Value {
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = auxint
 	v.Aux = aux
@@ -455,7 +454,7 @@
 }
 
 // NewValue2A returns a new value in the block with two arguments and one aux value.
-func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1 *Value) *Value {
+func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1 *Value) *Value {
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = 0
 	v.Aux = aux
@@ -480,7 +479,7 @@
 }
 
 // NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values.
-func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg0, arg1 *Value) *Value {
+func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg0, arg1 *Value) *Value {
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = auxint
 	v.Aux = aux
@@ -521,7 +520,7 @@
 }
 
 // NewValue3A returns a new value in the block with three arguments and an aux value.
-func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *Value) *Value {
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2 *Value) *Value {
 	v := b.Func.newValue(op, t, b, pos)
 	v.AuxInt = 0
 	v.Aux = aux
@@ -633,7 +632,7 @@
 }
 func (f *Func) ConstEmptyString(t *types.Type) *Value {
 	v := f.constVal(OpConstString, t, constEmptyStringMagic, false)
-	v.Aux = ""
+	v.Aux = StringToAux("")
 	return v
 }
 func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value {
@@ -777,7 +776,7 @@
 }
 
 func (f *Func) spSb() (sp, sb *Value) {
-	initpos := f.Entry.Pos
+	initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out and then recreated, the position should be the same.
 	for _, v := range f.Entry.Values {
 		if v.Op == OpSB {
 			sb = v
@@ -786,7 +785,7 @@
 			sp = v
 		}
 		if sb != nil && sp != nil {
-			break
+			return
 		}
 	}
 	if sb == nil {
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
index 568c643..276c444 100644
--- a/src/cmd/compile/internal/ssa/func_test.go
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -232,7 +232,7 @@
 }
 
 // Valu defines a value in a block.
-func Valu(name string, op Op, t *types.Type, auxint int64, aux interface{}, args ...string) valu {
+func Valu(name string, op Op, t *types.Type, auxint int64, aux Aux, args ...string) valu {
 	return valu{name, op, t, auxint, aux, args}
 }
 
@@ -277,7 +277,7 @@
 	op     Op
 	t      *types.Type
 	auxint int64
-	aux    interface{}
+	aux    Aux
 	args   []string
 }
 
@@ -402,12 +402,12 @@
 			cfg.Fun("entry",
 				Bloc("entry",
 					Valu("mem", OpInitMem, types.TypeMem, 0, nil),
-					Valu("a", OpConst64, cfg.config.Types.Int64, 0, 14),
+					Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("foo")),
 					Exit("mem"))),
 			cfg.Fun("entry",
 				Bloc("entry",
 					Valu("mem", OpInitMem, types.TypeMem, 0, nil),
-					Valu("a", OpConst64, cfg.config.Types.Int64, 0, 26),
+					Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("bar")),
 					Exit("mem"))),
 		},
 		// value args different
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index fbc12fd..df03cb7 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -475,7 +475,7 @@
 (CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
 
 // Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x))
+(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x))
 
 // strength reduction
 // Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf:
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index a866a96..3c75bcf 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -361,31 +361,31 @@
 // Adjust zeros to be a multiple of 16 bytes.
 (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
 	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
-		(MOVOstore destptr (MOVOconst [0]) mem))
+		(MOVOstorezero destptr mem))
 
 (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
 	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
 		(MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
 
 (Zero [16] destptr mem) && config.useSSE =>
-	(MOVOstore destptr (MOVOconst [0]) mem)
+	(MOVOstorezero destptr mem)
 (Zero [32] destptr mem) && config.useSSE =>
-	(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
-		(MOVOstore destptr (MOVOconst [0]) mem))
+	(MOVOstorezero (OffPtr <destptr.Type> destptr [16])
+		(MOVOstorezero destptr mem))
 (Zero [48] destptr mem) && config.useSSE =>
-	(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
-		(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
-			(MOVOstore destptr (MOVOconst [0]) mem)))
+	(MOVOstorezero (OffPtr <destptr.Type> destptr [32])
+		(MOVOstorezero (OffPtr <destptr.Type> destptr [16])
+			(MOVOstorezero destptr mem)))
 (Zero [64] destptr mem) && config.useSSE =>
-	(MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0])
-		(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
-			(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
-				(MOVOstore destptr (MOVOconst [0]) mem))))
+	(MOVOstorezero (OffPtr <destptr.Type> destptr [48])
+		(MOVOstorezero (OffPtr <destptr.Type> destptr [32])
+			(MOVOstorezero (OffPtr <destptr.Type> destptr [16])
+				(MOVOstorezero destptr mem))))
 
 // Medium zeroing uses a duff device.
 (Zero [s] destptr mem)
 	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
-	(DUFFZERO [s] destptr (MOVOconst [0]) mem)
+	(DUFFZERO [s] destptr mem)
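+// The explicit (MOVOconst [0]) operand is gone: under ABIInternal X15 is
+// kept at constant zero (see AMD64Ops.go), so DUFFZERO uses it implicitly.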
 
 // Large zeroing uses REP STOSQ.
 (Zero [s] destptr mem)
@@ -459,7 +459,7 @@
 (IsInBounds idx len) => (SETB (CMPQ idx len))
 (IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
 (NilCheck ...) => (LoweredNilCheck ...)
-(GetG ...) => (LoweredGetG ...)
+(GetG mem) && !base.Flag.ABIWrap => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
 (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
 (GetCallerPC ...) => (LoweredGetCallerPC ...)
 (GetCallerSP ...) => (LoweredGetCallerSP ...)
@@ -916,7 +916,7 @@
 (CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
 
 // Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x))
+(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
 
 // Using MOVZX instead of AND is cheaper.
 (AND(Q|L)const [  0xFF] x) => (MOVBQZX x)
@@ -1900,7 +1900,7 @@
   && c.Val() == 0
   && c2.Val() == 0
   && clobber(x)
-  => (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
+  => (MOVOstorezero [c2.Off32()] {s} p mem)
 
 // Combine stores into larger (unaligned) stores. Little endian.
 (MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index de53726..043162e 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -44,7 +44,7 @@
 	"R11",
 	"R12",
 	"R13",
-	"R14",
+	"g", // a.k.a. R14
 	"R15",
 	"X0",
 	"X1",
@@ -61,7 +61,7 @@
 	"X12",
 	"X13",
 	"X14",
-	"X15",
+	"X15", // constant 0 in ABIInternal
 
 	// If you add registers, update asyncPreempt in runtime
 
@@ -96,11 +96,14 @@
 		cx         = buildReg("CX")
 		dx         = buildReg("DX")
 		bx         = buildReg("BX")
-		gp         = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15")
-		fp         = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15")
+		gp         = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15")
+		g          = buildReg("g")
+		fp         = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14")
+		x15        = buildReg("X15")
 		gpsp       = gp | buildReg("SP")
 		gpspsb     = gpsp | buildReg("SB")
-		callerSave = gp | fp
+		gpspsbg    = gpspsb | g
+		callerSave = gp | fp | g // runtime.setg (and anything calling it) may clobber g
 	)
 	// Common slices of register masks
 	var (
@@ -113,10 +116,10 @@
 		gp01           = regInfo{inputs: nil, outputs: gponly}
 		gp11           = regInfo{inputs: []regMask{gp}, outputs: gponly}
 		gp11sp         = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
-		gp11sb         = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+		gp11sb         = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly}
 		gp21           = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
 		gp21sp         = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
-		gp21sb         = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+		gp21sb         = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly}
 		gp21shift      = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
 		gp11div        = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}}
 		gp21hmul       = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
@@ -125,9 +128,9 @@
 
 		gp2flags     = regInfo{inputs: []regMask{gpsp, gpsp}}
 		gp1flags     = regInfo{inputs: []regMask{gpsp}}
-		gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}}
-		gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
-		gp2flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+		gp0flagsLoad = regInfo{inputs: []regMask{gpspsbg, 0}}
+		gp1flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+		gp2flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}}
 		flagsgp      = regInfo{inputs: nil, outputs: gponly}
 
 		gp11flags      = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
@@ -136,24 +139,24 @@
 		readflags = regInfo{inputs: nil, outputs: gponly}
 		flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
 
-		gpload      = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
-		gp21load    = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
-		gploadidx   = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
-		gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
+		gpload      = regInfo{inputs: []regMask{gpspsbg, 0}, outputs: gponly}
+		gp21load    = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: gponly}
+		gploadidx   = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}, outputs: gponly}
+		gp21loadidx = regInfo{inputs: []regMask{gp, gpspsbg, gpsp, 0}, outputs: gponly}
 		gp21pax     = regInfo{inputs: []regMask{gp &^ ax, gp}, outputs: []regMask{gp &^ ax}, clobbers: ax}
 
-		gpstore         = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
-		gpstoreconst    = regInfo{inputs: []regMask{gpspsb, 0}}
-		gpstoreidx      = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
-		gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
-		gpstorexchg     = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: []regMask{gp}}
+		gpstore         = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+		gpstoreconst    = regInfo{inputs: []regMask{gpspsbg, 0}}
+		gpstoreidx      = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}}
+		gpstoreconstidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+		gpstorexchg     = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: []regMask{gp}}
 		cmpxchg         = regInfo{inputs: []regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax}
 
 		fp01        = regInfo{inputs: nil, outputs: fponly}
 		fp21        = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
 		fp31        = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
-		fp21load    = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly}
-		fp21loadidx = regInfo{inputs: []regMask{fp, gpspsb, gpspsb, 0}, outputs: fponly}
+		fp21load    = regInfo{inputs: []regMask{fp, gpspsbg, 0}, outputs: fponly}
+		fp21loadidx = regInfo{inputs: []regMask{fp, gpspsbg, gpspsb, 0}, outputs: fponly}
 		fpgp        = regInfo{inputs: fponly, outputs: gponly}
 		gpfp        = regInfo{inputs: gponly, outputs: fponly}
 		fp11        = regInfo{inputs: fponly, outputs: fponly}
@@ -684,19 +687,20 @@
 		// Note: LEAx{1,2,4,8} must not have OpSB as either argument.
 
 		// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
-		{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"},  // load byte from arg0+auxint+aux. arg1=mem.  Zero extend.
-		{name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},             // ditto, sign extend to int64
-		{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem.  Zero extend.
-		{name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},             // ditto, sign extend to int64
-		{name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"},    // load 4 bytes from arg0+auxint+aux. arg1=mem.  Zero extend.
-		{name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},             // ditto, sign extend to int64
-		{name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"},    // load 8 bytes from arg0+auxint+aux. arg1=mem
-		{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store byte in arg1 to arg0+auxint+aux. arg2=mem
-		{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
-		{name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
-		{name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
-		{name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"},  // load 16 bytes from arg0+auxint+aux. arg1=mem
-		{name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
+		{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"},                                   // load byte from arg0+auxint+aux. arg1=mem.  Zero extend.
+		{name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},                                              // ditto, sign extend to int64
+		{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"},                                  // load 2 bytes from arg0+auxint+aux. arg1=mem.  Zero extend.
+		{name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},                                              // ditto, sign extend to int64
+		{name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"},                                     // load 4 bytes from arg0+auxint+aux. arg1=mem.  Zero extend.
+		{name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},                                              // ditto, sign extend to int64
+		{name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"},                                     // load 8 bytes from arg0+auxint+aux. arg1=mem
+		{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                                     // store byte in arg1 to arg0+auxint+aux. arg2=mem
+		{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                                     // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+		{name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                                     // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+		{name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                                     // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+		{name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"},                                   // load 16 bytes from arg0+auxint+aux. arg1=mem
+		{name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},                                   // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
+		{name: "MOVOstorezero", argLength: 2, reg: regInfo{inputs: []regMask{gpspsb, 0}}, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0+auxint+aux. arg1=mem
 
 		// indexed loads/stores
 		{name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", scale: 1, aux: "SymOff", typ: "UInt8", symEffect: "Read"},  // load a byte from arg0+arg1+auxint+aux. arg2=mem
@@ -735,22 +739,20 @@
 		{name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", scale: 8, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},                    // store 8 bytes of ... 8*arg1 ...
 
 		// arg0 = pointer to start of memory to zero
-		// arg1 = value to store (will always be zero)
-		// arg2 = mem
+		// arg1 = mem
 		// auxint = # of bytes to zero
 		// returns mem
 		{
 			name:      "DUFFZERO",
 			aux:       "Int64",
-			argLength: 3,
+			argLength: 2,
 			reg: regInfo{
-				inputs:   []regMask{buildReg("DI"), buildReg("X0")},
+				inputs:   []regMask{buildReg("DI")},
 				clobbers: buildReg("DI"),
 			},
 			faultOnNilArg0: true,
 			unsafePoint:    true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
 		},
-		{name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true},
 
 		// arg0 = address of memory to zero
 		// arg1 = # of 8-byte words to zero
@@ -830,7 +832,7 @@
 		{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
 		// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
 		// It saves all GP registers if necessary, but may clobber others.
-		{name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), buildReg("AX CX DX BX BP SI R8 R9")}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+		{name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), buildReg("AX CX DX BX BP SI R8 R9")}, clobbers: callerSave &^ (gp | g)}, clobberFlags: true, aux: "Sym", symEffect: "None"},
 
 		{name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"},
 
@@ -935,6 +937,7 @@
 		regnames:        regNamesAMD64,
 		gpregmask:       gp,
 		fpregmask:       fp,
+		specialregmask:  x15,
 		framepointerreg: int8(num["BP"]),
 		linkreg:         -1, // not used
 	})
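
The AMD64 changes above reserve R14 for g and pull X15 out of the allocatable FP set (under the new ABI it becomes a fixed zero register, which is also why MOVOconst disappears and DUFFZERO loses its X0 input). The decimal masks in the regenerated tables further down follow from the bit numbering implied by the comments: AX..R15 at bits 0-15, X0..X15 at bits 16-31, SB at bit 32. A minimal runnable check of that arithmetic, assuming exactly that numbering:

    package main

    import "fmt"

    func main() {
    	const (
    		gp  = uint64(0xFFFF)     // AX..R15: bits 0-15
    		g   = uint64(1) << 14    // R14, now reserved for g
    		fp  = uint64(0xFFFF0000) // X0..X15: bits 16-31
    		x15 = uint64(1) << 31    // X15, now a fixed zero register
    		sb  = uint64(1) << 32    // pseudo-register SB
    	)
    	fmt.Println(fp)         // 4294901760: old FP mask (X0-X15)
    	fmt.Println(fp &^ x15)  // 2147418112: new FP mask (X0-X14)
    	fmt.Println(gp &^ g)    // 49151: GP mask without g
    	fmt.Println(gp&^g | sb) // 4295016447: GP+SB without g
    	fmt.Println(gp | sb)    // 4295032831: GP+SB, g included
    }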
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 11c36b5..de0df36 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -507,7 +507,7 @@
 (TEQ x (MOVWconst [c])) => (TEQconst [c] x)
 
 // Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP x y) && x.ID > y.ID => (InvertFlags (CMP y x))
+(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
 
 // don't extend after proper load
 // MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 3f4d0c1..a0e2a0d 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -1151,7 +1151,7 @@
 (CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
 
 // Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x))
+((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
 
 // mul-neg => mneg
 (NEG (MUL x y)) => (MNEG x y)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index c064046..a762be6 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -1088,7 +1088,7 @@
 (CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
 
 // Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
 
 // ISEL auxInt values 0=LT 1=GT 2=EQ   arg2 ? arg0 : arg1
 // ISEL auxInt values 4=GE 5=LE 6=NE   arg2 ? arg1 : arg0
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 384f2e8..c3421da 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -785,7 +785,7 @@
   => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
 
 // Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
 
 // Use sign/zero extend instead of RISBGZ.
 (RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
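
The ARM, ARM64, PPC64, and S390X rule files above all make the same change: commutative-comparison canonicalization no longer orders operands by raw value ID, because IDs are not stable across rewrite passes, and an unstable order can make the rewriter flip a pair back and forth indefinitely. canonLessThan compares more stable keys first; a sketch consistent with these call sites (the exact body lives in rewrite.go and is an assumption here):

    // canonLessThan reports whether x should sort before y when
    // canonicalizing the operands of a commutative comparison.
    // Comparing Op and AuxInt before ID keeps the order stable even
    // when value IDs are reassigned between rewrite passes.
    func canonLessThan(x, y *Value) bool {
    	if x.Op != y.Op {
    		return x.Op < y.Op
    	}
    	if x.AuxInt != y.AuxInt {
    		return x.AuxInt < y.AuxInt
    	}
    	return x.ID < y.ID
    }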
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
index 9297ed8..b0f10d0 100644
--- a/src/cmd/compile/internal/ssa/gen/dec64.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -42,20 +42,20 @@
 		(Store {hi.Type} dst hi mem))
 
 // These are not enabled during decomposeBuiltin if late call expansion, but they are always enabled for softFloat
-(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
   (Int64Make
     (Arg <typ.Int32> {n} [off+4])
     (Arg <typ.UInt32> {n} [off]))
-(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")  =>
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")  =>
   (Int64Make
     (Arg <typ.UInt32> {n} [off+4])
     (Arg <typ.UInt32> {n} [off]))
 
-(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
   (Int64Make
     (Arg <typ.Int32> {n} [off])
     (Arg <typ.UInt32> {n} [off+4]))
-(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
   (Int64Make
     (Arg <typ.UInt32> {n} [off])
     (Arg <typ.UInt32> {n} [off+4]))
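
In dec64.rules, late call expansion is now unconditional, so the go116lateCallExpansion gate drops out and only the pass-name check remains. The rules themselves split a 64-bit integer Arg into two 32-bit halves whose offsets depend on endianness; a standalone little-endian illustration of the reassembly the (Int64Make ...) pattern describes:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// Little-endian signed case:
    	// (Int64Make (Arg <Int32> [off+4]) (Arg <UInt32> [off]))
    	// i.e. the high half lives at off+4, the low half at off.
    	var slot [8]byte
    	v := int64(-3)
    	binary.LittleEndian.PutUint64(slot[:], uint64(v))
    	lo := binary.LittleEndian.Uint32(slot[0:4]) // Arg <UInt32> [off]
    	hi := binary.LittleEndian.Uint32(slot[4:8]) // Arg <Int32>  [off+4]
    	got := int64(int32(hi))<<32 | int64(lo)     // Int64Make hi lo
    	fmt.Println(got == v)                       // true
    }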
diff --git a/src/cmd/compile/internal/ssa/gen/decArgs.rules b/src/cmd/compile/internal/ssa/gen/decArgs.rules
deleted file mode 100644
index 1c9a0bb..0000000
--- a/src/cmd/compile/internal/ssa/gen/decArgs.rules
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Decompose compound argument values
-// Do this early to simplify tracking names for debugging.
-
-(Arg {n} [off]) && v.Type.IsString() =>
-  (StringMake
-    (Arg <typ.BytePtr> {n} [off])
-    (Arg <typ.Int> {n} [off+int32(config.PtrSize)]))
-
-(Arg {n} [off]) && v.Type.IsSlice() =>
-  (SliceMake
-    (Arg <v.Type.Elem().PtrTo()> {n} [off])
-    (Arg <typ.Int> {n} [off+int32(config.PtrSize)])
-    (Arg <typ.Int> {n} [off+2*int32(config.PtrSize)]))
-
-(Arg {n} [off]) && v.Type.IsInterface() =>
-  (IMake
-    (Arg <typ.Uintptr> {n} [off])
-    (Arg <typ.BytePtr> {n} [off+int32(config.PtrSize)]))
-
-(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 =>
-  (ComplexMake
-    (Arg <typ.Float64> {n} [off])
-    (Arg <typ.Float64> {n} [off+8]))
-
-(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 =>
-  (ComplexMake
-    (Arg <typ.Float32> {n} [off])
-    (Arg <typ.Float32> {n} [off+4]))
-
-(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
-  (StructMake0)
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
-  (StructMake1
-    (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
-  (StructMake2
-    (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
-    (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
-  (StructMake3
-    (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
-    (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))])
-    (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
-  (StructMake4
-    (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
-    (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))])
-    (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))])
-    (Arg <t.FieldType(3)> {n} [off+int32(t.FieldOff(3))]))
-
-(Arg <t>) && t.IsArray() && t.NumElem() == 0 =>
-  (ArrayMake0)
-(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
-  (ArrayMake1 (Arg <t.Elem()> {n} [off]))
diff --git a/src/cmd/compile/internal/ssa/gen/decArgsOps.go b/src/cmd/compile/internal/ssa/gen/decArgsOps.go
deleted file mode 100644
index b73d9d3..0000000
--- a/src/cmd/compile/internal/ssa/gen/decArgsOps.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-var decArgsOps = []opData{}
-
-var decArgsBlocks = []blockData{}
-
-func init() {
-	archs = append(archs, arch{
-		name:    "decArgs",
-		ops:     decArgsOps,
-		blocks:  decArgsBlocks,
-		generic: true,
-	})
-}
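
The deleted decArgs pass decomposed compound arguments (strings, slices, interfaces, complex values, small structs and arrays) into word-sized pieces early in compilation; with late call expansion always on, that decomposition falls to the expand calls pass instead, so decArgs and its rules are retired. For reference, the layout the deleted (StringMake ...) rule relied on is the ordinary two-word string header:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	s := "hello"
    	// A string argument at offset off is a pointer word at off and
    	// a length word at off+PtrSize: the two pieces the deleted rule
    	// rebuilt with (StringMake (Arg <BytePtr> {n} [off])
    	//                          (Arg <Int> {n} [off+PtrSize])).
    	hdr := (*struct {
    		data unsafe.Pointer
    		len  int
    	})(unsafe.Pointer(&s))
    	fmt.Println(hdr.len) // 5
    }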
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index aaf9101..6388aab 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -582,6 +582,7 @@
 			"math",
 			"cmd/internal/obj",
 			"cmd/internal/objabi",
+			"cmd/compile/internal/base",
 			"cmd/compile/internal/types",
 		}, n.Arch.imports...) {
 			fmt.Fprintf(w, "import %q\n", path)
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index a333982..4cd0ac8 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"fmt"
 )
@@ -59,7 +60,7 @@
 //                           { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
 //                           parent = &{N: s, Type: string}
 type LocalSlot struct {
-	N    GCNode      // an ONAME *gc.Node representing a stack location.
+	N    *ir.Name    // an ONAME *ir.Name representing a stack location.
 	Type *types.Type // type of slot
 	Off  int64       // offset of slot in N
 
@@ -86,3 +87,29 @@
 	}
 	return fmt.Sprintf("<%s,%s>", n0, n1)
 }
+
+type ArgPair struct {
+	reg *Register
+	mem LocalSlot
+}
+
+func (ap *ArgPair) Reg() int16 {
+	return ap.reg.objNum
+}
+
+func (ap *ArgPair) Type() *types.Type {
+	return ap.mem.Type
+}
+
+func (ap *ArgPair) Mem() *LocalSlot {
+	return &ap.mem
+}
+
+func (t ArgPair) String() string {
+	n0 := "nil"
+	if t.reg != nil {
+		n0 = t.reg.String()
+	}
+	n1 := t.mem.String()
+	return fmt.Sprintf("<%s,%s>", n0, n1)
+}
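
location.go continues the GCNode-to-*ir.Name migration (nilcheck.go below does the same for VarDef aux values) and introduces ArgPair, which couples a register with the stack slot an argument would otherwise occupy: groundwork for register-based argument passing, where a value can need both a register home and a spill location. A standalone mimic of its shape and String format (these names are illustrative, not compiler types):

    package main

    import "fmt"

    // argPair mimics ssa.ArgPair: the register half may be absent,
    // the memory half is always a concrete slot.
    type register struct{ name string }
    type localSlot struct{ off int64 }

    type argPair struct {
    	reg *register
    	mem localSlot
    }

    func (ap argPair) String() string {
    	n0 := "nil"
    	if ap.reg != nil {
    		n0 = ap.reg.name
    	}
    	return fmt.Sprintf("<%s,slot+%d>", n0, ap.mem.off)
    }

    func main() {
    	fmt.Println(argPair{reg: &register{"AX"}, mem: localSlot{8}}) // <AX,slot+8>
    	fmt.Println(argPair{mem: localSlot{16}})                      // <nil,slot+16>
    }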
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index d1bad52..bae5065 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
 )
@@ -235,7 +236,7 @@
 				continue
 			}
 			if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
-				if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
+				if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) {
 					// These ops don't really change memory.
 					continue
 					// Note: OpVarDef requires that the defined variable not have pointers.
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
index 16d9461..2e32afe 100644
--- a/src/cmd/compile/internal/ssa/nilcheck_test.go
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -212,7 +212,7 @@
 			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
 			Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
 			Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
-			Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, "b", "sp", "mem"),
+			Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, StringToAux("b"), "sp", "mem"),
 			Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"),
 			If("bool1", "b1", "b2")),
 		Bloc("b1",
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
index f4e62b8..2a9c8e4 100644
--- a/src/cmd/compile/internal/ssa/numberlines.go
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -5,7 +5,6 @@
 package ssa
 
 import (
-	"cmd/internal/obj"
 	"cmd/internal/src"
 	"fmt"
 	"sort"
@@ -23,15 +22,6 @@
 	return false
 }
 
-// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF.
-// The attributes from some opcodes are lost in translation.
-// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC.
-// Should try to fix it there.
-func LosesStmtMark(as obj.As) bool {
-	// is_stmt does not work for these; it DOES for ANOP even though that generates no code.
-	return as == obj.APCDATA || as == obj.AFUNCDATA
-}
-
 // nextGoodStatementIndex returns an index at i or later that is believed
 // to be a good place to start the statement for b.  This decision is
 // based on v's Op, the possibility of a better later operation, and
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index d167335..f41d014 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"fmt"
@@ -70,7 +71,8 @@
 
 type Param struct {
 	Type   *types.Type
-	Offset int32 // TODO someday this will be a register
+	Offset int32    // Offset of Param if not in a register.
+	Name   *ir.Name // For OwnAux, need to prepend stores with Vardefs
 }
 
 type AuxCall struct {
@@ -197,6 +199,14 @@
 	return &AuxCall{Fn: nil, args: args, results: results}
 }
 
+func (*AuxCall) CanBeAnSSAAux() {}
+
+// OwnAuxCall returns a function's own AuxCall
+func OwnAuxCall(fn *obj.LSym, args []Param, results []Param) *AuxCall {
+	// TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate.
+	return &AuxCall{Fn: fn, args: args, results: results}
+}
+
 const (
 	auxNone         auxType = iota
 	auxBool                 // auxInt is 0/1 for false/true
@@ -247,8 +257,8 @@
 //  - a *obj.LSym, for an offset from SB (the global pointer)
 //  - nil, for no offset
 type Sym interface {
-	String() string
 	CanBeAnSSASym()
+	CanBeAnSSAAux()
 }
 
 // A ValAndOff is used by the several opcodes. It holds
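
In op.go, Sym drops String (concrete symbols implement fmt.Stringer anyway) and instead requires CanBeAnSSAAux, so anything usable as a Sym is automatically a legal aux value; AuxCall opts in the same way, Param gains a Name so stores through OwnAux can be preceded by VarDefs, and OwnAuxCall describes a function's own signature. The marker methods are compile-time proofs, not behavior; a sketch with an illustrative type standing in for *obj.LSym or *ir.Name:

    package main

    type Sym interface {
    	CanBeAnSSASym()
    	CanBeAnSSAAux()
    }

    // mySym is illustrative only: two empty marker methods are all a
    // type needs to be stored wherever a Sym (or aux value) goes.
    type mySym struct{ name string }

    func (*mySym) CanBeAnSSASym() {}
    func (*mySym) CanBeAnSSAAux() {}

    var _ Sym = (*mySym)(nil) // compile-time check

    func main() {}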
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index e590f6b..ccfed93 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -970,6 +970,7 @@
 	OpAMD64MOVQstore
 	OpAMD64MOVOload
 	OpAMD64MOVOstore
+	OpAMD64MOVOstorezero
 	OpAMD64MOVBloadidx1
 	OpAMD64MOVWloadidx1
 	OpAMD64MOVWloadidx2
@@ -998,7 +999,6 @@
 	OpAMD64MOVQstoreconstidx1
 	OpAMD64MOVQstoreconstidx8
 	OpAMD64DUFFZERO
-	OpAMD64MOVOconst
 	OpAMD64REPSTOSQ
 	OpAMD64CALLstatic
 	OpAMD64CALLclosure
@@ -6162,11 +6162,11 @@
 		asm:          x86.AADDSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
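
Everything from here down in opGen.go is regenerated table data: each decimal is a regMask, and the trailing comment is that mask decoded bit by bit against regNamesAMD64, with bit 14 now printing as g and X15 gone from the FP masks. A small decoder that reproduces the comments (register order taken from the comments themselves):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func regList(mask uint64) string {
    	names := []string{
    		"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI",
    		"R8", "R9", "R10", "R11", "R12", "R13", "g", "R15",
    		"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
    		"X8", "X9", "X10", "X11", "X12", "X13", "X14", "X15",
    		"SB",
    	}
    	var regs []string
    	for i, n := range names {
    		if mask&(1<<uint(i)) != 0 {
    			regs = append(regs, n)
    		}
    	}
    	return strings.Join(regs, " ")
    }

    func main() {
    	fmt.Println(regList(2147418112)) // X0 X1 ... X14
    	fmt.Println(regList(4295016447)) // AX CX ... R13 R15 SB
    }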
@@ -6178,11 +6178,11 @@
 		asm:          x86.AADDSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6193,11 +6193,11 @@
 		asm:          x86.ASUBSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6208,11 +6208,11 @@
 		asm:          x86.ASUBSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6224,11 +6224,11 @@
 		asm:          x86.AMULSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6240,11 +6240,11 @@
 		asm:          x86.AMULSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6255,11 +6255,11 @@
 		asm:          x86.ADIVSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6270,11 +6270,11 @@
 		asm:          x86.ADIVSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6287,10 +6287,10 @@
 		asm:            x86.AMOVSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6303,10 +6303,10 @@
 		asm:            x86.AMOVSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6318,7 +6318,7 @@
 		asm:               x86.AMOVSS,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6330,7 +6330,7 @@
 		asm:               x86.AMOVSD,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6343,11 +6343,11 @@
 		scale:     1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6360,11 +6360,11 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6377,11 +6377,11 @@
 		scale:     1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6394,11 +6394,11 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6411,8 +6411,8 @@
 		asm:            x86.AMOVSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -6425,8 +6425,8 @@
 		asm:            x86.AMOVSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -6439,9 +6439,9 @@
 		scale:     1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -6454,9 +6454,9 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -6469,9 +6469,9 @@
 		scale:     1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -6484,9 +6484,9 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -6500,11 +6500,11 @@
 		asm:            x86.AADDSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6518,11 +6518,11 @@
 		asm:            x86.AADDSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6536,11 +6536,11 @@
 		asm:            x86.ASUBSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6554,11 +6554,11 @@
 		asm:            x86.ASUBSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6572,11 +6572,11 @@
 		asm:            x86.AMULSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6590,11 +6590,11 @@
 		asm:            x86.AMULSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6608,11 +6608,11 @@
 		asm:            x86.ADIVSS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6626,11 +6626,11 @@
 		asm:            x86.ADIVSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6644,12 +6644,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6663,12 +6663,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6682,12 +6682,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6701,12 +6701,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6720,12 +6720,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6739,12 +6739,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6758,12 +6758,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6777,12 +6777,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6796,12 +6796,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6815,12 +6815,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6834,12 +6834,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6853,12 +6853,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6872,12 +6872,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6891,12 +6891,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6910,12 +6910,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6929,12 +6929,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-				{2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -6946,11 +6946,11 @@
 		asm:          x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -6962,11 +6962,11 @@
 		asm:          x86.AADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -6978,10 +6978,10 @@
 		asm:          x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -6993,10 +6993,10 @@
 		asm:          x86.AADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7010,7 +7010,7 @@
 		asm:            x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7024,7 +7024,7 @@
 		asm:            x86.AADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7036,11 +7036,11 @@
 		asm:          x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7052,11 +7052,11 @@
 		asm:          x86.ASUBL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7069,10 +7069,10 @@
 		asm:          x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7085,10 +7085,10 @@
 		asm:          x86.ASUBL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7101,11 +7101,11 @@
 		asm:          x86.AIMULQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7118,11 +7118,11 @@
 		asm:          x86.AIMULL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7134,10 +7134,10 @@
 		asm:          x86.AIMUL3Q,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7149,10 +7149,10 @@
 		asm:          x86.AIMUL3L,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7165,7 +7165,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 4, // DX
 			outputs: []outputInfo{
@@ -7183,7 +7183,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 4, // DX
 			outputs: []outputInfo{
@@ -7200,7 +7200,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
@@ -7216,7 +7216,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
@@ -7232,7 +7232,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
@@ -7248,7 +7248,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
@@ -7264,11 +7264,11 @@
 		clobberFlags: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7281,7 +7281,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7298,7 +7298,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7315,7 +7315,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7331,7 +7331,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7347,7 +7347,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7363,7 +7363,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7378,11 +7378,11 @@
 		asm:          x86.ANEGL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7394,12 +7394,12 @@
 		asm:          x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7411,12 +7411,12 @@
 		asm:          x86.AADCQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7428,11 +7428,11 @@
 		asm:          x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7444,11 +7444,11 @@
 		asm:          x86.AADCQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7459,12 +7459,12 @@
 		asm:          x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7475,12 +7475,12 @@
 		asm:          x86.ASBBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7492,11 +7492,11 @@
 		asm:          x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7508,11 +7508,11 @@
 		asm:          x86.ASBBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7525,7 +7525,7 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 4}, // DX
@@ -7542,7 +7542,7 @@
 			inputs: []inputInfo{
 				{0, 4},     // DX
 				{1, 1},     // AX
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{0, 1}, // AX
@@ -7559,11 +7559,11 @@
 		asm:          x86.AANDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7576,11 +7576,11 @@
 		asm:          x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7593,10 +7593,10 @@
 		asm:          x86.AANDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7609,10 +7609,10 @@
 		asm:          x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7626,7 +7626,7 @@
 		asm:            x86.AANDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7640,7 +7640,7 @@
 		asm:            x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7653,11 +7653,11 @@
 		asm:          x86.AORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7670,11 +7670,11 @@
 		asm:          x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7687,10 +7687,10 @@
 		asm:          x86.AORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7703,10 +7703,10 @@
 		asm:          x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7720,7 +7720,7 @@
 		asm:            x86.AORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7734,7 +7734,7 @@
 		asm:            x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7747,11 +7747,11 @@
 		asm:          x86.AXORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7764,11 +7764,11 @@
 		asm:          x86.AXORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7781,10 +7781,10 @@
 		asm:          x86.AXORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7797,10 +7797,10 @@
 		asm:          x86.AXORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7814,7 +7814,7 @@
 		asm:            x86.AXORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7828,7 +7828,7 @@
 		asm:            x86.AXORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7838,8 +7838,8 @@
 		asm:    x86.ACMPQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7849,8 +7849,8 @@
 		asm:    x86.ACMPL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7860,8 +7860,8 @@
 		asm:    x86.ACMPW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7871,8 +7871,8 @@
 		asm:    x86.ACMPB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7883,7 +7883,7 @@
 		asm:     x86.ACMPQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7894,7 +7894,7 @@
 		asm:     x86.ACMPL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7905,7 +7905,7 @@
 		asm:     x86.ACMPW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7916,7 +7916,7 @@
 		asm:     x86.ACMPB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -7929,8 +7929,8 @@
 		asm:            x86.ACMPQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7943,8 +7943,8 @@
 		asm:            x86.ACMPL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7957,8 +7957,8 @@
 		asm:            x86.ACMPW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7971,8 +7971,8 @@
 		asm:            x86.ACMPB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7985,7 +7985,7 @@
 		asm:            x86.ACMPQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -7998,7 +7998,7 @@
 		asm:            x86.ACMPL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8011,7 +8011,7 @@
 		asm:            x86.ACMPW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8024,7 +8024,7 @@
 		asm:            x86.ACMPB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8037,9 +8037,9 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8053,9 +8053,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8068,9 +8068,9 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8084,9 +8084,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8099,9 +8099,9 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8115,9 +8115,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8131,9 +8131,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8146,8 +8146,8 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8161,8 +8161,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8175,8 +8175,8 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8190,8 +8190,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8204,8 +8204,8 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8219,8 +8219,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8234,8 +8234,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8245,8 +8245,8 @@
 		asm:    x86.AUCOMISS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -8256,8 +8256,8 @@
 		asm:    x86.AUCOMISD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -8267,8 +8267,8 @@
 		asm:    x86.ABTL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8278,8 +8278,8 @@
 		asm:    x86.ABTQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8291,11 +8291,11 @@
 		asm:          x86.ABTCL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8307,11 +8307,11 @@
 		asm:          x86.ABTCQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8323,11 +8323,11 @@
 		asm:          x86.ABTRL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8339,11 +8339,11 @@
 		asm:          x86.ABTRQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8355,11 +8355,11 @@
 		asm:          x86.ABTSL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8371,11 +8371,11 @@
 		asm:          x86.ABTSQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8386,7 +8386,7 @@
 		asm:     x86.ABTL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8397,7 +8397,7 @@
 		asm:     x86.ABTQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8410,10 +8410,10 @@
 		asm:          x86.ABTCL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8426,10 +8426,10 @@
 		asm:          x86.ABTCQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8442,10 +8442,10 @@
 		asm:          x86.ABTRL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8458,10 +8458,10 @@
 		asm:          x86.ABTRQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8474,10 +8474,10 @@
 		asm:          x86.ABTSL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8490,10 +8490,10 @@
 		asm:          x86.ABTSQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8507,8 +8507,8 @@
 		asm:            x86.ABTCQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8522,8 +8522,8 @@
 		asm:            x86.ABTCL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8537,8 +8537,8 @@
 		asm:            x86.ABTSQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8552,8 +8552,8 @@
 		asm:            x86.ABTSL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8567,8 +8567,8 @@
 		asm:            x86.ABTRQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8582,8 +8582,8 @@
 		asm:            x86.ABTRL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8597,7 +8597,7 @@
 		asm:            x86.ABTCQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8611,7 +8611,7 @@
 		asm:            x86.ABTCL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8625,7 +8625,7 @@
 		asm:            x86.ABTSQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8639,7 +8639,7 @@
 		asm:            x86.ABTSL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8653,7 +8653,7 @@
 		asm:            x86.ABTRQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8667,7 +8667,7 @@
 		asm:            x86.ABTRL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -8678,8 +8678,8 @@
 		asm:         x86.ATESTQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8690,8 +8690,8 @@
 		asm:         x86.ATESTL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8702,8 +8702,8 @@
 		asm:         x86.ATESTW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8714,8 +8714,8 @@
 		asm:         x86.ATESTB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8726,7 +8726,7 @@
 		asm:     x86.ATESTQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8737,7 +8737,7 @@
 		asm:     x86.ATESTL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8748,7 +8748,7 @@
 		asm:     x86.ATESTW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8759,7 +8759,7 @@
 		asm:     x86.ATESTB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8772,10 +8772,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8788,10 +8788,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8804,10 +8804,10 @@
 		asm:          x86.ASHLQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8820,10 +8820,10 @@
 		asm:          x86.ASHLL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8836,10 +8836,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8852,10 +8852,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8868,10 +8868,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8884,10 +8884,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8900,10 +8900,10 @@
 		asm:          x86.ASHRQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8916,10 +8916,10 @@
 		asm:          x86.ASHRL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8932,10 +8932,10 @@
 		asm:          x86.ASHRW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8948,10 +8948,10 @@
 		asm:          x86.ASHRB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8964,10 +8964,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8980,10 +8980,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -8996,10 +8996,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9012,10 +9012,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9028,10 +9028,10 @@
 		asm:          x86.ASARQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9044,10 +9044,10 @@
 		asm:          x86.ASARL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9060,10 +9060,10 @@
 		asm:          x86.ASARW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9076,10 +9076,10 @@
 		asm:          x86.ASARB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9092,10 +9092,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9108,10 +9108,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9124,10 +9124,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9140,10 +9140,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9156,10 +9156,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9172,10 +9172,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9188,10 +9188,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9204,10 +9204,10 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9220,10 +9220,10 @@
 		asm:          x86.AROLQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9236,10 +9236,10 @@
 		asm:          x86.AROLL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9252,10 +9252,10 @@
 		asm:          x86.AROLW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9268,10 +9268,10 @@
 		asm:          x86.AROLB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9286,11 +9286,11 @@
 		asm:            x86.AADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9305,11 +9305,11 @@
 		asm:            x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9324,11 +9324,11 @@
 		asm:            x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9343,11 +9343,11 @@
 		asm:            x86.ASUBL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9362,11 +9362,11 @@
 		asm:            x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9381,11 +9381,11 @@
 		asm:            x86.AANDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9400,11 +9400,11 @@
 		asm:            x86.AORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9419,11 +9419,11 @@
 		asm:            x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9438,11 +9438,11 @@
 		asm:            x86.AXORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9457,11 +9457,11 @@
 		asm:            x86.AXORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9476,12 +9476,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9496,12 +9496,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9516,12 +9516,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9536,12 +9536,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9556,12 +9556,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9576,12 +9576,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9596,12 +9596,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9616,12 +9616,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9636,12 +9636,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9656,12 +9656,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9676,12 +9676,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9696,12 +9696,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9716,12 +9716,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9736,12 +9736,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9756,12 +9756,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9776,12 +9776,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9796,12 +9796,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9816,12 +9816,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9836,12 +9836,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9856,12 +9856,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9876,12 +9876,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9896,12 +9896,12 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9916,12 +9916,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9936,12 +9936,12 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9956,12 +9956,12 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -9975,8 +9975,8 @@
 		asm:            x86.AADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -9990,8 +9990,8 @@
 		asm:            x86.ASUBQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10005,8 +10005,8 @@
 		asm:            x86.AANDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10020,8 +10020,8 @@
 		asm:            x86.AORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10035,8 +10035,8 @@
 		asm:            x86.AXORQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10050,8 +10050,8 @@
 		asm:            x86.AADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10065,8 +10065,8 @@
 		asm:            x86.ASUBL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10080,8 +10080,8 @@
 		asm:            x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10095,8 +10095,8 @@
 		asm:            x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10110,8 +10110,8 @@
 		asm:            x86.AXORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10125,9 +10125,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10141,9 +10141,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10157,9 +10157,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10173,9 +10173,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10189,9 +10189,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10205,9 +10205,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10221,9 +10221,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10237,9 +10237,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10253,9 +10253,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10269,9 +10269,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10285,9 +10285,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10301,9 +10301,9 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10317,9 +10317,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10333,9 +10333,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10349,9 +10349,9 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10365,9 +10365,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10381,9 +10381,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10397,9 +10397,9 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10413,9 +10413,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10429,9 +10429,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10445,9 +10445,9 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10461,9 +10461,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10477,9 +10477,9 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10493,9 +10493,9 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10509,9 +10509,9 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10525,8 +10525,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10540,8 +10540,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10555,8 +10555,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10570,8 +10570,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10585,8 +10585,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10600,8 +10600,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10615,8 +10615,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10630,8 +10630,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10645,8 +10645,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10660,8 +10660,8 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10675,8 +10675,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10690,8 +10690,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10705,8 +10705,8 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10720,8 +10720,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10735,8 +10735,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10750,8 +10750,8 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10765,8 +10765,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10780,8 +10780,8 @@
 		scale:        1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10795,8 +10795,8 @@
 		scale:        4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10810,8 +10810,8 @@
 		scale:        8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -10823,10 +10823,10 @@
 		asm:          x86.ANEGQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10838,10 +10838,10 @@
 		asm:          x86.ANEGL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10853,10 +10853,10 @@
 		asm:          x86.ANOTQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10868,10 +10868,10 @@
 		asm:          x86.ANOTL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10881,11 +10881,11 @@
 		asm:    x86.ABSFQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10896,10 +10896,10 @@
 		asm:          x86.ABSFL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10909,11 +10909,11 @@
 		asm:    x86.ABSRQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10924,10 +10924,10 @@
 		asm:          x86.ABSRL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10938,11 +10938,11 @@
 		asm:          x86.ACMOVQEQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10953,11 +10953,11 @@
 		asm:          x86.ACMOVQNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10968,11 +10968,11 @@
 		asm:          x86.ACMOVQLT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10983,11 +10983,11 @@
 		asm:          x86.ACMOVQGT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -10998,11 +10998,11 @@
 		asm:          x86.ACMOVQLE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11013,11 +11013,11 @@
 		asm:          x86.ACMOVQGE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11028,11 +11028,11 @@
 		asm:          x86.ACMOVQLS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11043,11 +11043,11 @@
 		asm:          x86.ACMOVQHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11058,11 +11058,11 @@
 		asm:          x86.ACMOVQCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11073,11 +11073,11 @@
 		asm:          x86.ACMOVQCS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11088,11 +11088,11 @@
 		asm:          x86.ACMOVLEQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11103,11 +11103,11 @@
 		asm:          x86.ACMOVLNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11118,11 +11118,11 @@
 		asm:          x86.ACMOVLLT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11133,11 +11133,11 @@
 		asm:          x86.ACMOVLGT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11148,11 +11148,11 @@
 		asm:          x86.ACMOVLLE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11163,11 +11163,11 @@
 		asm:          x86.ACMOVLGE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11178,11 +11178,11 @@
 		asm:          x86.ACMOVLLS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11193,11 +11193,11 @@
 		asm:          x86.ACMOVLHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11208,11 +11208,11 @@
 		asm:          x86.ACMOVLCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11223,11 +11223,11 @@
 		asm:          x86.ACMOVLCS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11238,11 +11238,11 @@
 		asm:          x86.ACMOVWEQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11253,11 +11253,11 @@
 		asm:          x86.ACMOVWNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11268,11 +11268,11 @@
 		asm:          x86.ACMOVWLT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11283,11 +11283,11 @@
 		asm:          x86.ACMOVWGT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11298,11 +11298,11 @@
 		asm:          x86.ACMOVWLE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11313,11 +11313,11 @@
 		asm:          x86.ACMOVWGE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11328,11 +11328,11 @@
 		asm:          x86.ACMOVWLS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11343,11 +11343,11 @@
 		asm:          x86.ACMOVWHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11358,11 +11358,11 @@
 		asm:          x86.ACMOVWCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11373,11 +11373,11 @@
 		asm:          x86.ACMOVWCS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11388,12 +11388,12 @@
 		asm:          x86.ACMOVQNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11404,11 +11404,11 @@
 		asm:          x86.ACMOVQNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11419,11 +11419,11 @@
 		asm:          x86.ACMOVQHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11434,11 +11434,11 @@
 		asm:          x86.ACMOVQCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11449,12 +11449,12 @@
 		asm:          x86.ACMOVLNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11465,11 +11465,11 @@
 		asm:          x86.ACMOVLNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11480,11 +11480,11 @@
 		asm:          x86.ACMOVLHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11495,11 +11495,11 @@
 		asm:          x86.ACMOVLCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11510,12 +11510,12 @@
 		asm:          x86.ACMOVWNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11526,11 +11526,11 @@
 		asm:          x86.ACMOVWNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11541,11 +11541,11 @@
 		asm:          x86.ACMOVWHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11556,11 +11556,11 @@
 		asm:          x86.ACMOVWCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11572,10 +11572,10 @@
 		asm:          x86.ABSWAPQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11587,10 +11587,10 @@
 		asm:          x86.ABSWAPL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11601,10 +11601,10 @@
 		asm:          x86.APOPCNTQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11615,10 +11615,10 @@
 		asm:          x86.APOPCNTL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11628,10 +11628,10 @@
 		asm:    x86.ASQRTSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -11642,10 +11642,10 @@
 		asm:     x86.AROUNDSD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -11656,12 +11656,12 @@
 		asm:          x86.AVFMADD231SD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -11671,7 +11671,7 @@
 		asm:    x86.ASBBQ,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11681,7 +11681,7 @@
 		asm:    x86.ASBBL,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11691,7 +11691,7 @@
 		asm:    x86.ASETEQ,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11701,7 +11701,7 @@
 		asm:    x86.ASETNE,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11711,7 +11711,7 @@
 		asm:    x86.ASETLT,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11721,7 +11721,7 @@
 		asm:    x86.ASETLE,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11731,7 +11731,7 @@
 		asm:    x86.ASETGT,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11741,7 +11741,7 @@
 		asm:    x86.ASETGE,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11751,7 +11751,7 @@
 		asm:    x86.ASETCS,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11761,7 +11761,7 @@
 		asm:    x86.ASETLS,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11771,7 +11771,7 @@
 		asm:    x86.ASETHI,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11781,7 +11781,7 @@
 		asm:    x86.ASETCC,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11791,7 +11791,7 @@
 		asm:    x86.ASETOS,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11804,7 +11804,7 @@
 		asm:            x86.ASETEQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11817,7 +11817,7 @@
 		asm:            x86.ASETNE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11830,7 +11830,7 @@
 		asm:            x86.ASETLT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11843,7 +11843,7 @@
 		asm:            x86.ASETLE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11856,7 +11856,7 @@
 		asm:            x86.ASETGT,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11869,7 +11869,7 @@
 		asm:            x86.ASETGE,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11882,7 +11882,7 @@
 		asm:            x86.ASETCS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11895,7 +11895,7 @@
 		asm:            x86.ASETLS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11908,7 +11908,7 @@
 		asm:            x86.ASETHI,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11921,7 +11921,7 @@
 		asm:            x86.ASETCC,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -11933,7 +11933,7 @@
 		reg: regInfo{
 			clobbers: 1, // AX
 			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11945,7 +11945,7 @@
 		reg: regInfo{
 			clobbers: 1, // AX
 			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11955,7 +11955,7 @@
 		asm:    x86.ASETPC,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11965,7 +11965,7 @@
 		asm:    x86.ASETPS,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11975,7 +11975,7 @@
 		asm:    x86.ASETHI,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11985,7 +11985,7 @@
 		asm:    x86.ASETCC,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -11995,10 +11995,10 @@
 		asm:    x86.AMOVBQSX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12008,10 +12008,10 @@
 		asm:    x86.AMOVBLZX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12021,10 +12021,10 @@
 		asm:    x86.AMOVWQSX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12034,10 +12034,10 @@
 		asm:    x86.AMOVWLZX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12047,10 +12047,10 @@
 		asm:    x86.AMOVLQSX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12060,10 +12060,10 @@
 		asm:    x86.AMOVL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12075,7 +12075,7 @@
 		asm:               x86.AMOVL,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12087,7 +12087,7 @@
 		asm:               x86.AMOVQ,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12097,10 +12097,10 @@
 		asm:    x86.ACVTTSD2SL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12110,10 +12110,10 @@
 		asm:    x86.ACVTTSD2SQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12123,10 +12123,10 @@
 		asm:    x86.ACVTTSS2SL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12136,10 +12136,10 @@
 		asm:    x86.ACVTTSS2SQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12149,10 +12149,10 @@
 		asm:    x86.ACVTSL2SS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12162,10 +12162,10 @@
 		asm:    x86.ACVTSL2SD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12175,10 +12175,10 @@
 		asm:    x86.ACVTSQ2SS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12188,10 +12188,10 @@
 		asm:    x86.ACVTSQ2SD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12201,10 +12201,10 @@
 		asm:    x86.ACVTSD2SS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12214,10 +12214,10 @@
 		asm:    x86.ACVTSS2SD,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12226,10 +12226,10 @@
 		argLen: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12238,10 +12238,10 @@
 		argLen: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12250,10 +12250,10 @@
 		argLen: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12262,10 +12262,10 @@
 		argLen: 1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12277,11 +12277,11 @@
 		asm:          x86.APXOR,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12294,10 +12294,10 @@
 		asm:               x86.ALEAQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12310,10 +12310,10 @@
 		asm:               x86.ALEAL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12326,10 +12326,10 @@
 		asm:               x86.ALEAW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12343,11 +12343,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12361,11 +12361,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12379,11 +12379,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12396,11 +12396,11 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12413,11 +12413,11 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12430,11 +12430,11 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12447,11 +12447,11 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12464,11 +12464,11 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12481,11 +12481,11 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12498,11 +12498,11 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12515,11 +12515,11 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12532,11 +12532,11 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12549,10 +12549,10 @@
 		asm:            x86.AMOVBLZX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12565,10 +12565,10 @@
 		asm:            x86.AMOVBQSX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12581,10 +12581,10 @@
 		asm:            x86.AMOVWLZX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12597,10 +12597,10 @@
 		asm:            x86.AMOVWQSX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12613,10 +12613,10 @@
 		asm:            x86.AMOVL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12629,10 +12629,10 @@
 		asm:            x86.AMOVLQSX,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12645,10 +12645,10 @@
 		asm:            x86.AMOVQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12661,8 +12661,8 @@
 		asm:            x86.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12675,8 +12675,8 @@
 		asm:            x86.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12689,8 +12689,8 @@
 		asm:            x86.AMOVL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12703,8 +12703,8 @@
 		asm:            x86.AMOVQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12717,10 +12717,10 @@
 		asm:            x86.AMOVUPS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+				{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 			},
 		},
 	},
@@ -12733,8 +12733,21 @@
 		asm:            x86.AMOVUPS,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+			},
+		},
+	},
+	{
+		name:           "MOVOstorezero",
+		auxType:        auxSymOff,
+		argLen:         2,
+		faultOnNilArg0: true,
+		symEffect:      SymWrite,
+		asm:            x86.AMOVUPS,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
 			},
 		},
 	},
@@ -12748,11 +12761,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12766,11 +12779,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12783,11 +12796,11 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12801,11 +12814,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12818,11 +12831,11 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12835,11 +12848,11 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12853,11 +12866,11 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12870,11 +12883,11 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -12888,9 +12901,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12904,9 +12917,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12919,9 +12932,9 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12935,9 +12948,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12950,9 +12963,9 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12965,9 +12978,9 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12981,9 +12994,9 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -12996,9 +13009,9 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13011,7 +13024,7 @@
 		asm:            x86.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13024,7 +13037,7 @@
 		asm:            x86.AMOVW,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13037,7 +13050,7 @@
 		asm:            x86.AMOVL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13050,7 +13063,7 @@
 		asm:            x86.AMOVQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13064,8 +13077,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13079,8 +13092,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13093,8 +13106,8 @@
 		scale:     2,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13108,8 +13121,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13122,8 +13135,8 @@
 		scale:     4,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13137,8 +13150,8 @@
 		scale:       1,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13151,37 +13164,25 @@
 		scale:     8,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
 	{
 		name:           "DUFFZERO",
 		auxType:        auxInt64,
-		argLen:         3,
+		argLen:         2,
 		faultOnNilArg0: true,
 		unsafePoint:    true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 128},   // DI
-				{1, 65536}, // X0
+				{0, 128}, // DI
 			},
 			clobbers: 128, // DI
 		},
 	},
 	{
-		name:              "MOVOconst",
-		auxType:           auxInt128,
-		argLen:            0,
-		rematerializeable: true,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
 		name:           "REPSTOSQ",
 		argLen:         4,
 		faultOnNilArg0: true,
@@ -13201,7 +13202,7 @@
 		clobberFlags: true,
 		call:         true,
 		reg: regInfo{
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+			clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 		},
 	},
 	{
@@ -13213,9 +13214,9 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 4},     // DX
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+			clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 		},
 	},
 	{
@@ -13226,9 +13227,9 @@
 		call:         true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+			clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 		},
 	},
 	{
@@ -13271,7 +13272,7 @@
 		argLen: 1,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13291,7 +13292,7 @@
 		rematerializeable: true,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13301,7 +13302,7 @@
 		rematerializeable: true,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13313,7 +13314,7 @@
 		faultOnNilArg0: true,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13328,7 +13329,7 @@
 				{0, 128}, // DI
 				{1, 879}, // AX CX DX BX BP SI R8 R9
 			},
-			clobbers: 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+			clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
 		},
 	},
 	{
@@ -13339,7 +13340,7 @@
 		symEffect:         SymNone,
 		reg: regInfo{
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13413,10 +13414,10 @@
 		asm:            x86.AMOVB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13429,10 +13430,10 @@
 		asm:            x86.AMOVL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13445,10 +13446,10 @@
 		asm:            x86.AMOVQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13463,11 +13464,11 @@
 		asm:            x86.AXCHGB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13482,11 +13483,11 @@
 		asm:            x86.AXCHGL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13501,11 +13502,11 @@
 		asm:            x86.AXCHGQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13521,11 +13522,11 @@
 		asm:            x86.AXADDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13541,11 +13542,11 @@
 		asm:            x86.AXADDQ,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{0, 65519},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{0, 49135},      // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13571,13 +13572,13 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 1},     // AX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13593,13 +13594,13 @@
 		reg: regInfo{
 			inputs: []inputInfo{
 				{1, 1},     // AX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 			clobbers: 1, // AX
 			outputs: []outputInfo{
 				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+				{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
 			},
 		},
 	},
@@ -13614,8 +13615,8 @@
 		asm:            x86.AANDB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13630,8 +13631,8 @@
 		asm:            x86.AANDL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13646,8 +13647,8 @@
 		asm:            x86.AORB,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -13662,8 +13663,8 @@
 		asm:            x86.AORL,
 		reg: regInfo{
 			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+				{1, 49151},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
 			},
 		},
 	},
@@ -36172,8 +36173,8 @@
 	{11, x86.REG_R11, 10, "R11"},
 	{12, x86.REG_R12, 11, "R12"},
 	{13, x86.REG_R13, 12, "R13"},
-	{14, x86.REG_R14, 13, "R14"},
-	{15, x86.REG_R15, 14, "R15"},
+	{14, x86.REGG, -1, "g"},
+	{15, x86.REG_R15, 13, "R15"},
 	{16, x86.REG_X0, -1, "X0"},
 	{17, x86.REG_X1, -1, "X1"},
 	{18, x86.REG_X2, -1, "X2"},
@@ -36192,9 +36193,9 @@
 	{31, x86.REG_X15, -1, "X15"},
 	{32, 0, -1, "SB"},
 }
-var gpRegMaskAMD64 = regMask(65519)
-var fpRegMaskAMD64 = regMask(4294901760)
-var specialRegMaskAMD64 = regMask(0)
+var gpRegMaskAMD64 = regMask(49135)
+var fpRegMaskAMD64 = regMask(2147418112)
+var specialRegMaskAMD64 = regMask(2147483648)
 var framepointerRegAMD64 = int8(5)
 var linkRegAMD64 = int8(-1)
 var registersARM = [...]Register{
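
The register-mask constants recurring through the opGen.go hunks above are bit sets over the AMD64 register numbering in this table (AX=0 ... R15=15, X0=16 ... X15=31, SB=32). A minimal stand-alone sketch of how the new values fall out of that numbering — the constant names below are illustrative, not from the commit:

	package main

	import "fmt"

	func main() {
		const (
			sp  uint64 = 1 << 4  // SP, never allocatable
			r14 uint64 = 1 << 14 // R14, now aliased to g
			x15 uint64 = 1 << 31 // X15, now the special register (specialRegMask = 1<<31)
			sb  uint64 = 1 << 32 // SB pseudo-register
		)
		gp := uint64(0xFFFF) // AX..R15

		fmt.Println(gp &^ sp)                  // 65519: old gpRegMask
		fmt.Println(gp &^ sp &^ r14)           // 49135: new gpRegMask, g excluded
		fmt.Println(gp&^r14 | sb)              // 4295016447: address inputs without g, SB included
		fmt.Println(uint64(0xFFFF0000) &^ x15) // 2147418112: new fpRegMask, X15 excluded
		fmt.Println(uint64(0x7FFFFFFF) &^ sp)  // 2147483631: call clobbers (g, X0-X14 in; SP, X15 out)
	}
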
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index f5a2b3a..1e04b48 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -136,13 +136,13 @@
 // Most internal data structures are pre-allocated and flat, so for instance adding a
 // new relation does not cause any allocation. For performance reasons,
 // each node has only up to two outgoing edges (like a binary tree), so intermediate
-// "dummy" nodes are required to represent more than two relations. For instance,
+// "extra" nodes are required to represent more than two relations. For instance,
 // to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
 // following DAG:
 //
 //         A
 //        / \
-//       I  dummy
+//       I  extra
 //           /  \
 //          J    K
 //
@@ -223,7 +223,7 @@
 		po.setchr(i1, e2)
 		po.upush(undoSetChr, i1, 0)
 	} else {
-		// If n1 already has two children, add an intermediate dummy
+		// If n1 already has two children, add an intermediate extra
 		// node to record the relation correctly (without relating
 		// n2 to other existing nodes). Use a non-deterministic value
 		// to decide whether to append on the left or the right, to avoid
@@ -231,27 +231,27 @@
 		//
 		//      n1
 		//     /  \
-		//   i1l  dummy
+		//   i1l  extra
 		//        /   \
 		//      i1r   n2
 		//
-		dummy := po.newnode(nil)
+		extra := po.newnode(nil)
 		if (i1^i2)&1 != 0 { // non-deterministic
-			po.setchl(dummy, i1r)
-			po.setchr(dummy, e2)
-			po.setchr(i1, newedge(dummy, false))
+			po.setchl(extra, i1r)
+			po.setchr(extra, e2)
+			po.setchr(i1, newedge(extra, false))
 			po.upush(undoSetChr, i1, i1r)
 		} else {
-			po.setchl(dummy, i1l)
-			po.setchr(dummy, e2)
-			po.setchl(i1, newedge(dummy, false))
+			po.setchl(extra, i1l)
+			po.setchr(extra, e2)
+			po.setchl(i1, newedge(extra, false))
 			po.upush(undoSetChl, i1, i1l)
 		}
 	}
 }
 
 // newnode allocates a new node bound to SSA value n.
-// If n is nil, this is a dummy node (= only used internally).
+// If n is nil, this is an extra node (= only used internally).
 func (po *poset) newnode(n *Value) uint32 {
 	i := po.lastidx + 1
 	po.lastidx++
@@ -380,9 +380,9 @@
 
 	case higherptr != 0:
 		// Higher bound only. To record n < higher, we need
-		// a dummy root:
+		// an extra root:
 		//
-		//        dummy
+		//        extra
 		//        /   \
 		//      root   \
 		//       /      n
@@ -395,11 +395,11 @@
 		if r2 != po.roots[0] { // all constants should be in root #0
 			panic("constant not in root #0")
 		}
-		dummy := po.newnode(nil)
-		po.changeroot(r2, dummy)
-		po.upush(undoChangeRoot, dummy, newedge(r2, false))
-		po.addchild(dummy, r2, false)
-		po.addchild(dummy, i, false)
+		extra := po.newnode(nil)
+		po.changeroot(r2, extra)
+		po.upush(undoChangeRoot, extra, newedge(r2, false))
+		po.addchild(extra, r2, false)
+		po.addchild(extra, i, false)
 		po.addchild(i, i2, true)
 	}
 
@@ -612,7 +612,7 @@
 	panic("findroot didn't find any root")
 }
 
-// mergeroot merges two DAGs into one DAG by creating a new dummy root
+// mergeroot merges two DAGs into one DAG by creating a new extra root
 func (po *poset) mergeroot(r1, r2 uint32) uint32 {
 	// Root #0 is special as it contains all constants. Since mergeroot
 	// discards r2 as root and keeps r1, make sure that r2 is not root #0,
@@ -1004,7 +1004,7 @@
 	case !f1 && f2:
 		// n1 is not in any DAG but n2 is. If n2 is a root, we can put
 		// n1 in its place as a root; otherwise, we need to create a new
-		// dummy root to record the relation.
+		// extra root to record the relation.
 		i1 = po.newnode(n1)
 
 		if po.isroot(i2) {
@@ -1020,17 +1020,17 @@
 
 		// Re-parent as follows:
 		//
-		//                  dummy
+		//                  extra
 		//     r            /   \
 		//      \   ===>   r    i1
 		//      i2          \   /
 		//                    i2
 		//
-		dummy := po.newnode(nil)
-		po.changeroot(r, dummy)
-		po.upush(undoChangeRoot, dummy, newedge(r, false))
-		po.addchild(dummy, r, false)
-		po.addchild(dummy, i1, false)
+		extra := po.newnode(nil)
+		po.changeroot(r, extra)
+		po.upush(undoChangeRoot, extra, newedge(r, false))
+		po.addchild(extra, r, false)
+		po.addchild(extra, i1, false)
 		po.addchild(i1, i2, strict)
 
 	case f1 && f2:
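
The dummy-to-extra rename in poset.go is terminology only; the structure is unchanged: every node keeps at most two outgoing edges, and a third relation is recorded by splicing in a value-less helper node. A self-contained sketch of that splice, using a pointer-based node where the real poset uses flat index arrays and a pseudo-random left/right choice:

	// node has exactly two child slots, like the poset's binary DAG.
	type node struct{ l, r *node }

	// addChild records parent < child. If both slots are taken, the old
	// right child is pushed down into an extra node (bound to no SSA
	// value) so the new relation still fits in two edges per node.
	func addChild(parent, child *node) {
		switch {
		case parent.l == nil:
			parent.l = child
		case parent.r == nil:
			parent.r = child
		default:
			parent.r = &node{l: parent.r, r: child}
		}
	}
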
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 0339b07..8c25b1c 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -104,7 +104,7 @@
 // If b3 is the primary predecessor of b2, then we use x3 in b2 and
 // add a x4:CX->BX copy at the end of b4.
 // But the definition of x3 doesn't dominate b2.  We should really
-// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
+// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
 // SSA form. For now, we ignore this problem as remaining in strict
 // SSA form isn't needed after regalloc. We'll just leave the use
 // of x3 not dominated by the definition of x3, and the CX->BX copy
@@ -114,6 +114,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/objabi"
 	"cmd/internal/src"
@@ -782,9 +783,9 @@
 		return 0
 	}
 	if t.IsFloat() || t == types.TypeInt128 {
-		if t.Etype == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
+		if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
 			m = s.f.Config.fp32RegMask
-		} else if t.Etype == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
+		} else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
 			m = s.f.Config.fp64RegMask
 		} else {
 			m = s.f.Config.fpRegMask
@@ -1248,7 +1249,7 @@
 					// This forces later liveness analysis to make the
 					// value live at this point.
 					v.SetArg(0, s.makeSpill(a, b))
-				} else if _, ok := a.Aux.(GCNode); ok && vi.rematerializeable {
+				} else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable {
 					// Rematerializeable value with a gc.Node. This is the address of
 					// a stack object (e.g. an LEAQ). Keep the object live.
 					// Change it to VarLive, which is what plive expects for locals.
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index f5d1a78..e82aa84 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -521,6 +521,18 @@
 	return v.AuxInt != 0
 }
 
+// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
+// generated code as much as possible.
+func canonLessThan(x, y *Value) bool {
+	if x.Op != y.Op {
+		return x.Op < y.Op
+	}
+	if !x.Pos.SameFileAndLine(y.Pos) {
+		return x.Pos.Before(y.Pos)
+	}
+	return x.ID < y.ID
+}
+
 // truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
 // of the mantissa. It will panic if the truncation results in lost information.
 func truncate64Fto32F(f float64) float32 {
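
canonLessThan, defined above, replaces the bare x.ID > y.ID condition in the commuted-compare rules of every per-architecture rewrite file below. Ordering by (Op, source position, ID) rather than by ID alone keeps the canonical operand order stable when unrelated changes renumber values. A stand-alone analogue with plain ints standing in for Op, Pos, and ID:

	type val struct{ op, line, id int } // stand-ins for Op, Pos, ID

	func canonLess(x, y val) bool {
		if x.op != y.op {
			return x.op < y.op
		}
		if x.line != y.line {
			return x.line < y.line
		}
		return x.id < y.id
	}

With IDs alone, adding one unrelated value upstream could flip which operand of a CMP is canonical and churn the generated code; the composite key makes ties on ID much rarer.
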
@@ -678,43 +690,53 @@
 	return int64(o)
 }
 
-func auxToString(i interface{}) string {
-	return i.(string)
+// Aux is an interface to hold miscellaneous data in Blocks and Values.
+type Aux interface {
+	CanBeAnSSAAux()
 }
-func auxToSym(i interface{}) Sym {
+
+// stringAux wraps string values for use in Aux.
+type stringAux string
+
+func (stringAux) CanBeAnSSAAux() {}
+
+func auxToString(i Aux) string {
+	return string(i.(stringAux))
+}
+func auxToSym(i Aux) Sym {
 	// TODO: kind of a hack - allows nil interface through
 	s, _ := i.(Sym)
 	return s
 }
-func auxToType(i interface{}) *types.Type {
+func auxToType(i Aux) *types.Type {
 	return i.(*types.Type)
 }
-func auxToCall(i interface{}) *AuxCall {
+func auxToCall(i Aux) *AuxCall {
 	return i.(*AuxCall)
 }
-func auxToS390xCCMask(i interface{}) s390x.CCMask {
+func auxToS390xCCMask(i Aux) s390x.CCMask {
 	return i.(s390x.CCMask)
 }
-func auxToS390xRotateParams(i interface{}) s390x.RotateParams {
+func auxToS390xRotateParams(i Aux) s390x.RotateParams {
 	return i.(s390x.RotateParams)
 }
 
-func stringToAux(s string) interface{} {
+func StringToAux(s string) Aux {
+	return stringAux(s)
+}
+func symToAux(s Sym) Aux {
 	return s
 }
-func symToAux(s Sym) interface{} {
+func callToAux(s *AuxCall) Aux {
 	return s
 }
-func callToAux(s *AuxCall) interface{} {
-	return s
-}
-func typeToAux(t *types.Type) interface{} {
+func typeToAux(t *types.Type) Aux {
 	return t
 }
-func s390xCCMaskToAux(c s390x.CCMask) interface{} {
+func s390xCCMaskToAux(c s390x.CCMask) Aux {
 	return c
 }
-func s390xRotateParamsToAux(r s390x.RotateParams) interface{} {
+func s390xRotateParamsToAux(r s390x.RotateParams) Aux {
 	return r
 }
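
The move from interface{} to the named Aux interface is a marker-interface pattern: only types that opt in via CanBeAnSSAAux can occupy an aux slot, so an unintended payload becomes a compile-time error instead of a latent bug. A minimal illustration — Aux, stringAux, and CanBeAnSSAAux are from the change above; setAux and demo are hypothetical:

	type Aux interface{ CanBeAnSSAAux() }

	type stringAux string

	func (stringAux) CanBeAnSSAAux() {}

	// setAux stands in for assigning to a Value's or Block's Aux field.
	func setAux(a Aux) { _ = a }

	func demo() {
		setAux(stringAux("sym")) // ok: wrapped string opts in
		// setAux("sym")         // compile error: string does not implement Aux
	}
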
 
@@ -725,7 +747,7 @@
 
 // de-virtualize an InterCall
 // 'sym' is the symbol for the itab
-func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall {
+func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall {
 	f := v.Block.Func
 	n, ok := sym.(*obj.LSym)
 	if !ok {
@@ -748,7 +770,7 @@
 
 // de-virtualize an InterLECall
 // 'sym' is the symbol for the itab
-func devirtLESym(v *Value, aux interface{}, sym Sym, offset int64) *obj.LSym {
+func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
 	n, ok := sym.(*obj.LSym)
 	if !ok {
 		return nil
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 2acdccd..4e7fdb9 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -1785,12 +1785,12 @@
 		return true
 	}
 	// match: (CMPB x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPB y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(Op386InvertFlags)
@@ -2078,12 +2078,12 @@
 		return true
 	}
 	// match: (CMPL x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPL y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(Op386InvertFlags)
@@ -2386,12 +2386,12 @@
 		return true
 	}
 	// match: (CMPW x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPW y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(Op386InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 75d4ff7..03498c7 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -4,6 +4,7 @@
 package ssa
 
 import "math"
+import "cmd/compile/internal/base"
 import "cmd/compile/internal/types"
 
 func rewriteValueAMD64(v *Value) bool {
@@ -767,8 +768,7 @@
 		v.Op = OpAMD64LoweredGetClosurePtr
 		return true
 	case OpGetG:
-		v.Op = OpAMD64LoweredGetG
-		return true
+		return rewriteValueAMD64_OpGetG(v)
 	case OpHasCPUFeature:
 		return rewriteValueAMD64_OpHasCPUFeature(v)
 	case OpHmul32:
@@ -6749,12 +6749,12 @@
 		return true
 	}
 	// match: (CMPB x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPB y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpAMD64InvertFlags)
@@ -7135,12 +7135,12 @@
 		return true
 	}
 	// match: (CMPL x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPL y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpAMD64InvertFlags)
@@ -7544,12 +7544,12 @@
 		return true
 	}
 	// match: (CMPQ x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPQ y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpAMD64InvertFlags)
@@ -8106,12 +8106,12 @@
 		return true
 	}
 	// match: (CMPW x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPW y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpAMD64InvertFlags)
@@ -14226,7 +14226,7 @@
 	}
 	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
 	// cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)
-	// result: (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
+	// result: (MOVOstorezero [c2.Off32()] {s} p mem)
 	for {
 		c := auxIntToValAndOff(v.AuxInt)
 		s := auxToSym(v.Aux)
@@ -14243,12 +14243,10 @@
 		if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)) {
 			break
 		}
-		v.reset(OpAMD64MOVOstore)
+		v.reset(OpAMD64MOVOstorezero)
 		v.AuxInt = int32ToAuxInt(c2.Off32())
 		v.Aux = symToAux(s)
-		v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v0.AuxInt = int128ToAuxInt(0)
-		v.AddArg3(p, v0, mem)
+		v.AddArg2(p, mem)
 		return true
 	}
 	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
@@ -30128,6 +30126,22 @@
 		return true
 	}
 }
+func rewriteValueAMD64_OpGetG(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (GetG mem)
+	// cond: !base.Flag.ABIWrap
+	// result: (LoweredGetG mem)
+	for {
+		mem := v_0
+		if !(!base.Flag.ABIWrap) {
+			break
+		}
+		v.reset(OpAMD64LoweredGetG)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
 func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
@@ -34163,7 +34177,7 @@
 	}
 	// match: (Zero [s] destptr mem)
 	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
-	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
+	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstorezero destptr mem))
 	for {
 		s := auxIntToInt64(v.AuxInt)
 		destptr := v_0
@@ -34176,10 +34190,8 @@
 		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
 		v0.AuxInt = int64ToAuxInt(s % 16)
 		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v2.AuxInt = int128ToAuxInt(0)
-		v1.AddArg3(destptr, v2, mem)
+		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v1.AddArg2(destptr, mem)
 		v.AddArg2(v0, v1)
 		return true
 	}
@@ -34206,7 +34218,7 @@
 	}
 	// match: (Zero [16] destptr mem)
 	// cond: config.useSSE
-	// result: (MOVOstore destptr (MOVOconst [0]) mem)
+	// result: (MOVOstorezero destptr mem)
 	for {
 		if auxIntToInt64(v.AuxInt) != 16 {
 			break
@@ -34216,15 +34228,13 @@
 		if !(config.useSSE) {
 			break
 		}
-		v.reset(OpAMD64MOVOstore)
-		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v0.AuxInt = int128ToAuxInt(0)
-		v.AddArg3(destptr, v0, mem)
+		v.reset(OpAMD64MOVOstorezero)
+		v.AddArg2(destptr, mem)
 		return true
 	}
 	// match: (Zero [32] destptr mem)
 	// cond: config.useSSE
-	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
+	// result: (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem))
 	for {
 		if auxIntToInt64(v.AuxInt) != 32 {
 			break
@@ -34234,20 +34244,18 @@
 		if !(config.useSSE) {
 			break
 		}
-		v.reset(OpAMD64MOVOstore)
+		v.reset(OpAMD64MOVOstorezero)
 		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
 		v0.AuxInt = int64ToAuxInt(16)
 		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v1.AuxInt = int128ToAuxInt(0)
-		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v2.AddArg3(destptr, v1, mem)
-		v.AddArg3(v0, v1, v2)
+		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v1.AddArg2(destptr, mem)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Zero [48] destptr mem)
 	// cond: config.useSSE
-	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
+	// result: (MOVOstorezero (OffPtr <destptr.Type> destptr [32]) (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem)))
 	for {
 		if auxIntToInt64(v.AuxInt) != 48 {
 			break
@@ -34257,25 +34265,23 @@
 		if !(config.useSSE) {
 			break
 		}
-		v.reset(OpAMD64MOVOstore)
+		v.reset(OpAMD64MOVOstorezero)
 		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
 		v0.AuxInt = int64ToAuxInt(32)
 		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v1.AuxInt = int128ToAuxInt(0)
-		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
-		v3.AuxInt = int64ToAuxInt(16)
-		v3.AddArg(destptr)
-		v4 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v4.AddArg3(destptr, v1, mem)
-		v2.AddArg3(v3, v1, v4)
-		v.AddArg3(v0, v1, v2)
+		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+		v2.AuxInt = int64ToAuxInt(16)
+		v2.AddArg(destptr)
+		v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v3.AddArg2(destptr, mem)
+		v1.AddArg2(v2, v3)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Zero [64] destptr mem)
 	// cond: config.useSSE
-	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
+	// result: (MOVOstorezero (OffPtr <destptr.Type> destptr [48]) (MOVOstorezero (OffPtr <destptr.Type> destptr [32]) (MOVOstorezero (OffPtr <destptr.Type> destptr [16]) (MOVOstorezero destptr mem))))
 	for {
 		if auxIntToInt64(v.AuxInt) != 64 {
 			break
@@ -34285,30 +34291,28 @@
 		if !(config.useSSE) {
 			break
 		}
-		v.reset(OpAMD64MOVOstore)
+		v.reset(OpAMD64MOVOstorezero)
 		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
 		v0.AuxInt = int64ToAuxInt(48)
 		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v1.AuxInt = int128ToAuxInt(0)
-		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
-		v3.AuxInt = int64ToAuxInt(32)
-		v3.AddArg(destptr)
-		v4 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v5 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
-		v5.AuxInt = int64ToAuxInt(16)
-		v5.AddArg(destptr)
-		v6 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
-		v6.AddArg3(destptr, v1, mem)
-		v4.AddArg3(v5, v1, v6)
-		v2.AddArg3(v3, v1, v4)
-		v.AddArg3(v0, v1, v2)
+		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+		v2.AuxInt = int64ToAuxInt(32)
+		v2.AddArg(destptr)
+		v3 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v4 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+		v4.AuxInt = int64ToAuxInt(16)
+		v4.AddArg(destptr)
+		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstorezero, types.TypeMem)
+		v5.AddArg2(destptr, mem)
+		v3.AddArg2(v4, v5)
+		v1.AddArg2(v2, v3)
+		v.AddArg2(v0, v1)
 		return true
 	}
 	// match: (Zero [s] destptr mem)
 	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
-	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
+	// result: (DUFFZERO [s] destptr mem)
 	for {
 		s := auxIntToInt64(v.AuxInt)
 		destptr := v_0
@@ -34318,9 +34322,7 @@
 		}
 		v.reset(OpAMD64DUFFZERO)
 		v.AuxInt = int64ToAuxInt(s)
-		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
-		v0.AuxInt = int128ToAuxInt(0)
-		v.AddArg3(destptr, v0, mem)
+		v.AddArg2(destptr, mem)
 		return true
 	}
 	// match: (Zero [s] destptr mem)
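
The MOVOconst-to-MOVOstorezero rewrites above all follow one pattern: rather than materializing a 128-bit zero into an X register and storing it (three args: ptr, zero, mem), the dedicated MOVOstorezero stores zeroes with the source implicit in the op — presumably the newly reserved X15, given the fpRegMask change — which is also why DUFFZERO dropped its X0 argument. As a plain-Go analogue of what the rewritten Zero [32] rule now emits:

	// zero32 mirrors the two chained 16-byte zero stores produced by the
	// new Zero [32] lowering; the zero source is fixed, not an operand.
	func zero32(p *[32]byte) {
		var z [16]byte
		copy(p[0:16], z[:])
		copy(p[16:32], z[:])
	}
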
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index d9d439f..c958aae 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -3728,12 +3728,12 @@
 		return true
 	}
 	// match: (CMP x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMP y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpARMInvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 5d5e526..ff1156d 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -2772,12 +2772,12 @@
 		return true
 	}
 	// match: (CMP x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMP y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpARM64InvertFlags)
@@ -2941,12 +2941,12 @@
 		return true
 	}
 	// match: (CMPW x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPW y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpARM64InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 455f9b1..98f748e 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -4777,12 +4777,12 @@
 		return true
 	}
 	// match: (CMP x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMP y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpPPC64InvertFlags)
@@ -4834,12 +4834,12 @@
 		return true
 	}
 	// match: (CMPU x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPU y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpPPC64InvertFlags)
@@ -4964,12 +4964,12 @@
 		return true
 	}
 	// match: (CMPW x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPW y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpPPC64InvertFlags)
@@ -5045,12 +5045,12 @@
 		return true
 	}
 	// match: (CMPWU x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPWU y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpPPC64InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index a9722b8..b52a1b6 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -6332,12 +6332,12 @@
 		return true
 	}
 	// match: (CMP x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMP y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpS390XInvertFlags)
@@ -6389,12 +6389,12 @@
 		return true
 	}
 	// match: (CMPU x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPU y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpS390XInvertFlags)
@@ -6624,12 +6624,12 @@
 		return true
 	}
 	// match: (CMPW x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPW y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpS390XInvertFlags)
@@ -6721,12 +6721,12 @@
 		return true
 	}
 	// match: (CMPWU x y)
-	// cond: x.ID > y.ID
+	// cond: canonLessThan(x,y)
 	// result: (InvertFlags (CMPWU y x))
 	for {
 		x := v_0
 		y := v_1
-		if !(x.ID > y.ID) {
+		if !(canonLessThan(x, y)) {
 			break
 		}
 		v.reset(OpS390XInvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index c49bc80..60b727f 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -184,12 +184,12 @@
 	config := b.Func.Config
 	typ := &b.Func.Config.Types
 	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+	// cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
 	// result: (Int64Make (Arg <typ.Int32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		n := auxToSym(v.Aux)
-		if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+		if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
 			break
 		}
 		v.reset(OpInt64Make)
@@ -203,12 +203,12 @@
 		return true
 	}
 	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+	// cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
 	// result: (Int64Make (Arg <typ.UInt32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		n := auxToSym(v.Aux)
-		if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+		if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
 			break
 		}
 		v.reset(OpInt64Make)
@@ -222,12 +222,12 @@
 		return true
 	}
 	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+	// cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
 	// result: (Int64Make (Arg <typ.Int32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		n := auxToSym(v.Aux)
-		if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+		if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
 			break
 		}
 		v.reset(OpInt64Make)
@@ -241,12 +241,12 @@
 		return true
 	}
 	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+	// cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
 	// result: (Int64Make (Arg <typ.UInt32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		n := auxToSym(v.Aux)
-		if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+		if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
 			break
 		}
 		v.reset(OpInt64Make)
diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go
deleted file mode 100644
index 23ff417..0000000
--- a/src/cmd/compile/internal/ssa/rewritedecArgs.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Code generated from gen/decArgs.rules; DO NOT EDIT.
-// generated with: cd gen; go run *.go
-
-package ssa
-
-func rewriteValuedecArgs(v *Value) bool {
-	switch v.Op {
-	case OpArg:
-		return rewriteValuedecArgs_OpArg(v)
-	}
-	return false
-}
-func rewriteValuedecArgs_OpArg(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
-	fe := b.Func.fe
-	typ := &b.Func.Config.Types
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsString()
-	// result: (StringMake (Arg <typ.BytePtr> {n} [off]) (Arg <typ.Int> {n} [off+int32(config.PtrSize)]))
-	for {
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(v.Type.IsString()) {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
-		v0.AuxInt = int32ToAuxInt(off)
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
-		v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
-		v1.Aux = symToAux(n)
-		v.AddArg2(v0, v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsSlice()
-	// result: (SliceMake (Arg <v.Type.Elem().PtrTo()> {n} [off]) (Arg <typ.Int> {n} [off+int32(config.PtrSize)]) (Arg <typ.Int> {n} [off+2*int32(config.PtrSize)]))
-	for {
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(v.Type.IsSlice()) {
-			break
-		}
-		v.reset(OpSliceMake)
-		v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo())
-		v0.AuxInt = int32ToAuxInt(off)
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
-		v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
-		v1.Aux = symToAux(n)
-		v2 := b.NewValue0(v.Pos, OpArg, typ.Int)
-		v2.AuxInt = int32ToAuxInt(off + 2*int32(config.PtrSize))
-		v2.Aux = symToAux(n)
-		v.AddArg3(v0, v1, v2)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsInterface()
-	// result: (IMake (Arg <typ.Uintptr> {n} [off]) (Arg <typ.BytePtr> {n} [off+int32(config.PtrSize)]))
-	for {
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(v.Type.IsInterface()) {
-			break
-		}
-		v.reset(OpIMake)
-		v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr)
-		v0.AuxInt = int32ToAuxInt(off)
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
-		v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
-		v1.Aux = symToAux(n)
-		v.AddArg2(v0, v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsComplex() && v.Type.Size() == 16
-	// result: (ComplexMake (Arg <typ.Float64> {n} [off]) (Arg <typ.Float64> {n} [off+8]))
-	for {
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(v.Type.IsComplex() && v.Type.Size() == 16) {
-			break
-		}
-		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Pos, OpArg, typ.Float64)
-		v0.AuxInt = int32ToAuxInt(off)
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, typ.Float64)
-		v1.AuxInt = int32ToAuxInt(off + 8)
-		v1.Aux = symToAux(n)
-		v.AddArg2(v0, v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsComplex() && v.Type.Size() == 8
-	// result: (ComplexMake (Arg <typ.Float32> {n} [off]) (Arg <typ.Float32> {n} [off+4]))
-	for {
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(v.Type.IsComplex() && v.Type.Size() == 8) {
-			break
-		}
-		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Pos, OpArg, typ.Float32)
-		v0.AuxInt = int32ToAuxInt(off)
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, typ.Float32)
-		v1.AuxInt = int32ToAuxInt(off + 4)
-		v1.Aux = symToAux(n)
-		v.AddArg2(v0, v1)
-		return true
-	}
-	// match: (Arg <t>)
-	// cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
-	// result: (StructMake0)
-	for {
-		t := v.Type
-		if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake0)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
-	// result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]))
-	for {
-		t := v.Type
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake1)
-		v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
-		v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
-		v0.Aux = symToAux(n)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
-	// result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]))
-	for {
-		t := v.Type
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake2)
-		v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
-		v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
-		v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
-		v1.Aux = symToAux(n)
-		v.AddArg2(v0, v1)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
-	// result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]) (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]))
-	for {
-		t := v.Type
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake3)
-		v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
-		v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
-		v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
-		v1.Aux = symToAux(n)
-		v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
-		v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2)))
-		v2.Aux = symToAux(n)
-		v.AddArg3(v0, v1, v2)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
-	// result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]) (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]) (Arg <t.FieldType(3)> {n} [off+int32(t.FieldOff(3))]))
-	for {
-		t := v.Type
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake4)
-		v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
-		v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
-		v0.Aux = symToAux(n)
-		v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
-		v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
-		v1.Aux = symToAux(n)
-		v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
-		v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2)))
-		v2.Aux = symToAux(n)
-		v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3))
-		v3.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(3)))
-		v3.Aux = symToAux(n)
-		v.AddArg4(v0, v1, v2, v3)
-		return true
-	}
-	// match: (Arg <t>)
-	// cond: t.IsArray() && t.NumElem() == 0
-	// result: (ArrayMake0)
-	for {
-		t := v.Type
-		if !(t.IsArray() && t.NumElem() == 0) {
-			break
-		}
-		v.reset(OpArrayMake0)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
-	// result: (ArrayMake1 (Arg <t.Elem()> {n} [off]))
-	for {
-		t := v.Type
-		off := auxIntToInt32(v.AuxInt)
-		n := auxToSym(v.Aux)
-		if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpArrayMake1)
-		v0 := b.NewValue0(v.Pos, OpArg, t.Elem())
-		v0.AuxInt = int32ToAuxInt(off)
-		v0.Aux = symToAux(n)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteBlockdecArgs(b *Block) bool {
-	switch b.Kind {
-	}
-	return false
-}
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
index 60ada01..a27002e 100644
--- a/src/cmd/compile/internal/ssa/sizeof_test.go
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -22,7 +22,7 @@
 	}{
 		{Value{}, 72, 112},
 		{Block{}, 164, 304},
-		{LocalSlot{}, 32, 48},
+		{LocalSlot{}, 28, 40},
 		{valState{}, 28, 40},
 	}
 
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 406a3c3..68a6f08 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -7,6 +7,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
 	"fmt"
@@ -156,7 +157,7 @@
 		if v.Aux == nil {
 			f.Fatalf("%s has nil Aux\n", v.LongString())
 		}
-		loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
+		loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt}
 		if f.pass.debug > stackDebug {
 			fmt.Printf("stackalloc %s to %s\n", v, loc)
 		}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index edc43aa..d000b7c 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
 	"fmt"
@@ -36,7 +37,7 @@
 	// Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
 	// Use Value.AuxUnsigned to get the zero-extended value of AuxInt.
 	AuxInt int64
-	Aux    interface{}
+	Aux    Aux
 
 	// Arguments of this value
 	Args []*Value
@@ -492,3 +493,16 @@
 	}
 	return true
 }
+
+// TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756
+func (*Value) CanBeAnSSAAux() {}
+
+// AutoVar returns a *Name and int64 representing the auto variable and offset within it
+// where v should be spilled.
+func AutoVar(v *Value) (*ir.Name, int64) {
+	loc := v.Block.Func.RegAlloc[v.ID].(LocalSlot)
+	if v.Type.Size() > loc.Type.Size() {
+		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
+	}
+	return loc.N, loc.Off
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index 849c9e8..4378f2d 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -5,6 +5,7 @@
 package ssa
 
 import (
+	"cmd/compile/internal/reflectdata"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
@@ -270,11 +271,11 @@
 			case OpMoveWB:
 				fn = typedmemmove
 				val = w.Args[1]
-				typ = w.Aux.(*types.Type).Symbol()
+				typ = reflectdata.TypeLinksym(w.Aux.(*types.Type))
 				nWBops--
 			case OpZeroWB:
 				fn = typedmemclr
-				typ = w.Aux.(*types.Type).Symbol()
+				typ = reflectdata.TypeLinksym(w.Aux.(*types.Type))
 				nWBops--
 			case OpVarDef, OpVarLive, OpVarKill:
 			}
diff --git a/src/cmd/compile/internal/ssa/zcse.go b/src/cmd/compile/internal/ssa/zcse.go
index ec38b7d..e08272c 100644
--- a/src/cmd/compile/internal/ssa/zcse.go
+++ b/src/cmd/compile/internal/ssa/zcse.go
@@ -57,7 +57,7 @@
 type vkey struct {
 	op Op
 	ai int64       // aux int
-	ax interface{} // aux
+	ax Aux         // aux
 	t  *types.Type // type
 }
 
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
new file mode 100644
index 0000000..7180b38
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/escape"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+)
+
+// useABIWrapGen reports whether the compiler should generate an
+// ABI wrapper for the function f.
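+//
+// With base.Flag.ABIWrapLimit set to N > 1, only the first N-1 calls
+// return true; this supports bisecting which wrappers get generated.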
+func useABIWrapGen(f *ir.Func) bool {
+	if !base.Flag.ABIWrap {
+		return false
+	}
+
+	// Support limit option for bisecting.
+	if base.Flag.ABIWrapLimit == 1 {
+		return false
+	}
+	if base.Flag.ABIWrapLimit < 1 {
+		return true
+	}
+	base.Flag.ABIWrapLimit--
+	if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
+		fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
+			f.LSym.Name)
+	}
+
+	return true
+}
+
+// symabiDefs and symabiRefs record the defined and referenced ABIs of
+// symbols required by non-Go code. These are keyed by link symbol
+// name, where the local package prefix is always `"".`
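+// For example, a function Foo in the package being compiled is keyed
+// as `"".Foo`.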
+var symabiDefs, symabiRefs map[string]obj.ABI
+
+func CgoSymABIs() {
+	// The linker expects an ABI0 wrapper for all cgo-exported
+	// functions.
+	for _, prag := range typecheck.Target.CgoPragmas {
+		switch prag[0] {
+		case "cgo_export_static", "cgo_export_dynamic":
+			if symabiRefs == nil {
+				symabiRefs = make(map[string]obj.ABI)
+			}
+			symabiRefs[prag[1]] = obj.ABI0
+		}
+	}
+}
+
+// ReadSymABIs reads a symabis file that specifies definitions and
+// references of text symbols by ABI.
+//
+// The symabis format is a set of lines, where each line is a sequence
+// of whitespace-separated fields. The first field is a verb and is
+// either "def" for defining a symbol ABI or "ref" for referencing a
+// symbol using an ABI. For both "def" and "ref", the second field is
+// the symbol name and the third field is the ABI name, as one of the
+// named cmd/internal/obj.ABI constants.
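+//
+// For example (hypothetical file contents):
+//
+//	def main.Foo ABI0
+//	ref main.Bar ABI0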
+func ReadSymABIs(file, myimportpath string) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-symabis: %v", err)
+	}
+
+	symabiDefs = make(map[string]obj.ABI)
+	symabiRefs = make(map[string]obj.ABI)
+
+	localPrefix := ""
+	if myimportpath != "" {
+		// Symbols in this package may be written either as
+		// "".X or with the package's import path already in
+		// the symbol.
+		localPrefix = objabi.PathToPrefix(myimportpath) + "."
+	}
+
+	for lineNum, line := range strings.Split(string(data), "\n") {
+		lineNum++ // 1-based
+		line = strings.TrimSpace(line)
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "def", "ref":
+			// Parse line.
+			if len(parts) != 3 {
+				log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
+			}
+			sym, abistr := parts[1], parts[2]
+			abi, valid := obj.ParseABI(abistr)
+			if !valid {
+				log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
+			}
+
+			// If the symbol is already prefixed with
+			// myimportpath, rewrite it to start with ""
+			// so it matches the compiler's internal
+			// symbol names.
+			if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
+				sym = `"".` + sym[len(localPrefix):]
+			}
+
+			// Record for later.
+			if parts[0] == "def" {
+				symabiDefs[sym] = abi
+			} else {
+				symabiRefs[sym] = abi
+			}
+		default:
+			log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
+		}
+	}
+}
+
+// InitLSym defines f's obj.LSym and initializes it based on the
+// properties of f. This includes setting the symbol flags and ABI and
+// creating and initializing related DWARF symbols.
+//
+// InitLSym must be called exactly once per function and must be
+// called for both functions with bodies and functions without bodies.
+// For body-less functions, we only create the LSym; for functions
+// with bodies, we call a helper to set up and populate the LSym.
+func InitLSym(f *ir.Func, hasBody bool) {
+	// FIXME: for new-style ABI wrappers, we set up the lsym at the
+	// point the wrapper is created.
+	if f.LSym != nil && base.Flag.ABIWrap {
+		return
+	}
+	staticdata.NeedFuncSym(f.Sym())
+	selectLSym(f, hasBody)
+	if hasBody {
+		setupTextLSym(f, 0)
+	}
+}
+
+// selectLSym sets up the LSym for a given function, and
+// makes calls to helpers to create ABI wrappers if needed.
+func selectLSym(f *ir.Func, hasBody bool) {
+	if f.LSym != nil {
+		base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f)
+	}
+
+	if nam := f.Nname; !ir.IsBlank(nam) {
+
+		var wrapperABI obj.ABI
+		needABIWrapper := false
+		defABI, hasDefABI := symabiDefs[nam.Linksym().Name]
+		if hasDefABI && defABI == obj.ABI0 {
+			// Symbol is defined as ABI0. Create an
+			// Internal -> ABI0 wrapper.
+			f.LSym = nam.LinksymABI(obj.ABI0)
+			needABIWrapper, wrapperABI = true, obj.ABIInternal
+		} else {
+			f.LSym = nam.Linksym()
+			// No ABI override. Check that the symbol is
+			// using the expected ABI.
+			want := obj.ABIInternal
+			if f.LSym.ABI() != want {
+				base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
+			}
+		}
+		if f.Pragma&ir.Systemstack != 0 {
+			f.LSym.Set(obj.AttrCFunc, true)
+		}
+
+		isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
+		if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
+			// Either 1) this symbol is definitely
+			// referenced as ABI0 from this package; or 2)
+			// this symbol is defined in this package but
+			// given a linkname, indicating that it may be
+			// referenced from another package. Create an
+			// ABI0 -> Internal wrapper so it can be
+			// called as ABI0. In case 2, it's important
+			// that we know it's defined in this package
+			// since other packages may "pull" symbols
+			// using linkname and we don't want to create
+			// duplicate ABI wrappers.
+			if f.LSym.ABI() != obj.ABI0 {
+				needABIWrapper, wrapperABI = true, obj.ABI0
+			}
+		}
+
+		if needABIWrapper {
+			if !useABIWrapGen(f) {
+				// Fallback: use alias instead. FIXME.
+
+				// These LSyms have the same name as the
+				// native function, so we create them directly
+				// rather than looking them up. The uniqueness
+				// of f.lsym ensures uniqueness of asym.
+				asym := &obj.LSym{
+					Name: f.LSym.Name,
+					Type: objabi.SABIALIAS,
+					R:    []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
+				}
+				asym.SetABI(wrapperABI)
+				asym.Set(obj.AttrDuplicateOK, true)
+				base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
+			} else {
+				if base.Debug.ABIWrap != 0 {
+					fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
+						wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
+				}
+				makeABIWrapper(f, wrapperABI)
+			}
+		}
+	}
+}
+
+// makeABIWrapper creates a new function that wraps a cross-ABI call
+// to "f".  The wrapper is marked as an ABIWRAPPER.
+func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
+
+	// Q: is this needed?
+	savepos := base.Pos
+	savedclcontext := typecheck.DeclContext
+	savedcurfn := ir.CurFunc
+
+	base.Pos = base.AutogeneratedPos
+	typecheck.DeclContext = ir.PEXTERN
+
+	// At the moment we don't support wrapping a method; we'd need machinery
+	// below to handle the receiver. Panic if we see this scenario.
+	ft := f.Nname.Ntype.Type()
+	if ft.NumRecvs() != 0 {
+		panic("makeABIWrapper support for wrapping methods not implemented")
+	}
+
+	// Manufacture a new func type to use for the wrapper.
+	var noReceiver *ir.Field
+	tfn := ir.NewFuncType(base.Pos,
+		noReceiver,
+		typecheck.NewFuncParams(ft.Params(), true),
+		typecheck.NewFuncParams(ft.Results(), false))
+
+	// Reuse f's types.Sym to create a new ODCLFUNC/function.
+	fn := typecheck.DeclFunc(f.Nname.Sym(), tfn)
+	fn.SetDupok(true)
+	fn.SetWrapper(true) // ignore frame for panic+recover matching
+
+	// Select LSYM now.
+	asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
+	asym.Type = objabi.STEXT
+	if fn.LSym != nil {
+		panic("unexpected")
+	}
+	fn.LSym = asym
+
+	// ABI0-to-ABIInternal wrappers will be mainly loading params from
+	// stack into registers (and/or storing stack locations back to
+	// registers after the wrapped call); in most cases they won't
+	// need to allocate stack space, so it should be OK to mark them
+	// as NOSPLIT in these cases. In addition, my assumption is that
+	// functions written in assembly are NOSPLIT in most (but not all)
+	// cases. In the case of an ABIInternal target that has too many
+	// parameters to fit into registers, the wrapper would need to
+	// allocate stack space, but this seems like an unlikely scenario.
+	// Hence: mark these wrappers NOSPLIT.
+	//
+	// ABIInternal-to-ABI0 wrappers on the other hand will be taking
+	// things in registers and pushing them onto the stack prior to
+	// the ABI0 call, meaning that they will always need to allocate
+// stack space. If the compiler marks them as NOSPLIT, this seems
+// as though it could lead to situations where the linker's
+// nosplit-overflow analysis would trigger a link failure. On the
+// other hand, if they are not tagged NOSPLIT, then this could cause
+// problems when building the runtime (since there may be calls to
+// asm routines in cases where it's not safe to grow the stack). In
+// most cases the wrapper would be (in effect) inlined, but there
+// may be indirect calls from the runtime that could run into
+// trouble here.
+	// FIXME: at the moment all.bash does not pass when I leave out
+	// NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
+	setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
+
+	// Generate call. Use tail call if no params and no returns,
+	// but a regular call otherwise.
+	//
+	// Note: ideally we would be using a tail call in cases where
+	// there are params but no returns for ABI0->ABIInternal wrappers,
+	// provided that all params fit into registers (e.g. we don't have
+	// to allocate any stack space). Doing this will require some
+// extra work in typecheck/walk/ssa; we might want to add a new node
+// OTAILCALL or something to this effect.
+	tailcall := tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0
+	if base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink {
+		// cannot tailcall on PPC64 with dynamic linking, as we need
+		// to restore R2 after the call.
+		tailcall = false
+	}
+	if base.Ctxt.Arch.Name == "amd64" && wrapperABI == obj.ABIInternal {
+		// cannot tailcall from ABIInternal to ABI0 on AMD64, as we need
+		// to restore special registers (X15) when returning to ABIInternal.
+		tailcall = false
+	}
+
+	var tail ir.Node
+	if tailcall {
+		tail = ir.NewTailCallStmt(base.Pos, f.Nname)
+	} else {
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
+		call.Args = ir.ParamNames(tfn.Type())
+		call.IsDDD = tfn.Type().IsVariadic()
+		tail = call
+		if tfn.Type().NumResults() > 0 {
+			n := ir.NewReturnStmt(base.Pos, nil)
+			n.Results = []ir.Node{call}
+			tail = n
+		}
+	}
+	fn.Body.Append(tail)
+
+	typecheck.FinishFuncBody()
+	if base.Debug.DclStack != 0 {
+		types.CheckDclstack()
+	}
+
+	typecheck.Func(fn)
+	ir.CurFunc = fn
+	typecheck.Stmts(fn.Body)
+
+	escape.Batch([]*ir.Func{fn}, false)
+
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	// Restore previous context.
+	base.Pos = savepos
+	typecheck.DeclContext = savedclcontext
+	ir.CurFunc = savedcurfn
+}
+
+// setupTextLSym initializes the LSym for a text symbol with a body.
+func setupTextLSym(f *ir.Func, flag int) {
+	if f.Dupok() {
+		flag |= obj.DUPOK
+	}
+	if f.Wrapper() {
+		flag |= obj.WRAPPER
+	}
+	if f.Needctxt() {
+		flag |= obj.NEEDCTXT
+	}
+	if f.Pragma&ir.Nosplit != 0 {
+		flag |= obj.NOSPLIT
+	}
+	if f.ReflectMethod() {
+		flag |= obj.REFLECTMETHOD
+	}
+
+	// Clumsy but important.
+	// See test/recover.go for test cases and src/reflect/value.go
+	// for the actual functions being considered.
+	if base.Ctxt.Pkgpath == "reflect" {
+		switch f.Sym().Name {
+		case "callReflect", "callMethod":
+			flag |= obj.WRAPPER
+		}
+	}
+
+	base.Ctxt.InitTextSym(f.LSym, flag)
+}
diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go
new file mode 100644
index 0000000..cc50ab3
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/arch.go
@@ -0,0 +1,42 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/ssa"
+	"cmd/internal/obj"
+)
+
+var Arch ArchInfo
+
+// interface to back end
+
+type ArchInfo struct {
+	LinkArch *obj.LinkArch
+
+	REGSP     int
+	MAXWIDTH  int64
+	SoftFloat bool
+
+	PadFrame func(int64) int64
+
+	// ZeroRange zeroes a range of memory on the stack. It is only inserted
+	// at function entry, and it is ok to clobber registers.
+	ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+
+	Ginsnop      func(*objw.Progs) *obj.Prog
+	Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
+
+	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+	SSAMarkMoves func(*State, *ssa.Block)
+
+	// SSAGenValue emits Prog(s) for the Value.
+	SSAGenValue func(*State, *ssa.Value)
+
+	// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
+	// for all values in the block before SSAGenBlock.
+	SSAGenBlock func(s *State, b, next *ssa.Block)
+}
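+
+// Each architecture back end populates Arch during initialization.
+// A minimal sketch (illustrative only; the helper names on the
+// right-hand side are hypothetical for this example):
+//
+//	func Init(arch *ssagen.ArchInfo) {
+//		arch.LinkArch = &x86.Linkamd64
+//		arch.REGSP = x86.REGSP
+//		arch.MAXWIDTH = 1 << 50
+//		arch.SSAMarkMoves = ssaMarkMoves
+//		arch.SSAGenValue = ssaGenValue
+//		arch.SSAGenBlock = ssaGenBlock
+//	}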
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
new file mode 100644
index 0000000..a243436
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -0,0 +1,200 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+	"bytes"
+	"fmt"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+func EnableNoWriteBarrierRecCheck() {
+	nowritebarrierrecCheck = newNowritebarrierrecChecker()
+}
+
+func NoWriteBarrierRecCheck() {
+	// Write barriers are now known. Check the
+	// call graph.
+	nowritebarrierrecCheck.check()
+	nowritebarrierrecCheck = nil
+}
+
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+
+type nowritebarrierrecChecker struct {
+	// extraCalls contains extra function calls that may not be
+	// visible during later analysis. It maps from the ODCLFUNC of
+	// the caller to a list of callees.
+	extraCalls map[*ir.Func][]nowritebarrierrecCall
+
+	// curfn is the current function during AST walks.
+	curfn *ir.Func
+}
+
+type nowritebarrierrecCall struct {
+	target *ir.Func // caller or callee
+	lineno src.XPos // line of call
+}
+
+// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
+// must be called before walk.
+func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
+	c := &nowritebarrierrecChecker{
+		extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
+	}
+
+	// Find all systemstack calls and record their targets. In
+	// general, flow analysis can't see into systemstack, but it's
+	// important to handle it for this check, so we model it
+	// directly. This has to happen before transforming closures in walk, since
+	// it's a lot harder to work out the argument afterwards.
+	for _, n := range typecheck.Target.Decls {
+		if n.Op() != ir.ODCLFUNC {
+			continue
+		}
+		c.curfn = n.(*ir.Func)
+		ir.Visit(n, c.findExtraCalls)
+	}
+	c.curfn = nil
+	return c
+}
+
+func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
+	if nn.Op() != ir.OCALLFUNC {
+		return
+	}
+	n := nn.(*ir.CallExpr)
+	if n.X == nil || n.X.Op() != ir.ONAME {
+		return
+	}
+	fn := n.X.(*ir.Name)
+	if fn.Class != ir.PFUNC || fn.Defn == nil {
+		return
+	}
+	if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
+		return
+	}
+
+	var callee *ir.Func
+	arg := n.Args[0]
+	switch arg.Op() {
+	case ir.ONAME:
+		arg := arg.(*ir.Name)
+		callee = arg.Defn.(*ir.Func)
+	case ir.OCLOSURE:
+		arg := arg.(*ir.ClosureExpr)
+		callee = arg.Func
+	default:
+		base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+	}
+	if callee.Op() != ir.ODCLFUNC {
+		base.Fatalf("expected ODCLFUNC node, got %+v", callee)
+	}
+	c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
+}
+
+// recordCall records a call from ODCLFUNC node "from", to function
+// symbol "to" at position pos.
+//
+// This should be done as late as possible during compilation to
+// capture precise call graphs. The target of the call is an LSym
+// because that's all we know after we start SSA.
+//
+// This can be called concurrently for different "from" nodes.
+func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
+	// We record this information on the *Func so this is concurrent-safe.
+	if fn.NWBRCalls == nil {
+		fn.NWBRCalls = new([]ir.SymAndPos)
+	}
+	*fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
+}
+
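+// check does a BFS over the call graph starting from every
+// go:nowritebarrierrec function and reports an error for any path
+// that reaches a function whose WBPos records a write barrier.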
+func (c *nowritebarrierrecChecker) check() {
+	// We walk the call graph as late as possible so we can
+	// capture all calls created by lowering, but this means we
+	// only get to see the obj.LSyms of calls. symToFunc lets us
+	// get back to the ODCLFUNCs.
+	symToFunc := make(map[*obj.LSym]*ir.Func)
+	// funcs records the back-edges of the BFS call graph walk. It
+	// maps from the ODCLFUNC of each function that must not have
+	// write barriers to the call that inhibits them. Functions
+	// that are directly marked go:nowritebarrierrec are in this
+	// map with a zero-valued nowritebarrierrecCall. This also
+	// acts as the set of marks for the BFS of the call graph.
+	funcs := make(map[*ir.Func]nowritebarrierrecCall)
+	// q is the queue of ODCLFUNC Nodes to visit in BFS order.
+	var q ir.NameQueue
+
+	for _, n := range typecheck.Target.Decls {
+		if n.Op() != ir.ODCLFUNC {
+			continue
+		}
+		fn := n.(*ir.Func)
+
+		symToFunc[fn.LSym] = fn
+
+		// Make nowritebarrierrec functions BFS roots.
+		if fn.Pragma&ir.Nowritebarrierrec != 0 {
+			funcs[fn] = nowritebarrierrecCall{}
+			q.PushRight(fn.Nname)
+		}
+		// Check go:nowritebarrier functions.
+		if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
+			base.ErrorfAt(fn.WBPos, "write barrier prohibited")
+		}
+	}
+
+	// Perform a BFS of the call graph from all
+	// go:nowritebarrierrec functions.
+	enqueue := func(src, target *ir.Func, pos src.XPos) {
+		if target.Pragma&ir.Yeswritebarrierrec != 0 {
+			// Don't flow into this function.
+			return
+		}
+		if _, ok := funcs[target]; ok {
+			// Already found a path to target.
+			return
+		}
+
+		// Record the path.
+		funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
+		q.PushRight(target.Nname)
+	}
+	for !q.Empty() {
+		fn := q.PopLeft().Func
+
+		// Check fn.
+		if fn.WBPos.IsKnown() {
+			var err bytes.Buffer
+			call := funcs[fn]
+			for call.target != nil {
+				fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
+				call = funcs[call.target]
+			}
+			base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
+			continue
+		}
+
+		// Enqueue fn's calls.
+		for _, callee := range c.extraCalls[fn] {
+			enqueue(fn, callee.target, callee.lineno)
+		}
+		if fn.NWBRCalls == nil {
+			continue
+		}
+		for _, callee := range *fn.NWBRCalls {
+			target := symToFunc[callee.Sym]
+			if target != nil {
+				enqueue(fn, target, callee.Pos)
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
new file mode 100644
index 0000000..182f840
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -0,0 +1,273 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+	"internal/race"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+	"cmd/internal/sys"
+)
+
+// cmpstackvarlt reports whether the stack variable a sorts before b.
+//
+// The sort order is: autos after anything else; within autos, unused
+// after used; within used autos, things with pointers first, then
+// things needing zeroing, then decreasing size. Because autos are
+// laid out at decreasing addresses on the stack, "pointers first,
+// zeroed things first, decreasing size" really means, in memory,
+// things with pointers needing zeroing at the top of the stack and
+// increasing in size toward the bottom. Non-autos sort on frame offset.
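+//
+// For example (illustrative): a used auto containing pointers sorts
+// before a used pointer-free auto, which in turn sorts before any
+// unused auto; two PPARAMs simply sort by frame offset.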
+func cmpstackvarlt(a, b *ir.Name) bool {
+	if (a.Class == ir.PAUTO) != (b.Class == ir.PAUTO) {
+		return b.Class == ir.PAUTO
+	}
+
+	if a.Class != ir.PAUTO {
+		return a.FrameOffset() < b.FrameOffset()
+	}
+
+	if a.Used() != b.Used() {
+		return a.Used()
+	}
+
+	ap := a.Type().HasPointers()
+	bp := b.Type().HasPointers()
+	if ap != bp {
+		return ap
+	}
+
+	ap = a.Needzero()
+	bp = b.Needzero()
+	if ap != bp {
+		return ap
+	}
+
+	if a.Type().Width != b.Type().Width {
+		return a.Type().Width > b.Type().Width
+	}
+
+	return a.Sym().Name < b.Sym().Name
+}
+
+// byStackVar implements sort.Interface for []*ir.Name using cmpstackvarlt.
+type byStackVar []*ir.Name
+
+func (s byStackVar) Len() int           { return len(s) }
+func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
+func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
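+// AllocFrame lays out f's automatic variables on the stack: it marks
+// which autos are actually used, sorts fn.Dcl with byStackVar, and
+// assigns decreasing frame offsets to the used autos, with alignment
+// and padding (including a pad byte between a pointer-containing
+// object and a zero-sized one; see issue 24993).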
+func (s *ssafn) AllocFrame(f *ssa.Func) {
+	s.stksize = 0
+	s.stkptrsize = 0
+	fn := s.curfn
+
+	// Mark the PAUTO's unused.
+	for _, ln := range fn.Dcl {
+		if ln.Class == ir.PAUTO {
+			ln.SetUsed(false)
+		}
+	}
+
+	for _, l := range f.RegAlloc {
+		if ls, ok := l.(ssa.LocalSlot); ok {
+			ls.N.SetUsed(true)
+		}
+	}
+
+	scratchUsed := false
+	for _, b := range f.Blocks {
+		for _, v := range b.Values {
+			if n, ok := v.Aux.(*ir.Name); ok {
+				switch n.Class {
+				case ir.PPARAM, ir.PPARAMOUT:
+					// Don't modify RegFP; it is a global.
+					if n != ir.RegFP {
+						n.SetUsed(true)
+					}
+				case ir.PAUTO:
+					n.SetUsed(true)
+				}
+			}
+			if !scratchUsed {
+				scratchUsed = v.Op.UsesScratch()
+			}
+
+		}
+	}
+
+	if f.Config.NeedsFpScratch && scratchUsed {
+		s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
+	}
+
+	sort.Sort(byStackVar(fn.Dcl))
+
+	// Reassign stack offsets of the locals that are used.
+	lastHasPtr := false
+	for i, n := range fn.Dcl {
+		if n.Op() != ir.ONAME || n.Class != ir.PAUTO {
+			continue
+		}
+		if !n.Used() {
+			fn.Dcl = fn.Dcl[:i]
+			break
+		}
+
+		types.CalcSize(n.Type())
+		w := n.Type().Width
+		if w >= types.MaxWidth || w < 0 {
+			base.Fatalf("bad width")
+		}
+		if w == 0 && lastHasPtr {
+			// Pad between a pointer-containing object and a zero-sized object.
+			// This prevents a pointer to the zero-sized object from being interpreted
+			// as a pointer to the pointer-containing object (and causing it
+			// to be scanned when it shouldn't be). See issue 24993.
+			w = 1
+		}
+		s.stksize += w
+		s.stksize = types.Rnd(s.stksize, int64(n.Type().Align))
+		if n.Type().HasPointers() {
+			s.stkptrsize = s.stksize
+			lastHasPtr = true
+		} else {
+			lastHasPtr = false
+		}
+		if Arch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
+			s.stksize = types.Rnd(s.stksize, int64(types.PtrSize))
+		}
+		n.SetFrameOffset(-s.stksize)
+	}
+
+	s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
+	s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
+}
+
+const maxStackSize = 1 << 30
+
+// Compile builds an SSA backend function,
+// uses it to generate a plist,
+// and flushes that plist to machine code.
+// worker indicates which of the backend workers is doing the processing.
+func Compile(fn *ir.Func, worker int) {
+	f := buildssa(fn, worker)
+	// Note: check arg size to fix issue 25507.
+	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
+		largeStackFramesMu.Lock()
+		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
+		largeStackFramesMu.Unlock()
+		return
+	}
+	pp := objw.NewProgs(fn, worker)
+	defer pp.Free()
+	genssa(f, pp)
+	// Check frame size again.
+	// The check above included only the space needed for local variables.
+	// After genssa, the space needed includes local variables and the callee arg region.
+	// We must do this check prior to calling pp.Flush.
+	// If there are any oversized stack frames,
+	// the assembler may emit inscrutable complaints about invalid instructions.
+	if pp.Text.To.Offset >= maxStackSize {
+		largeStackFramesMu.Lock()
+		locals := f.Frontend().(*ssafn).stksize
+		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
+		largeStackFramesMu.Unlock()
+		return
+	}
+
+	pp.Flush() // assemble, fill in boilerplate, etc.
+	// fieldtrack must be called after pp.Flush. See issue 20014.
+	fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
+}
+
+func init() {
+	if race.Enabled {
+		rand.Seed(time.Now().UnixNano())
+	}
+}
+
+// StackOffset returns the stack location of a LocalSlot relative to the
+// stack pointer, suitable for use in a DWARF location entry. This has nothing
+// to do with its offset in the user variable.
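+//
+// For example (illustrative), on a target with a fixed frame size of
+// zero and the frame pointer enabled, a PAUTO with FrameOffset -16
+// yields -16 - 2*PtrSize + slot.Off.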
+func StackOffset(slot ssa.LocalSlot) int32 {
+	n := slot.N
+	var off int64
+	switch n.Class {
+	case ir.PAUTO:
+		off = n.FrameOffset()
+		if base.Ctxt.FixedFrameSize() == 0 {
+			off -= int64(types.PtrSize)
+		}
+		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
+			// There is a word space for FP on ARM64 even if the frame pointer is disabled
+			off -= int64(types.PtrSize)
+		}
+	case ir.PPARAM, ir.PPARAMOUT:
+		off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+	}
+	return int32(off + slot.Off)
+}
+
+// fieldtrack adds R_USEFIELD relocations to fnsym to record any
+// struct fields that it used.
+func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) {
+	if fnsym == nil {
+		return
+	}
+	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
+		return
+	}
+
+	trackSyms := make([]*obj.LSym, 0, len(tracked))
+	for sym := range tracked {
+		trackSyms = append(trackSyms, sym)
+	}
+	sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name })
+	for _, sym := range trackSyms {
+		r := obj.Addrel(fnsym)
+		r.Sym = sym
+		r.Type = objabi.R_USEFIELD
+	}
+}
+
+// largeStack is info about a function whose stack frame is too large (rare).
+type largeStack struct {
+	locals int64
+	args   int64
+	callee int64
+	pos    src.XPos
+}
+
+var (
+	largeStackFramesMu sync.Mutex // protects largeStackFrames
+	largeStackFrames   []largeStack
+)
+
+func CheckLargeStacks() {
+	// Check whether any of the functions we have compiled have gigantic stack frames.
+	sort.Slice(largeStackFrames, func(i, j int) bool {
+		return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
+	})
+	for _, large := range largeStackFrames {
+		if large.callee != 0 {
+			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+		} else {
+			base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/ssagen/pgen_test.go b/src/cmd/compile/internal/ssagen/pgen_test.go
new file mode 100644
index 0000000..69ed8ad
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/pgen_test.go
@@ -0,0 +1,209 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+	"reflect"
+	"sort"
+	"testing"
+
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+func typeWithoutPointers() *types.Type {
+	return types.NewStruct(types.NoPkg, []*types.Field{
+		types.NewField(src.NoXPos, nil, types.New(types.TINT)),
+	})
+}
+
+func typeWithPointers() *types.Type {
+	return types.NewStruct(types.NoPkg, []*types.Field{
+		types.NewField(src.NoXPos, nil, types.NewPtr(types.New(types.TINT))),
+	})
+}
+
+func markUsed(n *ir.Name) *ir.Name {
+	n.SetUsed(true)
+	return n
+}
+
+func markNeedZero(n *ir.Name) *ir.Name {
+	n.SetNeedzero(true)
+	return n
+}
+
+// Test all code paths for cmpstackvarlt.
+func TestCmpstackvar(t *testing.T) {
+	nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
+		if s == nil {
+			s = &types.Sym{Name: "."}
+		}
+		n := typecheck.NewName(s)
+		n.SetType(t)
+		n.SetFrameOffset(xoffset)
+		n.Class = cl
+		return n
+	}
+	testdata := []struct {
+		a, b *ir.Name
+		lt   bool
+	}{
+		{
+			nod(0, nil, nil, ir.PAUTO),
+			nod(0, nil, nil, ir.PFUNC),
+			false,
+		},
+		{
+			nod(0, nil, nil, ir.PFUNC),
+			nod(0, nil, nil, ir.PAUTO),
+			true,
+		},
+		{
+			nod(0, nil, nil, ir.PFUNC),
+			nod(10, nil, nil, ir.PFUNC),
+			true,
+		},
+		{
+			nod(20, nil, nil, ir.PFUNC),
+			nod(10, nil, nil, ir.PFUNC),
+			false,
+		},
+		{
+			nod(10, nil, nil, ir.PFUNC),
+			nod(10, nil, nil, ir.PFUNC),
+			false,
+		},
+		{
+			nod(10, nil, nil, ir.PPARAM),
+			nod(20, nil, nil, ir.PPARAMOUT),
+			true,
+		},
+		{
+			nod(10, nil, nil, ir.PPARAMOUT),
+			nod(20, nil, nil, ir.PPARAM),
+			true,
+		},
+		{
+			markUsed(nod(0, nil, nil, ir.PAUTO)),
+			nod(0, nil, nil, ir.PAUTO),
+			true,
+		},
+		{
+			nod(0, nil, nil, ir.PAUTO),
+			markUsed(nod(0, nil, nil, ir.PAUTO)),
+			false,
+		},
+		{
+			nod(0, typeWithoutPointers(), nil, ir.PAUTO),
+			nod(0, typeWithPointers(), nil, ir.PAUTO),
+			false,
+		},
+		{
+			nod(0, typeWithPointers(), nil, ir.PAUTO),
+			nod(0, typeWithoutPointers(), nil, ir.PAUTO),
+			true,
+		},
+		{
+			markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
+			nod(0, &types.Type{}, nil, ir.PAUTO),
+			true,
+		},
+		{
+			nod(0, &types.Type{}, nil, ir.PAUTO),
+			markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
+			false,
+		},
+		{
+			nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
+			nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
+			false,
+		},
+		{
+			nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
+			nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
+			true,
+		},
+		{
+			nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+			nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+			true,
+		},
+		{
+			nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+			nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+			false,
+		},
+		{
+			nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+			nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+			false,
+		},
+	}
+	for _, d := range testdata {
+		got := cmpstackvarlt(d.a, d.b)
+		if got != d.lt {
+			t.Errorf("want %v < %v", d.a, d.b)
+		}
+		// If we expect a < b to be true, check that b < a is false.
+		if d.lt && cmpstackvarlt(d.b, d.a) {
+			t.Errorf("unexpected %v < %v", d.b, d.a)
+		}
+	}
+}
+
+func TestStackvarSort(t *testing.T) {
+	nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
+		n := typecheck.NewName(s)
+		n.SetType(t)
+		n.SetFrameOffset(xoffset)
+		n.Class = cl
+		return n
+	}
+	inp := []*ir.Name{
+		nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+		nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+		markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+		nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+	}
+	want := []*ir.Name{
+		nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
+		markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+		markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+		nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+		nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+		nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
+	}
+	sort.Sort(byStackVar(inp))
+	if !reflect.DeepEqual(want, inp) {
+		t.Error("sort failed")
+		for i := range inp {
+			g := inp[i]
+			w := want[i]
+			eq := reflect.DeepEqual(w, g)
+			if !eq {
+				t.Log(i, w, g)
+			}
+		}
+	}
+}
diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/ssagen/phi.go
similarity index 90%
rename from src/cmd/compile/internal/gc/phi.go
rename to src/cmd/compile/internal/ssagen/phi.go
index 5218cd0..01ad211 100644
--- a/src/cmd/compile/internal/gc/phi.go
+++ b/src/cmd/compile/internal/ssagen/phi.go
@@ -2,14 +2,16 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package ssagen
 
 import (
+	"container/heap"
+	"fmt"
+
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/types"
 	"cmd/internal/src"
-	"container/heap"
-	"fmt"
 )
 
 // This file contains the algorithm to place phi nodes in a function.
@@ -22,6 +24,14 @@
 
 const debugPhi = false
 
+// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
+type fwdRefAux struct {
+	_ [0]func() // ensure ir.Node isn't compared for equality
+	N ir.Node
+}
+
+func (fwdRefAux) CanBeAnSSAAux() {}
+
 // insertPhis finds all the places in the function where a phi is
 // necessary and inserts them.
 // Uses FwdRef ops to find all uses of variables, and s.defvars to find
@@ -40,11 +50,11 @@
 }
 
 type phiState struct {
-	s       *state                 // SSA state
-	f       *ssa.Func              // function to work on
-	defvars []map[*Node]*ssa.Value // defined variables at end of each block
+	s       *state                   // SSA state
+	f       *ssa.Func                // function to work on
+	defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
 
-	varnum map[*Node]int32 // variable numbering
+	varnum map[ir.Node]int32 // variable numbering
 
 	// properties of the dominator tree
 	idom  []*ssa.Block // dominator parents
@@ -59,7 +69,7 @@
 	hasDef *sparseSet   // has a write of the variable we're processing
 
 	// miscellaneous
-	placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
+	placeholder *ssa.Value // value to use as a "not set yet" placeholder.
 }
 
 func (s *phiState) insertPhis() {
@@ -70,15 +80,15 @@
 	// Find all the variables for which we need to match up reads & writes.
 	// This step prunes any basic-block-only variables from consideration.
 	// Generate a numbering for these variables.
-	s.varnum = map[*Node]int32{}
-	var vars []*Node
+	s.varnum = map[ir.Node]int32{}
+	var vars []ir.Node
 	var vartypes []*types.Type
 	for _, b := range s.f.Blocks {
 		for _, v := range b.Values {
 			if v.Op != ssa.OpFwdRef {
 				continue
 			}
-			var_ := v.Aux.(*Node)
+			var_ := v.Aux.(fwdRefAux).N
 
 			// Optimization: look back 1 block for the definition.
 			if len(b.Preds) == 1 {
@@ -179,11 +189,16 @@
 			if v.Op == ssa.OpPhi {
 				v.AuxInt = 0
 			}
+			// Any remaining FwdRefs are dead code.
+			if v.Op == ssa.OpFwdRef {
+				v.Op = ssa.OpUnknown
+				v.Aux = nil
+			}
 		}
 	}
 }
 
-func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
 	priq := &s.priq
 	q := s.q
 	queued := s.queued
@@ -240,7 +255,9 @@
 				hasPhi.add(c.ID)
 				v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
 				// Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
-				s.s.addNamedValue(var_, v)
+				if var_.Op() == ir.ONAME {
+					s.s.addNamedValue(var_.(*ir.Name), v)
+				}
 				for range c.Preds {
 					v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
 				}
@@ -318,7 +335,7 @@
 			if v.Op != ssa.OpFwdRef {
 				continue
 			}
-			n := s.varnum[v.Aux.(*Node)]
+			n := s.varnum[v.Aux.(fwdRefAux).N]
 			v.Op = ssa.OpCopy
 			v.Aux = nil
 			v.AddArg(values[n])
@@ -432,11 +449,11 @@
 
 // Variant to use for small functions.
 type simplePhiState struct {
-	s         *state                 // SSA state
-	f         *ssa.Func              // function to work on
-	fwdrefs   []*ssa.Value           // list of FwdRefs to be processed
-	defvars   []map[*Node]*ssa.Value // defined variables at end of each block
-	reachable []bool                 // which blocks are reachable
+	s         *state                   // SSA state
+	f         *ssa.Func                // function to work on
+	fwdrefs   []*ssa.Value             // list of FwdRefs to be processed
+	defvars   []map[ir.Node]*ssa.Value // defined variables at end of each block
+	reachable []bool                   // which blocks are reachable
 }
 
 func (s *simplePhiState) insertPhis() {
@@ -449,7 +466,7 @@
 				continue
 			}
 			s.fwdrefs = append(s.fwdrefs, v)
-			var_ := v.Aux.(*Node)
+			var_ := v.Aux.(fwdRefAux).N
 			if _, ok := s.defvars[b.ID][var_]; !ok {
 				s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
 			}
@@ -463,7 +480,7 @@
 		v := s.fwdrefs[len(s.fwdrefs)-1]
 		s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
 		b := v.Block
-		var_ := v.Aux.(*Node)
+		var_ := v.Aux.(fwdRefAux).N
 		if b == s.f.Entry {
 			// No variable should be live at entry.
 			s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@@ -511,7 +528,7 @@
 }
 
 // lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
 	for {
 		if v := s.defvars[b.ID][var_]; v != nil {
 			return v
@@ -530,9 +547,11 @@
 		}
 	}
 	// Generate a FwdRef for the variable and return that.
-	v := b.NewValue0A(line, ssa.OpFwdRef, t, var_)
+	v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
 	s.defvars[b.ID][var_] = v
-	s.s.addNamedValue(var_, v)
+	if var_.Op() == ir.ONAME {
+		s.s.addNamedValue(var_.(*ir.Name), v)
+	}
 	s.fwdrefs = append(s.fwdrefs, v)
 	return v
 }
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
new file mode 100644
index 0000000..6b1ddeb
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -0,0 +1,7369 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"html"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/liveness"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+	"cmd/internal/sys"
+)
+
+var ssaConfig *ssa.Config
+var ssaCaches []ssa.Cache
+
+var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
+var ssaDir string      // optional destination for ssa dump file
+var ssaDumpStdout bool // whether to dump to stdout
+var ssaDumpCFG string  // generate CFGs for these phases
+const ssaDumpFile = "ssa.html"
+
+// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
+var ssaDumpInlined []*ir.Func
+
+func DumpInline(fn *ir.Func) {
+	if ssaDump != "" && ssaDump == ir.FuncName(fn) {
+		ssaDumpInlined = append(ssaDumpInlined, fn)
+	}
+}
+
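+// InitEnv reads the GOSSAFUNC and GOSSADIR environment variables.
+// For example (illustrative), GOSSAFUNC=Foo dumps SSA for Foo to
+// ssa.html, GOSSAFUNC=Foo+ dumps to stdout instead, and
+// GOSSAFUNC=Foo:lower also generates CFGs for the "lower" phase.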
+func InitEnv() {
+	ssaDump = os.Getenv("GOSSAFUNC")
+	ssaDir = os.Getenv("GOSSADIR")
+	if ssaDump != "" {
+		if strings.HasSuffix(ssaDump, "+") {
+			ssaDump = ssaDump[:len(ssaDump)-1]
+			ssaDumpStdout = true
+		}
+		spl := strings.Split(ssaDump, ":")
+		if len(spl) > 1 {
+			ssaDump = spl[0]
+			ssaDumpCFG = spl[1]
+		}
+	}
+}
+
+func InitConfig() {
+	types_ := ssa.NewTypes()
+
+	if Arch.SoftFloat {
+		softfloatInit()
+	}
+
+	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
+	// Caching is disabled in the backend, so generating these here avoids allocations.
+	_ = types.NewPtr(types.Types[types.TINTER])                             // *interface{}
+	_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING]))              // **string
+	_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER]))             // *[]interface{}
+	_ = types.NewPtr(types.NewPtr(types.ByteType))                          // **byte
+	_ = types.NewPtr(types.NewSlice(types.ByteType))                        // *[]byte
+	_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING]))            // *[]string
+	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
+	_ = types.NewPtr(types.Types[types.TINT16])                             // *int16
+	_ = types.NewPtr(types.Types[types.TINT64])                             // *int64
+	_ = types.NewPtr(types.ErrorType)                                       // *error
+	types.NewPtrCacheEnabled = false
+	ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
+	ssaConfig.SoftFloat = Arch.SoftFloat
+	ssaConfig.Race = base.Flag.Race
+	ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
+
+	// Set up some runtime functions we'll need to call.
+	ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
+	ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
+	ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
+	ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
+	ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
+	ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
+	ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
+	ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
+	ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
+	ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
+	ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
+	ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+	ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
+	ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
+	ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
+	ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
+	ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
+	ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
+	ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
+	ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
+	ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
+	ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
+	ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
+	ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
+	ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
+	ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
+	ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
+	ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT")       // bool
+	ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41")         // bool
+	ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA")             // bool
+	ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4")         // bool
+	ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
+	ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
+	ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
+	ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
+	ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv")                 // asm func with special ABI
+	ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
+	ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
+
+	// asm funcs with special ABI
+	if base.Ctxt.Arch.Name == "amd64" {
+		GCWriteBarrierReg = map[int16]*obj.LSym{
+			x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
+			x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
+			x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
+			x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
+			x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
+			x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
+			x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
+			x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
+		}
+	}
+
+	if Arch.LinkArch.Family == sys.Wasm {
+		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
+		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
+		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
+		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
+		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
+		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
+		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
+		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
+		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
+		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
+		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
+		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
+		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
+		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
+		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
+		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
+	} else {
+		BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
+		BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
+		BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
+		BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
+		BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
+		BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
+		BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
+		BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
+		BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
+		BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
+		BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
+		BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
+		BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
+		BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
+		BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
+		BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
+	}
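+	// On 32-bit platforms a 64-bit index cannot be passed in a single
+	// register, so the panicExtend* variants also receive the high 32 bits
+	// of the index.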
+	if Arch.LinkArch.PtrSize == 4 {
+		ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
+		ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
+		ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
+		ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
+		ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
+		ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
+		ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
+		ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
+		ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
+		ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
+		ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
+		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
+		ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
+		ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
+		ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
+		ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
+	}
+
+	// Wasm (all asm funcs with special ABIs)
+	ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
+	ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
+	ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
+	ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
+	ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
+	ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
+}
+
+// getParam returns the Field of the ith param of node n (which is a
+// function/method/interface call), where the receiver of a method call is
+// considered the 0th parameter. This does not include the receiver of an
+// interface call.
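+// For example (illustrative), for an interface call i.M(a, b), index 0
+// refers to a and index 1 to b; the interface receiver is not counted.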
+func getParam(n *ir.CallExpr, i int) *types.Field {
+	t := n.X.Type()
+	if n.Op() == ir.OCALLMETH {
+		base.Fatalf("OCALLMETH missed by walkCall")
+	}
+	return t.Params().Field(i)
+}
+
+// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
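+// This is the standard little-endian base-128 encoding. For example,
+// v = 300 (0b1_0010_1100) is written as the two bytes 0xAC, 0x02:
+// 0xAC is (300&127)|128 (the low seven bits plus a continuation bit),
+// and 0x02 is 300>>7.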
+func dvarint(x *obj.LSym, off int, v int64) int {
+	if v < 0 || v > 1e9 {
+		panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
+	}
+	if v < 1<<7 {
+		return objw.Uint8(x, off, uint8(v))
+	}
+	off = objw.Uint8(x, off, uint8((v&127)|128))
+	if v < 1<<14 {
+		return objw.Uint8(x, off, uint8(v>>7))
+	}
+	off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
+	if v < 1<<21 {
+		return objw.Uint8(x, off, uint8(v>>14))
+	}
+	off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
+	if v < 1<<28 {
+		return objw.Uint8(x, off, uint8(v>>21))
+	}
+	off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
+	return objw.Uint8(x, off, uint8(v>>28))
+}
+
+// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
+// that is using open-coded defers.  This funcdata is used to determine the active
+// defers in a function and execute those defers during panic processing.
+//
+// The funcdata is all encoded in varints (since values will almost always be less than
+// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
+// for stack variables are specified as the number of bytes below varp (pointer to the
+// top of the local variables) for their starting address. The format is:
+//
+//  - Max total argument size among all the defers
+//  - Offset of the deferBits variable
+//  - Number of defers in the function
+//  - Information about each defer call, in reverse order of appearance in the function:
+//    - Total argument size of the call
+//    - Offset of the closure value to call
+//    - Number of arguments (including interface receiver or method receiver as first arg)
+//    - Information about each argument
+//      - Offset of the stored defer argument in this function's frame
+//      - Size of the argument
+//      - Offset of where argument should be placed in the args frame when making call
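+//
+// For example (illustrative), for a function whose only defer is f(x), with f
+// taking a single 8-byte argument, the funcdata encodes: maxargsize 8, the
+// deferBits offset, defer count 1, then for that defer: argsize 8, the
+// closure offset, arg count 1, and for x its frame offset, size 8, and
+// args-frame offset 0.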
+func (s *state) emitOpenDeferInfo() {
+	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
+	s.curfn.LSym.Func().OpenCodedDeferInfo = x
+	off := 0
+
+	// Compute maxargsize (max size of arguments for all defers)
+	// first, so we can output it first to the funcdata
+	var maxargsize int64
+	for i := len(s.openDefers) - 1; i >= 0; i-- {
+		r := s.openDefers[i]
+		argsize := r.n.X.Type().ArgWidth()
+		if argsize > maxargsize {
+			maxargsize = argsize
+		}
+	}
+	off = dvarint(x, off, maxargsize)
+	off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
+	off = dvarint(x, off, int64(len(s.openDefers)))
+
+	// Write in reverse order, for ease of running in that order at runtime.
+	for i := len(s.openDefers) - 1; i >= 0; i-- {
+		r := s.openDefers[i]
+		off = dvarint(x, off, r.n.X.Type().ArgWidth())
+		off = dvarint(x, off, -r.closureNode.FrameOffset())
+		numArgs := len(r.argNodes)
+		if r.rcvrNode != nil {
+			// If there's an interface receiver, treat/place it as the first
+			// arg. (If there is a method receiver, it's already included as
+			// first arg in r.argNodes.)
+			numArgs++
+		}
+		off = dvarint(x, off, int64(numArgs))
+		if r.rcvrNode != nil {
+			off = dvarint(x, off, -r.rcvrNode.FrameOffset())
+			off = dvarint(x, off, s.config.PtrSize)
+			off = dvarint(x, off, 0)
+		}
+		for j, arg := range r.argNodes {
+			f := getParam(r.n, j)
+			off = dvarint(x, off, -arg.FrameOffset())
+			off = dvarint(x, off, f.Type.Size())
+			off = dvarint(x, off, f.Offset)
+		}
+	}
+}
+
+// buildssa builds an SSA function for fn.
+// worker indicates which of the backend workers is doing the processing.
+func buildssa(fn *ir.Func, worker int) *ssa.Func {
+	name := ir.FuncName(fn)
+	printssa := false
+	if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
+		printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump
+	}
+	var astBuf *bytes.Buffer
+	if printssa {
+		astBuf = &bytes.Buffer{}
+		ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
+		ir.FDumpList(astBuf, "buildssa-body", fn.Body)
+		ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
+		if ssaDumpStdout {
+			fmt.Println("generating SSA for", name)
+			fmt.Print(astBuf.String())
+		}
+	}
+
+	var s state
+	s.pushLine(fn.Pos())
+	defer s.popLine()
+
+	s.hasdefer = fn.HasDefer()
+	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+		s.cgoUnsafeArgs = true
+	}
+
+	fe := ssafn{
+		curfn: fn,
+		log:   printssa && ssaDumpStdout,
+	}
+	s.curfn = fn
+
+	s.f = ssa.NewFunc(&fe)
+	s.config = ssaConfig
+	s.f.Type = fn.Type()
+	s.f.Config = ssaConfig
+	s.f.Cache = &ssaCaches[worker]
+	s.f.Cache.Reset()
+	s.f.Name = name
+	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
+	s.f.PrintOrHtmlSSA = printssa
+	if fn.Pragma&ir.Nosplit != 0 {
+		s.f.NoSplit = true
+	}
+	if fn.Pragma&ir.RegisterParams != 0 { // TODO: remove after register ABI is working
+		if strings.Contains(name, ".") {
+			base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
+		}
+		s.f.Warnl(fn.Pos(), "declared function %v has register params", fn)
+	}
+
+	s.panics = map[funcLine]*ssa.Block{}
+	s.softFloat = s.config.SoftFloat
+
+	// Allocate starting block
+	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
+	s.f.Entry.Pos = fn.Pos()
+
+	if printssa {
+		ssaDF := ssaDumpFile
+		if ssaDir != "" {
+			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
+			ssaD := filepath.Dir(ssaDF)
+			os.MkdirAll(ssaD, 0755)
+		}
+		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
+		// TODO: generate and print a mapping from nodes to values and blocks
+		dumpSourcesColumn(s.f.HTMLWriter, fn)
+		s.f.HTMLWriter.WriteAST("AST", astBuf)
+	}
+
+	// Allocate starting values
+	s.labels = map[string]*ssaLabel{}
+	s.fwdVars = map[ir.Node]*ssa.Value{}
+	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
+
+	s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
+	switch {
+	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
+		// On 386, open-coded defers are disabled only when building shared
+		// libraries, because rewriteToUseGot() inserts extra code preceding
+		// the deferreturn/ret sequence generated by gencallret(), and we
+		// don't track that code correctly.
+		s.hasOpenDefers = false
+	}
+	if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
+		// Skip doing open defers if there is any extra exit code (likely
+		// race detection), since we will not generate that code in the
+		// case of the extra deferreturn/ret segment.
+		s.hasOpenDefers = false
+	}
+	if s.hasOpenDefers {
+		// Similarly, skip if there are any heap-allocated result
+		// parameters that need to be copied back to their stack slots.
+		for _, f := range s.curfn.Type().Results().FieldSlice() {
+			if !f.Nname.(*ir.Name).OnStack() {
+				s.hasOpenDefers = false
+				break
+			}
+		}
+	}
+	if s.hasOpenDefers &&
+		s.curfn.NumReturns*s.curfn.NumDefers > 15 {
+		// Since we are generating defer calls at every exit for
+		// open-coded defers, skip doing open-coded defers if there are
+		// too many returns (especially if there are multiple defers).
+		// Open-coded defers are most important for improving performance
+		// for smaller functions (which don't have many returns).
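+		// (Illustrative: a function with 4 defers and 4 returns would
+		// duplicate its defer calls 16 times, and 4*4 > 15 disables
+		// open-coding.)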
+		s.hasOpenDefers = false
+	}
+
+	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
+
+	s.startBlock(s.f.Entry)
+	s.vars[memVar] = s.startmem
+	if s.hasOpenDefers {
+		// Create the deferBits variable and stack slot.  deferBits is a
+		// bitmask showing which of the open-coded defers in this function
+		// have been activated.
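+		// (Illustrative: with three open-coded defers, deferBits is 0b011
+		// once the first two defer statements have executed.)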
+		deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
+		deferBitsTemp.SetAddrtaken(true)
+		s.deferBitsTemp = deferBitsTemp
+		// For this value, AuxInt is initialized to zero by default
+		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
+		s.vars[deferBitsVar] = startDeferBits
+		s.deferBitsAddr = s.addr(deferBitsTemp)
+		s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
+		// Make sure that the deferBits stack slot is kept alive (for use
+		// by panics) and stores to deferBits are not eliminated, even if
+		// all checking code on deferBits in the function exit can be
+		// eliminated, because the defer statements were all
+		// unconditional.
+		s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+	}
+
+	// Generate addresses of local declarations
+	s.decladdrs = map[*ir.Name]*ssa.Value{}
+	var args []ssa.Param
+	var results []ssa.Param
+	for _, n := range fn.Dcl {
+		switch n.Class {
+		case ir.PPARAM:
+			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+			args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
+		case ir.PPARAMOUT:
+			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+			results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset()), Name: n})
+		case ir.PAUTO:
+			// processed at each use, to prevent Addr coming
+			// before the decl.
+		default:
+			s.Fatalf("local variable with class %v unimplemented", n.Class)
+		}
+	}
+	s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, args, results)
+
+	// Populate SSAable arguments.
+	for _, n := range fn.Dcl {
+		if n.Class == ir.PPARAM && s.canSSA(n) {
+			v := s.newValue0A(ssa.OpArg, n.Type(), n)
+			s.vars[n] = v
+			s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
+		}
+	}
+
+	// Populate closure variables.
+	if !fn.ClosureCalled() {
+		clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
+		offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
+		for _, n := range fn.ClosureVars {
+			typ := n.Type()
+			if !n.Byval() {
+				typ = types.NewPtr(typ)
+			}
+
+			offset = types.Rnd(offset, typ.Alignment())
+			ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
+			offset += typ.Size()
+
+			// If n is a small variable captured by value, promote
+			// it to PAUTO so it can be converted to SSA.
+			//
+			// Note: While we never capture a variable by value if
+			// the user took its address, we may have generated
+			// runtime calls that did (#43701). Since we don't
+			// convert Addrtaken variables to SSA anyway, no point
+			// in promoting them either.
+			if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
+				n.Class = ir.PAUTO
+				fn.Dcl = append(fn.Dcl, n)
+				s.assign(n, s.load(n.Type(), ptr), false, 0)
+				continue
+			}
+
+			if !n.Byval() {
+				ptr = s.load(typ, ptr)
+			}
+			s.setHeapaddr(fn.Pos(), n, ptr)
+		}
+	}
+
+	// Convert the AST-based IR to the SSA-based IR
+	s.stmtList(fn.Enter)
+	s.zeroResults()
+	s.paramsToHeap()
+	s.stmtList(fn.Body)
+
+	// fallthrough to exit
+	if s.curBlock != nil {
+		s.pushLine(fn.Endlineno)
+		s.exit()
+		s.popLine()
+	}
+
+	for _, b := range s.f.Blocks {
+		if b.Pos != src.NoXPos {
+			s.updateUnsetPredPos(b)
+		}
+	}
+
+	s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
+
+	s.insertPhis()
+
+	// Main call to ssa package to compile function
+	ssa.Compile(s.f)
+
+	if s.hasOpenDefers {
+		s.emitOpenDeferInfo()
+	}
+
+	return s.f
+}
+
+// zeroResults zeros the return values at the start of the function.
+// We need to do this very early in the function.  Defer might stop a
+// panic and show the return values as they exist at the time of
+// panic.  For precise stacks, the garbage collector assumes results
+// are always live, so we need to zero them before any allocations,
+// even allocations to move params/results to the heap.
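+// For example (illustrative):
+//
+//	func f() (x int) { defer func() { recover() }(); panic("boom") }
+//
+// returns normally after the recover, so x must read as 0 rather than stale
+// stack contents even though f never assigned it.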
+func (s *state) zeroResults() {
+	for _, f := range s.curfn.Type().Results().FieldSlice() {
+		n := f.Nname.(*ir.Name)
+		if !n.OnStack() {
+			// The local which points to the return value is the
+			// thing that needs zeroing. This is already handled
+			// by a Needzero annotation in plive.go:(*liveness).epilogue.
+			continue
+		}
+		// Zero the stack location containing f.
+		if typ := n.Type(); TypeOK(typ) {
+			s.assign(n, s.zeroVal(typ), false, 0)
+		} else {
+			s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+			s.zero(n.Type(), s.decladdrs[n])
+		}
+	}
+}
+
+// paramsToHeap produces code to allocate memory for heap-escaped parameters
+// and to copy non-result parameters' values from the stack.
+func (s *state) paramsToHeap() {
+	do := func(params *types.Type) {
+		for _, f := range params.FieldSlice() {
+			if f.Nname == nil {
+				continue // anonymous or blank parameter
+			}
+			n := f.Nname.(*ir.Name)
+			if ir.IsBlank(n) || n.OnStack() {
+				continue
+			}
+			s.newHeapaddr(n)
+			if n.Class == ir.PPARAM {
+				s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
+			}
+		}
+	}
+
+	typ := s.curfn.Type()
+	do(typ.Recvs())
+	do(typ.Params())
+	do(typ.Results())
+}
+
+// newHeapaddr allocates heap memory for n and sets its heap address.
+func (s *state) newHeapaddr(n *ir.Name) {
+	s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
+}
+
+// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
+// and then sets it as n's heap address.
+func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
+	if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
+		base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
+	}
+
+	// Declare variable to hold address.
+	addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
+	addr.SetType(types.NewPtr(n.Type()))
+	addr.Class = ir.PAUTO
+	addr.SetUsed(true)
+	addr.Curfn = s.curfn
+	s.curfn.Dcl = append(s.curfn.Dcl, addr)
+	types.CalcSize(addr.Type())
+
+	if n.Class == ir.PPARAMOUT {
+		addr.SetIsOutputParamHeapAddr(true)
+	}
+
+	n.Heapaddr = addr
+	s.assign(addr, ptr, false, 0)
+}
+
+// newObject returns an SSA value denoting new(typ).
+func (s *state) newObject(typ *types.Type) *ssa.Value {
+	if typ.Size() == 0 {
+		return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
+	}
+	return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
+}
+
+// reflectType returns an SSA value representing a pointer to typ's
+// reflection type descriptor.
+func (s *state) reflectType(typ *types.Type) *ssa.Value {
+	lsym := reflectdata.TypeLinksym(typ)
+	return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
+}
+
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
+	// Read sources of target function fn.
+	fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+	targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
+	if err != nil {
+		writer.Logf("cannot read sources for function %v: %v", fn, err)
+	}
+
+	// Read sources of inlined functions.
+	var inlFns []*ssa.FuncLines
+	for _, fi := range ssaDumpInlined {
+		elno := fi.Endlineno
+		fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+		fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
+		if err != nil {
+			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
+			continue
+		}
+		inlFns = append(inlFns, fnLines)
+	}
+
+	sort.Sort(ssa.ByTopo(inlFns))
+	if targetFn != nil {
+		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
+	}
+
+	writer.WriteSources("sources", inlFns)
+}
+
+func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
+	f, err := os.Open(os.ExpandEnv(file))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	var lines []string
+	ln := uint(1)
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() && ln <= end {
+		if ln >= start {
+			lines = append(lines, scanner.Text())
+		}
+		ln++
+	}
+	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
+}
+
+// updateUnsetPredPos propagates the earliest-value position information for b
+// towards all of b's predecessors that need a position, and recurses on each
+// such predecessor if its position is updated. b should have a non-empty position.
+func (s *state) updateUnsetPredPos(b *ssa.Block) {
+	if b.Pos == src.NoXPos {
+		s.Fatalf("Block %s should have a position", b)
+	}
+	bestPos := src.NoXPos
+	for _, e := range b.Preds {
+		p := e.Block()
+		if !p.LackingPos() {
+			continue
+		}
+		if bestPos == src.NoXPos {
+			bestPos = b.Pos
+			for _, v := range b.Values {
+				if v.LackingPos() {
+					continue
+				}
+				if v.Pos != src.NoXPos {
+					// Assume values are still in roughly textual order;
+					// TODO: could also seek minimum position?
+					bestPos = v.Pos
+					break
+				}
+			}
+		}
+		p.Pos = bestPos
+		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
+	}
+}
+
+// Information about each open-coded defer.
+type openDeferInfo struct {
+	// The node representing the call of the defer
+	n *ir.CallExpr
+	// If the defer call is a closure call, the address of the argtmp where the
+	// closure is stored.
+	closure *ssa.Value
+	// The node representing the argtmp where the closure is stored - used for
+	// function, method, or interface call, to store a closure that panic
+	// processing can use for this defer.
+	closureNode *ir.Name
+	// If the defer call is an interface call, the address of the argtmp where
+	// the receiver is stored.
+	rcvr *ssa.Value
+	// The node representing the argtmp where the receiver is stored
+	rcvrNode *ir.Name
+	// The addresses of the argtmps where the evaluated arguments of the defer
+	// function call are stored.
+	argVals []*ssa.Value
+	// The nodes representing the argtmps where the args of the defer are stored
+	argNodes []*ir.Name
+}
+
+type state struct {
+	// configuration (arch) information
+	config *ssa.Config
+
+	// function we're building
+	f *ssa.Func
+
+	// Node for function
+	curfn *ir.Func
+
+	// labels in f
+	labels map[string]*ssaLabel
+
+	// unlabeled break and continue statement tracking
+	breakTo    *ssa.Block // current target for plain break statement
+	continueTo *ssa.Block // current target for plain continue statement
+
+	// current location where we're interpreting the AST
+	curBlock *ssa.Block
+
+	// variable assignments in the current block (map from variable symbol to ssa value)
+	// The ir.Node key is the unique identifier (an ONAME node) for the variable.
+	// TODO: keep a single varnum map, then make all of these maps slices instead?
+	vars map[ir.Node]*ssa.Value
+
+	// fwdVars are variables that are used before they are defined in the current block.
+	// This map exists just to coalesce multiple references into a single FwdRef op.
+	// The ir.Node key is the unique identifier (an ONAME node) for the variable.
+	fwdVars map[ir.Node]*ssa.Value
+
+	// all defined variables at the end of each block. Indexed by block ID.
+	defvars []map[ir.Node]*ssa.Value
+
+	// addresses of PPARAM and PPARAMOUT variables on the stack.
+	decladdrs map[*ir.Name]*ssa.Value
+
+	// starting values. Memory, stack pointer, and globals pointer
+	startmem *ssa.Value
+	sp       *ssa.Value
+	sb       *ssa.Value
+	// value representing address of where deferBits autotmp is stored
+	deferBitsAddr *ssa.Value
+	deferBitsTemp *ir.Name
+
+	// line number stack. The current line number is top of stack
+	line []src.XPos
+	// the last line number processed; it may have been popped
+	lastPos src.XPos
+
+	// list of panic calls by function name and line number.
+	// Used to deduplicate panic calls.
+	panics map[funcLine]*ssa.Block
+
+	cgoUnsafeArgs bool
+	hasdefer      bool // whether the function contains a defer statement
+	softFloat     bool
+	hasOpenDefers bool // whether we are doing open-coded defers
+
+	// If doing open-coded defers, list of info about the defer calls in
+	// scanning order. Hence, at exit we should run these defers in reverse
+	// order of this list.
+	openDefers []*openDeferInfo
+	// For open-coded defers, this is the beginning and end blocks of the last
+	// defer exit code that we have generated so far. We use these to share
+	// code between exits if the shareDeferExits option (disabled by default)
+	// is on.
+	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
+	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
+	lastDeferCount      int        // Number of defers encountered at that point
+
+	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
+}
+
+type funcLine struct {
+	f    *obj.LSym
+	base *src.PosBase
+	line uint
+}
+
+type ssaLabel struct {
+	target         *ssa.Block // block identified by this label
+	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
+	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
+}
+
+// label returns the label associated with sym, creating it if necessary.
+func (s *state) label(sym *types.Sym) *ssaLabel {
+	lab := s.labels[sym.Name]
+	if lab == nil {
+		lab = new(ssaLabel)
+		s.labels[sym.Name] = lab
+	}
+	return lab
+}
+
+func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
+func (s *state) Log() bool                            { return s.f.Log() }
+func (s *state) Fatalf(msg string, args ...interface{}) {
+	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
+}
+func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
+func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
+
+func ssaMarker(name string) *ir.Name {
+	return typecheck.NewName(&types.Sym{Name: name})
+}
+
+var (
+	// marker node for the memory variable
+	memVar = ssaMarker("mem")
+
+	// marker nodes for temporary variables
+	ptrVar       = ssaMarker("ptr")
+	lenVar       = ssaMarker("len")
+	newlenVar    = ssaMarker("newlen")
+	capVar       = ssaMarker("cap")
+	typVar       = ssaMarker("typ")
+	okVar        = ssaMarker("ok")
+	deferBitsVar = ssaMarker("deferBits")
+)
+
+// startBlock sets the current block we're generating code in to b.
+func (s *state) startBlock(b *ssa.Block) {
+	if s.curBlock != nil {
+		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
+	}
+	s.curBlock = b
+	s.vars = map[ir.Node]*ssa.Value{}
+	for n := range s.fwdVars {
+		delete(s.fwdVars, n)
+	}
+}
+
+// endBlock marks the end of generating code for the current block.
+// Returns the (former) current block. Returns nil if there is no current
+// block, i.e. if no code flows to the current execution point.
+func (s *state) endBlock() *ssa.Block {
+	b := s.curBlock
+	if b == nil {
+		return nil
+	}
+	for len(s.defvars) <= int(b.ID) {
+		s.defvars = append(s.defvars, nil)
+	}
+	s.defvars[b.ID] = s.vars
+	s.curBlock = nil
+	s.vars = nil
+	if b.LackingPos() {
+		// Empty plain blocks get the line of their successor (handled after all blocks created),
+		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
+		// and for blocks ending in GOTO/BREAK/CONTINUE.
+		b.Pos = src.NoXPos
+	} else {
+		b.Pos = s.lastPos
+	}
+	return b
+}
+
+// pushLine pushes a line number on the line number stack.
+func (s *state) pushLine(line src.XPos) {
+	if !line.IsKnown() {
+		// The frontend may emit a node with a missing line number;
+		// in that case, use the parent's line number.
+		line = s.peekPos()
+		if base.Flag.K != 0 {
+			base.Warn("buildssa: unknown position (line 0)")
+		}
+	} else {
+		s.lastPos = line
+	}
+
+	s.line = append(s.line, line)
+}
+
+// popLine pops the top of the line number stack.
+func (s *state) popLine() {
+	s.line = s.line[:len(s.line)-1]
+}
+
+// peekPos peeks the top of the line number stack.
+func (s *state) peekPos() src.XPos {
+	return s.line[len(s.line)-1]
+}
+
+// newValue0 adds a new value with no arguments to the current block.
+func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
+	return s.curBlock.NewValue0(s.peekPos(), op, t)
+}
+
+// newValue0A adds a new value with no arguments and an aux value to the current block.
+func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
+	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
+}
+
+// newValue0I adds a new value with no arguments and an auxint value to the current block.
+func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
+	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
+}
+
+// newValue1 adds a new value with one argument to the current block.
+func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
+}
+
+// newValue1A adds a new value with one argument and an aux value to the current block.
+func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue1Apos adds a new value with one argument and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
+	if isStmt {
+		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+	}
+	return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
+}
+
+// newValue1I adds a new value with one argument and an auxint value to the current block.
+func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue2 adds a new value with two arguments to the current block.
+func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
+}
+
+// newValue2A adds a new value with two arguments and an aux value to the current block.
+func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue2Apos adds a new value with two arguments and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
+	if isStmt {
+		return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+	}
+	return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
+}
+
+// newValue2I adds a new value with two arguments and an auxint value to the current block.
+func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue3 adds a new value with three arguments to the current block.
+func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
+}
+
+// newValue3I adds a new value with three arguments and an auxint value to the current block.
+func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3A adds a new value with three arguments and an aux value to the current block.
+func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3Apos adds a new value with three arguments and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
+	if isStmt {
+		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+	}
+	return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue4 adds a new value with four arguments to the current block.
+func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
+}
+
+// newValue4I adds a new value with four arguments and an auxint value to the current block.
+func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+	return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
+}
+
+// entryNewValue0 adds a new value with no arguments to the entry block.
+func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
+	return s.f.Entry.NewValue0(src.NoXPos, op, t)
+}
+
+// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
+func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
+	return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
+}
+
+// entryNewValue1 adds a new value with one argument to the entry block.
+func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
+}
+
+// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
+func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
+}
+
+// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
+}
+
+// entryNewValue2 adds a new value with two arguments to the entry block.
+func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
+}
+
+// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
+func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
+	return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
+}
+
+// const* routines add a new const value to the entry block.
+func (s *state) constSlice(t *types.Type) *ssa.Value {
+	return s.f.ConstSlice(t)
+}
+func (s *state) constInterface(t *types.Type) *ssa.Value {
+	return s.f.ConstInterface(t)
+}
+func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
+func (s *state) constEmptyString(t *types.Type) *ssa.Value {
+	return s.f.ConstEmptyString(t)
+}
+func (s *state) constBool(c bool) *ssa.Value {
+	return s.f.ConstBool(types.Types[types.TBOOL], c)
+}
+func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
+	return s.f.ConstInt8(t, c)
+}
+func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
+	return s.f.ConstInt16(t, c)
+}
+func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
+	return s.f.ConstInt32(t, c)
+}
+func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
+	return s.f.ConstInt64(t, c)
+}
+func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
+	return s.f.ConstFloat32(t, c)
+}
+func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
+	return s.f.ConstFloat64(t, c)
+}
+func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
+	if s.config.PtrSize == 8 {
+		return s.constInt64(t, c)
+	}
+	if int64(int32(c)) != c {
+		s.Fatalf("integer constant too big %d", c)
+	}
+	return s.constInt32(t, int32(c))
+}
+func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
+	return s.f.ConstOffPtrSP(t, c, s.sp)
+}
+
+// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
+// soft-float runtime function instead (when emitting soft-float code).
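+// For example, with softFloat set, an OpAdd32F is turned into a call to the
+// runtime's soft-float add helper rather than a hardware floating-point add
+// (see sfcall for the exact mapping).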
+func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+	if s.softFloat {
+		if c, ok := s.sfcall(op, arg); ok {
+			return c
+		}
+	}
+	return s.newValue1(op, t, arg)
+}
+func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+	if s.softFloat {
+		if c, ok := s.sfcall(op, arg0, arg1); ok {
+			return c
+		}
+	}
+	return s.newValue2(op, t, arg0, arg1)
+}
+
+type instrumentKind uint8
+
+const (
+	instrumentRead = iota
+	instrumentWrite
+	instrumentMove
+)
+
+func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+	s.instrument2(t, addr, nil, kind)
+}
+
+// instrumentFields instruments a read/write operation on addr.
+// If it is instrumenting for MSAN and t is a struct type, it instruments the
+// operation for each field, instead of for the whole struct.
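+// For example (illustrative), with -msan a write to a struct{a int64; b bool}
+// is reported as two writes, 8 bytes at offset 0 and 1 byte at offset 8,
+// rather than one write covering the whole struct.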
+func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+	if !base.Flag.MSan || !t.IsStruct() {
+		s.instrument(t, addr, kind)
+		return
+	}
+	for _, f := range t.Fields().Slice() {
+		if f.Sym.IsBlank() {
+			continue
+		}
+		offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
+		s.instrumentFields(f.Type, offptr, kind)
+	}
+}
+
+func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
+	if base.Flag.MSan {
+		s.instrument2(t, dst, src, instrumentMove)
+	} else {
+		s.instrument(t, src, instrumentRead)
+		s.instrument(t, dst, instrumentWrite)
+	}
+}
+
+func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
+	if !s.curfn.InstrumentBody() {
+		return
+	}
+
+	w := t.Size()
+	if w == 0 {
+		return // can't race on zero-sized things
+	}
+
+	if ssa.IsSanitizerSafeAddr(addr) {
+		return
+	}
+
+	var fn *obj.LSym
+	needWidth := false
+
+	if addr2 != nil && kind != instrumentMove {
+		panic("instrument2: non-nil addr2 for non-move instrumentation")
+	}
+
+	if base.Flag.MSan {
+		switch kind {
+		case instrumentRead:
+			fn = ir.Syms.Msanread
+		case instrumentWrite:
+			fn = ir.Syms.Msanwrite
+		case instrumentMove:
+			fn = ir.Syms.Msanmove
+		default:
+			panic("unreachable")
+		}
+		needWidth = true
+	} else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
+		// for composite objects we have to write every address
+		// because a write might happen to any subobject.
+		// composites with only one element don't have subobjects, though.
+		switch kind {
+		case instrumentRead:
+			fn = ir.Syms.Racereadrange
+		case instrumentWrite:
+			fn = ir.Syms.Racewriterange
+		default:
+			panic("unreachable")
+		}
+		needWidth = true
+	} else if base.Flag.Race {
+		// for non-composite objects we can write just the start
+		// address, as any write must write the first byte.
+		switch kind {
+		case instrumentRead:
+			fn = ir.Syms.Raceread
+		case instrumentWrite:
+			fn = ir.Syms.Racewrite
+		default:
+			panic("unreachable")
+		}
+	} else {
+		panic("unreachable")
+	}
+
+	args := []*ssa.Value{addr}
+	if addr2 != nil {
+		args = append(args, addr2)
+	}
+	if needWidth {
+		args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
+	}
+	s.rtcall(fn, true, nil, args...)
+}
+
+func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
+	s.instrumentFields(t, src, instrumentRead)
+	return s.rawLoad(t, src)
+}
+
+func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
+	return s.newValue2(ssa.OpLoad, t, src, s.mem())
+}
+
+func (s *state) store(t *types.Type, dst, val *ssa.Value) {
+	s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+}
+
+func (s *state) zero(t *types.Type, dst *ssa.Value) {
+	s.instrument(t, dst, instrumentWrite)
+	store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
+	store.Aux = t
+	s.vars[memVar] = store
+}
+
+func (s *state) move(t *types.Type, dst, src *ssa.Value) {
+	s.instrumentMove(t, dst, src)
+	store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
+	store.Aux = t
+	s.vars[memVar] = store
+}
+
+// stmtList converts the statement list n to SSA and adds it to s.
+func (s *state) stmtList(l ir.Nodes) {
+	for _, n := range l {
+		s.stmt(n)
+	}
+}
+
+// stmt converts the statement n to SSA and adds it to s.
+func (s *state) stmt(n ir.Node) {
+	if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
+		// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
+		s.pushLine(n.Pos())
+		defer s.popLine()
+	}
+
+	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
+	// then this code is dead. Stop here.
+	if s.curBlock == nil && n.Op() != ir.OLABEL {
+		return
+	}
+
+	s.stmtList(n.Init())
+	switch n.Op() {
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		s.stmtList(n.List)
+
+	// No-ops
+	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
+
+	// Expression statements
+	case ir.OCALLFUNC:
+		n := n.(*ir.CallExpr)
+		if ir.IsIntrinsicCall(n) {
+			s.intrinsicCall(n)
+			return
+		}
+		fallthrough
+
+	case ir.OCALLINTER:
+		n := n.(*ir.CallExpr)
+		s.callResult(n, callNormal)
+		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
+			if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+				n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+				m := s.mem()
+				b := s.endBlock()
+				b.Kind = ssa.BlockExit
+				b.SetControl(m)
+				// TODO: never rewrite OPANIC to OCALLFUNC in the
+				// first place. Need to wait until all backends
+				// go through SSA.
+			}
+		}
+	case ir.ODEFER:
+		n := n.(*ir.GoDeferStmt)
+		if base.Debug.Defer > 0 {
+			var defertype string
+			if s.hasOpenDefers {
+				defertype = "open-coded"
+			} else if n.Esc() == ir.EscNever {
+				defertype = "stack-allocated"
+			} else {
+				defertype = "heap-allocated"
+			}
+			base.WarnfAt(n.Pos(), "%s defer", defertype)
+		}
+		if s.hasOpenDefers {
+			s.openDeferRecord(n.Call.(*ir.CallExpr))
+		} else {
+			d := callDefer
+			if n.Esc() == ir.EscNever {
+				d = callDeferStack
+			}
+			s.callResult(n.Call.(*ir.CallExpr), d)
+		}
+	case ir.OGO:
+		n := n.(*ir.GoDeferStmt)
+		s.callResult(n.Call.(*ir.CallExpr), callGo)
+
+	case ir.OAS2DOTTYPE:
+		n := n.(*ir.AssignListStmt)
+		res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
+		deref := false
+		if !TypeOK(n.Rhs[0].Type()) {
+			if res.Op != ssa.OpLoad {
+				s.Fatalf("dottype of non-load")
+			}
+			mem := s.mem()
+			if mem.Op == ssa.OpVarKill {
+				mem = mem.Args[0]
+			}
+			if res.Args[1] != mem {
+				s.Fatalf("memory no longer live from 2-result dottype load")
+			}
+			deref = true
+			res = res.Args[0]
+		}
+		s.assign(n.Lhs[0], res, deref, 0)
+		s.assign(n.Lhs[1], resok, false, 0)
+		return
+
+	case ir.OAS2FUNC:
+		// We come here only when it is an intrinsic call returning two values.
+		n := n.(*ir.AssignListStmt)
+		call := n.Rhs[0].(*ir.CallExpr)
+		if !ir.IsIntrinsicCall(call) {
+			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
+		}
+		v := s.intrinsicCall(call)
+		v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
+		v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
+		s.assign(n.Lhs[0], v1, false, 0)
+		s.assign(n.Lhs[1], v2, false, 0)
+		return
+
+	case ir.ODCL:
+		n := n.(*ir.Decl)
+		if v := n.X; v.Esc() == ir.EscHeap {
+			s.newHeapaddr(v)
+		}
+
+	case ir.OLABEL:
+		n := n.(*ir.LabelStmt)
+		sym := n.Label
+		lab := s.label(sym)
+
+		// The label might already have a target block via a goto.
+		if lab.target == nil {
+			lab.target = s.f.NewBlock(ssa.BlockPlain)
+		}
+
+		// Go to that label.
+		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
+		if s.curBlock != nil {
+			b := s.endBlock()
+			b.AddEdgeTo(lab.target)
+		}
+		s.startBlock(lab.target)
+
+	case ir.OGOTO:
+		n := n.(*ir.BranchStmt)
+		sym := n.Label
+
+		lab := s.label(sym)
+		if lab.target == nil {
+			lab.target = s.f.NewBlock(ssa.BlockPlain)
+		}
+
+		b := s.endBlock()
+		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+		b.AddEdgeTo(lab.target)
+
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		if n.X == n.Y && n.X.Op() == ir.ONAME {
+			// An x=x assignment. No point in doing anything
+			// here. In addition, skipping this assignment
+			// prevents generating:
+			//   VARDEF x
+			//   COPY x -> x
+			// which is bad because x is incorrectly considered
+			// dead before the vardef. See issue #14904.
+			return
+		}
+
+		// Evaluate RHS.
+		rhs := n.Y
+		if rhs != nil {
+			switch rhs.Op() {
+			case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+				// All literals with nonzero fields have already been
+				// rewritten during walk. Any that remain are just T{}
+				// or equivalents. Use the zero value.
+				if !ir.IsZero(rhs) {
+					s.Fatalf("literal with nonzero value in SSA: %v", rhs)
+				}
+				rhs = nil
+			case ir.OAPPEND:
+				rhs := rhs.(*ir.CallExpr)
+				// Check whether we're writing the result of an append back to the same slice.
+				// If so, we handle it specially to avoid write barriers on the fast
+				// (non-growth) path.
+				if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
+					break
+				}
+				// If the slice can be SSA'd, it'll be on the stack,
+				// so there will be no write barriers,
+				// so there's no need to attempt to prevent them.
+				if s.canSSA(n.X) {
+					if base.Debug.Append > 0 { // replicating old diagnostic message
+						base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
+					}
+					break
+				}
+				if base.Debug.Append > 0 {
+					base.WarnfAt(n.Pos(), "append: len-only update")
+				}
+				s.append(rhs, true)
+				return
+			}
+		}
+
+		if ir.IsBlank(n.X) {
+			// _ = rhs
+			// Just evaluate rhs for side-effects.
+			if rhs != nil {
+				s.expr(rhs)
+			}
+			return
+		}
+
+		var t *types.Type
+		if n.Y != nil {
+			t = n.Y.Type()
+		} else {
+			t = n.X.Type()
+		}
+
+		var r *ssa.Value
+		deref := !TypeOK(t)
+		if deref {
+			if rhs == nil {
+				r = nil // Signal assign to use OpZero.
+			} else {
+				r = s.addr(rhs)
+			}
+		} else {
+			if rhs == nil {
+				r = s.zeroVal(t)
+			} else {
+				r = s.expr(rhs)
+			}
+		}
+
+		var skip skipMask
+		if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
+			// We're assigning a slicing operation back to its source.
+			// Don't write back fields we aren't changing. See issue #14855.
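+			// (Illustrative: for s = s[:n], the ptr and cap fields are
+			// unchanged, so only len is written back.)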
+			rhs := rhs.(*ir.SliceExpr)
+			i, j, k := rhs.Low, rhs.High, rhs.Max
+			if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
+				// [0:...] is the same as [:...]
+				i = nil
+			}
+			// TODO: detect defaults for len/cap also.
+			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
+			//    tmp = len(*p)
+			//    (*p)[:tmp]
+			//if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
+			//      j = nil
+			//}
+			//if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
+			//      k = nil
+			//}
+			if i == nil {
+				skip |= skipPtr
+				if j == nil {
+					skip |= skipLen
+				}
+				if k == nil {
+					skip |= skipCap
+				}
+			}
+		}
+
+		s.assign(n.X, r, deref, skip)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		if ir.IsConst(n.Cond, constant.Bool) {
+			s.stmtList(n.Cond.Init())
+			if ir.BoolVal(n.Cond) {
+				s.stmtList(n.Body)
+			} else {
+				s.stmtList(n.Else)
+			}
+			break
+		}
+
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+		var likely int8
+		if n.Likely {
+			likely = 1
+		}
+		var bThen *ssa.Block
+		if len(n.Body) != 0 {
+			bThen = s.f.NewBlock(ssa.BlockPlain)
+		} else {
+			bThen = bEnd
+		}
+		var bElse *ssa.Block
+		if len(n.Else) != 0 {
+			bElse = s.f.NewBlock(ssa.BlockPlain)
+		} else {
+			bElse = bEnd
+		}
+		s.condBranch(n.Cond, bThen, bElse, likely)
+
+		if len(n.Body) != 0 {
+			s.startBlock(bThen)
+			s.stmtList(n.Body)
+			if b := s.endBlock(); b != nil {
+				b.AddEdgeTo(bEnd)
+			}
+		}
+		if len(n.Else) != 0 {
+			s.startBlock(bElse)
+			s.stmtList(n.Else)
+			if b := s.endBlock(); b != nil {
+				b.AddEdgeTo(bEnd)
+			}
+		}
+		s.startBlock(bEnd)
+
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		s.stmtList(n.Results)
+		b := s.exit()
+		b.Pos = s.lastPos.WithIsStmt()
+
+	case ir.OTAILCALL:
+		n := n.(*ir.TailCallStmt)
+		b := s.exit()
+		b.Kind = ssa.BlockRetJmp // override BlockRet
+		b.Aux = callTargetLSym(n.Target, s.curfn.LSym)
+
+	case ir.OCONTINUE, ir.OBREAK:
+		n := n.(*ir.BranchStmt)
+		var to *ssa.Block
+		if n.Label == nil {
+			// plain break/continue
+			switch n.Op() {
+			case ir.OCONTINUE:
+				to = s.continueTo
+			case ir.OBREAK:
+				to = s.breakTo
+			}
+		} else {
+			// labeled break/continue; look up the target
+			sym := n.Label
+			lab := s.label(sym)
+			switch n.Op() {
+			case ir.OCONTINUE:
+				to = lab.continueTarget
+			case ir.OBREAK:
+				to = lab.breakTarget
+			}
+		}
+
+		b := s.endBlock()
+		b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+		b.AddEdgeTo(to)
+
+	case ir.OFOR, ir.OFORUNTIL:
+		// OFOR: for Init; Cond; Post { Body }
+		// cond (Cond); body (Body); incr (Post)
+		//
+		// OFORUNTIL: for Init; Cond; Post; Late { Body }
+		// => body: { Body }; incr: Post; if Cond { lateincr: Late; goto body }; end:
+		n := n.(*ir.ForStmt)
+		bCond := s.f.NewBlock(ssa.BlockPlain)
+		bBody := s.f.NewBlock(ssa.BlockPlain)
+		bIncr := s.f.NewBlock(ssa.BlockPlain)
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+		// ensure empty for loops have correct position; issue #30167
+		bBody.Pos = n.Pos()
+
+		// first, jump to condition test (OFOR) or body (OFORUNTIL)
+		b := s.endBlock()
+		if n.Op() == ir.OFOR {
+			b.AddEdgeTo(bCond)
+			// generate code to test condition
+			s.startBlock(bCond)
+			if n.Cond != nil {
+				s.condBranch(n.Cond, bBody, bEnd, 1)
+			} else {
+				b := s.endBlock()
+				b.Kind = ssa.BlockPlain
+				b.AddEdgeTo(bBody)
+			}
+
+		} else {
+			b.AddEdgeTo(bBody)
+		}
+
+		// set up for continue/break in body
+		prevContinue := s.continueTo
+		prevBreak := s.breakTo
+		s.continueTo = bIncr
+		s.breakTo = bEnd
+		var lab *ssaLabel
+		if sym := n.Label; sym != nil {
+			// labeled for loop
+			lab = s.label(sym)
+			lab.continueTarget = bIncr
+			lab.breakTarget = bEnd
+		}
+
+		// generate body
+		s.startBlock(bBody)
+		s.stmtList(n.Body)
+
+		// tear down continue/break
+		s.continueTo = prevContinue
+		s.breakTo = prevBreak
+		if lab != nil {
+			lab.continueTarget = nil
+			lab.breakTarget = nil
+		}
+
+		// done with body, goto incr
+		if b := s.endBlock(); b != nil {
+			b.AddEdgeTo(bIncr)
+		}
+
+		// generate incr (and, for OFORUNTIL, condition)
+		s.startBlock(bIncr)
+		if n.Post != nil {
+			s.stmt(n.Post)
+		}
+		if n.Op() == ir.OFOR {
+			if b := s.endBlock(); b != nil {
+				b.AddEdgeTo(bCond)
+				// It can happen that bIncr ends in a block containing only VARKILL,
+				// and that muddles the debugging experience.
+				if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos {
+					b.Pos = bCond.Pos
+				}
+			}
+		} else {
+			// bCond is unused in OFORUNTIL, so repurpose it.
+			bLateIncr := bCond
+			// test condition
+			s.condBranch(n.Cond, bLateIncr, bEnd, 1)
+			// generate late increment
+			s.startBlock(bLateIncr)
+			s.stmtList(n.Late)
+			s.endBlock().AddEdgeTo(bBody)
+		}
+
+		s.startBlock(bEnd)
+
+	case ir.OSWITCH, ir.OSELECT:
+		// These have been mostly rewritten by the front end into their Compiled fields.
+		// Our main task is to correctly hook up any break statements.
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+		prevBreak := s.breakTo
+		s.breakTo = bEnd
+		var sym *types.Sym
+		var body ir.Nodes
+		if n.Op() == ir.OSWITCH {
+			n := n.(*ir.SwitchStmt)
+			sym = n.Label
+			body = n.Compiled
+		} else {
+			n := n.(*ir.SelectStmt)
+			sym = n.Label
+			body = n.Compiled
+		}
+
+		var lab *ssaLabel
+		if sym != nil {
+			// labeled
+			lab = s.label(sym)
+			lab.breakTarget = bEnd
+		}
+
+		// generate body code
+		s.stmtList(body)
+
+		s.breakTo = prevBreak
+		if lab != nil {
+			lab.breakTarget = nil
+		}
+
+		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
+		// If we still have a current block here, then mark it unreachable.
+		if s.curBlock != nil {
+			m := s.mem()
+			b := s.endBlock()
+			b.Kind = ssa.BlockExit
+			b.SetControl(m)
+		}
+		s.startBlock(bEnd)
+
+	case ir.OVARDEF:
+		n := n.(*ir.UnaryExpr)
+		if !s.canSSA(n.X) {
+			s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
+		}
+	case ir.OVARKILL:
+		// Insert a varkill op to record that a variable is no longer live.
+		// We only care about liveness info at call sites, so putting the
+		// varkill in the store chain is enough to keep it correctly ordered
+		// with respect to call ops.
+		n := n.(*ir.UnaryExpr)
+		if !s.canSSA(n.X) {
+			s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
+		}
+
+	case ir.OVARLIVE:
+		// Insert a varlive op to record that a variable is still live.
+		n := n.(*ir.UnaryExpr)
+		v := n.X.(*ir.Name)
+		if !v.Addrtaken() {
+			s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
+		}
+		switch v.Class {
+		case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+		default:
+			s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
+		}
+		s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
+
+	case ir.OCHECKNIL:
+		n := n.(*ir.UnaryExpr)
+		p := s.expr(n.X)
+		s.nilCheck(p)
+
+	case ir.OINLMARK:
+		n := n.(*ir.InlineMarkStmt)
+		s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
+
+	default:
+		s.Fatalf("unhandled stmt %v", n.Op())
+	}
+}
+
+// If true, share as many open-coded defer exits as possible (with the downside of
+// worse line-number information).
+const shareDeferExits = false
+
+// exit processes any code that needs to be generated just before returning.
+// It returns a BlockRet block that ends the control flow. Its control value
+// will be set to the final memory state.
+func (s *state) exit() *ssa.Block {
+	lateResultLowering := s.f.DebugTest
+	if s.hasdefer {
+		if s.hasOpenDefers {
+			if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
+				if s.curBlock.Kind != ssa.BlockPlain {
+					panic("Block for an exit should be BlockPlain")
+				}
+				s.curBlock.AddEdgeTo(s.lastDeferExit)
+				s.endBlock()
+				return s.lastDeferFinalBlock
+			}
+			s.openDeferExit()
+		} else {
+			s.rtcall(ir.Syms.Deferreturn, true, nil)
+		}
+	}
+
+	var b *ssa.Block
+	var m *ssa.Value
+	// Do actual return.
+	// These currently turn into self-copies (in many cases).
+	if lateResultLowering {
+		resultFields := s.curfn.Type().Results().FieldSlice()
+		results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
+		m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
+		// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
+		for i, f := range resultFields {
+			n := f.Nname.(*ir.Name)
+			s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+			if s.canSSA(n) { // result is in some SSA variable
+				results[i] = s.variable(n, n.Type())
+			} else if !n.OnStack() { // result is actually heap allocated
+				ha := s.expr(n.Heapaddr)
+				s.instrumentFields(n.Type(), ha, instrumentRead)
+				results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
+			} else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
+				// Before the register ABI, this ought to be a self-move (home=dest).
+				// With the register ABI, it's still a self-move if the parameter is on the stack (i.e., too big or overflowed).
+				results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
+			}
+		}
+
+		// Run exit code. Today, this is just racefuncexit, in -race mode.
+		// TODO: this seems risky with a register ABI, but it is not clear that doing it earlier is right either.
+		// Spills in register allocation might just fix it.
+		s.stmtList(s.curfn.Exit)
+
+		results[len(results)-1] = s.mem()
+		m.AddArgs(results...)
+	} else {
+		// Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
+		for _, f := range s.curfn.Type().Results().FieldSlice() {
+			n := f.Nname.(*ir.Name)
+			if s.canSSA(n) {
+				val := s.variable(n, n.Type())
+				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+				s.store(n.Type(), s.decladdrs[n], val)
+			} else if !n.OnStack() {
+				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+				s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr))
+			} // else, on stack but too large to SSA, the result is already in its destination by construction, so no store needed.
+
+			// TODO: if (SSA) val is ever spilled, we'd like to use the PPARAMOUT slot for spilling it. That won't happen currently.
+		}
+
+		// Run exit code. Today, this is just racefuncexit, in -race mode.
+		s.stmtList(s.curfn.Exit)
+
+		// Do actual return.
+		m = s.mem()
+	}
+	b = s.endBlock()
+	b.Kind = ssa.BlockRet
+	b.SetControl(m)
+	if s.hasdefer && s.hasOpenDefers {
+		s.lastDeferFinalBlock = b
+	}
+	return b
+}
+
+type opAndType struct {
+	op    ir.Op
+	etype types.Kind
+}
+
+var opToSSA = map[opAndType]ssa.Op{
+	opAndType{ir.OADD, types.TINT8}:    ssa.OpAdd8,
+	opAndType{ir.OADD, types.TUINT8}:   ssa.OpAdd8,
+	opAndType{ir.OADD, types.TINT16}:   ssa.OpAdd16,
+	opAndType{ir.OADD, types.TUINT16}:  ssa.OpAdd16,
+	opAndType{ir.OADD, types.TINT32}:   ssa.OpAdd32,
+	opAndType{ir.OADD, types.TUINT32}:  ssa.OpAdd32,
+	opAndType{ir.OADD, types.TINT64}:   ssa.OpAdd64,
+	opAndType{ir.OADD, types.TUINT64}:  ssa.OpAdd64,
+	opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
+	opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
+
+	opAndType{ir.OSUB, types.TINT8}:    ssa.OpSub8,
+	opAndType{ir.OSUB, types.TUINT8}:   ssa.OpSub8,
+	opAndType{ir.OSUB, types.TINT16}:   ssa.OpSub16,
+	opAndType{ir.OSUB, types.TUINT16}:  ssa.OpSub16,
+	opAndType{ir.OSUB, types.TINT32}:   ssa.OpSub32,
+	opAndType{ir.OSUB, types.TUINT32}:  ssa.OpSub32,
+	opAndType{ir.OSUB, types.TINT64}:   ssa.OpSub64,
+	opAndType{ir.OSUB, types.TUINT64}:  ssa.OpSub64,
+	opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
+	opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
+
+	opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
+
+	opAndType{ir.ONEG, types.TINT8}:    ssa.OpNeg8,
+	opAndType{ir.ONEG, types.TUINT8}:   ssa.OpNeg8,
+	opAndType{ir.ONEG, types.TINT16}:   ssa.OpNeg16,
+	opAndType{ir.ONEG, types.TUINT16}:  ssa.OpNeg16,
+	opAndType{ir.ONEG, types.TINT32}:   ssa.OpNeg32,
+	opAndType{ir.ONEG, types.TUINT32}:  ssa.OpNeg32,
+	opAndType{ir.ONEG, types.TINT64}:   ssa.OpNeg64,
+	opAndType{ir.ONEG, types.TUINT64}:  ssa.OpNeg64,
+	opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
+	opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
+
+	opAndType{ir.OBITNOT, types.TINT8}:   ssa.OpCom8,
+	opAndType{ir.OBITNOT, types.TUINT8}:  ssa.OpCom8,
+	opAndType{ir.OBITNOT, types.TINT16}:  ssa.OpCom16,
+	opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
+	opAndType{ir.OBITNOT, types.TINT32}:  ssa.OpCom32,
+	opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
+	opAndType{ir.OBITNOT, types.TINT64}:  ssa.OpCom64,
+	opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
+
+	opAndType{ir.OIMAG, types.TCOMPLEX64}:  ssa.OpComplexImag,
+	opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
+	opAndType{ir.OREAL, types.TCOMPLEX64}:  ssa.OpComplexReal,
+	opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
+
+	opAndType{ir.OMUL, types.TINT8}:    ssa.OpMul8,
+	opAndType{ir.OMUL, types.TUINT8}:   ssa.OpMul8,
+	opAndType{ir.OMUL, types.TINT16}:   ssa.OpMul16,
+	opAndType{ir.OMUL, types.TUINT16}:  ssa.OpMul16,
+	opAndType{ir.OMUL, types.TINT32}:   ssa.OpMul32,
+	opAndType{ir.OMUL, types.TUINT32}:  ssa.OpMul32,
+	opAndType{ir.OMUL, types.TINT64}:   ssa.OpMul64,
+	opAndType{ir.OMUL, types.TUINT64}:  ssa.OpMul64,
+	opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
+	opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
+
+	opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
+	opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
+
+	opAndType{ir.ODIV, types.TINT8}:   ssa.OpDiv8,
+	opAndType{ir.ODIV, types.TUINT8}:  ssa.OpDiv8u,
+	opAndType{ir.ODIV, types.TINT16}:  ssa.OpDiv16,
+	opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
+	opAndType{ir.ODIV, types.TINT32}:  ssa.OpDiv32,
+	opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
+	opAndType{ir.ODIV, types.TINT64}:  ssa.OpDiv64,
+	opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
+
+	opAndType{ir.OMOD, types.TINT8}:   ssa.OpMod8,
+	opAndType{ir.OMOD, types.TUINT8}:  ssa.OpMod8u,
+	opAndType{ir.OMOD, types.TINT16}:  ssa.OpMod16,
+	opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
+	opAndType{ir.OMOD, types.TINT32}:  ssa.OpMod32,
+	opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
+	opAndType{ir.OMOD, types.TINT64}:  ssa.OpMod64,
+	opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
+
+	opAndType{ir.OAND, types.TINT8}:   ssa.OpAnd8,
+	opAndType{ir.OAND, types.TUINT8}:  ssa.OpAnd8,
+	opAndType{ir.OAND, types.TINT16}:  ssa.OpAnd16,
+	opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
+	opAndType{ir.OAND, types.TINT32}:  ssa.OpAnd32,
+	opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
+	opAndType{ir.OAND, types.TINT64}:  ssa.OpAnd64,
+	opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
+
+	opAndType{ir.OOR, types.TINT8}:   ssa.OpOr8,
+	opAndType{ir.OOR, types.TUINT8}:  ssa.OpOr8,
+	opAndType{ir.OOR, types.TINT16}:  ssa.OpOr16,
+	opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
+	opAndType{ir.OOR, types.TINT32}:  ssa.OpOr32,
+	opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
+	opAndType{ir.OOR, types.TINT64}:  ssa.OpOr64,
+	opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
+
+	opAndType{ir.OXOR, types.TINT8}:   ssa.OpXor8,
+	opAndType{ir.OXOR, types.TUINT8}:  ssa.OpXor8,
+	opAndType{ir.OXOR, types.TINT16}:  ssa.OpXor16,
+	opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
+	opAndType{ir.OXOR, types.TINT32}:  ssa.OpXor32,
+	opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
+	opAndType{ir.OXOR, types.TINT64}:  ssa.OpXor64,
+	opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
+
+	opAndType{ir.OEQ, types.TBOOL}:      ssa.OpEqB,
+	opAndType{ir.OEQ, types.TINT8}:      ssa.OpEq8,
+	opAndType{ir.OEQ, types.TUINT8}:     ssa.OpEq8,
+	opAndType{ir.OEQ, types.TINT16}:     ssa.OpEq16,
+	opAndType{ir.OEQ, types.TUINT16}:    ssa.OpEq16,
+	opAndType{ir.OEQ, types.TINT32}:     ssa.OpEq32,
+	opAndType{ir.OEQ, types.TUINT32}:    ssa.OpEq32,
+	opAndType{ir.OEQ, types.TINT64}:     ssa.OpEq64,
+	opAndType{ir.OEQ, types.TUINT64}:    ssa.OpEq64,
+	opAndType{ir.OEQ, types.TINTER}:     ssa.OpEqInter,
+	opAndType{ir.OEQ, types.TSLICE}:     ssa.OpEqSlice,
+	opAndType{ir.OEQ, types.TFUNC}:      ssa.OpEqPtr,
+	opAndType{ir.OEQ, types.TMAP}:       ssa.OpEqPtr,
+	opAndType{ir.OEQ, types.TCHAN}:      ssa.OpEqPtr,
+	opAndType{ir.OEQ, types.TPTR}:       ssa.OpEqPtr,
+	opAndType{ir.OEQ, types.TUINTPTR}:   ssa.OpEqPtr,
+	opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
+	opAndType{ir.OEQ, types.TFLOAT64}:   ssa.OpEq64F,
+	opAndType{ir.OEQ, types.TFLOAT32}:   ssa.OpEq32F,
+
+	opAndType{ir.ONE, types.TBOOL}:      ssa.OpNeqB,
+	opAndType{ir.ONE, types.TINT8}:      ssa.OpNeq8,
+	opAndType{ir.ONE, types.TUINT8}:     ssa.OpNeq8,
+	opAndType{ir.ONE, types.TINT16}:     ssa.OpNeq16,
+	opAndType{ir.ONE, types.TUINT16}:    ssa.OpNeq16,
+	opAndType{ir.ONE, types.TINT32}:     ssa.OpNeq32,
+	opAndType{ir.ONE, types.TUINT32}:    ssa.OpNeq32,
+	opAndType{ir.ONE, types.TINT64}:     ssa.OpNeq64,
+	opAndType{ir.ONE, types.TUINT64}:    ssa.OpNeq64,
+	opAndType{ir.ONE, types.TINTER}:     ssa.OpNeqInter,
+	opAndType{ir.ONE, types.TSLICE}:     ssa.OpNeqSlice,
+	opAndType{ir.ONE, types.TFUNC}:      ssa.OpNeqPtr,
+	opAndType{ir.ONE, types.TMAP}:       ssa.OpNeqPtr,
+	opAndType{ir.ONE, types.TCHAN}:      ssa.OpNeqPtr,
+	opAndType{ir.ONE, types.TPTR}:       ssa.OpNeqPtr,
+	opAndType{ir.ONE, types.TUINTPTR}:   ssa.OpNeqPtr,
+	opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
+	opAndType{ir.ONE, types.TFLOAT64}:   ssa.OpNeq64F,
+	opAndType{ir.ONE, types.TFLOAT32}:   ssa.OpNeq32F,
+
+	opAndType{ir.OLT, types.TINT8}:    ssa.OpLess8,
+	opAndType{ir.OLT, types.TUINT8}:   ssa.OpLess8U,
+	opAndType{ir.OLT, types.TINT16}:   ssa.OpLess16,
+	opAndType{ir.OLT, types.TUINT16}:  ssa.OpLess16U,
+	opAndType{ir.OLT, types.TINT32}:   ssa.OpLess32,
+	opAndType{ir.OLT, types.TUINT32}:  ssa.OpLess32U,
+	opAndType{ir.OLT, types.TINT64}:   ssa.OpLess64,
+	opAndType{ir.OLT, types.TUINT64}:  ssa.OpLess64U,
+	opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
+	opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
+
+	opAndType{ir.OLE, types.TINT8}:    ssa.OpLeq8,
+	opAndType{ir.OLE, types.TUINT8}:   ssa.OpLeq8U,
+	opAndType{ir.OLE, types.TINT16}:   ssa.OpLeq16,
+	opAndType{ir.OLE, types.TUINT16}:  ssa.OpLeq16U,
+	opAndType{ir.OLE, types.TINT32}:   ssa.OpLeq32,
+	opAndType{ir.OLE, types.TUINT32}:  ssa.OpLeq32U,
+	opAndType{ir.OLE, types.TINT64}:   ssa.OpLeq64,
+	opAndType{ir.OLE, types.TUINT64}:  ssa.OpLeq64U,
+	opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
+	opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
+}
+
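+// concreteEtype returns the kind of t, with the platform-sized kinds
+// TINT, TUINT, and TUINTPTR resolved to their fixed-size equivalents.
+// For example, with PtrSize == 8, TINT resolves to TINT64.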
+func (s *state) concreteEtype(t *types.Type) types.Kind {
+	e := t.Kind()
+	switch e {
+	default:
+		return e
+	case types.TINT:
+		if s.config.PtrSize == 8 {
+			return types.TINT64
+		}
+		return types.TINT32
+	case types.TUINT:
+		if s.config.PtrSize == 8 {
+			return types.TUINT64
+		}
+		return types.TUINT32
+	case types.TUINTPTR:
+		if s.config.PtrSize == 8 {
+			return types.TUINT64
+		}
+		return types.TUINT32
+	}
+}
+
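+// ssaOp returns the SSA op implementing op on operands of type t.
+// For example, ssaOp(ir.OADD, int32) is ssa.OpAdd32 and
+// ssaOp(ir.OLT, uint8) is ssa.OpLess8U.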
+func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
+	etype := s.concreteEtype(t)
+	x, ok := opToSSA[opAndType{op, etype}]
+	if !ok {
+		s.Fatalf("unhandled binary op %v %s", op, etype)
+	}
+	return x
+}
+
+type opAndTwoTypes struct {
+	op     ir.Op
+	etype1 types.Kind
+	etype2 types.Kind
+}
+
+type twoTypes struct {
+	etype1 types.Kind
+	etype2 types.Kind
+}
+
+type twoOpsAndType struct {
+	op1              ssa.Op
+	op2              ssa.Op
+	intermediateType types.Kind
+}
+
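+// fpConvOpToSSA maps a (from, to) kind pair to the two SSA ops that
+// implement the conversion via intermediateType. For example, int16 ->
+// float64 sign-extends to int32 and then applies ssa.OpCvt32to64F.
+// Entries containing ssa.OpInvalid mark conversions that need a branchy
+// code expansion instead of a single instruction.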
+var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+
+	twoTypes{types.TINT8, types.TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
+	twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
+	twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
+	twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
+
+	twoTypes{types.TINT8, types.TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
+	twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
+	twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
+	twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
+
+	twoTypes{types.TFLOAT32, types.TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+	twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+	twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
+	twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
+
+	twoTypes{types.TFLOAT64, types.TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+	twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+	twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
+	twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
+	// unsigned
+	twoTypes{types.TUINT8, types.TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
+	twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
+	twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
+	twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64},            // Cvt64Uto32F, branchy code expansion instead
+
+	twoTypes{types.TUINT8, types.TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
+	twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
+	twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
+	twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64},            // Cvt64Uto64F, branchy code expansion instead
+
+	twoTypes{types.TFLOAT32, types.TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+	twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+	twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+	twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64},          // Cvt32Fto64U, branchy code expansion instead
+
+	twoTypes{types.TFLOAT64, types.TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+	twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+	twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+	twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64},          // Cvt64Fto64U, branchy code expansion instead
+
+	// float
+	twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
+	twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
+	twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
+	twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
+}
+
+// fpConvOpToSSA32 is used only on 32-bit archs and holds only the entries
+// that differ there: on 32-bit archs, don't use int64<->float conversions
+// for uint32.
+var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
+	twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
+	twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
+	twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
+	twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
+}
+
+// uint64fpConvOpToSSA holds uint64<->float conversions, used only on
+// machines that have direct instructions for them.
+var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+	twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
+	twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
+	twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
+	twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
+}
+
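+// shiftOpToSSA maps a shift op and the kinds of its operand and shift
+// count to the SSA op. Signedness of the operand matters only for right
+// shifts: int8 >> uint8 is ssa.OpRsh8x8, but uint8 >> uint8 is ssa.OpRsh8Ux8.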
+var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
+	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}:   ssa.OpLsh8x8,
+	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}:  ssa.OpLsh8x8,
+	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}:  ssa.OpLsh8x16,
+	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
+	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}:  ssa.OpLsh8x32,
+	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
+	opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}:  ssa.OpLsh8x64,
+	opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
+
+	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}:   ssa.OpLsh16x8,
+	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}:  ssa.OpLsh16x8,
+	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}:  ssa.OpLsh16x16,
+	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
+	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}:  ssa.OpLsh16x32,
+	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
+	opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}:  ssa.OpLsh16x64,
+	opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
+
+	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}:   ssa.OpLsh32x8,
+	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}:  ssa.OpLsh32x8,
+	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}:  ssa.OpLsh32x16,
+	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
+	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}:  ssa.OpLsh32x32,
+	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
+	opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}:  ssa.OpLsh32x64,
+	opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
+
+	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}:   ssa.OpLsh64x8,
+	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}:  ssa.OpLsh64x8,
+	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}:  ssa.OpLsh64x16,
+	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
+	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}:  ssa.OpLsh64x32,
+	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
+	opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}:  ssa.OpLsh64x64,
+	opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
+
+	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}:   ssa.OpRsh8x8,
+	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}:  ssa.OpRsh8Ux8,
+	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}:  ssa.OpRsh8x16,
+	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
+	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}:  ssa.OpRsh8x32,
+	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
+	opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}:  ssa.OpRsh8x64,
+	opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
+
+	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}:   ssa.OpRsh16x8,
+	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}:  ssa.OpRsh16Ux8,
+	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}:  ssa.OpRsh16x16,
+	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
+	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}:  ssa.OpRsh16x32,
+	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
+	opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}:  ssa.OpRsh16x64,
+	opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
+
+	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}:   ssa.OpRsh32x8,
+	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}:  ssa.OpRsh32Ux8,
+	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}:  ssa.OpRsh32x16,
+	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
+	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}:  ssa.OpRsh32x32,
+	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
+	opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}:  ssa.OpRsh32x64,
+	opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
+
+	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}:   ssa.OpRsh64x8,
+	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}:  ssa.OpRsh64Ux8,
+	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}:  ssa.OpRsh64x16,
+	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
+	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}:  ssa.OpRsh64x32,
+	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
+	opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}:  ssa.OpRsh64x64,
+	opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
+}
+
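+// ssaShiftOp returns the SSA op for shift operation op with operand type t
+// and shift-count type u. For example, an int32 shifted by a uint8 amount
+// maps to ssa.OpLsh32x8 or ssa.OpRsh32x8.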
+func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
+	etype1 := s.concreteEtype(t)
+	etype2 := s.concreteEtype(u)
+	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
+	if !ok {
+		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
+	}
+	return x
+}
+
+// expr converts the expression n to ssa, adds it to s and returns the ssa result.
+func (s *state) expr(n ir.Node) *ssa.Value {
+	if ir.HasUniquePos(n) {
+		// ONAMEs and named OLITERALs have the line number
+		// of the decl, not the use. See issue 14742.
+		s.pushLine(n.Pos())
+		defer s.popLine()
+	}
+
+	s.stmtList(n.Init())
+	switch n.Op() {
+	case ir.OBYTES2STRTMP:
+		n := n.(*ir.ConvExpr)
+		slice := s.expr(n.X)
+		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
+		len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+		return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
+	case ir.OSTR2BYTESTMP:
+		n := n.(*ir.ConvExpr)
+		str := s.expr(n.X)
+		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
+		len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
+		return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
+	case ir.OCFUNC:
+		n := n.(*ir.UnaryExpr)
+		aux := n.X.(*ir.Name).Linksym()
+		return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PFUNC {
+			// "value" of a function is the address of the function's closure
+			sym := staticdata.FuncLinksym(n)
+			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
+		}
+		if s.canSSA(n) {
+			return s.variable(n, n.Type())
+		}
+		return s.load(n.Type(), s.addr(n))
+	case ir.OLINKSYMOFFSET:
+		n := n.(*ir.LinksymOffsetExpr)
+		return s.load(n.Type(), s.addr(n))
+	case ir.ONIL:
+		n := n.(*ir.NilExpr)
+		t := n.Type()
+		switch {
+		case t.IsSlice():
+			return s.constSlice(t)
+		case t.IsInterface():
+			return s.constInterface(t)
+		default:
+			return s.constNil(t)
+		}
+	case ir.OLITERAL:
+		switch u := n.Val(); u.Kind() {
+		case constant.Int:
+			i := ir.IntVal(n.Type(), u)
+			switch n.Type().Size() {
+			case 1:
+				return s.constInt8(n.Type(), int8(i))
+			case 2:
+				return s.constInt16(n.Type(), int16(i))
+			case 4:
+				return s.constInt32(n.Type(), int32(i))
+			case 8:
+				return s.constInt64(n.Type(), i)
+			default:
+				s.Fatalf("bad integer size %d", n.Type().Size())
+				return nil
+			}
+		case constant.String:
+			i := constant.StringVal(u)
+			if i == "" {
+				return s.constEmptyString(n.Type())
+			}
+			return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
+		case constant.Bool:
+			return s.constBool(constant.BoolVal(u))
+		case constant.Float:
+			f, _ := constant.Float64Val(u)
+			switch n.Type().Size() {
+			case 4:
+				return s.constFloat32(n.Type(), f)
+			case 8:
+				return s.constFloat64(n.Type(), f)
+			default:
+				s.Fatalf("bad float size %d", n.Type().Size())
+				return nil
+			}
+		case constant.Complex:
+			re, _ := constant.Float64Val(constant.Real(u))
+			im, _ := constant.Float64Val(constant.Imag(u))
+			switch n.Type().Size() {
+			case 8:
+				pt := types.Types[types.TFLOAT32]
+				return s.newValue2(ssa.OpComplexMake, n.Type(),
+					s.constFloat32(pt, re),
+					s.constFloat32(pt, im))
+			case 16:
+				pt := types.Types[types.TFLOAT64]
+				return s.newValue2(ssa.OpComplexMake, n.Type(),
+					s.constFloat64(pt, re),
+					s.constFloat64(pt, im))
+			default:
+				s.Fatalf("bad complex size %d", n.Type().Size())
+				return nil
+			}
+		default:
+			s.Fatalf("unhandled OLITERAL %v", u.Kind())
+			return nil
+		}
+	case ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		to := n.Type()
+		from := n.X.Type()
+
+		// Assume everything will work out, so set up our return value.
+		// Any case not handled below ends in a Fatalf.
+		x := s.expr(n.X)
+		if to == from {
+			return x
+		}
+
+		// Special case for not confusing GC and liveness.
+		// We don't want pointers accidentally classified
+		// as not-pointers or vice-versa because of copy
+		// elision.
+		if to.IsPtrShaped() != from.IsPtrShaped() {
+			return s.newValue2(ssa.OpConvert, to, x, s.mem())
+		}
+
+		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
+
+		// CONVNOP closure
+		if to.Kind() == types.TFUNC && from.IsPtrShaped() {
+			return v
+		}
+
+		// named <--> unnamed type or typed <--> untyped const
+		if from.Kind() == to.Kind() {
+			return v
+		}
+
+		// unsafe.Pointer <--> *T
+		if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
+			return v
+		}
+
+		// map <--> *hmap
+		if to.Kind() == types.TMAP && from.IsPtr() &&
+			to.MapType().Hmap == from.Elem() {
+			return v
+		}
+
+		types.CalcSize(from)
+		types.CalcSize(to)
+		if from.Width != to.Width {
+			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
+			return nil
+		}
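+		// etypesign classifies a kind as signed, unsigned, or non-integer;
+		// a CONVNOP must not change that classification.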
+		if etypesign(from.Kind()) != etypesign(to.Kind()) {
+			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
+			return nil
+		}
+
+		if base.Flag.Cfg.Instrumenting {
+			// These appear to be fine, but they fail the
+			// integer constraint below, so okay them here.
+			// Sample non-integer conversion: map[string]string -> *uint8
+			return v
+		}
+
+		if etypesign(from.Kind()) == 0 {
+			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
+			return nil
+		}
+
+		// integer, same width, same sign
+		return v
+
+	case ir.OCONV:
+		n := n.(*ir.ConvExpr)
+		x := s.expr(n.X)
+		ft := n.X.Type() // from type
+		tt := n.Type()   // to type
+		if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
+			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
+			return s.newValue1(ssa.OpCopy, n.Type(), x)
+		}
+		if ft.IsInteger() && tt.IsInteger() {
+			var op ssa.Op
+			if tt.Size() == ft.Size() {
+				op = ssa.OpCopy
+			} else if tt.Size() < ft.Size() {
+				// truncation
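+				// The switch key packs both byte sizes into one int:
+				// 10*from.Size() + to.Size(), so e.g. int32 -> int8 is 41.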
+				switch 10*ft.Size() + tt.Size() {
+				case 21:
+					op = ssa.OpTrunc16to8
+				case 41:
+					op = ssa.OpTrunc32to8
+				case 42:
+					op = ssa.OpTrunc32to16
+				case 81:
+					op = ssa.OpTrunc64to8
+				case 82:
+					op = ssa.OpTrunc64to16
+				case 84:
+					op = ssa.OpTrunc64to32
+				default:
+					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
+				}
+			} else if ft.IsSigned() {
+				// sign extension
+				switch 10*ft.Size() + tt.Size() {
+				case 12:
+					op = ssa.OpSignExt8to16
+				case 14:
+					op = ssa.OpSignExt8to32
+				case 18:
+					op = ssa.OpSignExt8to64
+				case 24:
+					op = ssa.OpSignExt16to32
+				case 28:
+					op = ssa.OpSignExt16to64
+				case 48:
+					op = ssa.OpSignExt32to64
+				default:
+					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
+				}
+			} else {
+				// zero extension
+				switch 10*ft.Size() + tt.Size() {
+				case 12:
+					op = ssa.OpZeroExt8to16
+				case 14:
+					op = ssa.OpZeroExt8to32
+				case 18:
+					op = ssa.OpZeroExt8to64
+				case 24:
+					op = ssa.OpZeroExt16to32
+				case 28:
+					op = ssa.OpZeroExt16to64
+				case 48:
+					op = ssa.OpZeroExt32to64
+				default:
+					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
+				}
+			}
+			return s.newValue1(op, n.Type(), x)
+		}
+
+		if ft.IsFloat() || tt.IsFloat() {
+			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+			if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
+				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+					conv = conv1
+				}
+			}
+			if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
+				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+					conv = conv1
+				}
+			}
+
+			if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
+				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+					// ft is uint32; tt is float32 or float64
+					if tt.Size() == 4 {
+						return s.uint32Tofloat32(n, x, ft, tt)
+					}
+					if tt.Size() == 8 {
+						return s.uint32Tofloat64(n, x, ft, tt)
+					}
+				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+					// ft is float32 or float64; tt is a 32-bit unsigned integer
+					if ft.Size() == 4 {
+						return s.float32ToUint32(n, x, ft, tt)
+					}
+					if ft.Size() == 8 {
+						return s.float64ToUint32(n, x, ft, tt)
+					}
+				}
+			}
+
+			if !ok {
+				s.Fatalf("weird float conversion %v -> %v", ft, tt)
+			}
+			op1, op2, it := conv.op1, conv.op2, conv.intermediateType
+
+			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
+				// normal case, not tripping over unsigned 64
+				if op1 == ssa.OpCopy {
+					if op2 == ssa.OpCopy {
+						return x
+					}
+					return s.newValueOrSfCall1(op2, n.Type(), x)
+				}
+				if op2 == ssa.OpCopy {
+					return s.newValueOrSfCall1(op1, n.Type(), x)
+				}
+				return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
+			}
+			// Tricky 64-bit unsigned cases.
+			if ft.IsInteger() {
+				// ft is uint64; tt is float32 or float64
+				if tt.Size() == 4 {
+					return s.uint64Tofloat32(n, x, ft, tt)
+				}
+				if tt.Size() == 8 {
+					return s.uint64Tofloat64(n, x, ft, tt)
+				}
+				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
+			}
+			// ft is float32 or float64; tt is a 64-bit unsigned integer
+			if ft.Size() == 4 {
+				return s.float32ToUint64(n, x, ft, tt)
+			}
+			if ft.Size() == 8 {
+				return s.float64ToUint64(n, x, ft, tt)
+			}
+			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
+			return nil
+		}
+
+		if ft.IsComplex() && tt.IsComplex() {
+			var op ssa.Op
+			if ft.Size() == tt.Size() {
+				switch ft.Size() {
+				case 8:
+					op = ssa.OpRound32F
+				case 16:
+					op = ssa.OpRound64F
+				default:
+					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+				}
+			} else if ft.Size() == 8 && tt.Size() == 16 {
+				op = ssa.OpCvt32Fto64F
+			} else if ft.Size() == 16 && tt.Size() == 8 {
+				op = ssa.OpCvt64Fto32F
+			} else {
+				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+			}
+			ftp := types.FloatForComplex(ft)
+			ttp := types.FloatForComplex(tt)
+			return s.newValue2(ssa.OpComplexMake, tt,
+				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
+				s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
+		}
+
+		s.Fatalf("unhandled OCONV %s -> %s", n.X.Type().Kind(), n.Type().Kind())
+		return nil
+
+	case ir.ODOTTYPE:
+		n := n.(*ir.TypeAssertExpr)
+		res, _ := s.dottype(n, false)
+		return res
+
+	// binary ops
+	case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		if n.X.Type().IsComplex() {
+			pt := types.FloatForComplex(n.X.Type())
+			op := s.ssaOp(ir.OEQ, pt)
+			r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+			i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+			c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
+			switch n.Op() {
+			case ir.OEQ:
+				return c
+			case ir.ONE:
+				return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
+			default:
+				s.Fatalf("ordered complex compare %v", n.Op())
+			}
+		}
+
+		// Convert OGE and OGT into OLE and OLT.
+		op := n.Op()
+		switch op {
+		case ir.OGE:
+			op, a, b = ir.OLE, b, a
+		case ir.OGT:
+			op, a, b = ir.OLT, b, a
+		}
+		if n.X.Type().IsFloat() {
+			// float comparison
+			return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
+		}
+		// integer comparison
+		return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
+	case ir.OMUL:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		if n.Type().IsComplex() {
+			mulop := ssa.OpMul64F
+			addop := ssa.OpAdd64F
+			subop := ssa.OpSub64F
+			pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+			wt := types.Types[types.TFLOAT64]     // Compute in Float64 to minimize cancellation error
+
+			areal := s.newValue1(ssa.OpComplexReal, pt, a)
+			breal := s.newValue1(ssa.OpComplexReal, pt, b)
+			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+			if pt != wt { // Widen for calculation
+				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+			}
+
+			xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+			ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
+
+			if pt != wt { // Narrow to store back
+				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+			}
+
+			return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
+		}
+
+		if n.Type().IsFloat() {
+			return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+		}
+
+		return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+
+	case ir.ODIV:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		if n.Type().IsComplex() {
+			// TODO this is not executed because the front-end substitutes a runtime call.
+			// That probably ought to change; with modest optimization the widen/narrow
+			// conversions could all be elided in larger expression trees.
+			mulop := ssa.OpMul64F
+			addop := ssa.OpAdd64F
+			subop := ssa.OpSub64F
+			divop := ssa.OpDiv64F
+			pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+			wt := types.Types[types.TFLOAT64]     // Compute in Float64 to minimize cancellation error
+
+			areal := s.newValue1(ssa.OpComplexReal, pt, a)
+			breal := s.newValue1(ssa.OpComplexReal, pt, b)
+			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+			if pt != wt { // Widen for calculation
+				areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+				breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+				aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+				bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+			}
+
+			denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
+			xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+			ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
+
+			// TODO not sure if this is best done in wide precision or narrow
+			// Double-rounding might be an issue.
+			// Note that the pre-SSA implementation does the entire calculation
+			// in wide format, so wide is compatible.
+			xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
+			ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
+
+			if pt != wt { // Narrow to store back
+				xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+				ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+			}
+			return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
+		}
+		if n.Type().IsFloat() {
+			return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+		}
+		return s.intDivide(n, a, b)
+	case ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		return s.intDivide(n, a, b)
+	case ir.OADD, ir.OSUB:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		if n.Type().IsComplex() {
+			pt := types.FloatForComplex(n.Type())
+			op := s.ssaOp(n.Op(), pt)
+			return s.newValue2(ssa.OpComplexMake, n.Type(),
+				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
+				s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
+		}
+		if n.Type().IsFloat() {
+			return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+		}
+		return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+	case ir.OAND, ir.OOR, ir.OXOR:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+	case ir.OANDNOT:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
+		return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		a := s.expr(n.X)
+		b := s.expr(n.Y)
+		bt := b.Type
+		if bt.IsSigned() {
+			cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
+			s.check(cmp, ir.Syms.Panicshift)
+			bt = bt.ToUnsigned()
+		}
+		return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
+	case ir.OANDAND, ir.OOROR:
+		// To implement OANDAND (and OOROR), we introduce a
+		// new temporary variable to hold the result. The
+		// variable is associated with the OANDAND node in the
+		// s.vars table (normally variables are only
+		// associated with ONAME nodes). We convert
+		//     A && B
+		// to
+		//     var = A
+		//     if var {
+		//         var = B
+		//     }
+		// Using var in the subsequent block introduces the
+		// necessary phi variable.
+		n := n.(*ir.LogicalExpr)
+		el := s.expr(n.X)
+		s.vars[n] = el
+
+		b := s.endBlock()
+		b.Kind = ssa.BlockIf
+		b.SetControl(el)
+		// In theory, we should set b.Likely here based on context.
+		// However, gc only gives us likeliness hints
+		// in a single place, for plain OIF statements,
+		// and passing around context is finicky, so don't bother for now.
+
+		bRight := s.f.NewBlock(ssa.BlockPlain)
+		bResult := s.f.NewBlock(ssa.BlockPlain)
+		if n.Op() == ir.OANDAND {
+			b.AddEdgeTo(bRight)
+			b.AddEdgeTo(bResult)
+		} else if n.Op() == ir.OOROR {
+			b.AddEdgeTo(bResult)
+			b.AddEdgeTo(bRight)
+		}
+
+		s.startBlock(bRight)
+		er := s.expr(n.Y)
+		s.vars[n] = er
+
+		b = s.endBlock()
+		b.AddEdgeTo(bResult)
+
+		s.startBlock(bResult)
+		return s.variable(n, types.Types[types.TBOOL])
+	case ir.OCOMPLEX:
+		n := n.(*ir.BinaryExpr)
+		r := s.expr(n.X)
+		i := s.expr(n.Y)
+		return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
+
+	// unary ops
+	case ir.ONEG:
+		n := n.(*ir.UnaryExpr)
+		a := s.expr(n.X)
+		if n.Type().IsComplex() {
+			tp := types.FloatForComplex(n.Type())
+			negop := s.ssaOp(n.Op(), tp)
+			return s.newValue2(ssa.OpComplexMake, n.Type(),
+				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
+				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
+		}
+		return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+	case ir.ONOT, ir.OBITNOT:
+		n := n.(*ir.UnaryExpr)
+		a := s.expr(n.X)
+		return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+	case ir.OIMAG, ir.OREAL:
+		n := n.(*ir.UnaryExpr)
+		a := s.expr(n.X)
+		return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
+	case ir.OPLUS:
+		n := n.(*ir.UnaryExpr)
+		return s.expr(n.X)
+
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		return s.addr(n.X)
+
+	case ir.ORESULT:
+		n := n.(*ir.ResultExpr)
+		if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
+			// Do the old thing
+			addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset)
+			return s.rawLoad(n.Type(), addr)
+		}
+		which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset)
+		if which == -1 {
+			// Do the old thing // TODO: Panic instead.
+			addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset)
+			return s.rawLoad(n.Type(), addr)
+		}
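+		// An SSA-able result is selected directly from the call's
+		// results; otherwise, take its address and load from it.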
+		if TypeOK(n.Type()) {
+			return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
+		} else {
+			addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
+			return s.rawLoad(n.Type(), addr)
+		}
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+		return s.load(n.Type(), p)
+
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		if n.X.Op() == ir.OSTRUCTLIT {
+			// All literals with nonzero fields have already been
+			// rewritten during walk. Any that remain are just T{}
+			// or equivalents. Use the zero value.
+			if !ir.IsZero(n.X) {
+				s.Fatalf("literal with nonzero value in SSA: %v", n.X)
+			}
+			return s.zeroVal(n.Type())
+		}
+		// If n is addressable and can't be represented in
+		// SSA, then load just the selected field. This
+		// prevents false memory dependencies in race/msan
+		// instrumentation.
+		if ir.IsAddressable(n) && !s.canSSA(n) {
+			p := s.addr(n)
+			return s.load(n.Type(), p)
+		}
+		v := s.expr(n.X)
+		return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
+
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+		return s.load(n.Type(), p)
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		switch {
+		case n.X.Type().IsString():
+			if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
+				// Replace "abc"[1] with 'b'.
+				// Delayed until now because "abc"[1] is not an ideal constant.
+				// See test/fixedbugs/issue11370.go.
+				return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
+			}
+			a := s.expr(n.X)
+			i := s.expr(n.Index)
+			len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
+			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+			ptrtyp := s.f.Config.Types.BytePtr
+			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
+			if ir.IsConst(n.Index, constant.Int) {
+				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
+			} else {
+				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
+			}
+			return s.load(types.Types[types.TUINT8], ptr)
+		case n.X.Type().IsSlice():
+			p := s.addr(n)
+			return s.load(n.X.Type().Elem(), p)
+		case n.X.Type().IsArray():
+			if TypeOK(n.X.Type()) {
+				// SSA can handle arrays of length at most 1.
+				bound := n.X.Type().NumElem()
+				a := s.expr(n.X)
+				i := s.expr(n.Index)
+				if bound == 0 {
+					// Bounds check will never succeed.  Might as well
+					// use constants for the bounds check.
+					z := s.constInt(types.Types[types.TINT], 0)
+					s.boundsCheck(z, z, ssa.BoundsIndex, false)
+					// The return value won't be live, return junk.
+					return s.newValue0(ssa.OpUnknown, n.Type())
+				}
+				len := s.constInt(types.Types[types.TINT], bound)
+				s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
+				return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
+			}
+			p := s.addr(n)
+			return s.load(n.X.Type().Elem(), p)
+		default:
+			s.Fatalf("bad type for index %v", n.X.Type())
+			return nil
+		}
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		switch {
+		case n.X.Type().IsSlice():
+			op := ssa.OpSliceLen
+			if n.Op() == ir.OCAP {
+				op = ssa.OpSliceCap
+			}
+			return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
+		case n.X.Type().IsString(): // string; not reachable for OCAP
+			return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
+		case n.X.Type().IsMap(), n.X.Type().IsChan():
+			return s.referenceTypeBuiltin(n, s.expr(n.X))
+		default: // array
+			return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
+		}
+
+	case ir.OSPTR:
+		n := n.(*ir.UnaryExpr)
+		a := s.expr(n.X)
+		if n.X.Type().IsSlice() {
+			return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
+		} else {
+			return s.newValue1(ssa.OpStringPtr, n.Type(), a)
+		}
+
+	case ir.OITAB:
+		n := n.(*ir.UnaryExpr)
+		a := s.expr(n.X)
+		return s.newValue1(ssa.OpITab, n.Type(), a)
+
+	case ir.OIDATA:
+		n := n.(*ir.UnaryExpr)
+		a := s.expr(n.X)
+		return s.newValue1(ssa.OpIData, n.Type(), a)
+
+	case ir.OEFACE:
+		n := n.(*ir.BinaryExpr)
+		tab := s.expr(n.X)
+		data := s.expr(n.Y)
+		return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
+
+	case ir.OSLICEHEADER:
+		n := n.(*ir.SliceHeaderExpr)
+		p := s.expr(n.Ptr)
+		l := s.expr(n.Len)
+		c := s.expr(n.Cap)
+		return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+		v := s.expr(n.X)
+		var i, j, k *ssa.Value
+		if n.Low != nil {
+			i = s.expr(n.Low)
+		}
+		if n.High != nil {
+			j = s.expr(n.High)
+		}
+		if n.Max != nil {
+			k = s.expr(n.Max)
+		}
+		p, l, c := s.slice(v, i, j, k, n.Bounded())
+		return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+
+	case ir.OSLICESTR:
+		n := n.(*ir.SliceExpr)
+		v := s.expr(n.X)
+		var i, j *ssa.Value
+		if n.Low != nil {
+			i = s.expr(n.Low)
+		}
+		if n.High != nil {
+			j = s.expr(n.High)
+		}
+		p, l, _ := s.slice(v, i, j, nil, n.Bounded())
+		return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
+
+	case ir.OCALLFUNC:
+		n := n.(*ir.CallExpr)
+		if ir.IsIntrinsicCall(n) {
+			return s.intrinsicCall(n)
+		}
+		fallthrough
+
+	case ir.OCALLINTER, ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		return s.callResult(n, callNormal)
+
+	case ir.OGETG:
+		n := n.(*ir.CallExpr)
+		return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
+
+	case ir.OAPPEND:
+		return s.append(n.(*ir.CallExpr), false)
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT:
+		// All literals with nonzero fields have already been
+		// rewritten during walk. Any that remain are just T{}
+		// or equivalents. Use the zero value.
+		n := n.(*ir.CompLitExpr)
+		if !ir.IsZero(n) {
+			s.Fatalf("literal with nonzero value in SSA: %v", n)
+		}
+		return s.zeroVal(n.Type())
+
+	case ir.ONEW:
+		n := n.(*ir.UnaryExpr)
+		return s.newObject(n.Type().Elem())
+
+	default:
+		s.Fatalf("unhandled expr %v", n.Op())
+		return nil
+	}
+}
+
+// append converts an OAPPEND node to SSA.
+// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
+// adds it to s, and returns the Value.
+// If inplace is true, it writes the result of the OAPPEND expression n
+// back to the slice being appended to, and returns nil.
+// inplace MUST be set to false if the slice can be SSA'd.
+func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
+	// If inplace is false, process as expression "append(s, e1, e2, e3)":
+	//
+	// ptr, len, cap := s
+	// newlen := len + 3
+	// if newlen > cap {
+	//     ptr, len, cap = growslice(s, newlen)
+	//     newlen = len + 3 // recalculate to avoid a spill
+	// }
+	// // with write barriers, if needed:
+	// *(ptr+len) = e1
+	// *(ptr+len+1) = e2
+	// *(ptr+len+2) = e3
+	// return makeslice(ptr, newlen, cap)
+	//
+	//
+	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
+	//
+	// a := &s
+	// ptr, len, cap := s
+	// newlen := len + 3
+	// if uint(newlen) > uint(cap) {
+	//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
+	//    vardef(a)       // if necessary, advise liveness we are writing a new a
+	//    *a.cap = newcap // write before ptr to avoid a spill
+	//    *a.ptr = newptr // with write barrier
+	// }
+	// newlen = len + 3 // recalculate to avoid a spill
+	// *a.len = newlen
+	// // with write barriers, if needed:
+	// *(ptr+len) = e1
+	// *(ptr+len+1) = e2
+	// *(ptr+len+2) = e3
+
+	et := n.Type().Elem()
+	pt := types.NewPtr(et)
+
+	// Evaluate slice
+	sn := n.Args[0] // the slice node is the first in the list
+
+	var slice, addr *ssa.Value
+	if inplace {
+		addr = s.addr(sn)
+		slice = s.load(n.Type(), addr)
+	} else {
+		slice = s.expr(sn)
+	}
+
+	// Allocate new blocks
+	grow := s.f.NewBlock(ssa.BlockPlain)
+	assign := s.f.NewBlock(ssa.BlockPlain)
+
+	// Decide if we need to grow
+	nargs := int64(len(n.Args) - 1)
+	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
+	l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+	c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
+	nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
+
+	cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
+	s.vars[ptrVar] = p
+
+	if !inplace {
+		s.vars[newlenVar] = nl
+		s.vars[capVar] = c
+	} else {
+		s.vars[lenVar] = l
+	}
+
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.Likely = ssa.BranchUnlikely
+	b.SetControl(cmp)
+	b.AddEdgeTo(grow)
+	b.AddEdgeTo(assign)
+
+	// Call growslice
+	s.startBlock(grow)
+	taddr := s.expr(n.X)
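+	// growslice(typ, oldPtr, oldLen, oldCap, newLen) returns the grown
+	// slice's ptr, len, and cap in r[0], r[1], and r[2].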
+	r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
+
+	if inplace {
+		if sn.Op() == ir.ONAME {
+			sn := sn.(*ir.Name)
+			if sn.Class != ir.PEXTERN {
+				// Tell liveness we're about to build a new slice
+				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+			}
+		}
+		capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
+		s.store(types.Types[types.TINT], capaddr, r[2])
+		s.store(pt, addr, r[0])
+		// load the value we just stored to avoid having to spill it
+		s.vars[ptrVar] = s.load(pt, addr)
+		s.vars[lenVar] = r[1] // avoid a spill in the fast path
+	} else {
+		s.vars[ptrVar] = r[0]
+		s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
+		s.vars[capVar] = r[2]
+	}
+
+	b = s.endBlock()
+	b.AddEdgeTo(assign)
+
+	// assign new elements to slots
+	s.startBlock(assign)
+
+	if inplace {
+		l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
+		nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
+		lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
+		s.store(types.Types[types.TINT], lenaddr, nl)
+	}
+
+	// Evaluate args
+	type argRec struct {
+		// if store is true, we're appending the value v.  If false, we're appending the
+		// value at *v.
+		v     *ssa.Value
+		store bool
+	}
+	args := make([]argRec, 0, nargs)
+	for _, n := range n.Args[1:] {
+		if TypeOK(n.Type()) {
+			args = append(args, argRec{v: s.expr(n), store: true})
+		} else {
+			v := s.addr(n)
+			args = append(args, argRec{v: v})
+		}
+	}
+
+	p = s.variable(ptrVar, pt) // generates phi for ptr
+	if !inplace {
+		nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
+		c = s.variable(capVar, types.Types[types.TINT])     // generates phi for cap
+	}
+	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
+	for i, arg := range args {
+		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
+		if arg.store {
+			s.storeType(et, addr, arg.v, 0, true)
+		} else {
+			s.move(et, addr, arg.v)
+		}
+	}
+
+	delete(s.vars, ptrVar)
+	if inplace {
+		delete(s.vars, lenVar)
+		return nil
+	}
+	delete(s.vars, newlenVar)
+	delete(s.vars, capVar)
+	// make result
+	return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
+}
+
+// condBranch evaluates the boolean expression cond and branches to yes
+// if cond is true and no if cond is false.
+// This function is intended to handle && and || better than just calling
+// s.expr(cond) and branching on the result.
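+// For a condition like a && b, it branches on a to a middle block that
+// branches on b, so b is evaluated only when a is true.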
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
+	switch cond.Op() {
+	case ir.OANDAND:
+		cond := cond.(*ir.LogicalExpr)
+		mid := s.f.NewBlock(ssa.BlockPlain)
+		s.stmtList(cond.Init())
+		s.condBranch(cond.X, mid, no, max8(likely, 0))
+		s.startBlock(mid)
+		s.condBranch(cond.Y, yes, no, likely)
+		return
+		// Note: if likely==1, then both recursive calls pass 1.
+		// If likely==-1, then we don't have enough information to decide
+		// whether the first branch is likely or not. So we pass 0 for
+		// the likeliness of the first branch.
+		// TODO: have the frontend give us branch prediction hints for
+		// OANDAND and OOROR nodes (if it ever has such info).
+	case ir.OOROR:
+		cond := cond.(*ir.LogicalExpr)
+		mid := s.f.NewBlock(ssa.BlockPlain)
+		s.stmtList(cond.Init())
+		s.condBranch(cond.X, yes, mid, min8(likely, 0))
+		s.startBlock(mid)
+		s.condBranch(cond.Y, yes, no, likely)
+		return
+		// Note: if likely==-1, then both recursive calls pass -1.
+		// If likely==1, then we don't have enough info to decide
+		// the likelihood of the first branch.
+	case ir.ONOT:
+		cond := cond.(*ir.UnaryExpr)
+		s.stmtList(cond.Init())
+		s.condBranch(cond.X, no, yes, -likely)
+		return
+	case ir.OCONVNOP:
+		cond := cond.(*ir.ConvExpr)
+		s.stmtList(cond.Init())
+		s.condBranch(cond.X, yes, no, likely)
+		return
+	}
+	c := s.expr(cond)
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(c)
+	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
+	b.AddEdgeTo(yes)
+	b.AddEdgeTo(no)
+}
+
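+// skipMask records which top-level parts of a slice assignment (pointer,
+// length, capacity) can be skipped because they are unchanged.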
+type skipMask uint8
+
+const (
+	skipPtr skipMask = 1 << iota
+	skipLen
+	skipCap
+)
+
+// assign does left = right.
+// Right has already been evaluated to ssa, left has not.
+// If deref is true, then we do left = *right instead (and right has already been nil-checked).
+// If deref is true and right == nil, just do left = 0.
+// skip indicates assignments (at the top level) that can be avoided.
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+	if left.Op() == ir.ONAME && ir.IsBlank(left) {
+		return
+	}
+	t := left.Type()
+	types.CalcSize(t)
+	if s.canSSA(left) {
+		if deref {
+			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
+		}
+		if left.Op() == ir.ODOT {
+			// We're assigning to a field of an ssa-able value.
+			// We need to build a new structure with the new value for the
+			// field we're assigning and the old values for the other fields.
+			// For instance:
+			//   type T struct {a, b, c int}
+			//   var x T
+			//   x.b = 5
+			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
+
+			// Grab information about the structure type.
+			left := left.(*ir.SelectorExpr)
+			t := left.X.Type()
+			nf := t.NumFields()
+			idx := fieldIdx(left)
+
+			// Grab old value of structure.
+			old := s.expr(left.X)
+
+			// Make new structure.
+			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
+
+			// Add fields as args.
+			for i := 0; i < nf; i++ {
+				if i == idx {
+					new.AddArg(right)
+				} else {
+					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
+				}
+			}
+
+			// Recursively assign the new value we've made to the base of the dot op.
+			s.assign(left.X, new, false, 0)
+			// TODO: do we need to update named values here?
+			return
+		}
+		if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
+			left := left.(*ir.IndexExpr)
+			s.pushLine(left.Pos())
+			defer s.popLine()
+			// We're assigning to an element of an ssa-able array.
+			// a[i] = v
+			t := left.X.Type()
+			n := t.NumElem()
+
+			i := s.expr(left.Index) // index
+			if n == 0 {
+				// The bounds check must fail.  Might as well
+				// ignore the actual index and just use zeros.
+				z := s.constInt(types.Types[types.TINT], 0)
+				s.boundsCheck(z, z, ssa.BoundsIndex, false)
+				return
+			}
+			if n != 1 {
+				s.Fatalf("assigning to non-1-length array")
+			}
+			// Rewrite to a = [1]{v}
+			len := s.constInt(types.Types[types.TINT], 1)
+			s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
+			v := s.newValue1(ssa.OpArrayMake1, t, right)
+			s.assign(left.X, v, false, 0)
+			return
+		}
+		left := left.(*ir.Name)
+		// Update variable assignment.
+		s.vars[left] = right
+		s.addNamedValue(left, right)
+		return
+	}
+
+	// If this assignment clobbers an entire local variable, then emit
+	// OpVarDef so liveness analysis knows the variable is redefined.
+	if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
+		s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
+	}
+
+	// Left is not ssa-able. Compute its address.
+	addr := s.addr(left)
+	if ir.IsReflectHeaderDataField(left) {
+		// Package unsafe's documentation says storing pointers into
+		// reflect.SliceHeader and reflect.StringHeader's Data fields
+		// is valid, even though they have type uintptr (#19168).
+		// Mark it pointer type to signal the writebarrier pass to
+		// insert a write barrier.
+		t = types.Types[types.TUNSAFEPTR]
+	}
+	if deref {
+		// Treat as a mem->mem move.
+		if right == nil {
+			s.zero(t, addr)
+		} else {
+			s.move(t, addr, right)
+		}
+		return
+	}
+	// Treat as a store.
+	s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
+}
+
+// zeroVal returns the zero value for type t.
+func (s *state) zeroVal(t *types.Type) *ssa.Value {
+	switch {
+	case t.IsInteger():
+		switch t.Size() {
+		case 1:
+			return s.constInt8(t, 0)
+		case 2:
+			return s.constInt16(t, 0)
+		case 4:
+			return s.constInt32(t, 0)
+		case 8:
+			return s.constInt64(t, 0)
+		default:
+			s.Fatalf("bad sized integer type %v", t)
+		}
+	case t.IsFloat():
+		switch t.Size() {
+		case 4:
+			return s.constFloat32(t, 0)
+		case 8:
+			return s.constFloat64(t, 0)
+		default:
+			s.Fatalf("bad sized float type %v", t)
+		}
+	case t.IsComplex():
+		switch t.Size() {
+		case 8:
+			z := s.constFloat32(types.Types[types.TFLOAT32], 0)
+			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+		case 16:
+			z := s.constFloat64(types.Types[types.TFLOAT64], 0)
+			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+		default:
+			s.Fatalf("bad sized complex type %v", t)
+		}
+
+	case t.IsString():
+		return s.constEmptyString(t)
+	case t.IsPtrShaped():
+		return s.constNil(t)
+	case t.IsBoolean():
+		return s.constBool(false)
+	case t.IsInterface():
+		return s.constInterface(t)
+	case t.IsSlice():
+		return s.constSlice(t)
+	case t.IsStruct():
+		n := t.NumFields()
+		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
+		for i := 0; i < n; i++ {
+			v.AddArg(s.zeroVal(t.FieldType(i)))
+		}
+		return v
+	case t.IsArray():
+		switch t.NumElem() {
+		case 0:
+			return s.entryNewValue0(ssa.OpArrayMake0, t)
+		case 1:
+			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
+		}
+	}
+	s.Fatalf("zero for type %v not implemented", t)
+	return nil
+}
+
+type callKind int8
+
+const (
+	callNormal callKind = iota
+	callDefer
+	callDeferStack
+	callGo
+)
+
+type sfRtCallDef struct {
+	rtfn  *obj.LSym
+	rtype types.Kind
+}
+
+var softFloatOps map[ssa.Op]sfRtCallDef
+
+func softfloatInit() {
+	// Some of these operations are rewritten by sfcall before the runtime
+	// call is emitted: Sub reuses fadd with a negated operand, Less/Leq
+	// reuse fgt/fge with swapped operands, and Neq reuses feq plus a Not.
+	softFloatOps = map[ssa.Op]sfRtCallDef{
+		ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+		ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+		ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+		ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+		ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
+		ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
+		ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
+		ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
+
+		ssa.OpEq64F:   sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+		ssa.OpEq32F:   sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+		ssa.OpNeq64F:  sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+		ssa.OpNeq32F:  sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+		ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
+		ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
+		ssa.OpLeq64F:  sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
+		ssa.OpLeq32F:  sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
+
+		ssa.OpCvt32to32F:  sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
+		ssa.OpCvt32Fto32:  sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
+		ssa.OpCvt64to32F:  sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
+		ssa.OpCvt32Fto64:  sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
+		ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
+		ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
+		ssa.OpCvt32to64F:  sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
+		ssa.OpCvt64Fto32:  sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
+		ssa.OpCvt64to64F:  sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
+		ssa.OpCvt64Fto64:  sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
+		ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
+		ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
+		ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
+		ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
+	}
+}
+
+// TODO: do not emit sfcall if operation can be optimized to constant in later
+// opt phase
+func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
+	if callDef, ok := softFloatOps[op]; ok {
+		switch op {
+		case ssa.OpLess32F,
+			ssa.OpLess64F,
+			ssa.OpLeq32F,
+			ssa.OpLeq64F:
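+			// The runtime provides fgt/fge rather than flt/fle, so swap
+			// the operands: x < y == y > x and x <= y == y >= x.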
+			args[0], args[1] = args[1], args[0]
+		case ssa.OpSub32F,
+			ssa.OpSub64F:
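+			// Subtraction reuses fadd: x - y == x + (-y), so negate the
+			// second argument before the call.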
+			args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+		}
+
+		result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
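+		// feq reports equality, so invert the result for the Neq ops.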
+		if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
+			result = s.newValue1(ssa.OpNot, result.Type, result)
+		}
+		return result, true
+	}
+	return nil, false
+}
+
+var intrinsics map[intrinsicKey]intrinsicBuilder
+
+// An intrinsicBuilder converts a call node n into an ssa value that
+// implements that call as an intrinsic. args is a list of arguments to the func.
+type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
+
+type intrinsicKey struct {
+	arch *sys.Arch
+	pkg  string
+	fn   string
+}
+
+func InitTables() {
+	intrinsics = map[intrinsicKey]intrinsicBuilder{}
+
+	var all []*sys.Arch
+	var p4 []*sys.Arch
+	var p8 []*sys.Arch
+	var lwatomics []*sys.Arch
+	for _, a := range &sys.Archs {
+		all = append(all, a)
+		if a.PtrSize == 4 {
+			p4 = append(p4, a)
+		} else {
+			p8 = append(p8, a)
+		}
+		if a.Family != sys.PPC64 {
+			lwatomics = append(lwatomics, a)
+		}
+	}
+
+	// add registers the intrinsic builder b for pkg.fn on the given list of architectures.
+	add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
+		for _, a := range archs {
+			intrinsics[intrinsicKey{a, pkg, fn}] = b
+		}
+	}
+	// addF does the same as add but operates on architecture families.
+	addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
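+		// Build a bitmask of the requested families, then register the
+		// builder for every architecture whose family bit is set.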
+		m := 0
+		for _, f := range archFamilies {
+			if f >= 32 {
+				panic("too many architecture families")
+			}
+			m |= 1 << uint(f)
+		}
+		for _, a := range all {
+			if m>>uint(a.Family)&1 != 0 {
+				intrinsics[intrinsicKey{a, pkg, fn}] = b
+			}
+		}
+	}
+	// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
+	alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
+		aliased := false
+		for _, a := range archs {
+			if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
+				intrinsics[intrinsicKey{a, pkg, fn}] = b
+				aliased = true
+			}
+		}
+		if !aliased {
+			panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
+		}
+	}
+
+	/******** runtime ********/
+	if !base.Flag.Cfg.Instrumenting {
+		add("runtime", "slicebytetostringtmp",
+			func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+				// Compiler frontend optimizations emit OBYTES2STRTMP nodes
+				// for the backend instead of slicebytetostringtmp calls
+				// when not instrumenting.
+				return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
+			},
+			all...)
+	}
+	addF("runtime/internal/math", "MulUintptr",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
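+			// uintptr is 32 bits on 4-byte targets, so pick the matching
+			// multiply-with-overflow op.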
+			if s.config.PtrSize == 4 {
+				return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
+			}
+			return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
+		},
+		sys.AMD64, sys.I386, sys.MIPS64)
+	add("runtime", "KeepAlive",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
+			s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+			return nil
+		},
+		all...)
+	add("runtime", "getclosureptr",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
+		},
+		all...)
+
+	add("runtime", "getcallerpc",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
+		},
+		all...)
+
+	add("runtime", "getcallersp",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
+		},
+		all...)
+
+	/******** runtime/internal/sys ********/
+	addF("runtime/internal/sys", "Ctz32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+	addF("runtime/internal/sys", "Ctz64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+	addF("runtime/internal/sys", "Bswap32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
+	addF("runtime/internal/sys", "Bswap64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
+
+	/******** runtime/internal/atomic ********/
+	addF("runtime/internal/atomic", "Load",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Load8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Load64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "LoadAcq",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+		},
+		sys.PPC64, sys.S390X)
+	addF("runtime/internal/atomic", "LoadAcq64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+		},
+		sys.PPC64)
+	addF("runtime/internal/atomic", "Loadp",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+	addF("runtime/internal/atomic", "Store",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Store8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Store64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "StorepNoWB",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "StoreRel",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.PPC64, sys.S390X)
+	addF("runtime/internal/atomic", "StoreRel64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.PPC64)
+
+	addF("runtime/internal/atomic", "Xchg",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+		},
+		sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Xchg64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+		},
+		sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+	type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
+
+	makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
+
+		return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			// Whether the target supports the ARM64 atomic instructions
+			// (LSE) is detected dynamically via ir.Syms.ARM64HasATOMICS.
+			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
+			v := s.load(types.Types[types.TBOOL], addr)
+			b := s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(v)
+			bTrue := s.f.NewBlock(ssa.BlockPlain)
+			bFalse := s.f.NewBlock(ssa.BlockPlain)
+			bEnd := s.f.NewBlock(ssa.BlockPlain)
+			b.AddEdgeTo(bTrue)
+			b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely
+
+			// We have the atomic instructions - use them directly.
+			s.startBlock(bTrue)
+			emit(s, n, args, op1, typ)
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Use original instruction sequence.
+			s.startBlock(bFalse)
+			emit(s, n, args, op0, typ)
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Merge results.
+			s.startBlock(bEnd)
+			if rtyp == types.TNIL {
+				return nil
+			}
+			return s.variable(n, types.Types[rtyp])
+		}
+	}
+
+	atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
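+		// Record the result in s.vars[n] rather than returning it, so the
+		// guarded builder above can merge both branches via s.variable.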
+		v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
+		s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+		s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+	}
+	addF("runtime/internal/atomic", "Xchg",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+		sys.ARM64)
+	addF("runtime/internal/atomic", "Xchg64",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+		sys.ARM64)
+
+	addF("runtime/internal/atomic", "Xadd",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+		},
+		sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Xadd64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+		},
+		sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+	addF("runtime/internal/atomic", "Xadd",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+		sys.ARM64)
+	addF("runtime/internal/atomic", "Xadd64",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+		sys.ARM64)
+
+	addF("runtime/internal/atomic", "Cas",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+		},
+		sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "Cas64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+		},
+		sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+	addF("runtime/internal/atomic", "CasRel",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+			s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+		},
+		sys.PPC64)
+
+	atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+		v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+		s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+		s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+	}
+
+	addF("runtime/internal/atomic", "Cas",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
+		sys.ARM64)
+	addF("runtime/internal/atomic", "Cas64",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
+		sys.ARM64)
+
+	addF("runtime/internal/atomic", "And8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
+	addF("runtime/internal/atomic", "And",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
+	addF("runtime/internal/atomic", "Or8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+	addF("runtime/internal/atomic", "Or",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+			return nil
+		},
+		sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
+
+	atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+		s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+	}
+
+	addF("runtime/internal/atomic", "And8",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+		sys.ARM64)
+	addF("runtime/internal/atomic", "And",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+		sys.ARM64)
+	addF("runtime/internal/atomic", "Or8",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+		sys.ARM64)
+	addF("runtime/internal/atomic", "Or",
+		makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+		sys.ARM64)
+
+	alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
+	alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
+	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
+	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
+	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
+	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
+	alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+	alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+	alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+	alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
+	alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+	alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
+	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
+	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
+	alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+	alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+	alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+	alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
+	alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+	alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
+	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
+	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
+	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
+	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
+	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
+	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
+	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
+	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
+	alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
+
+	/******** math ********/
+	addF("math", "Sqrt",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
+	addF("math", "Trunc",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+	addF("math", "Ceil",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+	addF("math", "Floor",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+	addF("math", "Round",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.ARM64, sys.PPC64, sys.S390X)
+	addF("math", "RoundToEven",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.ARM64, sys.S390X, sys.Wasm)
+	addF("math", "Abs",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
+		},
+		sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
+	addF("math", "Copysign",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
+		},
+		sys.PPC64, sys.Wasm)
+	addF("math", "FMA",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+		},
+		sys.ARM64, sys.PPC64, sys.S390X)
+	addF("math", "FMA",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			if !s.config.UseFMA {
+				s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+				return s.variable(n, types.Types[types.TFLOAT64])
+			}
+			v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
+			b := s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(v)
+			bTrue := s.f.NewBlock(ssa.BlockPlain)
+			bFalse := s.f.NewBlock(ssa.BlockPlain)
+			bEnd := s.f.NewBlock(ssa.BlockPlain)
+			b.AddEdgeTo(bTrue)
+			b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely // Haswell and newer CPUs are common
+
+			// We have the intrinsic - use it directly.
+			s.startBlock(bTrue)
+			s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Call the pure Go version.
+			s.startBlock(bFalse)
+			s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Merge results.
+			s.startBlock(bEnd)
+			return s.variable(n, types.Types[types.TFLOAT64])
+		},
+		sys.AMD64)
+	addF("math", "FMA",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			if !s.config.UseFMA {
+				s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+				return s.variable(n, types.Types[types.TFLOAT64])
+			}
+			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
+			v := s.load(types.Types[types.TBOOL], addr)
+			b := s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(v)
+			bTrue := s.f.NewBlock(ssa.BlockPlain)
+			bFalse := s.f.NewBlock(ssa.BlockPlain)
+			bEnd := s.f.NewBlock(ssa.BlockPlain)
+			b.AddEdgeTo(bTrue)
+			b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely
+
+			// We have the intrinsic - use it directly.
+			s.startBlock(bTrue)
+			s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Call the pure Go version.
+			s.startBlock(bFalse)
+			s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Merge results.
+			s.startBlock(bEnd)
+			return s.variable(n, types.Types[types.TFLOAT64])
+		},
+		sys.ARM)
+
+	makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
+			b := s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(v)
+			bTrue := s.f.NewBlock(ssa.BlockPlain)
+			bFalse := s.f.NewBlock(ssa.BlockPlain)
+			bEnd := s.f.NewBlock(ssa.BlockPlain)
+			b.AddEdgeTo(bTrue)
+			b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely // most machines have SSE4.1 nowadays
+
+			// We have the intrinsic - use it directly.
+			s.startBlock(bTrue)
+			s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Call the pure Go version.
+			s.startBlock(bFalse)
+			s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Merge results.
+			s.startBlock(bEnd)
+			return s.variable(n, types.Types[types.TFLOAT64])
+		}
+	}
+	addF("math", "RoundToEven",
+		makeRoundAMD64(ssa.OpRoundToEven),
+		sys.AMD64)
+	addF("math", "Floor",
+		makeRoundAMD64(ssa.OpFloor),
+		sys.AMD64)
+	addF("math", "Ceil",
+		makeRoundAMD64(ssa.OpCeil),
+		sys.AMD64)
+	addF("math", "Trunc",
+		makeRoundAMD64(ssa.OpTrunc),
+		sys.AMD64)
+
+	/******** math/bits ********/
+	addF("math/bits", "TrailingZeros64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	addF("math/bits", "TrailingZeros32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	addF("math/bits", "TrailingZeros16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
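+			// MIPS has no 16-bit count-trailing-zeros, so zero-extend to
+			// 32 bits and set bit 16: a zero input then yields Ctz32 == 16,
+			// the correct answer for a 16-bit operand.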
+			x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+			c := s.constInt32(types.Types[types.TUINT32], 1<<16)
+			y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+			return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
+		},
+		sys.MIPS)
+	addF("math/bits", "TrailingZeros16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
+	addF("math/bits", "TrailingZeros16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+			c := s.constInt64(types.Types[types.TUINT64], 1<<16)
+			y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+			return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
+		},
+		sys.S390X, sys.PPC64)
+	addF("math/bits", "TrailingZeros8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
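+			// Same trick as TrailingZeros16 above, with bit 8 as the
+			// sentinel for a zero input.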
+			x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+			c := s.constInt32(types.Types[types.TUINT32], 1<<8)
+			y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+			return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
+		},
+		sys.MIPS)
+	addF("math/bits", "TrailingZeros8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
+	addF("math/bits", "TrailingZeros8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+			c := s.constInt64(types.Types[types.TUINT64], 1<<8)
+			y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+			return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
+		},
+		sys.S390X)
+	alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
+	alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
+	// ReverseBytes inlines correctly, no need to intrinsify it.
+	// ReverseBytes16 lowers to a rotate, no need for anything special here.
+	addF("math/bits", "Len64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	addF("math/bits", "Len32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64)
+	addF("math/bits", "Len32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
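+			// Without a dedicated 32-bit op on 64-bit targets, zero-extend
+			// and use BitLen64; the high 32 bits are zero, so the bit
+			// length is unchanged.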
+			if s.config.PtrSize == 4 {
+				return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+			}
+			x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
+			return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+		},
+		sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	addF("math/bits", "Len16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			if s.config.PtrSize == 4 {
+				x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+				return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
+			}
+			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+			return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+		},
+		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	addF("math/bits", "Len16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64)
+	addF("math/bits", "Len8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			if s.config.PtrSize == 4 {
+				x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+				return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
+			}
+			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+			return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+		},
+		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	addF("math/bits", "Len8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64)
+	addF("math/bits", "Len",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			if s.config.PtrSize == 4 {
+				return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+			}
+			return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
+		},
+		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+	// LeadingZeros is handled because it trivially calls Len.
+	addF("math/bits", "Reverse64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+		},
+		sys.ARM64)
+	addF("math/bits", "Reverse32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
+		},
+		sys.ARM64)
+	addF("math/bits", "Reverse16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
+		},
+		sys.ARM64)
+	addF("math/bits", "Reverse8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
+		},
+		sys.ARM64)
+	addF("math/bits", "Reverse",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			if s.config.PtrSize == 4 {
+				return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
+			}
+			return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+		},
+		sys.ARM64)
+	addF("math/bits", "RotateLeft8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
+		},
+		sys.AMD64)
+	addF("math/bits", "RotateLeft16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
+		},
+		sys.AMD64)
+	addF("math/bits", "RotateLeft32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
+		},
+		sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+	addF("math/bits", "RotateLeft64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
+		},
+		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+	alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
+
+	makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+		return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
+			b := s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(v)
+			bTrue := s.f.NewBlock(ssa.BlockPlain)
+			bFalse := s.f.NewBlock(ssa.BlockPlain)
+			bEnd := s.f.NewBlock(ssa.BlockPlain)
+			b.AddEdgeTo(bTrue)
+			b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely // most machines have POPCNT nowadays
+
+			// We have the intrinsic - use it directly.
+			s.startBlock(bTrue)
+			op := op64
+			if s.config.PtrSize == 4 {
+				op = op32
+			}
+			s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Call the pure Go version.
+			s.startBlock(bFalse)
+			s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
+			s.endBlock().AddEdgeTo(bEnd)
+
+			// Merge results.
+			s.startBlock(bEnd)
+			return s.variable(n, types.Types[types.TINT])
+		}
+	}
+	addF("math/bits", "OnesCount64",
+		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
+		sys.AMD64)
+	addF("math/bits", "OnesCount64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
+		},
+		sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+	addF("math/bits", "OnesCount32",
+		makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
+		sys.AMD64)
+	addF("math/bits", "OnesCount32",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
+		},
+		sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+	addF("math/bits", "OnesCount16",
+		makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
+		sys.AMD64)
+	addF("math/bits", "OnesCount16",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
+		},
+		sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+	addF("math/bits", "OnesCount8",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
+		},
+		sys.S390X, sys.PPC64, sys.Wasm)
+	addF("math/bits", "OnesCount",
+		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
+		sys.AMD64)
+	addF("math/bits", "Mul64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
+		},
+		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
+	alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
+	addF("math/bits", "Add64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+		},
+		sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
+	alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
+	addF("math/bits", "Sub64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+		},
+		sys.AMD64, sys.ARM64, sys.S390X)
+	alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
+	addF("math/bits", "Div64",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			// check for divide-by-zero/overflow and panic with appropriate message
+			cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
+			s.check(cmpZero, ir.Syms.Panicdivide)
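+			// The 128-by-64 quotient fits in 64 bits only if hi < divisor;
+			// otherwise panic with an overflow error.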
+			cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
+			s.check(cmpOverflow, ir.Syms.Panicoverflow)
+			return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+		},
+		sys.AMD64)
+	alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
+
+	alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
+	alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
+	alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
+	alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
+	alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
+	alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
+
+	/******** sync/atomic ********/
+
+	// Note: these are disabled when race detection is on (base.Flag.Race);
+	// see findIntrinsic below.
+	alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
+	alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
+	alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
+	alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
+	alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
+	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
+	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
+
+	alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
+	alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
+	// Note: not StorePointer, that needs a write barrier.  Same below for {CompareAnd}Swap.
+	alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
+	alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
+	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
+	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
+
+	alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
+	alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
+	alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
+	alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
+	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
+	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
+
+	alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
+	alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
+	alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
+	alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
+	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
+	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
+
+	alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
+	alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
+	alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
+	alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
+	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
+	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
+
+	/******** math/big ********/
+	add("math/big", "mulWW",
+		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+			return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
+		},
+		sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
+}
+
+// findIntrinsic returns a function that builds the SSA equivalent of the
+// function identified by the symbol sym. If sym is not an intrinsic, it returns nil.
+func findIntrinsic(sym *types.Sym) intrinsicBuilder {
+	if sym == nil || sym.Pkg == nil {
+		return nil
+	}
+	pkg := sym.Pkg.Path
+	if sym.Pkg == types.LocalPkg {
+		pkg = base.Ctxt.Pkgpath
+	}
+	if base.Flag.Race && pkg == "sync/atomic" {
+		// The race detector needs to be able to intercept these calls.
+		// We can't intrinsify them.
+		return nil
+	}
+	// Skip intrinsifying math functions (which may contain hard-float
+	// instructions) when building in soft-float mode.
+	if Arch.SoftFloat && pkg == "math" {
+		return nil
+	}
+
+	fn := sym.Name
+	if ssa.IntrinsicsDisable {
+		if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
+			// These runtime functions don't have Go definitions; they must be intrinsified.
+		} else {
+			return nil
+		}
+	}
+	return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
+}
+
+func IsIntrinsicCall(n *ir.CallExpr) bool {
+	if n == nil {
+		return false
+	}
+	name, ok := n.X.(*ir.Name)
+	if !ok {
+		return false
+	}
+	return findIntrinsic(name.Sym()) != nil
+}
+
+// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
+func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
+	v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
+	if ssa.IntrinsicsDebug > 0 {
+		x := v
+		if x == nil {
+			x = s.mem()
+		}
+		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
+			x = x.Args[0]
+		}
+		base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
+	}
+	return v
+}
+
+// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
+func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
+	args := make([]*ssa.Value, len(n.Args))
+	for i, n := range n.Args {
+		args[i] = s.expr(n)
+	}
+	return args
+}
+
+// openDeferRecord adds code to evaluate and store the args for an open-coded defer
+// call, and records info about the defer, so we can generate proper code on the
+// exit paths. n is the sub-node of the defer node that is the actual function
+// call. We will also record funcdata information on where the args are stored
+// (as well as the deferBits variable), and this will enable us to run the proper
+// defer calls during panics.
+func (s *state) openDeferRecord(n *ir.CallExpr) {
+	var args []*ssa.Value
+	var argNodes []*ir.Name
+
+	opendefer := &openDeferInfo{
+		n: n,
+	}
+	fn := n.X
+	if n.Op() == ir.OCALLFUNC {
+		// We must always store the function value in a stack slot for the
+		// runtime panic code to use. But in the defer exit code, we will
+		// call the function directly if it is a static function.
+		closureVal := s.expr(fn)
+		closure := s.openDeferSave(nil, fn.Type(), closureVal)
+		opendefer.closureNode = closure.Aux.(*ir.Name)
+		if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
+			opendefer.closure = closure
+		}
+	} else if n.Op() == ir.OCALLMETH {
+		base.Fatalf("OCALLMETH missed by walkCall")
+	} else {
+		if fn.Op() != ir.ODOTINTER {
+			base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
+		}
+		fn := fn.(*ir.SelectorExpr)
+		closure, rcvr := s.getClosureAndRcvr(fn)
+		opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
+		// Important to get the receiver type correct, so it is recognized
+		// as a pointer for GC purposes.
+		opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
+		opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
+		opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
+	}
+	for _, argn := range n.Args {
+		var v *ssa.Value
+		if TypeOK(argn.Type()) {
+			v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
+		} else {
+			v = s.openDeferSave(argn, argn.Type(), nil)
+		}
+		args = append(args, v)
+		argNodes = append(argNodes, v.Aux.(*ir.Name))
+	}
+	opendefer.argVals = args
+	opendefer.argNodes = argNodes
+	index := len(s.openDefers)
+	s.openDefers = append(s.openDefers, opendefer)
+
+	// Update deferBits only after the args/receiver/interface have been
+	// evaluated and stored to the stack, so a panic during evaluation does
+	// not mark this defer as active.
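+	// Each open-coded defer gets one bit in deferBits, in the order the
+	// defer statements are reached (index 0 for the first). For example,
+	// after the first two defers have executed, deferBits == 0b11.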
+	bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
+	newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
+	s.vars[deferBitsVar] = newDeferBits
+	s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
+}
+
+// openDeferSave generates SSA nodes to store a value (with type t) for an
+// open-coded defer at an explicit autotmp location on the stack, so it can be
+// reloaded and used for the appropriate call on exit. If type t is SSAable, then
+// val must be non-nil (and n should be nil) and val is the value to be stored. If
+// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
+// evaluated (via s.addr() below) to get the value that is to be stored. The
+// function returns an SSA value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
+	canSSA := TypeOK(t)
+	var pos src.XPos
+	if canSSA {
+		pos = val.Pos
+	} else {
+		pos = n.Pos()
+	}
+	argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
+	argTemp.SetOpenDeferSlot(true)
+	var addrArgTemp *ssa.Value
+	// Use OpVarLive to make sure the stack slots for the args, etc. are
+	// not removed by dead-store elimination.
+	if s.curBlock.ID != s.f.Entry.ID {
+		// Force the argtmp storing this defer function/receiver/arg to be
+		// declared in the entry block, so that it will be live for the
+		// defer exit code (which will actually access it only if the
+		// associated defer call has been activated).
+		s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+		s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+		addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+	} else {
+		// Special case if we're still in the entry block. We can't use
+		// the above code, since s.defvars[s.f.Entry.ID] isn't defined
+		// until we end the entry block with s.endBlock().
+		s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
+		s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
+		addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
+	}
+	if t.HasPointers() {
+		// Since we may use this argTemp during exit depending on the
+		// deferBits, we must define it unconditionally on entry.
+		// Therefore, we must make sure it is zeroed out in the entry
+		// block if it contains pointers, else GC may wrongly follow an
+		// uninitialized pointer value.
+		argTemp.SetNeedzero(true)
+	}
+	if !canSSA {
+		a := s.addr(n)
+		s.move(t, addrArgTemp, a)
+		return addrArgTemp
+	}
+	// We are storing to the stack, hence we can avoid the full checks in
+	// storeType() (no write barrier) and do a simple store().
+	s.store(t, addrArgTemp, val)
+	return addrArgTemp
+}
+
+// openDeferExit generates SSA for processing all the open-coded defers at exit.
+// The code involves loading deferBits, and checking each of the bits to see if
+// the corresponding defer statement was executed. For each bit that is turned
+// on, the associated defer call is made.
+func (s *state) openDeferExit() {
+	deferExit := s.f.NewBlock(ssa.BlockPlain)
+	s.endBlock().AddEdgeTo(deferExit)
+	s.startBlock(deferExit)
+	s.lastDeferExit = deferExit
+	s.lastDeferCount = len(s.openDefers)
+	zeroval := s.constInt8(types.Types[types.TUINT8], 0)
+	// Test for and run defers in reverse order
+	for i := len(s.openDefers) - 1; i >= 0; i-- {
+		r := s.openDefers[i]
+		bCond := s.f.NewBlock(ssa.BlockPlain)
+		bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+		deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
+		// Generate code to check if the bit associated with the current
+		// defer is set.
+		bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
+		andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
+		eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
+		b := s.endBlock()
+		b.Kind = ssa.BlockIf
+		b.SetControl(eqVal)
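+		// The true edge (bit clear: this defer's statement never ran)
+		// skips straight to bEnd; the false edge runs the deferred call
+		// in bCond.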
+		b.AddEdgeTo(bEnd)
+		b.AddEdgeTo(bCond)
+		bCond.AddEdgeTo(bEnd)
+		s.startBlock(bCond)
+
+		// Clear this bit in deferBits and force a store back to the stack,
+		// so we will not try to re-run this defer call if it panics.
+		nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
+		maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
+		s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
+		// Use this value for the following tests, so previously cleared
+		// bits stay cleared.
+		s.vars[deferBitsVar] = maskedval
+
+		// Generate code to call the function call of the defer, using the
+		// closure/receiver/args that were stored in argtmps at the point
+		// of the defer statement.
+		argStart := base.Ctxt.FixedFrameSize()
+		fn := r.n.X
+		stksize := fn.Type().ArgWidth()
+		var ACArgs []ssa.Param
+		var ACResults []ssa.Param
+		var callArgs []*ssa.Value
+		if r.rcvr != nil {
+			// rcvr in case of OCALLINTER
+			v := s.load(r.rcvr.Type.Elem(), r.rcvr)
+			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
+			callArgs = append(callArgs, v)
+		}
+		for j, argAddrVal := range r.argVals {
+			f := getParam(r.n, j)
+			ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
+			var a *ssa.Value
+			if !TypeOK(f.Type) {
+				a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
+			} else {
+				a = s.load(f.Type, argAddrVal)
+			}
+			callArgs = append(callArgs, a)
+		}
+		var call *ssa.Value
+		if r.closure != nil {
+			v := s.load(r.closure.Type.Elem(), r.closure)
+			s.maybeNilCheckClosure(v, callDefer)
+			codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
+			aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+			call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
+		} else {
+			aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults)
+			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+		}
+		callArgs = append(callArgs, s.mem())
+		call.AddArgs(callArgs...)
+		call.AuxInt = stksize
+		s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+		// Make sure that the stack slots with pointers are kept live
+		// through the call (which is a preemption point). Also, we will
+		// use the first call of the last defer exit to compute liveness
+		// for the deferreturn, so we want all stack slots to be live.
+		if r.closureNode != nil {
+			s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+		}
+		if r.rcvrNode != nil {
+			if r.rcvrNode.Type().HasPointers() {
+				s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
+			}
+		}
+		for _, argNode := range r.argNodes {
+			if argNode.Type().HasPointers() {
+				s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
+			}
+		}
+
+		s.endBlock()
+		s.startBlock(bEnd)
+	}
+}
+
+func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
+	return s.call(n, k, false)
+}
+
+func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
+	return s.call(n, k, true)
+}
+
+// call emits SSA to call the function n using the specified call kind.
+// It returns the address of the return value (or nil if there is none).
+func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
+	s.prevCall = nil
+	var callee *ir.Name    // target function (if static)
+	var closure *ssa.Value // ptr to closure to run (if dynamic)
+	var codeptr *ssa.Value // ptr to target code (if dynamic)
+	var rcvr *ssa.Value    // receiver to set
+	fn := n.X
+	var ACArgs []ssa.Param
+	var ACResults []ssa.Param
+	var callArgs []*ssa.Value
+	res := n.X.Type().Results()
+	if k == callNormal {
+		nf := res.NumFields()
+		for i := 0; i < nf; i++ {
+			fp := res.Field(i)
+			ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + base.Ctxt.FixedFrameSize())})
+		}
+	}
+
+	inRegisters := false
+
+	switch n.Op() {
+	case ir.OCALLFUNC:
+		if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
+			fn := fn.(*ir.Name)
+			callee = fn
+			// TODO remove after register abi is working
+			inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
+			inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
+			inRegisters = inRegistersImported || inRegistersSamePackage
+			if inRegisters {
+				s.f.Warnl(n.Pos(), "called function %v has register params", callee)
+			}
+			break
+		}
+		closure = s.expr(fn)
+		if k != callDefer && k != callDeferStack {
+			// A deferred nil function needs to panic when the function is
+			// invoked, not at the point of the defer statement.
+			s.maybeNilCheckClosure(closure, k)
+		}
+	case ir.OCALLMETH:
+		base.Fatalf("OCALLMETH missed by walkCall")
+	case ir.OCALLINTER:
+		if fn.Op() != ir.ODOTINTER {
+			s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
+		}
+		fn := fn.(*ir.SelectorExpr)
+		var iclosure *ssa.Value
+		iclosure, rcvr = s.getClosureAndRcvr(fn)
+		if k == callNormal {
+			codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
+		} else {
+			closure = iclosure
+		}
+	}
+	types.CalcSize(fn.Type())
+	stksize := fn.Type().ArgWidth() // includes receiver, args, and results
+
+	var call *ssa.Value
+	if k == callDeferStack {
+		// Make a defer struct d on the stack.
+		t := deferstruct(stksize)
+		d := typecheck.TempAt(n.Pos(), s.curfn, t)
+
+		s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
+		addr := s.addr(d)
+
+		// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
+		// 0: siz
+		s.store(types.Types[types.TUINT32],
+			s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
+			s.constInt32(types.Types[types.TUINT32], int32(stksize)))
+		// 1: started, set in deferprocStack
+		// 2: heap, set in deferprocStack
+		// 3: openDefer
+		// 4: sp, set in deferprocStack
+		// 5: pc, set in deferprocStack
+		// 6: fn
+		s.store(closure.Type,
+			s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
+			closure)
+		// 7: panic, set in deferprocStack
+		// 8: link, set in deferprocStack
+		// 9: framepc
+		// 10: varp
+		// 11: fd
+
+		// Then, store all the arguments of the defer call.
+		ft := fn.Type()
+		off := t.FieldOff(12)
+		args := n.Args
+
+		// Set receiver (for interface calls). Always a pointer.
+		if rcvr != nil {
+			p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
+			s.store(types.Types[types.TUINTPTR], p, rcvr)
+		}
+		// Set receiver (for method calls).
+		if n.Op() == ir.OCALLMETH {
+			base.Fatalf("OCALLMETH missed by walkCall")
+		}
+		// Set other args.
+		for _, f := range ft.Params().Fields().Slice() {
+			s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
+			args = args[1:]
+		}
+
+		// Call runtime.deferprocStack with pointer to _defer record.
+		ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
+		aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults)
+		callArgs = append(callArgs, addr, s.mem())
+		call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+		call.AddArgs(callArgs...)
+		if stksize < int64(types.PtrSize) {
+			// We need room for both the call to deferprocStack and the call to
+			// the deferred function.
+			// TODO Revisit this if/when we pass args in registers.
+			stksize = int64(types.PtrSize)
+		}
+		call.AuxInt = stksize
+	} else {
+		// Store arguments to stack, including defer/go arguments and receiver for method calls.
+		// These are written in SP-offset order.
+		argStart := base.Ctxt.FixedFrameSize()
+		// Defer/go args.
+		if k != callNormal {
+			// Write argsize and closure (args to newproc/deferproc).
+			argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
+			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)})
+			callArgs = append(callArgs, argsize)
+			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(types.PtrSize)})
+			callArgs = append(callArgs, closure)
+			stksize += 2 * int64(types.PtrSize)
+			argStart += 2 * int64(types.PtrSize)
+		}
+
+		// Set receiver (for interface calls).
+		if rcvr != nil {
+			ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
+			callArgs = append(callArgs, rcvr)
+		}
+
+		// Write args.
+		t := n.X.Type()
+		args := n.Args
+		if n.Op() == ir.OCALLMETH {
+			base.Fatalf("OCALLMETH missed by walkCall")
+		}
+		for i, n := range args {
+			f := t.Params().Field(i)
+			ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset)
+			ACArgs = append(ACArgs, ACArg)
+			callArgs = append(callArgs, arg)
+		}
+
+		callArgs = append(callArgs, s.mem())
+
+		// call target
+		switch {
+		case k == callDefer:
+			aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults)
+			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+		case k == callGo:
+			aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults)
+			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+		case closure != nil:
+			// rawLoad because loading the code pointer from a
+			// closure is always safe, but IsSanitizerSafeAddr
+			// can't always figure that out currently, and it's
+			// critical that we not clobber any arguments already
+			// stored onto the stack.
+			codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
+			aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+			call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
+		case codeptr != nil:
+			aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
+			call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
+		case callee != nil:
+			aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults)
+			call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+		default:
+			s.Fatalf("bad call type %v %v", n.Op(), n)
+		}
+		call.AddArgs(callArgs...)
+		call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
+	}
+	s.prevCall = call
+	s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+	// Insert OVARLIVE nodes
+	for _, name := range n.KeepAlive {
+		s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
+	}
+
+	// Finish block for defers
+	if k == callDefer || k == callDeferStack {
+		b := s.endBlock()
+		b.Kind = ssa.BlockDefer
+		b.SetControl(call)
+		bNext := s.f.NewBlock(ssa.BlockPlain)
+		b.AddEdgeTo(bNext)
+		// Add recover edge to exit code.
+		r := s.f.NewBlock(ssa.BlockPlain)
+		s.startBlock(r)
+		s.exit()
+		b.AddEdgeTo(r)
+		b.Likely = ssa.BranchLikely
+		s.startBlock(bNext)
+	}
+
+	if res.NumFields() == 0 || k != callNormal {
+		// call has no return value. Continue with the next statement.
+		return nil
+	}
+	fp := res.Field(0)
+	if returnResultAddr {
+		pt := types.NewPtr(fp.Type)
+		return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
+	}
+
+	return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
+}
+
+// maybeNilCheckClosure checks if a nil check of a closure is needed in some
+// architecture-dependent situations and, if so, emits the nil check.
+func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
+	if Arch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
+		// On AIX, the closure needs to be verified as fn can be nil, except
+		// for go statements, where the runtime handles it to produce the
+		// "go of nil func value" error.
+		// TODO(neelance): On other architectures this should be eliminated
+		// by the optimization steps.
+		s.nilCheck(closure)
+	}
+}
+
+// getClosureAndRcvr returns values for the appropriate closure and
+// receiver of an interface call.
+func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
+	i := s.expr(fn.X)
+	itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
+	s.nilCheck(itab)
+	itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
+	closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
+	rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
+	return closure, rcvr
+}
+
+// etypesign returns the signedness of e, for integer/pointer etypes.
+// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
+func etypesign(e types.Kind) int8 {
+	switch e {
+	case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
+		return -1
+	case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
+		return +1
+	}
+	return 0
+}
+
+// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
+// The value that the returned Value represents is guaranteed to be non-nil.
+func (s *state) addr(n ir.Node) *ssa.Value {
+	if n.Op() != ir.ONAME {
+		s.pushLine(n.Pos())
+		defer s.popLine()
+	}
+
+	if s.canSSA(n) {
+		s.Fatalf("addr of canSSA expression: %+v", n)
+	}
+
+	t := types.NewPtr(n.Type())
+	linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
+		v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
+		// TODO: Make OpAddr use AuxInt as well as Aux.
+		if offset != 0 {
+			v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
+		}
+		return v
+	}
+	switch n.Op() {
+	case ir.OLINKSYMOFFSET:
+		no := n.(*ir.LinksymOffsetExpr)
+		return linksymOffset(no.Linksym, no.Offset_)
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Heapaddr != nil {
+			return s.expr(n.Heapaddr)
+		}
+		switch n.Class {
+		case ir.PEXTERN:
+			// global variable
+			return linksymOffset(n.Linksym(), 0)
+		case ir.PPARAM:
+			// parameter slot
+			v := s.decladdrs[n]
+			if v != nil {
+				return v
+			}
+			if n == ir.RegFP {
+				// Special arg that points to the frame pointer (Used by ORECOVER).
+				return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem)
+			}
+			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+			return nil
+		case ir.PAUTO:
+			return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
+
+		case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+			// ensure that we reuse symbols for out parameters so
+			// that cse works on their addresses
+			return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
+		default:
+			s.Fatalf("variable address class %v not implemented", n.Class)
+			return nil
+		}
+	case ir.ORESULT:
+		// load return from callee
+		n := n.(*ir.ResultExpr)
+		if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
+			return s.constOffPtrSP(t, n.Offset)
+		}
+		which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset)
+		if which == -1 {
+			// Do the old thing // TODO: Panic instead.
+			return s.constOffPtrSP(t, n.Offset)
+		}
+		x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
+		return x
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		if n.X.Type().IsSlice() {
+			a := s.expr(n.X)
+			i := s.expr(n.Index)
+			len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
+			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+			p := s.newValue1(ssa.OpSlicePtr, t, a)
+			return s.newValue2(ssa.OpPtrIndex, t, p, i)
+		} else { // array
+			a := s.addr(n.X)
+			i := s.expr(n.Index)
+			len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
+			i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+			return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
+		}
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		return s.exprPtr(n.X, n.Bounded(), n.Pos())
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		p := s.addr(n.X)
+		return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+		return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+	case ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		if n.Type() == n.X.Type() {
+			return s.addr(n.X)
+		}
+		addr := s.addr(n.X)
+		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
+	case ir.OCALLFUNC, ir.OCALLINTER:
+		n := n.(*ir.CallExpr)
+		return s.callAddr(n, callNormal)
+	case ir.ODOTTYPE:
+		n := n.(*ir.TypeAssertExpr)
+		v, _ := s.dottype(n, false)
+		if v.Op != ssa.OpLoad {
+			s.Fatalf("dottype of non-load")
+		}
+		if v.Args[1] != s.mem() {
+			s.Fatalf("memory no longer live from dottype load")
+		}
+		return v.Args[0]
+	default:
+		s.Fatalf("unhandled addr %v", n.Op())
+		return nil
+	}
+}
+
+// canSSA reports whether n is SSA-able.
+// n must be an ONAME (or an ODOT or array-OINDEX sequence with an ONAME base).
+func (s *state) canSSA(n ir.Node) bool {
+	if base.Flag.N != 0 {
+		return false
+	}
+	for {
+		nn := n
+		if nn.Op() == ir.ODOT {
+			nn := nn.(*ir.SelectorExpr)
+			n = nn.X
+			continue
+		}
+		if nn.Op() == ir.OINDEX {
+			nn := nn.(*ir.IndexExpr)
+			if nn.X.Type().IsArray() {
+				n = nn.X
+				continue
+			}
+		}
+		break
+	}
+	if n.Op() != ir.ONAME {
+		return false
+	}
+	return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
+}
+
+func (s *state) canSSAName(name *ir.Name) bool {
+	if name.Addrtaken() || !name.OnStack() {
+		return false
+	}
+	switch name.Class {
+	case ir.PPARAMOUT:
+		if s.hasdefer {
+			// TODO: handle this case? Named return values must be
+			// in memory so that the deferred function can see them.
+			// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
+			// Or maybe not, see issue 18860.  Even unnamed return values
+			// must be written back so if a defer recovers, the caller can see them.
+			return false
+		}
+		if s.cgoUnsafeArgs {
+			// Cgo effectively takes the address of all result args,
+			// but the compiler can't see that.
+			return false
+		}
+	}
+	if name.Class == ir.PPARAM && name.Sym() != nil && name.Sym().Name == ".this" {
+		// wrappers generated by genwrapper need to update
+		// the .this pointer in place.
+		// TODO: treat as a PPARAMOUT?
+		return false
+	}
+	return true
+	// TODO: try to make more variables SSAable?
+}
+
+// TypeOK reports whether variables of type t are SSA-able.
+func TypeOK(t *types.Type) bool {
+	types.CalcSize(t)
+	if t.Width > int64(4*types.PtrSize) {
+		// 4*PtrSize is an arbitrary constant. We want it
+		// to be at least 3*PtrSize so slices can be registerized.
+		// Too big and we'll introduce too much register pressure.
+		return false
+	}
+	switch t.Kind() {
+	case types.TARRAY:
+		// We can't do larger arrays because dynamic indexing is
+		// not supported on SSA variables.
+		// TODO: allow if all indexes are constant.
+		if t.NumElem() <= 1 {
+			return TypeOK(t.Elem())
+		}
+		return false
+	case types.TSTRUCT:
+		if t.NumFields() > ssa.MaxStruct {
+			return false
+		}
+		for _, t1 := range t.Fields().Slice() {
+			if !TypeOK(t1.Type) {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
+
+// exprPtr evaluates n to a pointer and nil-checks it.
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
+	p := s.expr(n)
+	if bounded || n.NonNil() {
+		if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
+			s.f.Warnl(lineno, "removed nil check")
+		}
+		return p
+	}
+	s.nilCheck(p)
+	return p
+}
+
+// nilCheck generates nil pointer checking code.
+// Used only for automatically inserted nil checks,
+// not for user code like 'x != nil'.
+func (s *state) nilCheck(ptr *ssa.Value) {
+	if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
+		return
+	}
+	s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
+}
+
+// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
+// Starts a new block on return.
+// On input, len must be converted to full int width and be nonnegative.
+// Returns idx converted to full int width.
+// If bounded is true then caller guarantees the index is not out of bounds
+// (but boundsCheck will still extend the index to full int width).
+func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+	idx = s.extendIndex(idx, len, kind, bounded)
+
+	if bounded || base.Flag.B != 0 {
+		// If bounded or bounds checking is flag-disabled, then no check is necessary;
+		// just return the extended index.
+		//
+		// Here, bounded == true if the compiler generated the index itself,
+		// such as in the expansion of a slice initializer. These indexes are
+		// compiler-generated, not Go program variables, so they cannot be
+		// attacker-controlled, so we can omit Spectre masking as well.
+		//
+		// Note that we do not want to omit Spectre masking in code like:
+		//
+		//	if 0 <= i && i < len(x) {
+		//		use(x[i])
+		//	}
+		//
+		// Lucky for us, bounded==false for that code.
+		// In that case (handled below), we emit a bound check (and Spectre mask)
+		// and then the prove pass will remove the bounds check.
+		// In theory the prove pass could potentially remove certain
+		// Spectre masks, but it's very delicate and probably better
+		// to be conservative and leave them all in.
+		return idx
+	}
+
+	bNext := s.f.NewBlock(ssa.BlockPlain)
+	bPanic := s.f.NewBlock(ssa.BlockExit)
+
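+	// For an unsigned index, switch to the unsigned variants of the bounds
+	// kinds so that the runtime panic reports the index as unsigned.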
+	if !idx.Type.IsSigned() {
+		switch kind {
+		case ssa.BoundsIndex:
+			kind = ssa.BoundsIndexU
+		case ssa.BoundsSliceAlen:
+			kind = ssa.BoundsSliceAlenU
+		case ssa.BoundsSliceAcap:
+			kind = ssa.BoundsSliceAcapU
+		case ssa.BoundsSliceB:
+			kind = ssa.BoundsSliceBU
+		case ssa.BoundsSlice3Alen:
+			kind = ssa.BoundsSlice3AlenU
+		case ssa.BoundsSlice3Acap:
+			kind = ssa.BoundsSlice3AcapU
+		case ssa.BoundsSlice3B:
+			kind = ssa.BoundsSlice3BU
+		case ssa.BoundsSlice3C:
+			kind = ssa.BoundsSlice3CU
+		}
+	}
+
+	var cmp *ssa.Value
+	if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
+		cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
+	} else {
+		cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
+	}
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchLikely
+	b.AddEdgeTo(bNext)
+	b.AddEdgeTo(bPanic)
+
+	s.startBlock(bPanic)
+	if Arch.LinkArch.Family == sys.Wasm {
+		// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
+		// Should be similar to gcWriteBarrier, but I can't make it work.
+		s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
+	} else {
+		mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
+		s.endBlock().SetControl(mem)
+	}
+	s.startBlock(bNext)
+
+	// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
+	if base.Flag.Cfg.SpectreIndex {
+		op := ssa.OpSpectreIndex
+		if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
+			op = ssa.OpSpectreSliceIndex
+		}
+		idx = s.newValue2(op, types.Types[types.TINT], idx, len)
+	}
+
+	return idx
+}
+
+// If cmp (a bool) is false, panic using the given function.
+func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchLikely
+	bNext := s.f.NewBlock(ssa.BlockPlain)
+	line := s.peekPos()
+	pos := base.Ctxt.PosTable.Pos(line)
+	fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
+	bPanic := s.panics[fl]
+	if bPanic == nil {
+		bPanic = s.f.NewBlock(ssa.BlockPlain)
+		s.panics[fl] = bPanic
+		s.startBlock(bPanic)
+		// The panic call takes/returns memory to ensure that the right
+		// memory state is observed if the panic happens.
+		s.rtcall(fn, false, nil)
+	}
+	b.AddEdgeTo(bNext)
+	b.AddEdgeTo(bPanic)
+	s.startBlock(bNext)
+}
+
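+// intDivide generates SSA for an integer divide or modulus operation,
+// inserting a runtime divide-by-zero check unless the divisor is known
+// to be a non-zero constant.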
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
+	needcheck := true
+	switch b.Op {
+	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
+		if b.AuxInt != 0 {
+			needcheck = false
+		}
+	}
+	if needcheck {
+		// do a size-appropriate check for zero
+		cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
+		s.check(cmp, ir.Syms.Panicdivide)
+	}
+	return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+}
+
+// rtcall issues a call to the given runtime function fn with the listed args.
+// Returns a slice of results of the given result types.
+// The call is added to the end of the current block.
+// If returns is false, the block is marked as an exit block.
+func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
+	s.prevCall = nil
+	// Write args to the stack
+	off := base.Ctxt.FixedFrameSize()
+	var ACArgs []ssa.Param
+	var ACResults []ssa.Param
+	var callArgs []*ssa.Value
+
+	for _, arg := range args {
+		t := arg.Type
+		off = types.Rnd(off, t.Alignment())
+		size := t.Size()
+		ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
+		callArgs = append(callArgs, arg)
+		off += size
+	}
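+	// Align the start of the result area to the register size.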
+	off = types.Rnd(off, int64(types.RegSize))
+
+	// Accumulate result types and offsets
+	offR := off
+	for _, t := range results {
+		offR = types.Rnd(offR, t.Alignment())
+		ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)})
+		offR += t.Size()
+	}
+
+	// Issue call
+	var call *ssa.Value
+	aux := ssa.StaticAuxCall(fn, ACArgs, ACResults)
+	callArgs = append(callArgs, s.mem())
+	call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+	call.AddArgs(callArgs...)
+	s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+
+	if !returns {
+		// Finish block
+		b := s.endBlock()
+		b.Kind = ssa.BlockExit
+		b.SetControl(call)
+		call.AuxInt = off - base.Ctxt.FixedFrameSize()
+		if len(results) > 0 {
+			s.Fatalf("panic call can't have results")
+		}
+		return nil
+	}
+
+	// Load results
+	res := make([]*ssa.Value, len(results))
+	for i, t := range results {
+		off = types.Rnd(off, t.Alignment())
+		if TypeOK(t) {
+			res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
+		} else {
+			addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
+			res[i] = s.rawLoad(t, addr)
+		}
+		off += t.Size()
+	}
+	off = types.Rnd(off, int64(types.PtrSize))
+
+	// Remember how much callee stack space we needed.
+	call.AuxInt = off
+
+	return res
+}
+
+// do *left = right for type t.
+func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
+	s.instrument(t, left, instrumentWrite)
+
+	if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
+		// Known to not have write barrier. Store the whole type.
+		s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+		return
+	}
+
+	// store scalar fields first, so write barrier stores for
+	// pointer fields can be grouped together, and scalar values
+	// don't need to be live across the write barrier call.
+	// TODO: if the writebarrier pass knows how to reorder stores,
+	// we can do a single store here as long as skip==0.
+	s.storeTypeScalars(t, left, right, skip)
+	if skip&skipPtr == 0 && t.HasPointers() {
+		s.storeTypePtrs(t, left, right)
+	}
+}
+
+// do *left = right for all scalar (non-pointer) parts of t.
+func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
+	switch {
+	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
+		s.store(t, left, right)
+	case t.IsPtrShaped():
+		if t.IsPtr() && t.Elem().NotInHeap() {
+			s.store(t, left, right) // see issue 42032
+		}
+		// otherwise, no scalar fields.
+	case t.IsString():
+		if skip&skipLen != 0 {
+			return
+		}
+		len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
+		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+		s.store(types.Types[types.TINT], lenAddr, len)
+	case t.IsSlice():
+		if skip&skipLen == 0 {
+			len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
+			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+			s.store(types.Types[types.TINT], lenAddr, len)
+		}
+		if skip&skipCap == 0 {
+			cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
+			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
+			s.store(types.Types[types.TINT], capAddr, cap)
+		}
+	case t.IsInterface():
+		// itab field doesn't need a write barrier (even though it is a pointer).
+		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
+		s.store(types.Types[types.TUINTPTR], left, itab)
+	case t.IsStruct():
+		n := t.NumFields()
+		for i := 0; i < n; i++ {
+			ft := t.FieldType(i)
+			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+			s.storeTypeScalars(ft, addr, val, 0)
+		}
+	case t.IsArray() && t.NumElem() == 0:
+		// nothing
+	case t.IsArray() && t.NumElem() == 1:
+		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
+	default:
+		s.Fatalf("bad write barrier type %v", t)
+	}
+}
+
+// do *left = right for all pointer parts of t.
+func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
+	switch {
+	case t.IsPtrShaped():
+		if t.IsPtr() && t.Elem().NotInHeap() {
+			break // see issue 42032
+		}
+		s.store(t, left, right)
+	case t.IsString():
+		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
+		s.store(s.f.Config.Types.BytePtr, left, ptr)
+	case t.IsSlice():
+		elType := types.NewPtr(t.Elem())
+		ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
+		s.store(elType, left, ptr)
+	case t.IsInterface():
+		// itab field is treated as a scalar.
+		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
+		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
+		s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
+	case t.IsStruct():
+		n := t.NumFields()
+		for i := 0; i < n; i++ {
+			ft := t.FieldType(i)
+			if !ft.HasPointers() {
+				continue
+			}
+			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+			s.storeTypePtrs(ft, addr, val)
+		}
+	case t.IsArray() && t.NumElem() == 0:
+		// nothing
+	case t.IsArray() && t.NumElem() == 1:
+		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
+	default:
+		s.Fatalf("bad write barrier type %v", t)
+	}
+}
+
+// putArg evaluates n for the purpose of passing it as an argument to a
+// function and returns the corresponding Param and value for the call.
+func (s *state) putArg(n ir.Node, t *types.Type, off int64) (ssa.Param, *ssa.Value) {
+	var a *ssa.Value
+	if !TypeOK(t) {
+		a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
+	} else {
+		a = s.expr(n)
+	}
+	return ssa.Param{Type: t, Offset: int32(off)}, a
+}
+
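+// storeArgWithBase stores the value of argument n, of type t, to the
+// stack slot at offset off from base, using a typed move for types
+// that are not SSA-able.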
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
+	pt := types.NewPtr(t)
+	var addr *ssa.Value
+	if base == s.sp {
+		// Use special routine that avoids allocation on duplicate offsets.
+		addr = s.constOffPtrSP(pt, off)
+	} else {
+		addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
+	}
+
+	if !TypeOK(t) {
+		a := s.addr(n)
+		s.move(t, addr, a)
+		return
+	}
+
+	a := s.expr(n)
+	s.storeType(t, addr, a, 0, false)
+}
+
+// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
+// i,j,k may be nil, in which case they are set to their default value.
+// v may be a slice, string or pointer to an array.
+func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
+	t := v.Type
+	var ptr, len, cap *ssa.Value
+	switch {
+	case t.IsSlice():
+		ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
+		len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+		cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
+	case t.IsString():
+		ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
+		len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
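+		// Strings have no separate capacity; use the length.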
+		cap = len
+	case t.IsPtr():
+		if !t.Elem().IsArray() {
+			s.Fatalf("bad ptr to array in slice %v\n", t)
+		}
+		s.nilCheck(v)
+		ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
+		len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
+		cap = len
+	default:
+		s.Fatalf("bad type in slice %v\n", t)
+	}
+
+	// Set default values
+	if i == nil {
+		i = s.constInt(types.Types[types.TINT], 0)
+	}
+	if j == nil {
+		j = len
+	}
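+	// three reports whether this is a three-index slice v[i:j:k],
+	// which determines the bounds checks emitted below.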
+	three := true
+	if k == nil {
+		three = false
+		k = cap
+	}
+
+	// Panic if slice indices are not in bounds.
+	// Make sure we check these in reverse order so that we're always
+	// comparing against a value known to be nonnegative. See issue 28797.
+	if three {
+		if k != cap {
+			kind := ssa.BoundsSlice3Alen
+			if t.IsSlice() {
+				kind = ssa.BoundsSlice3Acap
+			}
+			k = s.boundsCheck(k, cap, kind, bounded)
+		}
+		if j != k {
+			j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
+		}
+		i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
+	} else {
+		if j != k {
+			kind := ssa.BoundsSliceAlen
+			if t.IsSlice() {
+				kind = ssa.BoundsSliceAcap
+			}
+			j = s.boundsCheck(j, k, kind, bounded)
+		}
+		i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
+	}
+
+	// Word-sized integer operations.
+	subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
+	mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
+	andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
+
+	// Calculate the length (rlen) and capacity (rcap) of the new slice.
+	// For strings the capacity of the result is unimportant. However,
+	// we use rcap to test if we've generated a zero-length slice, so
+	// for strings we use the length for that test.
+	rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
+	rcap := rlen
+	if j != k && !t.IsString() {
+		rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
+	}
+
+	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
+		// No pointer arithmetic necessary.
+		return ptr, rlen, rcap
+	}
+
+	// Calculate the base pointer (rptr) for the new slice.
+	//
+	// Generate the following code assuming that indexes are in bounds.
+	// The masking is to make sure that we don't generate a slice
+	// that points to the next object in memory. We cannot just set
+	// the pointer to nil because then we would create a nil slice or
+	// string.
+	//
+	//     rcap = k - i
+	//     rlen = j - i
+	//     rptr = ptr + (mask(rcap) & (i * stride))
+	//
+	// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
+	// of the element type.
+	stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
+
+	// The delta is the number of bytes to offset ptr by.
+	delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
+
+	// If we're slicing to the point where the capacity is zero,
+	// zero out the delta.
+	mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
+	delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
+
+	// Compute rptr = ptr + delta.
+	rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
+
+	return rptr, rlen, rcap
+}
+
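+// u642fcvtTab parameterizes uint64Tofloat with the opcodes needed for
+// a particular target float width; see u64_f64 and u64_f32 below.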
+type u642fcvtTab struct {
+	leq, cvt2F, and, rsh, or, add ssa.Op
+	one                           func(*state, *types.Type, int64) *ssa.Value
+}
+
+var u64_f64 = u642fcvtTab{
+	leq:   ssa.OpLeq64,
+	cvt2F: ssa.OpCvt64to64F,
+	and:   ssa.OpAnd64,
+	rsh:   ssa.OpRsh64Ux64,
+	or:    ssa.OpOr64,
+	add:   ssa.OpAdd64F,
+	one:   (*state).constInt64,
+}
+
+var u64_f32 = u642fcvtTab{
+	leq:   ssa.OpLeq64,
+	cvt2F: ssa.OpCvt64to32F,
+	and:   ssa.OpAnd64,
+	rsh:   ssa.OpRsh64Ux64,
+	or:    ssa.OpOr64,
+	add:   ssa.OpAdd32F,
+	one:   (*state).constInt64,
+}
+
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	// if x >= 0 {
+	//    result = floatY(x)
+	// } else {
+	//    y = x & 1
+	//    z = x >> 1
+	//    z = z | y
+	//    result = floatY(z)
+	//    result = result + result
+	// }
+	//
+	// Code borrowed from old code generator.
+	// What's going on: large 64-bit "unsigned" looks like
+	// negative number to hardware's integer-to-float
+	// conversion. However, because the mantissa is only
+	// 63 bits, we don't need the LSB, so instead we do an
+	// unsigned right shift (divide by two), convert, and
+	// double. However, before we do that, we need to be
+	// sure that we do not lose a "1" if that made the
+	// difference in the resulting rounding. Therefore, we
+	// preserve it, and OR (not ADD) it back in. The case
+	// that matters is when the eleven discarded bits are
+	// equal to 10000000001; that rounds up, and the 1 cannot
+	// be lost else it would round down if the LSB of the
+	// candidate mantissa is 0.
+	cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchLikely
+
+	bThen := s.f.NewBlock(ssa.BlockPlain)
+	bElse := s.f.NewBlock(ssa.BlockPlain)
+	bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+	b.AddEdgeTo(bThen)
+	s.startBlock(bThen)
+	a0 := s.newValue1(cvttab.cvt2F, tt, x)
+	s.vars[n] = a0
+	s.endBlock()
+	bThen.AddEdgeTo(bAfter)
+
+	b.AddEdgeTo(bElse)
+	s.startBlock(bElse)
+	one := cvttab.one(s, ft, 1)
+	y := s.newValue2(cvttab.and, ft, x, one)
+	z := s.newValue2(cvttab.rsh, ft, x, one)
+	z = s.newValue2(cvttab.or, ft, z, y)
+	a := s.newValue1(cvttab.cvt2F, tt, z)
+	a1 := s.newValue2(cvttab.add, tt, a, a)
+	s.vars[n] = a1
+	s.endBlock()
+	bElse.AddEdgeTo(bAfter)
+
+	s.startBlock(bAfter)
+	return s.variable(n, n.Type())
+}
+
+type u322fcvtTab struct {
+	cvtI2F, cvtF2F ssa.Op
+}
+
+var u32_f64 = u322fcvtTab{
+	cvtI2F: ssa.OpCvt32to64F,
+	cvtF2F: ssa.OpCopy,
+}
+
+var u32_f32 = u322fcvtTab{
+	cvtI2F: ssa.OpCvt32to32F,
+	cvtF2F: ssa.OpCvt64Fto32F,
+}
+
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	// if x >= 0 {
+	// 	result = floatY(x)
+	// } else {
+	// 	result = floatY(float64(x) + (1<<32))
+	// }
+	cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchLikely
+
+	bThen := s.f.NewBlock(ssa.BlockPlain)
+	bElse := s.f.NewBlock(ssa.BlockPlain)
+	bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+	b.AddEdgeTo(bThen)
+	s.startBlock(bThen)
+	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
+	s.vars[n] = a0
+	s.endBlock()
+	bThen.AddEdgeTo(bAfter)
+
+	b.AddEdgeTo(bElse)
+	s.startBlock(bElse)
+	a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
+	twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
+	a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
+	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
+
+	s.vars[n] = a3
+	s.endBlock()
+	bElse.AddEdgeTo(bAfter)
+
+	s.startBlock(bAfter)
+	return s.variable(n, n.Type())
+}
+
+// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
+func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
+	if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
+		s.Fatalf("node must be a map or a channel")
+	}
+	// if n == nil {
+	//   return 0
+	// } else {
+	//   // len
+	//   return *((*int)n)
+	//   // cap
+	//   return *(((*int)n)+1)
+	// }
+	lenType := n.Type()
+	nilValue := s.constNil(types.Types[types.TUINTPTR])
+	cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchUnlikely
+
+	bThen := s.f.NewBlock(ssa.BlockPlain)
+	bElse := s.f.NewBlock(ssa.BlockPlain)
+	bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+	// length/capacity of a nil map/chan is zero
+	b.AddEdgeTo(bThen)
+	s.startBlock(bThen)
+	s.vars[n] = s.zeroVal(lenType)
+	s.endBlock()
+	bThen.AddEdgeTo(bAfter)
+
+	b.AddEdgeTo(bElse)
+	s.startBlock(bElse)
+	switch n.Op() {
+	case ir.OLEN:
+		// length is stored in the first word for map/chan
+		s.vars[n] = s.load(lenType, x)
+	case ir.OCAP:
+		// capacity is stored in the second word for chan
+		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
+		s.vars[n] = s.load(lenType, sw)
+	default:
+		s.Fatalf("op must be OLEN or OCAP")
+	}
+	s.endBlock()
+	bElse.AddEdgeTo(bAfter)
+
+	s.startBlock(bAfter)
+	return s.variable(n, lenType)
+}
+
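+// f2uCvtTab parameterizes floatToUint with the opcodes and cutoff value
+// for a particular float-to-uint conversion; see the f32_u64 etc.
+// tables below.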
+type f2uCvtTab struct {
+	ltf, cvt2U, subf, or ssa.Op
+	floatValue           func(*state, *types.Type, float64) *ssa.Value
+	intValue             func(*state, *types.Type, int64) *ssa.Value
+	cutoff               uint64
+}
+
+var f32_u64 = f2uCvtTab{
+	ltf:        ssa.OpLess32F,
+	cvt2U:      ssa.OpCvt32Fto64,
+	subf:       ssa.OpSub32F,
+	or:         ssa.OpOr64,
+	floatValue: (*state).constFloat32,
+	intValue:   (*state).constInt64,
+	cutoff:     1 << 63,
+}
+
+var f64_u64 = f2uCvtTab{
+	ltf:        ssa.OpLess64F,
+	cvt2U:      ssa.OpCvt64Fto64,
+	subf:       ssa.OpSub64F,
+	or:         ssa.OpOr64,
+	floatValue: (*state).constFloat64,
+	intValue:   (*state).constInt64,
+	cutoff:     1 << 63,
+}
+
+var f32_u32 = f2uCvtTab{
+	ltf:        ssa.OpLess32F,
+	cvt2U:      ssa.OpCvt32Fto32,
+	subf:       ssa.OpSub32F,
+	or:         ssa.OpOr32,
+	floatValue: (*state).constFloat32,
+	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+	cutoff:     1 << 31,
+}
+
+var f64_u32 = f2uCvtTab{
+	ltf:        ssa.OpLess64F,
+	cvt2U:      ssa.OpCvt64Fto32,
+	subf:       ssa.OpSub64F,
+	or:         ssa.OpOr32,
+	floatValue: (*state).constFloat64,
+	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+	cutoff:     1 << 31,
+}
+
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.floatToUint(&f32_u64, n, x, ft, tt)
+}
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.floatToUint(&f64_u64, n, x, ft, tt)
+}
+
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.floatToUint(&f32_u32, n, x, ft, tt)
+}
+
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	return s.floatToUint(&f64_u32, n, x, ft, tt)
+}
+
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+	// cutoff:=1<<(intY_Size-1)
+	// if x < floatX(cutoff) {
+	// 	result = uintY(x)
+	// } else {
+	// 	y = x - floatX(cutoff)
+	// 	z = uintY(y)
+	// 	result = z | -(cutoff)
+	// }
+	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
+	cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cmp)
+	b.Likely = ssa.BranchLikely
+
+	bThen := s.f.NewBlock(ssa.BlockPlain)
+	bElse := s.f.NewBlock(ssa.BlockPlain)
+	bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+	b.AddEdgeTo(bThen)
+	s.startBlock(bThen)
+	a0 := s.newValue1(cvttab.cvt2U, tt, x)
+	s.vars[n] = a0
+	s.endBlock()
+	bThen.AddEdgeTo(bAfter)
+
+	b.AddEdgeTo(bElse)
+	s.startBlock(bElse)
+	y := s.newValue2(cvttab.subf, ft, x, cutoff)
+	y = s.newValue1(cvttab.cvt2U, tt, y)
+	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
+	a1 := s.newValue2(cvttab.or, tt, y, z)
+	s.vars[n] = a1
+	s.endBlock()
+	bElse.AddEdgeTo(bAfter)
+
+	s.startBlock(bAfter)
+	return s.variable(n, n.Type())
+}
+
+// dottype generates SSA for a type assertion node.
+// commaok indicates whether a failed assertion panics (commaok=false)
+// or is reported via the returned bool (commaok=true).
+// If commaok is false, resok will be nil.
+func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+	iface := s.expr(n.X)              // input interface
+	target := s.reflectType(n.Type()) // target type
+	byteptr := s.f.Config.Types.BytePtr
+
+	if n.Type().IsInterface() {
+		if n.Type().IsEmptyInterface() {
+			// Converting to an empty interface.
+			// Input could be an empty or nonempty interface.
+			if base.Debug.TypeAssert > 0 {
+				base.WarnfAt(n.Pos(), "type assertion inlined")
+			}
+
+			// Get itab/type field from input.
+			itab := s.newValue1(ssa.OpITab, byteptr, iface)
+			// Conversion succeeds iff that field is not nil.
+			cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+
+			if n.X.Type().IsEmptyInterface() && commaok {
+				// Converting empty interface to empty interface with ,ok is just a nil check.
+				return iface, cond
+			}
+
+			// Branch on nilness.
+			b := s.endBlock()
+			b.Kind = ssa.BlockIf
+			b.SetControl(cond)
+			b.Likely = ssa.BranchLikely
+			bOk := s.f.NewBlock(ssa.BlockPlain)
+			bFail := s.f.NewBlock(ssa.BlockPlain)
+			b.AddEdgeTo(bOk)
+			b.AddEdgeTo(bFail)
+
+			if !commaok {
+				// On failure, panic by calling panicnildottype.
+				s.startBlock(bFail)
+				s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
+
+				// On success, return (perhaps modified) input interface.
+				s.startBlock(bOk)
+				if n.X.Type().IsEmptyInterface() {
+					res = iface // Use input interface unchanged.
+					return
+				}
+				// Load type out of itab, build interface with existing idata.
+				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
+				typ := s.load(byteptr, off)
+				idata := s.newValue1(ssa.OpIData, byteptr, iface)
+				res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
+				return
+			}
+
+			s.startBlock(bOk)
+			// nonempty -> empty
+			// Need to load type from itab
+			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
+			s.vars[typVar] = s.load(byteptr, off)
+			s.endBlock()
+
+			// itab is nil, might as well use that as the nil result.
+			s.startBlock(bFail)
+			s.vars[typVar] = itab
+			s.endBlock()
+
+			// Merge point.
+			bEnd := s.f.NewBlock(ssa.BlockPlain)
+			bOk.AddEdgeTo(bEnd)
+			bFail.AddEdgeTo(bEnd)
+			s.startBlock(bEnd)
+			idata := s.newValue1(ssa.OpIData, byteptr, iface)
+			res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
+			resok = cond
+			delete(s.vars, typVar)
+			return
+		}
+		// converting to a nonempty interface needs a runtime call.
+		if base.Debug.TypeAssert > 0 {
+			base.WarnfAt(n.Pos(), "type assertion not inlined")
+		}
+		if n.X.Type().IsEmptyInterface() {
+			if commaok {
+				call := s.rtcall(ir.Syms.AssertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
+				return call[0], call[1]
+			}
+			return s.rtcall(ir.Syms.AssertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
+		}
+		if commaok {
+			call := s.rtcall(ir.Syms.AssertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
+			return call[0], call[1]
+		}
+		return s.rtcall(ir.Syms.AssertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
+	}
+
+	if base.Debug.TypeAssert > 0 {
+		base.WarnfAt(n.Pos(), "type assertion inlined")
+	}
+
+	// Converting to a concrete type.
+	direct := types.IsDirectIface(n.Type())
+	itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
+	var targetITab *ssa.Value
+	if n.X.Type().IsEmptyInterface() {
+		// Looking for pointer to target type.
+		targetITab = target
+	} else {
+		// Looking for pointer to itab for target type and source interface.
+		targetITab = s.expr(n.Itab)
+	}
+
+	var tmp ir.Node     // temporary for use with large types
+	var addr *ssa.Value // address of tmp
+	if commaok && !TypeOK(n.Type()) {
+		// unSSAable type, use temporary.
+		// TODO: get rid of some of these temporaries.
+		tmp = typecheck.TempAt(n.Pos(), s.curfn, n.Type())
+		s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp.(*ir.Name), s.mem())
+		addr = s.addr(tmp)
+	}
+
+	cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
+	b := s.endBlock()
+	b.Kind = ssa.BlockIf
+	b.SetControl(cond)
+	b.Likely = ssa.BranchLikely
+
+	bOk := s.f.NewBlock(ssa.BlockPlain)
+	bFail := s.f.NewBlock(ssa.BlockPlain)
+	b.AddEdgeTo(bOk)
+	b.AddEdgeTo(bFail)
+
+	if !commaok {
+		// on failure, panic by calling panicdottype
+		s.startBlock(bFail)
+		taddr := s.reflectType(n.X.Type())
+		if n.X.Type().IsEmptyInterface() {
+			s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
+		} else {
+			s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
+		}
+
+		// on success, return data from interface
+		s.startBlock(bOk)
+		if direct {
+			return s.newValue1(ssa.OpIData, n.Type(), iface), nil
+		}
+		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+		return s.load(n.Type(), p), nil
+	}
+
+	// commaok is the more complicated case because we have
+	// a control flow merge point.
+	bEnd := s.f.NewBlock(ssa.BlockPlain)
+	// Note that we need a new valVar each time (unlike okVar where we can
+	// reuse the variable) because it might have a different type every time.
+	valVar := ssaMarker("val")
+
+	// type assertion succeeded
+	s.startBlock(bOk)
+	if tmp == nil {
+		if direct {
+			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
+		} else {
+			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+			s.vars[valVar] = s.load(n.Type(), p)
+		}
+	} else {
+		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+		s.move(n.Type(), addr, p)
+	}
+	s.vars[okVar] = s.constBool(true)
+	s.endBlock()
+	bOk.AddEdgeTo(bEnd)
+
+	// type assertion failed
+	s.startBlock(bFail)
+	if tmp == nil {
+		s.vars[valVar] = s.zeroVal(n.Type())
+	} else {
+		s.zero(n.Type(), addr)
+	}
+	s.vars[okVar] = s.constBool(false)
+	s.endBlock()
+	bFail.AddEdgeTo(bEnd)
+
+	// merge point
+	s.startBlock(bEnd)
+	if tmp == nil {
+		res = s.variable(valVar, n.Type())
+		delete(s.vars, valVar)
+	} else {
+		res = s.load(n.Type(), addr)
+		s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
+	}
+	resok = s.variable(okVar, types.Types[types.TBOOL])
+	delete(s.vars, okVar)
+	return res, resok
+}
+
+// variable returns the value of a variable at the current location.
+func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
+	v := s.vars[n]
+	if v != nil {
+		return v
+	}
+	v = s.fwdVars[n]
+	if v != nil {
+		return v
+	}
+
+	if s.curBlock == s.f.Entry {
+		// No variable should be live at entry.
+		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
+	}
+	// Make a FwdRef, which records a value that's live on block input.
+	// We'll find the matching definition as part of insertPhis.
+	v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
+	s.fwdVars[n] = v
+	if n.Op() == ir.ONAME {
+		s.addNamedValue(n.(*ir.Name), v)
+	}
+	return v
+}
+
+func (s *state) mem() *ssa.Value {
+	return s.variable(memVar, types.TypeMem)
+}
+
+func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
+	if n.Class == ir.Pxxx {
+		// Don't track our marker nodes (memVar etc.).
+		return
+	}
+	if ir.IsAutoTmp(n) {
+		// Don't track temporary variables.
+		return
+	}
+	if n.Class == ir.PPARAMOUT {
+		// Don't track named output values.  This prevents return values
+		// from being assigned too early. See #14591 and #14762. TODO: allow this.
+		return
+	}
+	loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
+	values, ok := s.f.NamedValues[loc]
+	if !ok {
+		s.f.Names = append(s.f.Names, loc)
+	}
+	s.f.NamedValues[loc] = append(values, v)
+}
+
+// gencallret generates a disconnected call to a runtime routine and a return.
+func gencallret(pp *objw.Progs, sym *obj.LSym) *obj.Prog {
+	p := pp.Prog(obj.ACALL)
+	p.To.Type = obj.TYPE_MEM
+	p.To.Name = obj.NAME_EXTERN
+	p.To.Sym = sym
+	p = pp.Prog(obj.ARET)
+	return p
+}
+
+// Branch is an unresolved branch.
+type Branch struct {
+	P *obj.Prog  // branch instruction
+	B *ssa.Block // target
+}
+
+// State contains state needed during Prog generation.
+type State struct {
+	ABI obj.ABI
+
+	pp *objw.Progs
+
+	// Branches remembers all the branch instructions we've seen
+	// and where they would like to go.
+	Branches []Branch
+
+	// bstart remembers where each block starts (indexed by block ID)
+	bstart []*obj.Prog
+
+	// Some architectures require a 64-bit temporary for FP-related
+	// register shuffling. Examples include PPC and Sparc V8.
+	ScratchFpMem *ir.Name
+
+	maxarg int64 // largest frame size for arguments to calls made by the function
+
+	// Map from GC safe points to liveness index, generated by
+	// liveness analysis.
+	livenessMap liveness.Map
+
+	// lineRunStart records the beginning of the current run of instructions
+	// within a single block sharing the same line number.
+	// Used to move statement marks to the beginning of such runs.
+	lineRunStart *obj.Prog
+
+	// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
+	OnWasmStackSkipped int
+}
+
+// Prog appends a new Prog.
+func (s *State) Prog(as obj.As) *obj.Prog {
+	p := s.pp.Prog(as)
+	if objw.LosesStmtMark(as) {
+		return p
+	}
+	// Float a statement start to the beginning of any same-line run.
+	// lineRunStart is reset at block boundaries, which appears to work well.
+	if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
+		s.lineRunStart = p
+	} else if p.Pos.IsStmt() == src.PosIsStmt {
+		s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
+		p.Pos = p.Pos.WithNotStmt()
+	}
+	return p
+}
+
+// Pc returns the current Prog.
+func (s *State) Pc() *obj.Prog {
+	return s.pp.Next
+}
+
+// SetPos sets the current source position.
+func (s *State) SetPos(pos src.XPos) {
+	s.pp.Pos = pos
+}
+
+// Br emits a single branch instruction and returns the instruction.
+// Not all architectures need the returned instruction, but otherwise
+// the boilerplate is common to all.
+func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
+	p := s.Prog(op)
+	p.To.Type = obj.TYPE_BRANCH
+	s.Branches = append(s.Branches, Branch{P: p, B: target})
+	return p
+}
+
+// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
+// that reduce "jumpy" line number churn when debugging.
+// Spill/fill/copy instructions from the register allocator,
+// phi functions, and instructions with a no-pos position
+// are examples of instructions that can cause churn.
+func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
+	switch v.Op {
+	case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
+		// These are not statements
+		s.SetPos(v.Pos.WithNotStmt())
+	default:
+		p := v.Pos
+		if p != src.NoXPos {
+			// If the position is defined, update the position.
+			// Also convert default IsStmt to NotStmt; only
+			// explicit statement boundaries should appear
+			// in the generated code.
+			if p.IsStmt() != src.PosIsStmt {
+				p = p.WithNotStmt()
+				// Calls use the pos attached to v, but copy the statement mark from State
+			}
+			s.SetPos(p)
+		} else {
+			s.SetPos(s.pp.Pos.WithNotStmt())
+		}
+	}
+}
+
+// genssa appends entries to pp for each instruction in f.
+func genssa(f *ssa.Func, pp *objw.Progs) {
+	var s State
+	s.ABI = f.OwnAux.Fn.ABI()
+
+	e := f.Frontend().(*ssafn)
+
+	s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
+
+	openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
+	if openDeferInfo != nil {
+		// This function uses open-coded defers -- write out the funcdata
+		// info that we computed at the end of genssa.
+		p := pp.Prog(obj.AFUNCDATA)
+		p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = openDeferInfo
+	}
+
+	// Remember where each block starts.
+	s.bstart = make([]*obj.Prog, f.NumBlocks())
+	s.pp = pp
+	var progToValue map[*obj.Prog]*ssa.Value
+	var progToBlock map[*obj.Prog]*ssa.Block
+	var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
+	if f.PrintOrHtmlSSA {
+		progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
+		progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
+		f.Logf("genssa %s\n", f.Name)
+		progToBlock[s.pp.Next] = f.Blocks[0]
+	}
+
+	s.ScratchFpMem = e.scratchFpMem
+
+	if base.Ctxt.Flag_locationlists {
+		if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
+			f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
+		}
+		valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
+		for i := range valueToProgAfter {
+			valueToProgAfter[i] = nil
+		}
+	}
+
+	// If the very first instruction is not tagged as a statement,
+	// debuggers may attribute it to the previous function in the program.
+	firstPos := src.NoXPos
+	for _, v := range f.Entry.Values {
+		if v.Pos.IsStmt() == src.PosIsStmt {
+			firstPos = v.Pos
+			v.Pos = firstPos.WithDefaultStmt()
+			break
+		}
+	}
+
+	// inlMarks has an entry for each Prog that implements an inline mark.
+	// It maps from that Prog to the global inlining id of the inlined body
+	// which should unwind to this Prog's location.
+	var inlMarks map[*obj.Prog]int32
+	var inlMarkList []*obj.Prog
+
+	// inlMarksByPos maps from a (column 1) source position to the set of
+	// Progs that are in the set above and have that source position.
+	var inlMarksByPos map[src.XPos][]*obj.Prog
+
+	// Emit basic blocks
+	for i, b := range f.Blocks {
+		s.bstart[b.ID] = s.pp.Next
+		s.lineRunStart = nil
+
+		// Attach a "default" liveness info. Normally this will be
+		// overwritten in the Values loop below for each Value. But
+		// for an empty block this will be used for its control
+		// instruction. We won't use the actual liveness map on a
+		// control instruction. Just mark it something that is
+		// preemptible, unless this function is "all unsafe".
+		s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
+
+		// Emit values in block
+		Arch.SSAMarkMoves(&s, b)
+		for _, v := range b.Values {
+			x := s.pp.Next
+			s.DebugFriendlySetPosFrom(v)
+
+			switch v.Op {
+			case ssa.OpInitMem:
+				// memory arg needs no code
+			case ssa.OpArg:
+				// input args need no code
+			case ssa.OpSP, ssa.OpSB:
+				// nothing to do
+			case ssa.OpSelect0, ssa.OpSelect1:
+				// nothing to do
+			case ssa.OpGetG:
+				// nothing to do when there's a g register,
+				// and checkLower complains if there's not
+			case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
+				// nothing to do; already used by liveness
+			case ssa.OpPhi:
+				CheckLoweredPhi(v)
+			case ssa.OpConvert:
+				// nothing to do; no-op conversion for liveness
+				if v.Args[0].Reg() != v.Reg() {
+					v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
+				}
+			case ssa.OpInlMark:
+				p := Arch.Ginsnop(s.pp)
+				if inlMarks == nil {
+					inlMarks = map[*obj.Prog]int32{}
+					inlMarksByPos = map[src.XPos][]*obj.Prog{}
+				}
+				inlMarks[p] = v.AuxInt32()
+				inlMarkList = append(inlMarkList, p)
+				pos := v.Pos.AtColumn1()
+				inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
+
+			default:
+				// Attach this safe point to the next
+				// instruction.
+				s.pp.NextLive = s.livenessMap.Get(v)
+
+				// Special case for first line in function; move it to the start.
+				if firstPos != src.NoXPos {
+					s.SetPos(firstPos)
+					firstPos = src.NoXPos
+				}
+				// let the backend handle it
+				Arch.SSAGenValue(&s, v)
+			}
+
+			if base.Ctxt.Flag_locationlists {
+				valueToProgAfter[v.ID] = s.pp.Next
+			}
+
+			if f.PrintOrHtmlSSA {
+				for ; x != s.pp.Next; x = x.Link {
+					progToValue[x] = v
+				}
+			}
+		}
+		// If this is an empty infinite loop, stick a hardware NOP in there
+		// so that debuggers are less confused.
+		if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
+			p := Arch.Ginsnop(s.pp)
+			p.Pos = p.Pos.WithIsStmt()
+			if b.Pos == src.NoXPos {
+				b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion.  See #35652.
+				if b.Pos == src.NoXPos {
+					b.Pos = pp.Text.Pos // Sometimes p.Pos is empty.  See #35695.
+				}
+			}
+		b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops; force a change in line number.
+		}
+		// Emit control flow instructions for block
+		var next *ssa.Block
+		if i < len(f.Blocks)-1 && base.Flag.N == 0 {
+			// If -N, leave next==nil so every block with successors
+			// ends in a JMP (except call blocks - plive doesn't like
+			// select{send,recv} followed by a JMP call).  Helps keep
+			// line numbers for otherwise empty blocks.
+			next = f.Blocks[i+1]
+		}
+		x := s.pp.Next
+		s.SetPos(b.Pos)
+		Arch.SSAGenBlock(&s, b, next)
+		if f.PrintOrHtmlSSA {
+			for ; x != s.pp.Next; x = x.Link {
+				progToBlock[x] = b
+			}
+		}
+	}
+	if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
+		// We need the return address of a panic call to
+		// still be inside the function in question. So if
+		// it ends in a call which doesn't return, add a
+		// nop (which will never execute) after the call.
+		Arch.Ginsnop(pp)
+	}
+	if openDeferInfo != nil {
+		// When doing open-coded defers, generate a disconnected call to
+		// deferreturn and a return. This will be used during panic
+		// recovery to unwind the stack and return to the runtime.
+		s.pp.NextLive = s.livenessMap.DeferReturn
+		gencallret(pp, ir.Syms.Deferreturn)
+	}
+
+	if inlMarks != nil {
+		// We have some inline marks. Try to find other instructions we're
+		// going to emit anyway, and use those instructions instead of the
+		// inline marks.
+		for p := pp.Text; p != nil; p = p.Link {
+			if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
+				// Don't use 0-sized instructions as inline marks, because we need
+				// to identify inline mark instructions by pc offset.
+				// (Some of these instructions are sometimes zero-sized, sometimes not.
+				// We must not use anything that even might be zero-sized.)
+				// TODO: are there others?
+				continue
+			}
+			if _, ok := inlMarks[p]; ok {
+				// Don't use inline marks themselves. We don't know
+				// whether they will be zero-sized or not yet.
+				continue
+			}
+			pos := p.Pos.AtColumn1()
+			s := inlMarksByPos[pos]
+			if len(s) == 0 {
+				continue
+			}
+			for _, m := range s {
+				// We found an instruction with the same source position as
+				// some of the inline marks.
+				// Use this instruction instead.
+				p.Pos = p.Pos.WithIsStmt() // promote position to a statement
+				pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
+				// Make the inline mark a real nop, so it doesn't generate any code.
+				m.As = obj.ANOP
+				m.Pos = src.NoXPos
+				m.From = obj.Addr{}
+				m.To = obj.Addr{}
+			}
+			delete(inlMarksByPos, pos)
+		}
+		// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
+		for _, p := range inlMarkList {
+			if p.As != obj.ANOP {
+				pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
+			}
+		}
+	}
+
+	if base.Ctxt.Flag_locationlists {
+		debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
+		e.curfn.DebugInfo = debugInfo
+		bstart := s.bstart
+		// Note that at this moment, Prog.Pc is a sequence number; it's
+		// not a real PC until after assembly, so this mapping has to
+		// be done later.
+		debugInfo.GetPC = func(b, v ssa.ID) int64 {
+			switch v {
+			case ssa.BlockStart.ID:
+				if b == f.Entry.ID {
+					return 0 // Start at the very beginning, at the assembler-generated prologue.
+					// This should only happen for function args (ssa.OpArg).
+				}
+				return bstart[b].Pc
+			case ssa.BlockEnd.ID:
+				return e.curfn.LSym.Size
+			default:
+				return valueToProgAfter[v].Pc
+			}
+		}
+	}
+
+	// Resolve branches, and relax DefaultStmt into NotStmt
+	for _, br := range s.Branches {
+		br.P.To.SetTarget(s.bstart[br.B.ID])
+		if br.P.Pos.IsStmt() != src.PosIsStmt {
+			br.P.Pos = br.P.Pos.WithNotStmt()
+		} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
+			br.P.Pos = br.P.Pos.WithNotStmt()
+		}
+
+	}
+
+	if e.log { // spew to stdout
+		filename := ""
+		for p := pp.Text; p != nil; p = p.Link {
+			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+				filename = p.InnermostFilename()
+				f.Logf("# %s\n", filename)
+			}
+
+			var s string
+			if v, ok := progToValue[p]; ok {
+				s = v.String()
+			} else if b, ok := progToBlock[p]; ok {
+				s = b.String()
+			} else {
+				s = "   " // most value and branch strings are 2-3 characters long
+			}
+			f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
+		}
+	}
+	if f.HTMLWriter != nil { // spew to ssa.html
+		var buf bytes.Buffer
+		buf.WriteString("<code>")
+		buf.WriteString("<dl class=\"ssa-gen\">")
+		filename := ""
+		for p := pp.Text; p != nil; p = p.Link {
+			// Don't spam every line with the file name, which is often huge.
+			// Only print changes, and "unknown" is not a change.
+			if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+				filename = p.InnermostFilename()
+				buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
+				buf.WriteString(html.EscapeString("# " + filename))
+				buf.WriteString("</dd>")
+			}
+
+			buf.WriteString("<dt class=\"ssa-prog-src\">")
+			if v, ok := progToValue[p]; ok {
+				buf.WriteString(v.HTML())
+			} else if b, ok := progToBlock[p]; ok {
+				buf.WriteString("<b>" + b.HTML() + "</b>")
+			}
+			buf.WriteString("</dt>")
+			buf.WriteString("<dd class=\"ssa-prog\">")
+			buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
+			buf.WriteString("</dd>")
+		}
+		buf.WriteString("</dl>")
+		buf.WriteString("</code>")
+		f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
+	}
+
+	defframe(&s, e)
+
+	f.HTMLWriter.Close()
+	f.HTMLWriter = nil
+}
+
+func defframe(s *State, e *ssafn) {
+	pp := s.pp
+
+	frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
+	if Arch.PadFrame != nil {
+		frame = Arch.PadFrame(frame)
+	}
+
+	// Fill in argument and frame size.
+	pp.Text.To.Type = obj.TYPE_TEXTSIZE
+	pp.Text.To.Val = int32(types.Rnd(e.curfn.Type().ArgWidth(), int64(types.RegSize)))
+	pp.Text.To.Offset = frame
+
+	// Insert code to zero ambiguously live variables so that the
+	// garbage collector only sees initialized values when it
+	// looks for pointers.
+	p := pp.Text
+	var lo, hi int64
+
+	// Opaque state for backend to use. Current backends use it to
+	// keep track of which helper registers have been zeroed.
+	var state uint32
+
+	// Iterate through declarations. They are sorted in decreasing Xoffset order.
+	for _, n := range e.curfn.Dcl {
+		if !n.Needzero() {
+			continue
+		}
+		if n.Class != ir.PAUTO {
+			e.Fatalf(n.Pos(), "needzero class %d", n.Class)
+		}
+		if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
+			e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
+		}
+
+		if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
+			// Merge with range we already have.
+			lo = n.FrameOffset()
+			continue
+		}
+
+		// Zero old range
+		p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+
+		// Set new range.
+		lo = n.FrameOffset()
+		hi = lo + n.Type().Size()
+	}
+
+	// Zero final range.
+	Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+}
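+
+// A worked example of the merge rule above (offsets hypothetical, with
+// RegSize 8): autos at frame offsets [-16,-8) and [-64,-48) are farther
+// apart than 2*RegSize, so two separate ZeroRange calls are emitted;
+// autos at [-16,-8) and [-32,-24) fall within 2*RegSize of each other,
+// so they are zeroed as one merged range covering [-32,-8).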
+
+// An IndexJump describes one of the consecutive jump instructions
+// generated to model a specific branching.
+type IndexJump struct {
+	Jump  obj.As
+	Index int
+}
+
+func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
+	p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
+	p.Pos = b.Pos
+}
+
+// CombJump generates combinational instructions (2 at present) for a block jump,
+// so that the behavior of non-standard condition codes can be simulated.
+func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
+	switch next {
+	case b.Succs[0].Block():
+		s.oneJump(b, &jumps[0][0])
+		s.oneJump(b, &jumps[0][1])
+	case b.Succs[1].Block():
+		s.oneJump(b, &jumps[1][0])
+		s.oneJump(b, &jumps[1][1])
+	default:
+		var q *obj.Prog
+		if b.Likely != ssa.BranchUnlikely {
+			s.oneJump(b, &jumps[1][0])
+			s.oneJump(b, &jumps[1][1])
+			q = s.Br(obj.AJMP, b.Succs[1].Block())
+		} else {
+			s.oneJump(b, &jumps[0][0])
+			s.oneJump(b, &jumps[0][1])
+			q = s.Br(obj.AJMP, b.Succs[0].Block())
+		}
+		q.Pos = b.Pos
+	}
+}
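+
+// A hedged usage sketch (opcodes hypothetical): an architecture backend
+// supplies one row of the table per fallthrough case; row 0 is used when
+// next is Succs[0], row 1 when next is Succs[1], and when neither
+// successor falls through, one row is emitted followed by an
+// unconditional JMP chosen by branch likelihood:
+//
+//	var leJumps = [2][2]IndexJump{
+//		{{Jump: ABLS, Index: 0}, {Jump: ABHS, Index: 1}}, // next == b.Succs[0]
+//		{{Jump: ABMI, Index: 0}, {Jump: ABEQ, Index: 0}}, // next == b.Succs[1]
+//	}
+//	s.CombJump(b, next, &leJumps)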
+
+// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
+func AddAux(a *obj.Addr, v *ssa.Value) {
+	AddAux2(a, v, v.AuxInt)
+}
+func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
+	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
+		v.Fatalf("bad AddAux addr %v", a)
+	}
+	// add integer offset
+	a.Offset += offset
+
+	// If no additional symbol offset, we're done.
+	if v.Aux == nil {
+		return
+	}
+	// Add symbol's offset from its base register.
+	switch n := v.Aux.(type) {
+	case *ssa.AuxCall:
+		a.Name = obj.NAME_EXTERN
+		a.Sym = n.Fn
+	case *obj.LSym:
+		a.Name = obj.NAME_EXTERN
+		a.Sym = n
+	case *ir.Name:
+		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+			a.Name = obj.NAME_PARAM
+			a.Sym = ir.Orig(n).(*ir.Name).Linksym()
+			a.Offset += n.FrameOffset()
+			break
+		}
+		a.Name = obj.NAME_AUTO
+		a.Sym = n.Linksym()
+		a.Offset += n.FrameOffset()
+	default:
+		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
+	}
+}
+
+// extendIndex extends idx to a full int width.
+// The generated code panics with the given kind if idx does not fit in an int (only on 32-bit archs).
+func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+	size := idx.Type.Size()
+	if size == s.config.PtrSize {
+		return idx
+	}
+	if size > s.config.PtrSize {
+		// truncate 64-bit indexes on 32-bit pointer archs. Test the
+		// high word and branch to out-of-bounds failure if it is not 0.
+		var lo *ssa.Value
+		if idx.Type.IsSigned() {
+			lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
+		} else {
+			lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
+		}
+		if bounded || base.Flag.B != 0 {
+			return lo
+		}
+		bNext := s.f.NewBlock(ssa.BlockPlain)
+		bPanic := s.f.NewBlock(ssa.BlockExit)
+		hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
+		cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
+		if !idx.Type.IsSigned() {
+			switch kind {
+			case ssa.BoundsIndex:
+				kind = ssa.BoundsIndexU
+			case ssa.BoundsSliceAlen:
+				kind = ssa.BoundsSliceAlenU
+			case ssa.BoundsSliceAcap:
+				kind = ssa.BoundsSliceAcapU
+			case ssa.BoundsSliceB:
+				kind = ssa.BoundsSliceBU
+			case ssa.BoundsSlice3Alen:
+				kind = ssa.BoundsSlice3AlenU
+			case ssa.BoundsSlice3Acap:
+				kind = ssa.BoundsSlice3AcapU
+			case ssa.BoundsSlice3B:
+				kind = ssa.BoundsSlice3BU
+			case ssa.BoundsSlice3C:
+				kind = ssa.BoundsSlice3CU
+			}
+		}
+		b := s.endBlock()
+		b.Kind = ssa.BlockIf
+		b.SetControl(cmp)
+		b.Likely = ssa.BranchLikely
+		b.AddEdgeTo(bNext)
+		b.AddEdgeTo(bPanic)
+
+		s.startBlock(bPanic)
+		mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
+		s.endBlock().SetControl(mem)
+		s.startBlock(bNext)
+
+		return lo
+	}
+
+	// Extend value to the required size
+	var op ssa.Op
+	if idx.Type.IsSigned() {
+		switch 10*size + s.config.PtrSize {
+		case 14:
+			op = ssa.OpSignExt8to32
+		case 18:
+			op = ssa.OpSignExt8to64
+		case 24:
+			op = ssa.OpSignExt16to32
+		case 28:
+			op = ssa.OpSignExt16to64
+		case 48:
+			op = ssa.OpSignExt32to64
+		default:
+			s.Fatalf("bad signed index extension %s", idx.Type)
+		}
+	} else {
+		switch 10*size + s.config.PtrSize {
+		case 14:
+			op = ssa.OpZeroExt8to32
+		case 18:
+			op = ssa.OpZeroExt8to64
+		case 24:
+			op = ssa.OpZeroExt16to32
+		case 28:
+			op = ssa.OpZeroExt16to64
+		case 48:
+			op = ssa.OpZeroExt32to64
+		default:
+			s.Fatalf("bad unsigned index extension %s", idx.Type)
+		}
+	}
+	return s.newValue1(op, types.Types[types.TINT], idx)
+}
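+
+// The 10*size+PtrSize key in the switches above packs the index width and
+// the pointer width into one small integer. For illustration, the cases
+// decode as:
+//
+//	size=1, PtrSize=4 -> 14 -> SignExt8to32  / ZeroExt8to32
+//	size=2, PtrSize=8 -> 28 -> SignExt16to64 / ZeroExt16to64
+//	size=4, PtrSize=8 -> 48 -> SignExt32to64 / ZeroExt32to64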
+
+// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
+// Called during ssaGenValue.
+func CheckLoweredPhi(v *ssa.Value) {
+	if v.Op != ssa.OpPhi {
+		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
+	}
+	if v.Type.IsMemory() {
+		return
+	}
+	f := v.Block.Func
+	loc := f.RegAlloc[v.ID]
+	for _, a := range v.Args {
+		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
+			v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
+		}
+	}
+}
+
+// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
+// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
+// That register contains the closure pointer on closure entry.
+func CheckLoweredGetClosurePtr(v *ssa.Value) {
+	entry := v.Block.Func.Entry
+	if entry != v.Block || entry.Values[0] != v {
+		base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+	}
+}
+
+func AddrAuto(a *obj.Addr, v *ssa.Value) {
+	n, off := ssa.AutoVar(v)
+	a.Type = obj.TYPE_MEM
+	a.Sym = n.Linksym()
+	a.Reg = int16(Arch.REGSP)
+	a.Offset = n.FrameOffset() + off
+	if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+		a.Name = obj.NAME_PARAM
+	} else {
+		a.Name = obj.NAME_AUTO
+	}
+}
+
+func (s *State) AddrScratch(a *obj.Addr) {
+	if s.ScratchFpMem == nil {
+		panic("no scratch memory available; forgot to declare usesScratch for Op?")
+	}
+	a.Type = obj.TYPE_MEM
+	a.Name = obj.NAME_AUTO
+	a.Sym = s.ScratchFpMem.Linksym()
+	a.Reg = int16(Arch.REGSP)
+	a.Offset = s.ScratchFpMem.Offset_
+}
+
+// Call returns a new CALL instruction for the SSA value v.
+// It uses PrepareCall to prepare the call.
+func (s *State) Call(v *ssa.Value) *obj.Prog {
+	pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
+	s.PrepareCall(v)
+
+	p := s.Prog(obj.ACALL)
+	if pPosIsStmt == src.PosIsStmt {
+		p.Pos = v.Pos.WithIsStmt()
+	} else {
+		p.Pos = v.Pos.WithNotStmt()
+	}
+	if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = sym.Fn
+	} else {
+		// TODO(mdempsky): Can these differences be eliminated?
+		switch Arch.LinkArch.Family {
+		case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
+			p.To.Type = obj.TYPE_REG
+		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
+			p.To.Type = obj.TYPE_MEM
+		default:
+			base.Fatalf("unknown indirect call family")
+		}
+		p.To.Reg = v.Args[0].Reg()
+	}
+	return p
+}
+
+// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
+// It must be called immediately before emitting the actual CALL instruction,
+// since it emits PCDATA for the stack map at the call (calls are safe points).
+func (s *State) PrepareCall(v *ssa.Value) {
+	idx := s.livenessMap.Get(v)
+	if !idx.StackMapValid() {
+		// See Liveness.hasStackMap.
+		if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
+			base.Fatalf("missing stack map index for %v", v.LongString())
+		}
+	}
+
+	call, ok := v.Aux.(*ssa.AuxCall)
+
+	if ok && call.Fn == ir.Syms.Deferreturn {
+		// Deferred calls will appear to be returning to
+		// the CALL deferreturn(SB) that we are about to emit.
+		// However, the stack trace code will show the line
+		// of the instruction byte before the return PC.
+		// To avoid that being an unrelated instruction,
+		// insert an actual hardware NOP that will have the right line number.
+		// This is different from obj.ANOP, which is a virtual no-op
+		// that doesn't make it into the instruction stream.
+		Arch.Ginsnopdefer(s.pp)
+	}
+
+	if ok {
+		// Record call graph information for nowritebarrierrec
+		// analysis.
+		if nowritebarrierrecCheck != nil {
+			nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
+		}
+	}
+
+	if s.maxarg < v.AuxInt {
+		s.maxarg = v.AuxInt
+	}
+}
+
+// UseArgs records the fact that an instruction needs a certain amount of
+// callee args space for its use.
+func (s *State) UseArgs(n int64) {
+	if s.maxarg < n {
+		s.maxarg = n
+	}
+}
+
+// fieldIdx finds the index of the field referred to by the ODOT node n.
+func fieldIdx(n *ir.SelectorExpr) int {
+	t := n.X.Type()
+	if !t.IsStruct() {
+		panic("ODOT's LHS is not a struct")
+	}
+
+	for i, f := range t.Fields().Slice() {
+		if f.Sym == n.Sel {
+			if f.Offset != n.Offset() {
+				panic("field offset doesn't match")
+			}
+			return i
+		}
+	}
+	panic(fmt.Sprintf("can't find field in expr %v\n", n))
+
+	// TODO: keep the result of this function somewhere in the ODOT Node
+	// so we don't have to recompute it each time we need it.
+}
+
+// ssafn holds frontend information about a function that the backend is processing.
+// It also exports a bunch of compiler services for the ssa backend.
+type ssafn struct {
+	curfn        *ir.Func
+	strings      map[string]*obj.LSym // map from constant string to data symbols
+	scratchFpMem *ir.Name             // temp for floating point register / memory moves on some architectures
+	stksize      int64                // stack size for current frame
+	stkptrsize   int64                // prefix of stack containing pointers
+	log          bool                 // print ssa debug to stdout
+}
+
+// StringData returns a symbol which
+// is the data component of a global string constant containing s.
+func (e *ssafn) StringData(s string) *obj.LSym {
+	if aux, ok := e.strings[s]; ok {
+		return aux
+	}
+	if e.strings == nil {
+		e.strings = make(map[string]*obj.LSym)
+	}
+	data := staticdata.StringSym(e.curfn.Pos(), s)
+	e.strings[s] = data
+	return data
+}
+
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
+	return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
+}
+
+func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+	ptrType := types.NewPtr(types.Types[types.TUINT8])
+	lenType := types.Types[types.TINT]
+	// Split this string up into two separate variables.
+	p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+	l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+	return p, l
+}
+
+func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+	n := name.N
+	u := types.Types[types.TUINTPTR]
+	t := types.NewPtr(types.Types[types.TUINT8])
+	// Split this interface up into two separate variables.
+	f := ".itab"
+	if n.Type().IsEmptyInterface() {
+		f = ".type"
+	}
+	c := e.SplitSlot(&name, f, 0, u) // see comment in typebits.Set
+	d := e.SplitSlot(&name, ".data", u.Size(), t)
+	return c, d
+}
+
+func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
+	ptrType := types.NewPtr(name.Type.Elem())
+	lenType := types.Types[types.TINT]
+	p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+	l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+	c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
+	return p, l, c
+}
+
+func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+	s := name.Type.Size() / 2
+	var t *types.Type
+	if s == 8 {
+		t = types.Types[types.TFLOAT64]
+	} else {
+		t = types.Types[types.TFLOAT32]
+	}
+	r := e.SplitSlot(&name, ".real", 0, t)
+	i := e.SplitSlot(&name, ".imag", t.Size(), t)
+	return r, i
+}
+
+func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+	var t *types.Type
+	if name.Type.IsSigned() {
+		t = types.Types[types.TINT32]
+	} else {
+		t = types.Types[types.TUINT32]
+	}
+	if Arch.LinkArch.ByteOrder == binary.BigEndian {
+		return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
+	}
+	return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
+}
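+
+// For example (layout as encoded above): an int64 variable x on a
+// little-endian target splits into x.lo at offset 0 and x.hi at offset 4,
+// while on a big-endian target x.hi sits at offset 0 and x.lo at offset 4.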
+
+func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
+	st := name.Type
+	// Note: the _ field may appear several times.  But
+	// have no fear, identically-named but distinct Autos are
+	// ok, albeit maybe confusing for a debugger.
+	return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
+}
+
+func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
+	n := name.N
+	at := name.Type
+	if at.NumElem() != 1 {
+		e.Fatalf(n.Pos(), "bad array size")
+	}
+	et := at.Elem()
+	return e.SplitSlot(&name, "[0]", 0, et)
+}
+
+func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
+	return reflectdata.ITabSym(it, offset)
+}
+
+// SplitSlot returns a slot representing the data of parent starting at offset.
+func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
+	node := parent.N
+
+	if node.Class != ir.PAUTO || node.Addrtaken() {
+		// addressed things and non-autos retain their parents (i.e., cannot truly be split)
+		return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
+	}
+
+	s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
+	n := ir.NewNameAt(parent.N.Pos(), s)
+	s.Def = n
+	ir.AsNode(s.Def).Name().SetUsed(true)
+	n.SetType(t)
+	n.Class = ir.PAUTO
+	n.SetEsc(ir.EscNever)
+	n.Curfn = e.curfn
+	e.curfn.Dcl = append(e.curfn.Dcl, n)
+	types.CalcSize(t)
+	return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
+}
+
+func (e *ssafn) CanSSA(t *types.Type) bool {
+	return TypeOK(t)
+}
+
+func (e *ssafn) Line(pos src.XPos) string {
+	return base.FmtPos(pos)
+}
+
+// Logf logs a message from the compiler.
+func (e *ssafn) Logf(msg string, args ...interface{}) {
+	if e.log {
+		fmt.Printf(msg, args...)
+	}
+}
+
+func (e *ssafn) Log() bool {
+	return e.log
+}
+
+// Fatalf reports a compiler error and exits.
+func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
+	base.Pos = pos
+	nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
+	base.Fatalf("'%s': "+msg, nargs...)
+}
+
+// Warnl reports a "warning", which is usually flag-triggered
+// logging output for the benefit of tests.
+func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
+	base.WarnfAt(pos, fmt_, args...)
+}
+
+func (e *ssafn) Debug_checknil() bool {
+	return base.Debug.Nil != 0
+}
+
+func (e *ssafn) UseWriteBarrier() bool {
+	return base.Flag.WB
+}
+
+func (e *ssafn) Syslook(name string) *obj.LSym {
+	switch name {
+	case "goschedguarded":
+		return ir.Syms.Goschedguarded
+	case "writeBarrier":
+		return ir.Syms.WriteBarrier
+	case "gcWriteBarrier":
+		return ir.Syms.GCWriteBarrier
+	case "typedmemmove":
+		return ir.Syms.Typedmemmove
+	case "typedmemclr":
+		return ir.Syms.Typedmemclr
+	}
+	e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
+	return nil
+}
+
+func (e *ssafn) SetWBPos(pos src.XPos) {
+	e.curfn.SetWBPos(pos)
+}
+
+func (e *ssafn) MyImportPath() string {
+	return base.Ctxt.Pkgpath
+}
+
+func clobberBase(n ir.Node) ir.Node {
+	if n.Op() == ir.ODOT {
+		n := n.(*ir.SelectorExpr)
+		if n.X.Type().NumFields() == 1 {
+			return clobberBase(n.X)
+		}
+	}
+	if n.Op() == ir.OINDEX {
+		n := n.(*ir.IndexExpr)
+		if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
+			return clobberBase(n.X)
+		}
+	}
+	return n
+}
+
+// callTargetLSym determines the correct LSym for 'callee' when called
+// from function 'caller'. There are a few different scenarios
+// to contend with here:
+//
+// 1. if 'caller' is an ABI wrapper, then we always want to use the
+//    LSym from the Func for the callee.
+//
+// 2. if 'caller' is not an ABI wrapper, then we look at the callee
+//    to see if it corresponds to a "known" ABI0 symbol (e.g. assembly
+//    routine defined in the current package); if so, we want the call to
+//    directly target the ABI0 symbol (effectively bypassing the
+//    ABIInternal->ABI0 wrapper for 'callee').
+//
+// 3. in all other cases, we want the regular ABIInternal linksym.
+//
+func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym {
+	lsym := callee.Linksym()
+	if !base.Flag.ABIWrap {
+		return lsym
+	}
+	fn := callee.Func
+	if fn == nil {
+		return lsym
+	}
+
+	// check for case 1 above
+	if callerLSym.ABIWrapper() {
+		if nlsym := fn.LSym; nlsym != nil {
+			lsym = nlsym
+		}
+	} else {
+		// check for case 2 above
+		defABI, hasDefABI := symabiDefs[lsym.Name]
+		if hasDefABI && defABI == obj.ABI0 {
+			lsym = callee.LinksymABI(obj.ABI0)
+		}
+	}
+	return lsym
+}
+
+func min8(a, b int8) int8 {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max8(a, b int8) int8 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+// deferstruct makes a runtime._defer structure, with additional space for
+// stksize bytes of args.
+func deferstruct(stksize int64) *types.Type {
+	makefield := func(name string, typ *types.Type) *types.Field {
+		// Unlike the global makefield function, this one needs to set Pkg
+		// because these types might be compared (in SSA CSE sorting).
+		// TODO: unify this makefield and the global one above.
+		sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
+		return types.NewField(src.NoXPos, sym, typ)
+	}
+	argtype := types.NewArray(types.Types[types.TUINT8], stksize)
+	argtype.Width = stksize
+	argtype.Align = 1
+	// These fields must match the ones in runtime/runtime2.go:_defer and
+	// cmd/compile/internal/gc/ssa.go:(*state).call.
+	fields := []*types.Field{
+		makefield("siz", types.Types[types.TUINT32]),
+		makefield("started", types.Types[types.TBOOL]),
+		makefield("heap", types.Types[types.TBOOL]),
+		makefield("openDefer", types.Types[types.TBOOL]),
+		makefield("sp", types.Types[types.TUINTPTR]),
+		makefield("pc", types.Types[types.TUINTPTR]),
+		// Note: the types here don't really matter. Defer structures
+		// are always scanned explicitly during stack copying and GC,
+		// so we make them uintptr type even though they are real pointers.
+		makefield("fn", types.Types[types.TUINTPTR]),
+		makefield("_panic", types.Types[types.TUINTPTR]),
+		makefield("link", types.Types[types.TUINTPTR]),
+		makefield("framepc", types.Types[types.TUINTPTR]),
+		makefield("varp", types.Types[types.TUINTPTR]),
+		makefield("fd", types.Types[types.TUINTPTR]),
+		makefield("args", argtype),
+	}
+
+	// build struct holding the above fields
+	s := types.NewStruct(types.NoPkg, fields)
+	s.SetNoalg(true)
+	types.CalcStructSize(s)
+	return s
+}
+
+var (
+	BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
+	ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
+)
+
+// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
+var GCWriteBarrierReg map[int16]*obj.LSym
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
new file mode 100644
index 0000000..b06fd7a
--- /dev/null
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -0,0 +1,353 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"go/constant"
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strconv"
+	"sync"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+// InitAddrOffset writes the static address lsym+off to the data of n at offset noff; it does not modify n.
+// It is the caller's responsibility to make sure lsym comes from an ONAME/PEXTERN node.
+func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) {
+	if n.Op() != ir.ONAME {
+		base.Fatalf("InitAddr n op %v", n.Op())
+	}
+	if n.Sym() == nil {
+		base.Fatalf("InitAddr nil n sym")
+	}
+	s := n.Linksym()
+	s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off)
+}
+
+// InitAddr is InitAddrOffset, with offset fixed to 0.
+func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) {
+	InitAddrOffset(n, noff, lsym, 0)
+}
+
+// InitSlice writes a static slice header {lsym, lencap, lencap} to the data of n at offset noff; it does not modify n.
+// It is the caller's responsibility to make sure lsym comes from an ONAME node.
+func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) {
+	s := n.Linksym()
+	s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0)
+	s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
+	s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
+}
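+
+// Illustration of the layout written above: the three machine words of
+// the slice header at n+noff become
+//
+//	[noff+0]                     ptr -> lsym
+//	[noff+types.SliceLenOffset]  len  = lencap
+//	[noff+types.SliceCapOffset]  cap  = lencap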
+
+func InitSliceBytes(nam *ir.Name, off int64, s string) {
+	if nam.Op() != ir.ONAME {
+		base.Fatalf("InitSliceBytes %v", nam)
+	}
+	InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s)))
+}
+
+const (
+	stringSymPrefix  = "go.string."
+	stringSymPattern = ".gostring.%d.%x"
+)
+
+// StringSym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
+func StringSym(pos src.XPos, s string) (data *obj.LSym) {
+	var symname string
+	if len(s) > 100 {
+		// Huge strings are hashed to avoid long names in object files.
+		// Indulge in some paranoia by writing the length of s, too,
+		// as protection against length extension attacks.
+		// The same naming pattern is also used by fileStringSym below.
+		h := sha256.New()
+		io.WriteString(h, s)
+		symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
+	} else {
+		// Small strings get named directly by their contents.
+		symname = strconv.Quote(s)
+	}
+
+	symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
+	if !symdata.OnList() {
+		off := dstringdata(symdata, 0, s, pos, "string")
+		objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+		symdata.Set(obj.AttrContentAddressable, true)
+	}
+
+	return symdata
+}
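+
+// For illustration of the naming scheme above: a short string "hello" is
+// interned as go.string."hello", while a string longer than 100 bytes is
+// interned under a hashed name such as
+//
+//	go.string..gostring.4242.3fc9b689...
+//
+// (the string's length followed by the sha256 hash of its contents;
+// hash abbreviated and hypothetical).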
+
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer f.Close()
+	info, err := f.Stat()
+	if err != nil {
+		return nil, 0, err
+	}
+	if !info.Mode().IsRegular() {
+		return nil, 0, fmt.Errorf("not a regular file")
+	}
+	size := info.Size()
+	if size <= 1*1024 {
+		data, err := ioutil.ReadAll(f)
+		if err != nil {
+			return nil, 0, err
+		}
+		if int64(len(data)) != size {
+			return nil, 0, fmt.Errorf("file changed between reads")
+		}
+		var sym *obj.LSym
+		if readonly {
+			sym = StringSym(pos, string(data))
+		} else {
+			sym = slicedata(pos, string(data)).Linksym()
+		}
+		if len(hash) > 0 {
+			sum := sha256.Sum256(data)
+			copy(hash, sum[:])
+		}
+		return sym, size, nil
+	}
+	if size > 2e9 {
+		// objw.Global takes an int32 size,
+		// and probably the rest of the toolchain
+		// can't handle such big symbols either.
+		// See golang.org/issue/9862.
+		return nil, 0, fmt.Errorf("file too large")
+	}
+
+	// File is too big to read and keep in memory.
+	// Compute hash if needed for read-only content hashing or if the caller wants it.
+	var sum []byte
+	if readonly || len(hash) > 0 {
+		h := sha256.New()
+		n, err := io.Copy(h, f)
+		if err != nil {
+			return nil, 0, err
+		}
+		if n != size {
+			return nil, 0, fmt.Errorf("file changed between reads")
+		}
+		sum = h.Sum(nil)
+		copy(hash, sum)
+	}
+
+	var symdata *obj.LSym
+	if readonly {
+		symname := fmt.Sprintf(stringSymPattern, size, sum)
+		symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
+		if !symdata.OnList() {
+			info := symdata.NewFileInfo()
+			info.Name = file
+			info.Size = size
+			objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+			// Note: AttrContentAddressable cannot be set here,
+			// because the content-addressable-handling code
+			// does not know about file symbols.
+		}
+	} else {
+		// Emit a zero-length data symbol
+		// and then fix up length and content to use file.
+		symdata = slicedata(pos, "").Linksym()
+		symdata.Size = size
+		symdata.Type = objabi.SNOPTRDATA
+		info := symdata.NewFileInfo()
+		info.Name = file
+		info.Size = size
+	}
+
+	return symdata, size, nil
+}
+
+var slicedataGen int
+
+func slicedata(pos src.XPos, s string) *ir.Name {
+	slicedataGen++
+	symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
+	sym := types.LocalPkg.Lookup(symname)
+	symnode := typecheck.NewName(sym)
+	sym.Def = symnode
+
+	lsym := symnode.Linksym()
+	off := dstringdata(lsym, 0, s, pos, "slice")
+	objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+
+	return symnode
+}
+
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+	// Objects that are too large will cause the data section to overflow right away,
+	// causing a cryptic error message from the linker. Check for oversize objects here
+	// and provide a useful error message instead.
+	if int64(len(t)) > 2e9 {
+		base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
+		return 0
+	}
+
+	s.WriteString(base.Ctxt, int64(off), len(t), t)
+	return off + len(t)
+}
+
+var (
+	funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
+	funcsyms   []*types.Sym
+)
+
+// FuncSym returns s·f.
+func FuncSym(s *types.Sym) *types.Sym {
+	// funcsymsmu here serves to protect not just mutations of funcsyms (below),
+	// but also the package lookup of the func sym name,
+	// since this function gets called concurrently from the backend.
+	// There are no other concurrent package lookups in the backend,
+	// except for the types package, which is protected separately.
+	// Reusing funcsymsmu to also cover this package lookup
+	// avoids a general, broader, expensive package lookup mutex.
+	// Note NeedFuncSym also does package look-up of func sym names,
+	// but it is only called serially, from the front end.
+	funcsymsmu.Lock()
+	sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
+	// Don't export s·f when compiling for dynamic linking.
+	// When dynamically linking, the necessary function
+	// symbols will be created explicitly with NeedFuncSym.
+	// See the NeedFuncSym comment for details.
+	if !base.Ctxt.Flag_dynlink && !existed {
+		funcsyms = append(funcsyms, s)
+	}
+	funcsymsmu.Unlock()
+	return sf
+}
+
+func FuncLinksym(n *ir.Name) *obj.LSym {
+	if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+		base.Fatalf("expected func name: %v", n)
+	}
+	return FuncSym(n.Sym()).Linksym()
+}
+
+func GlobalLinksym(n *ir.Name) *obj.LSym {
+	if n.Op() != ir.ONAME || n.Class != ir.PEXTERN {
+		base.Fatalf("expected global variable: %v", n)
+	}
+	return n.Linksym()
+}
+
+// NeedFuncSym ensures that s·f is exported, if needed.
+// It is only used with -dynlink.
+// When not compiling for dynamic linking,
+// the funcsyms are created as needed by
+// the packages that use them.
+// Normally we emit the s·f stubs as DUPOK syms,
+// but DUPOK doesn't work across shared library boundaries.
+// So instead, when dynamic linking, we only create
+// the s·f stubs in s's package.
+func NeedFuncSym(s *types.Sym) {
+	if base.Ctxt.InParallel {
+		// The append below probably just needs to lock
+		// funcsymsmu, like in FuncSym.
+		base.Fatalf("NeedFuncSym must be called in serial")
+	}
+	if !base.Ctxt.Flag_dynlink {
+		return
+	}
+	if s.IsBlank() {
+		return
+	}
+	if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+		// runtime.getg(), getclosureptr(), getcallerpc(), and
+		// getcallersp() are not real functions and so do not
+		// get funcsyms.
+		return
+	}
+	funcsyms = append(funcsyms, s)
+}
+
+func WriteFuncSyms() {
+	sort.Slice(funcsyms, func(i, j int) bool {
+		return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name
+	})
+	for _, s := range funcsyms {
+		sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
+		objw.SymPtr(sf, 0, s.Linksym(), 0)
+		objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+	}
+}
+
+// InitConst writes the static literal c to n.
+// Neither n nor c is modified.
+func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
+	if n.Op() != ir.ONAME {
+		base.Fatalf("InitConst n op %v", n.Op())
+	}
+	if n.Sym() == nil {
+		base.Fatalf("InitConst nil n sym")
+	}
+	if c.Op() == ir.ONIL {
+		return
+	}
+	if c.Op() != ir.OLITERAL {
+		base.Fatalf("InitConst c op %v", c.Op())
+	}
+	s := n.Linksym()
+	switch u := c.Val(); u.Kind() {
+	case constant.Bool:
+		i := int64(obj.Bool2int(constant.BoolVal(u)))
+		s.WriteInt(base.Ctxt, noff, wid, i)
+
+	case constant.Int:
+		s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
+
+	case constant.Float:
+		f, _ := constant.Float64Val(u)
+		switch c.Type().Kind() {
+		case types.TFLOAT32:
+			s.WriteFloat32(base.Ctxt, noff, float32(f))
+		case types.TFLOAT64:
+			s.WriteFloat64(base.Ctxt, noff, f)
+		}
+
+	case constant.Complex:
+		re, _ := constant.Float64Val(constant.Real(u))
+		im, _ := constant.Float64Val(constant.Imag(u))
+		switch c.Type().Kind() {
+		case types.TCOMPLEX64:
+			s.WriteFloat32(base.Ctxt, noff, float32(re))
+			s.WriteFloat32(base.Ctxt, noff+4, float32(im))
+		case types.TCOMPLEX128:
+			s.WriteFloat64(base.Ctxt, noff, re)
+			s.WriteFloat64(base.Ctxt, noff+8, im)
+		}
+
+	case constant.String:
+		i := constant.StringVal(u)
+		symdata := StringSym(n.Pos(), i)
+		s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
+		s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
+
+	default:
+		base.Fatalf("InitConst unhandled OLITERAL %v", c)
+	}
+}
diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go
new file mode 100644
index 0000000..8936c4f
--- /dev/null
+++ b/src/cmd/compile/internal/staticdata/embed.go
@@ -0,0 +1,181 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+	"path"
+	"sort"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+const (
+	embedUnknown = iota
+	embedBytes
+	embedString
+	embedFiles
+)
+
+func embedFileList(v *ir.Name, kind int) []string {
+	// Build list of files to store.
+	have := make(map[string]bool)
+	var list []string
+	for _, e := range *v.Embed {
+		for _, pattern := range e.Patterns {
+			files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
+			if !ok {
+				base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+			}
+			for _, file := range files {
+				if base.Flag.Cfg.Embed.Files[file] == "" {
+					base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+					continue
+				}
+				if !have[file] {
+					have[file] = true
+					list = append(list, file)
+				}
+				if kind == embedFiles {
+					for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
+						have[dir] = true
+						list = append(list, dir+"/")
+					}
+				}
+			}
+		}
+	}
+	sort.Slice(list, func(i, j int) bool {
+		return embedFileLess(list[i], list[j])
+	})
+
+	if kind == embedString || kind == embedBytes {
+		if len(list) > 1 {
+			base.ErrorfAt(v.Pos(), "invalid go:embed: multiple files for type %v", v.Type())
+			return nil
+		}
+	}
+
+	return list
+}
+
+// embedKind determines the kind of embedding variable.
+func embedKind(typ *types.Type) int {
+	if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
+		return embedFiles
+	}
+	if typ.Kind() == types.TSTRING {
+		return embedString
+	}
+	if typ.Sym() == nil && typ.IsSlice() && typ.Elem().Kind() == types.TUINT8 {
+		return embedBytes
+	}
+	return embedUnknown
+}
+
+func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
+	if name[len(name)-1] == '/' {
+		isDir = true
+		name = name[:len(name)-1]
+	}
+	i := len(name) - 1
+	for i >= 0 && name[i] != '/' {
+		i--
+	}
+	if i < 0 {
+		return ".", name, isDir
+	}
+	return name[:i], name[i+1:], isDir
+}
+
+// embedFileLess implements the sort order for a list of embedded files.
+// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
+func embedFileLess(x, y string) bool {
+	xdir, xelem, _ := embedFileNameSplit(x)
+	ydir, yelem, _ := embedFileNameSplit(y)
+	return xdir < ydir || xdir == ydir && xelem < yelem
+}
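+
+// For example (following embedFileNameSplit above): a trailing slash marks
+// a directory, and sorting is by (dir, elem) pairs:
+//
+//	embedFileNameSplit("a/b/")  == ("a", "b", true)
+//	embedFileLess("a/x.txt", "a/b/c.txt") == true // dir "a" sorts before dir "a/b"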
+
+// WriteEmbed emits the init data for a //go:embed variable,
+// which is either a string, a []byte, or an embed.FS.
+func WriteEmbed(v *ir.Name) {
+	// TODO(mdempsky): User errors should be reported by the frontend.
+
+	commentPos := (*v.Embed)[0].Pos
+	if !types.AllowsGoVersion(types.LocalPkg, 1, 16) {
+		prevPos := base.Pos
+		base.Pos = commentPos
+		base.ErrorfVers("go1.16", "go:embed")
+		base.Pos = prevPos
+		return
+	}
+	if base.Flag.Cfg.Embed.Patterns == nil {
+		base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
+		return
+	}
+	kind := embedKind(v.Type())
+	if kind == embedUnknown {
+		base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
+		return
+	}
+
+	files := embedFileList(v, kind)
+	switch kind {
+	case embedString, embedBytes:
+		file := files[0]
+		fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
+		if err != nil {
+			base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
+		}
+		sym := v.Linksym()
+		off := 0
+		off = objw.SymPtr(sym, off, fsym, 0)       // data string
+		off = objw.Uintptr(sym, off, uint64(size)) // len
+		if kind == embedBytes {
+			objw.Uintptr(sym, off, uint64(size)) // cap for slice
+		}
+
+	case embedFiles:
+		slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
+		off := 0
+		// []files pointed at by Files
+		off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
+		off = objw.Uintptr(slicedata, off, uint64(len(files)))
+		off = objw.Uintptr(slicedata, off, uint64(len(files)))
+
+		// embed/embed.go type file is:
+		//	name string
+		//	data string
+		//	hash [16]byte
+		// Emit one of these per file in the set.
+		const hashSize = 16
+		hash := make([]byte, hashSize)
+		for _, file := range files {
+			off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string
+			off = objw.Uintptr(slicedata, off, uint64(len(file)))
+			if strings.HasSuffix(file, "/") {
+				// entry for directory - no data
+				off = objw.Uintptr(slicedata, off, 0)
+				off = objw.Uintptr(slicedata, off, 0)
+				off += hashSize
+			} else {
+				fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
+				if err != nil {
+					base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
+				}
+				off = objw.SymPtr(slicedata, off, fsym, 0) // data string
+				off = objw.Uintptr(slicedata, off, uint64(size))
+				off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
+			}
+		}
+		objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+		sym := v.Linksym()
+		objw.SymPtr(sym, 0, slicedata, 0)
+	}
+}
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
new file mode 100644
index 0000000..f3ad82e
--- /dev/null
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -0,0 +1,610 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticinit
+
+import (
+	"fmt"
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+type Entry struct {
+	Xoffset int64   // struct, array only
+	Expr    ir.Node // bytes of run-time computed expressions
+}
+
+type Plan struct {
+	E []Entry
+}
+
+// A Schedule is used to decompose assignment statements into
+// static and dynamic initialization parts. Static initializations are
+// handled by populating variables' linker symbol data, while dynamic
+// initializations are accumulated to be executed in order.
+type Schedule struct {
+	// Out is the ordered list of dynamic initialization
+	// statements.
+	Out []ir.Node
+
+	Plans map[ir.Node]*Plan
+	Temps map[ir.Node]*ir.Name
+}
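+
+// A hedged usage sketch (caller names hypothetical): the frontend feeds
+// each package-level initialization statement through a Schedule, and
+// whatever could not be executed statically remains in Out for run-time
+// execution:
+//
+//	sched := Schedule{
+//		Plans: make(map[ir.Node]*Plan),
+//		Temps: make(map[ir.Node]*ir.Name),
+//	}
+//	for _, stmt := range initStmts { // hypothetical list of assignment nodes
+//		sched.StaticInit(stmt)
+//	}
+//	dynamic := sched.Out // statements still needing run-time init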
+
+func (s *Schedule) append(n ir.Node) {
+	s.Out = append(s.Out, n)
+}
+
+// StaticInit adds an initialization statement n to the schedule.
+func (s *Schedule) StaticInit(n ir.Node) {
+	if !s.tryStaticInit(n) {
+		if base.Flag.Percent != 0 {
+			ir.Dump("nonstatic", n)
+		}
+		s.append(n)
+	}
+}
+
+// tryStaticInit attempts to statically execute an initialization
+// statement and reports whether it succeeded.
+func (s *Schedule) tryStaticInit(nn ir.Node) bool {
+	// Only worry about simple "l = r" assignments. Multiple
+	// variable/expression OAS2 assignments have already been
+	// replaced by multiple simple OAS assignments, and the other
+	// OAS2* assignments mostly necessitate dynamic execution
+	// anyway.
+	if nn.Op() != ir.OAS {
+		return false
+	}
+	n := nn.(*ir.AssignStmt)
+	if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
+		// Discard.
+		return true
+	}
+	lno := ir.SetPos(n)
+	defer func() { base.Pos = lno }()
+	nam := n.X.(*ir.Name)
+	return s.StaticAssign(nam, 0, n.Y, nam.Type())
+}
+
+// staticcopy is like StaticAssign, but for copying an
+// already-initialized value r.
+func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
+	if rn.Class == ir.PFUNC {
+		// TODO if roff != 0 { panic }
+		staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn))
+		return true
+	}
+	if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
+		return false
+	}
+	if rn.Defn.Op() != ir.OAS {
+		return false
+	}
+	if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
+		return false
+	}
+	if rn.Embed != nil {
+		return false
+	}
+	orig := rn
+	r := rn.Defn.(*ir.AssignStmt).Y
+	if r == nil {
+		// No explicit initialization value. Probably zeroed but perhaps
+		// supplied externally and of unknown value.
+		return false
+	}
+
+	for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
+		r = r.(*ir.ConvExpr).X
+	}
+
+	switch r.Op() {
+	case ir.OMETHEXPR:
+		r = r.(*ir.SelectorExpr).FuncName()
+		fallthrough
+	case ir.ONAME:
+		r := r.(*ir.Name)
+		if s.staticcopy(l, loff, r, typ) {
+			return true
+		}
+		// We may have skipped past one or more OCONVNOPs, so
+		// use conv to ensure r is assignable to l (#13263).
+		dst := ir.Node(l)
+		if loff != 0 || !types.Identical(typ, l.Type()) {
+			dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
+		}
+		s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ)))
+		return true
+
+	case ir.ONIL:
+		return true
+
+	case ir.OLITERAL:
+		if ir.IsZero(r) {
+			return true
+		}
+		staticdata.InitConst(l, loff, r, int(typ.Width))
+		return true
+
+	case ir.OADDR:
+		r := r.(*ir.AddrExpr)
+		if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME {
+			staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a))
+			return true
+		}
+
+	case ir.OPTRLIT:
+		r := r.(*ir.AddrExpr)
+		switch r.X.Op() {
+		case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
+			// copy pointer
+			staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r]))
+			return true
+		}
+
+	case ir.OSLICELIT:
+		r := r.(*ir.CompLitExpr)
+		// copy slice
+		staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len)
+		return true
+
+	case ir.OARRAYLIT, ir.OSTRUCTLIT:
+		r := r.(*ir.CompLitExpr)
+		p := s.Plans[r]
+		for i := range p.E {
+			e := &p.E[i]
+			typ := e.Expr.Type()
+			if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+				staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Width))
+				continue
+			}
+			x := e.Expr
+			if x.Op() == ir.OMETHEXPR {
+				x = x.(*ir.SelectorExpr).FuncName()
+			}
+			if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
+				continue
+			}
+			// Requires computation, but we're
+			// copying someone else's computation.
+			ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
+			rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
+			ir.SetPos(rr)
+			s.append(ir.NewAssignStmt(base.Pos, ll, rr))
+		}
+
+		return true
+	}
+
+	return false
+}
+
+func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
+	if r == nil {
+		// No explicit initialization value. Either zero or supplied
+		// externally.
+		return true
+	}
+	for r.Op() == ir.OCONVNOP {
+		r = r.(*ir.ConvExpr).X
+	}
+
+	assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) {
+		if s.StaticAssign(a, aoff, v, v.Type()) {
+			return
+		}
+		var lhs ir.Node
+		if ir.IsBlank(a) {
+			// Don't use NameOffsetExpr with blank (#43677).
+			lhs = ir.BlankNode
+		} else {
+			lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type())
+		}
+		s.append(ir.NewAssignStmt(pos, lhs, v))
+	}
+
+	switch r.Op() {
+	case ir.ONAME:
+		r := r.(*ir.Name)
+		return s.staticcopy(l, loff, r, typ)
+
+	case ir.OMETHEXPR:
+		r := r.(*ir.SelectorExpr)
+		return s.staticcopy(l, loff, r.FuncName(), typ)
+
+	case ir.ONIL:
+		return true
+
+	case ir.OLITERAL:
+		if ir.IsZero(r) {
+			return true
+		}
+		staticdata.InitConst(l, loff, r, int(typ.Width))
+		return true
+
+	case ir.OADDR:
+		r := r.(*ir.AddrExpr)
+		if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN {
+			staticdata.InitAddrOffset(l, loff, name.Linksym(), offset)
+			return true
+		}
+		fallthrough
+
+	case ir.OPTRLIT:
+		r := r.(*ir.AddrExpr)
+		switch r.X.Op() {
+		case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
+			// Init pointer.
+			a := StaticName(r.X.Type())
+
+			s.Temps[r] = a
+			staticdata.InitAddr(l, loff, a.Linksym())
+
+			// Init underlying literal.
+			assign(base.Pos, a, 0, r.X)
+			return true
+		}
+		//dump("not static ptrlit", r);
+
+	case ir.OSTR2BYTES:
+		r := r.(*ir.ConvExpr)
+		if l.Class == ir.PEXTERN && r.X.Op() == ir.OLITERAL {
+			sval := ir.StringVal(r.X)
+			staticdata.InitSliceBytes(l, loff, sval)
+			return true
+		}
+
+	case ir.OSLICELIT:
+		r := r.(*ir.CompLitExpr)
+		s.initplan(r)
+		// Init slice.
+		ta := types.NewArray(r.Type().Elem(), r.Len)
+		ta.SetNoalg(true)
+		a := StaticName(ta)
+		s.Temps[r] = a
+		staticdata.InitSlice(l, loff, a.Linksym(), r.Len)
+		// Fall through to init underlying array.
+		l = a
+		loff = 0
+		fallthrough
+
+	case ir.OARRAYLIT, ir.OSTRUCTLIT:
+		r := r.(*ir.CompLitExpr)
+		s.initplan(r)
+
+		p := s.Plans[r]
+		for i := range p.E {
+			e := &p.E[i]
+			if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+				staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
+				continue
+			}
+			ir.SetPos(e.Expr)
+			assign(base.Pos, l, loff+e.Xoffset, e.Expr)
+		}
+
+		return true
+
+	case ir.OMAPLIT:
+		break
+
+	case ir.OCLOSURE:
+		r := r.(*ir.ClosureExpr)
+		if ir.IsTrivialClosure(r) {
+			if base.Debug.Closure > 0 {
+				base.WarnfAt(r.Pos(), "closure converted to global")
+			}
+			// Closures with no captured variables are globals,
+			// so the assignment can be done at link time.
+			// TODO if roff != 0 { panic }
+			staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname))
+			return true
+		}
+		ir.ClosureDebugRuntimeCheck(r)
+
+	case ir.OCONVIFACE:
+		// This logic is mirrored in isStaticCompositeLiteral.
+		// If you change something here, change it there, and vice versa.
+
+		// Determine the underlying concrete type and value we are converting from.
+		r := r.(*ir.ConvExpr)
+		val := ir.Node(r)
+		for val.Op() == ir.OCONVIFACE {
+			val = val.(*ir.ConvExpr).X
+		}
+
+		if val.Type().IsInterface() {
+			// val is an interface type.
+			// If val is nil, we can statically initialize l;
+			// both words are zero, so there is no work to do; report success.
+			// If val is non-nil, we have no concrete type to record,
+			// and we won't be able to statically initialize its value, so report failure.
+			return val.Op() == ir.ONIL
+		}
+
+		reflectdata.MarkTypeUsedInInterface(val.Type(), l.Linksym())
+
+		var itab *ir.AddrExpr
+		if typ.IsEmptyInterface() {
+			itab = reflectdata.TypePtr(val.Type())
+		} else {
+			itab = reflectdata.ITabAddr(val.Type(), typ)
+		}
+
+		// Create a copy of l to modify while we emit data.
+
+		// Emit itab, advance offset.
+		staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym)
+
+		// Emit data.
+		if types.IsDirectIface(val.Type()) {
+			if val.Op() == ir.ONIL {
+				// Nil is zero, nothing to do.
+				return true
+			}
+			// Copy val directly into n.
+			ir.SetPos(val)
+			assign(base.Pos, l, loff+int64(types.PtrSize), val)
+		} else {
+			// Construct temp to hold val, write pointer to temp into n.
+			a := StaticName(val.Type())
+			s.Temps[val] = a
+			assign(base.Pos, a, 0, val)
+			staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym())
+		}
+
+		return true
+	}
+
+	//dump("not static", r);
+	return false
+}
+
+func (s *Schedule) initplan(n ir.Node) {
+	if s.Plans[n] != nil {
+		return
+	}
+	p := new(Plan)
+	s.Plans[n] = p
+	switch n.Op() {
+	default:
+		base.Fatalf("initplan")
+
+	case ir.OARRAYLIT, ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		var k int64
+		for _, a := range n.List {
+			if a.Op() == ir.OKEY {
+				kv := a.(*ir.KeyExpr)
+				k = typecheck.IndexConst(kv.Key)
+				if k < 0 {
+					base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
+				}
+				a = kv.Value
+			}
+			s.addvalue(p, k*n.Type().Elem().Width, a)
+			k++
+		}
+
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, a := range n.List {
+			if a.Op() != ir.OSTRUCTKEY {
+				base.Fatalf("initplan structlit")
+			}
+			a := a.(*ir.StructKeyExpr)
+			if a.Field.IsBlank() {
+				continue
+			}
+			s.addvalue(p, a.Offset, a.Value)
+		}
+
+	case ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, a := range n.List {
+			if a.Op() != ir.OKEY {
+				base.Fatalf("initplan maplit")
+			}
+			a := a.(*ir.KeyExpr)
+			s.addvalue(p, -1, a.Value)
+		}
+	}
+}
+
+func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) {
+	// special case: zero can be dropped entirely
+	if ir.IsZero(n) {
+		return
+	}
+
+	// special case: inline struct and array (not slice) literals
+	if isvaluelit(n) {
+		s.initplan(n)
+		q := s.Plans[n]
+		for _, qe := range q.E {
+			// qe is a copy; we are not modifying entries in q.E
+			qe.Xoffset += xoffset
+			p.E = append(p.E, qe)
+		}
+		return
+	}
+
+	// add to plan
+	p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n})
+}
+
+// From here down is the walk analysis of composite literals.
+// Most of the work is to generate data statements for the
+// constant part of the composite literal.
+
+var statuniqgen int // name generator for static temps
+
+// StaticName returns a name backed by a (writable) static data symbol.
+// Use readonlystaticname for a read-only node.
+func StaticName(t *types.Type) *ir.Name {
+	// Don't use LookupNum; it interns the resulting string, but these are all unique.
+	n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+	statuniqgen++
+	typecheck.Declare(n, ir.PEXTERN)
+	n.SetType(t)
+	n.Linksym().Set(obj.AttrLocal, true)
+	return n
+}
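+
+// For example (assuming obj.StaticNamePref is ".stmp_"): successive calls
+// produce globals named .stmp_0, .stmp_1, ..., each declared PEXTERN and
+// marked local to the object file.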
+
+// StaticLoc returns the static address of n, if n has one; otherwise it reports ok=false.
+func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) {
+	if n == nil {
+		return nil, 0, false
+	}
+
+	switch n.Op() {
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		return n, 0, true
+
+	case ir.OMETHEXPR:
+		n := n.(*ir.SelectorExpr)
+		return StaticLoc(n.FuncName())
+
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		if name, offset, ok = StaticLoc(n.X); !ok {
+			break
+		}
+		offset += n.Offset()
+		return name, offset, true
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		if n.X.Type().IsSlice() {
+			break
+		}
+		if name, offset, ok = StaticLoc(n.X); !ok {
+			break
+		}
+		l := getlit(n.Index)
+		if l < 0 {
+			break
+		}
+
+		// Check for overflow.
+		if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) {
+			break
+		}
+		offset += int64(l) * n.Type().Width
+		return name, offset, true
+	}
+
+	return nil, 0, false
+}
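+// Hypothetical example: for a package-level variable x, an expression such
+// as x.f[2].g folds to (x, offsetof(f)+2*elemsize+offsetof(g), true), while
+// a slice index or a non-constant index makes StaticLoc report ok=false.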
+
+// AnySideEffects reports whether n contains any operations that could have observable side effects.
+func AnySideEffects(n ir.Node) bool {
+	return ir.Any(n, func(n ir.Node) bool {
+		switch n.Op() {
+		// Assume side effects unless we know otherwise.
+		default:
+			return true
+
+		// No side effects here (arguments are checked separately).
+		case ir.ONAME,
+			ir.ONONAME,
+			ir.OTYPE,
+			ir.OPACK,
+			ir.OLITERAL,
+			ir.ONIL,
+			ir.OADD,
+			ir.OSUB,
+			ir.OOR,
+			ir.OXOR,
+			ir.OADDSTR,
+			ir.OADDR,
+			ir.OANDAND,
+			ir.OBYTES2STR,
+			ir.ORUNES2STR,
+			ir.OSTR2BYTES,
+			ir.OSTR2RUNES,
+			ir.OCAP,
+			ir.OCOMPLIT,
+			ir.OMAPLIT,
+			ir.OSTRUCTLIT,
+			ir.OARRAYLIT,
+			ir.OSLICELIT,
+			ir.OPTRLIT,
+			ir.OCONV,
+			ir.OCONVIFACE,
+			ir.OCONVNOP,
+			ir.ODOT,
+			ir.OEQ,
+			ir.ONE,
+			ir.OLT,
+			ir.OLE,
+			ir.OGT,
+			ir.OGE,
+			ir.OKEY,
+			ir.OSTRUCTKEY,
+			ir.OLEN,
+			ir.OMUL,
+			ir.OLSH,
+			ir.ORSH,
+			ir.OAND,
+			ir.OANDNOT,
+			ir.ONEW,
+			ir.ONOT,
+			ir.OBITNOT,
+			ir.OPLUS,
+			ir.ONEG,
+			ir.OOROR,
+			ir.OPAREN,
+			ir.ORUNESTR,
+			ir.OREAL,
+			ir.OIMAG,
+			ir.OCOMPLEX:
+			return false
+
+		// Only possible side effect is division by zero.
+		case ir.ODIV, ir.OMOD:
+			n := n.(*ir.BinaryExpr)
+			if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
+				return true
+			}
+
+		// Only possible side effect is a panic on an invalid size,
+		// but many makechan and makemap calls use size zero, which is definitely OK.
+		case ir.OMAKECHAN, ir.OMAKEMAP:
+			n := n.(*ir.MakeExpr)
+			if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
+				return true
+			}
+
+		// Only possible side effect is panic on invalid size.
+		// TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
+		case ir.OMAKESLICE, ir.OMAKESLICECOPY:
+			return true
+		}
+		return false
+	})
+}
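+// Hypothetical examples of the classification above (expressions shown in
+// source form; AnySideEffects operates on their ir.Node representations):
+//
+//	x + y  -> false: pure arithmetic
+//	x / 2  -> false: divisor is a nonzero constant
+//	x / y  -> true:  possible division by zero
+//	f(x)   -> true:  default case; calls are assumed to have effects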
+
+func getlit(lit ir.Node) int {
+	if ir.IsSmallIntConst(lit) {
+		return int(ir.Int64Val(lit))
+	}
+	return -1
+}
+
+func isvaluelit(n ir.Node) bool {
+	return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
+}
diff --git a/src/cmd/compile/internal/syntax/dumper_test.go b/src/cmd/compile/internal/syntax/dumper_test.go
index f84bd2d..22680dc 100644
--- a/src/cmd/compile/internal/syntax/dumper_test.go
+++ b/src/cmd/compile/internal/syntax/dumper_test.go
@@ -13,7 +13,7 @@
 		t.Skip("skipping test in short mode")
 	}
 
-	// provide a dummy error handler so parsing doesn't stop after first error
+	// provide a no-op error handler so parsing doesn't stop after first error
 	ast, err := ParseFile(*src_, func(error) {}, nil, CheckBranches)
 	if err != nil {
 		t.Error(err)
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
index 815630f..487cab1 100644
--- a/src/cmd/compile/internal/syntax/nodes.go
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -114,7 +114,7 @@
 
 // All declarations belonging to the same group point to the same Group node.
 type Group struct {
-	dummy int // not empty so we are guaranteed different Group instances
+	_ int // not empty so we are guaranteed different Group instances
 }
 
 // ----------------------------------------------------------------------------
diff --git a/src/cmd/compile/internal/syntax/operator_string.go b/src/cmd/compile/internal/syntax/operator_string.go
index 3c759b2..a7cd40f 100644
--- a/src/cmd/compile/internal/syntax/operator_string.go
+++ b/src/cmd/compile/internal/syntax/operator_string.go
@@ -1,9 +1,37 @@
-// Code generated by "stringer -type Operator -linecomment"; DO NOT EDIT.
+// Code generated by "stringer -type Operator -linecomment tokens.go"; DO NOT EDIT.
 
 package syntax
 
 import "strconv"
 
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Def-1]
+	_ = x[Not-2]
+	_ = x[Recv-3]
+	_ = x[OrOr-4]
+	_ = x[AndAnd-5]
+	_ = x[Eql-6]
+	_ = x[Neq-7]
+	_ = x[Lss-8]
+	_ = x[Leq-9]
+	_ = x[Gtr-10]
+	_ = x[Geq-11]
+	_ = x[Add-12]
+	_ = x[Sub-13]
+	_ = x[Or-14]
+	_ = x[Xor-15]
+	_ = x[Mul-16]
+	_ = x[Div-17]
+	_ = x[Rem-18]
+	_ = x[And-19]
+	_ = x[AndNot-20]
+	_ = x[Shl-21]
+	_ = x[Shr-22]
+}
+
 const _Operator_name = ":!<-||&&==!=<<=>>=+-|^*/%&&^<<>>"
 
 var _Operator_index = [...]uint8{0, 1, 2, 4, 6, 8, 10, 12, 13, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 30, 32}
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
index c3b9aca..fe72e7a 100644
--- a/src/cmd/compile/internal/syntax/printer_test.go
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -18,7 +18,7 @@
 		t.Skip("skipping test in short mode")
 	}
 
-	// provide a dummy error handler so parsing doesn't stop after first error
+	// provide a no-op error handler so parsing doesn't stop after first error
 	ast, err := ParseFile(*src_, func(error) {}, nil, 0)
 	if err != nil {
 		t.Error(err)
diff --git a/src/cmd/compile/internal/syntax/token_string.go b/src/cmd/compile/internal/syntax/token_string.go
index 3cf5473..ef295eb 100644
--- a/src/cmd/compile/internal/syntax/token_string.go
+++ b/src/cmd/compile/internal/syntax/token_string.go
@@ -1,9 +1,62 @@
-// Code generated by "stringer -type token -linecomment"; DO NOT EDIT.
+// Code generated by "stringer -type token -linecomment tokens.go"; DO NOT EDIT.
 
 package syntax
 
 import "strconv"
 
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[_EOF-1]
+	_ = x[_Name-2]
+	_ = x[_Literal-3]
+	_ = x[_Operator-4]
+	_ = x[_AssignOp-5]
+	_ = x[_IncOp-6]
+	_ = x[_Assign-7]
+	_ = x[_Define-8]
+	_ = x[_Arrow-9]
+	_ = x[_Star-10]
+	_ = x[_Lparen-11]
+	_ = x[_Lbrack-12]
+	_ = x[_Lbrace-13]
+	_ = x[_Rparen-14]
+	_ = x[_Rbrack-15]
+	_ = x[_Rbrace-16]
+	_ = x[_Comma-17]
+	_ = x[_Semi-18]
+	_ = x[_Colon-19]
+	_ = x[_Dot-20]
+	_ = x[_DotDotDot-21]
+	_ = x[_Break-22]
+	_ = x[_Case-23]
+	_ = x[_Chan-24]
+	_ = x[_Const-25]
+	_ = x[_Continue-26]
+	_ = x[_Default-27]
+	_ = x[_Defer-28]
+	_ = x[_Else-29]
+	_ = x[_Fallthrough-30]
+	_ = x[_For-31]
+	_ = x[_Func-32]
+	_ = x[_Go-33]
+	_ = x[_Goto-34]
+	_ = x[_If-35]
+	_ = x[_Import-36]
+	_ = x[_Interface-37]
+	_ = x[_Map-38]
+	_ = x[_Package-39]
+	_ = x[_Range-40]
+	_ = x[_Return-41]
+	_ = x[_Select-42]
+	_ = x[_Struct-43]
+	_ = x[_Switch-44]
+	_ = x[_Type-45]
+	_ = x[_Var-46]
+	_ = x[tokenCount-47]
+}
+
 const _token_name = "EOFnameliteralopop=opop=:=<-*([{)]},;:....breakcasechanconstcontinuedefaultdeferelsefallthroughforfuncgogotoifimportinterfacemappackagerangereturnselectstructswitchtypevar"
 
 var _token_index = [...]uint8{0, 3, 7, 14, 16, 19, 23, 24, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 42, 47, 51, 55, 60, 68, 75, 80, 84, 95, 98, 102, 104, 108, 110, 116, 125, 128, 135, 140, 146, 152, 158, 164, 168, 171, 171}
diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go
index 3b97cb6..2936b65 100644
--- a/src/cmd/compile/internal/syntax/tokens.go
+++ b/src/cmd/compile/internal/syntax/tokens.go
@@ -6,7 +6,7 @@
 
 type token uint
 
-//go:generate stringer -type token -linecomment
+//go:generate stringer -type token -linecomment tokens.go
 
 const (
 	_    token = iota
@@ -105,7 +105,7 @@
 
 type Operator uint
 
-//go:generate stringer -type Operator -linecomment
+//go:generate stringer -type Operator -linecomment tokens.go
 
 const (
 	_ Operator = iota
diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
new file mode 100644
index 0000000..decc296
--- /dev/null
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -0,0 +1,295 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"bufio"
+	"cmd/compile/internal/abi"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/obj/x86"
+	"cmd/internal/src"
+	"os"
+	"testing"
+)
+
+// AMD64 registers available:
+// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
+// - floating point: X0 - X14
+// The two arguments to NewABIConfig are the counts of available integer
+// and floating point registers (9 and 15 above, respectively).
+var configAMD64 = abi.NewABIConfig(9, 15)
+
+func TestMain(m *testing.M) {
+	ssagen.Arch.LinkArch = &x86.Linkamd64
+	ssagen.Arch.REGSP = x86.REGSP
+	ssagen.Arch.MAXWIDTH = 1 << 50
+	types.MaxWidth = ssagen.Arch.MAXWIDTH
+	base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
+	base.Ctxt.DiagFunc = base.Errorf
+	base.Ctxt.DiagFlush = base.FlushErrors
+	base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+	types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+	types.RegSize = ssagen.Arch.LinkArch.RegSize
+	typecheck.InitUniverse()
+	os.Exit(m.Run())
+}
+
+func TestABIUtilsBasic1(t *testing.T) {
+	// func(x int32) int32
+	i32 := types.Types[types.TINT32]
+	ft := mkFuncType(nil, []*types.Type{i32}, []*types.Type{i32})
+
+	// expected results
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 } spilloffset: 0 typ: int32
+        OUT 0: R{ I0 } spilloffset: -1 typ: int32
+        offsetToSpillArea: 0 spillAreaSize: 8
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsBasic2(t *testing.T) {
+	// func taking a long mix of integer, floating point, and complex params, returning (int32, float64, float64)
+	i8 := types.Types[types.TINT8]
+	i16 := types.Types[types.TINT16]
+	i32 := types.Types[types.TINT32]
+	i64 := types.Types[types.TINT64]
+	f32 := types.Types[types.TFLOAT32]
+	f64 := types.Types[types.TFLOAT64]
+	c64 := types.Types[types.TCOMPLEX64]
+	c128 := types.Types[types.TCOMPLEX128]
+	ft := mkFuncType(nil,
+		[]*types.Type{
+			i8, i16, i32, i64,
+			f32, f32, f64, f64,
+			i8, i16, i32, i64,
+			f32, f32, f64, f64,
+			c128, c128, c128, c128, c64,
+			i8, i16, i32, i64,
+			i8, i16, i32, i64},
+		[]*types.Type{i32, f64, f64})
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 } spilloffset: 0 typ: int8
+        IN 1: R{ I1 } spilloffset: 2 typ: int16
+        IN 2: R{ I2 } spilloffset: 4 typ: int32
+        IN 3: R{ I3 } spilloffset: 8 typ: int64
+        IN 4: R{ F0 } spilloffset: 16 typ: float32
+        IN 5: R{ F1 } spilloffset: 20 typ: float32
+        IN 6: R{ F2 } spilloffset: 24 typ: float64
+        IN 7: R{ F3 } spilloffset: 32 typ: float64
+        IN 8: R{ I4 } spilloffset: 40 typ: int8
+        IN 9: R{ I5 } spilloffset: 42 typ: int16
+        IN 10: R{ I6 } spilloffset: 44 typ: int32
+        IN 11: R{ I7 } spilloffset: 48 typ: int64
+        IN 12: R{ F4 } spilloffset: 56 typ: float32
+        IN 13: R{ F5 } spilloffset: 60 typ: float32
+        IN 14: R{ F6 } spilloffset: 64 typ: float64
+        IN 15: R{ F7 } spilloffset: 72 typ: float64
+        IN 16: R{ F8 F9 } spilloffset: 80 typ: complex128
+        IN 17: R{ F10 F11 } spilloffset: 96 typ: complex128
+        IN 18: R{ F12 F13 } spilloffset: 112 typ: complex128
+        IN 19: R{ } offset: 0 typ: complex128
+        IN 20: R{ } offset: 16 typ: complex64
+        IN 21: R{ I8 } spilloffset: 128 typ: int8
+        IN 22: R{ } offset: 24 typ: int16
+        IN 23: R{ } offset: 28 typ: int32
+        IN 24: R{ } offset: 32 typ: int64
+        IN 25: R{ } offset: 40 typ: int8
+        IN 26: R{ } offset: 42 typ: int16
+        IN 27: R{ } offset: 44 typ: int32
+        IN 28: R{ } offset: 48 typ: int64
+        OUT 0: R{ I0 } spilloffset: -1 typ: int32
+        OUT 1: R{ F0 } spilloffset: -1 typ: float64
+        OUT 2: R{ F1 } spilloffset: -1 typ: float64
+        offsetToSpillArea: 56 spillAreaSize: 136
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsArrays(t *testing.T) {
+	i32 := types.Types[types.TINT32]
+	ae := types.NewArray(i32, 0)
+	a1 := types.NewArray(i32, 1)
+	a2 := types.NewArray(i32, 2)
+	aa1 := types.NewArray(a1, 1)
+	ft := mkFuncType(nil, []*types.Type{a1, ae, aa1, a2},
+		[]*types.Type{a2, a1, ae, aa1})
+
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 } spilloffset: 0 typ: [1]int32
+        IN 1: R{ } offset: 0 typ: [0]int32
+        IN 2: R{ I1 } spilloffset: 4 typ: [1][1]int32
+        IN 3: R{ } offset: 0 typ: [2]int32
+        OUT 0: R{ } offset: 8 typ: [2]int32
+        OUT 1: R{ I0 } spilloffset: -1 typ: [1]int32
+        OUT 2: R{ } offset: 16 typ: [0]int32
+        OUT 3: R{ I1 } spilloffset: -1 typ: [1][1]int32
+        offsetToSpillArea: 16 spillAreaSize: 8
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsStruct1(t *testing.T) {
+	i8 := types.Types[types.TINT8]
+	i16 := types.Types[types.TINT16]
+	i32 := types.Types[types.TINT32]
+	i64 := types.Types[types.TINT64]
+	s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+	ft := mkFuncType(nil, []*types.Type{i8, s, i64},
+		[]*types.Type{s, i8, i32})
+
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 } spilloffset: 0 typ: int8
+        IN 1: R{ I1 I2 I3 I4 } spilloffset: 2 typ: struct { int8; int8; struct {}; int8; int16 }
+        IN 2: R{ I5 } spilloffset: 8 typ: int64
+        OUT 0: R{ I0 I1 I2 I3 } spilloffset: -1 typ: struct { int8; int8; struct {}; int8; int16 }
+        OUT 1: R{ I4 } spilloffset: -1 typ: int8
+        OUT 2: R{ I5 } spilloffset: -1 typ: int32
+        offsetToSpillArea: 0 spillAreaSize: 16
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsStruct2(t *testing.T) {
+	f64 := types.Types[types.TFLOAT64]
+	i64 := types.Types[types.TINT64]
+	s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})})
+	fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+	ft := mkFuncType(nil, []*types.Type{s, s, fs},
+		[]*types.Type{fs, fs})
+
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 } spilloffset: 0 typ: struct { int64; struct {} }
+        IN 1: R{ I1 } spilloffset: 16 typ: struct { int64; struct {} }
+        IN 2: R{ I2 F0 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} }
+        OUT 0: R{ I0 F0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
+        OUT 1: R{ I1 F1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
+        offsetToSpillArea: 0 spillAreaSize: 64
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsSliceString(t *testing.T) {
+	i32 := types.Types[types.TINT32]
+	sli32 := types.NewSlice(i32)
+	str := types.New(types.TSTRING)
+	i8 := types.Types[types.TINT8]
+	i64 := types.Types[types.TINT64]
+	ft := mkFuncType(nil, []*types.Type{sli32, i8, sli32, i8, str, i8, i64, sli32},
+		[]*types.Type{str, i64, str, sli32})
+
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: []int32
+        IN 1: R{ I3 } spilloffset: 24 typ: int8
+        IN 2: R{ I4 I5 I6 } spilloffset: 32 typ: []int32
+        IN 3: R{ I7 } spilloffset: 56 typ: int8
+        IN 4: R{ } offset: 0 typ: string
+        IN 5: R{ I8 } spilloffset: 57 typ: int8
+        IN 6: R{ } offset: 16 typ: int64
+        IN 7: R{ } offset: 24 typ: []int32
+        OUT 0: R{ I0 I1 } spilloffset: -1 typ: string
+        OUT 1: R{ I2 } spilloffset: -1 typ: int64
+        OUT 2: R{ I3 I4 } spilloffset: -1 typ: string
+        OUT 3: R{ I5 I6 I7 } spilloffset: -1 typ: []int32
+        offsetToSpillArea: 48 spillAreaSize: 64
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsMethod(t *testing.T) {
+	i16 := types.Types[types.TINT16]
+	i64 := types.Types[types.TINT64]
+	f64 := types.Types[types.TFLOAT64]
+
+	s1 := mkstruct([]*types.Type{i16, i16, i16})
+	ps1 := types.NewPtr(s1)
+	a7 := types.NewArray(ps1, 7)
+	ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16},
+		[]*types.Type{a7, f64, i64})
+
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; int16 }
+        IN 1: R{ I3 } spilloffset: 8 typ: *struct { int16; int16; int16 }
+        IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 }
+        IN 3: R{ F0 } spilloffset: 16 typ: float64
+        IN 4: R{ I4 } spilloffset: 24 typ: int16
+        IN 5: R{ I5 } spilloffset: 26 typ: int16
+        IN 6: R{ I6 } spilloffset: 28 typ: int16
+        OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 }
+        OUT 1: R{ F0 } spilloffset: -1 typ: float64
+        OUT 2: R{ I0 } spilloffset: -1 typ: int64
+        offsetToSpillArea: 112 spillAreaSize: 32
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABIUtilsInterfaces(t *testing.T) {
+	ei := types.Types[types.TINTER] // interface{}
+	pei := types.NewPtr(ei)         // *interface{}
+	fldt := mkFuncType(types.FakeRecvType(), []*types.Type{},
+		[]*types.Type{types.UntypedString})
+	field := types.NewField(src.NoXPos, nil, fldt)
+	// interface{ ...() string }
+	nei := types.NewInterface(types.LocalPkg, []*types.Field{field})
+
+	i16 := types.Types[types.TINT16]
+	tb := types.Types[types.TBOOL]
+	s1 := mkstruct([]*types.Type{i16, i16, tb})
+
+	ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16},
+		[]*types.Type{ei, nei, pei})
+
+	exp := makeExpectedDump(`
+        IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool }
+        IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {}
+        IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {}
+        IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { () untyped string }
+        IN 4: R{ } offset: 0 typ: *interface {}
+        IN 5: R{ } offset: 8 typ: interface { () untyped string }
+        IN 6: R{ } offset: 24 typ: int16
+        OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {}
+        OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { () untyped string }
+        OUT 2: R{ I4 } spilloffset: -1 typ: *interface {}
+        offsetToSpillArea: 32 spillAreaSize: 56
+`)
+
+	abitest(t, ft, exp)
+}
+
+func TestABINumParamRegs(t *testing.T) {
+	i8 := types.Types[types.TINT8]
+	i16 := types.Types[types.TINT16]
+	i32 := types.Types[types.TINT32]
+	i64 := types.Types[types.TINT64]
+	f32 := types.Types[types.TFLOAT32]
+	f64 := types.Types[types.TFLOAT64]
+	c64 := types.Types[types.TCOMPLEX64]
+	c128 := types.Types[types.TCOMPLEX128]
+
+	s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+	a := types.NewArray(s, 3)
+
+	nrtest(t, i8, 1)
+	nrtest(t, i16, 1)
+	nrtest(t, i32, 1)
+	nrtest(t, i64, 1)
+	nrtest(t, f32, 1)
+	nrtest(t, f64, 1)
+	nrtest(t, c64, 2)
+	nrtest(t, c128, 2)
+	nrtest(t, s, 4)
+	nrtest(t, a, 12)
+}
\ No newline at end of file
diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
new file mode 100644
index 0000000..19dd3a5
--- /dev/null
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -0,0 +1,164 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+// This file contains utility routines and harness infrastructure used
+// by the ABI tests in "abiutils_test.go".
+
+import (
+	"cmd/compile/internal/abi"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"fmt"
+	"strings"
+	"testing"
+	"text/scanner"
+)
+
+func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
+	field := types.NewField(src.NoXPos, s, t)
+	n := typecheck.NewName(s)
+	n.Class = which
+	field.Nname = n
+	n.SetType(t)
+	return field
+}
+
+// mkstruct is a helper routine to create a struct type with fields
+// of the types specified in 'fieldtypes'.
+func mkstruct(fieldtypes []*types.Type) *types.Type {
+	fields := make([]*types.Field, len(fieldtypes))
+	for k, t := range fieldtypes {
+		if t == nil {
+			panic("bad -- field has no type")
+		}
+		f := types.NewField(src.NoXPos, nil, t)
+		fields[k] = f
+	}
+	s := types.NewStruct(types.LocalPkg, fields)
+	return s
+}
+
+func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
+	q := typecheck.Lookup("?")
+	inf := []*types.Field{}
+	for _, it := range ins {
+		inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
+	}
+	outf := []*types.Field{}
+	for _, ot := range outs {
+		outf = append(outf, mkParamResultField(ot, q, ir.PPARAMOUT))
+	}
+	var rf *types.Field
+	if rcvr != nil {
+		rf = mkParamResultField(rcvr, q, ir.PPARAM)
+	}
+	return types.NewSignature(types.LocalPkg, rf, inf, outf)
+}
+
+type expectedDump struct {
+	dump string
+	file string
+	line int
+}
+
+func tokenize(src string) []string {
+	var s scanner.Scanner
+	s.Init(strings.NewReader(src))
+	res := []string{}
+	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+		res = append(res, s.TokenText())
+	}
+	return res
+}
+
+func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
+	n := ir.AsNode(f.Nname).(*ir.Name)
+	if n.FrameOffset() != int64(r.Offset()) {
+		t.Errorf("%s %d: got offset %d wanted %d t=%v",
+			which, idx, r.Offset(), n.Offset_, f.Type)
+		return 1
+	}
+	return 0
+}
+
+func makeExpectedDump(e string) expectedDump {
+	return expectedDump{dump: e}
+}
+
+func difftokens(atoks []string, etoks []string) string {
+	if len(atoks) != len(etoks) {
+		return fmt.Sprintf("expected %d tokens got %d",
+			len(etoks), len(atoks))
+	}
+	for i := 0; i < len(etoks); i++ {
+		if etoks[i] == atoks[i] {
+			continue
+		}
+
+		return fmt.Sprintf("diff at token %d: expected %q got %q",
+			i, etoks[i], atoks[i])
+	}
+	return ""
+}
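+// Usage sketch (hypothetical): dumps are compared token by token so that
+// whitespace and indentation differences are ignored:
+//
+//	reason := difftokens(tokenize("IN 0: R{ I0 }"), tokenize("IN 0:  R{ I0 }"))
+//	// reason == "" despite the extra space in the second dump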
+
+func nrtest(t *testing.T, ft *types.Type, expected int) {
+	types.CalcSize(ft)
+	got := configAMD64.NumParamRegs(ft)
+	if got != expected {
+		t.Errorf("]\nexpected num regs = %d, got %d, type %v", expected, got, ft)
+	}
+}
+
+func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
+	types.CalcSize(ft)
+
+	// Analyze with full set of registers.
+	regRes := configAMD64.ABIAnalyze(ft)
+	regResString := strings.TrimSpace(regRes.String())
+
+	// Check results.
+	reason := difftokens(tokenize(regResString), tokenize(exp.dump))
+	if reason != "" {
+		t.Errorf("\nexpected:\n%s\ngot:\n%s\nreason: %s",
+			strings.TrimSpace(exp.dump), regResString, reason)
+	}
+
+	// Analyze again with empty register set.
+	empty := abi.NewABIConfig(0, 0)
+	emptyRes := empty.ABIAnalyze(ft)
+	emptyResString := emptyRes.String()
+
+	// Walk the results and make sure the offsets assigned match
+	// up with those assigned by CalcSize. This verifies that when
+	// no registers are available, the ABI assignment degenerates
+	// back to the original ABI0.
+
+	// receiver
+	failed := 0
+	rfsl := ft.Recvs().Fields().Slice()
+	poff := 0
+	if len(rfsl) != 0 {
+		failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.InParams()[0], "receiver", 0)
+		poff = 1
+	}
+	// params
+	pfsl := ft.Params().Fields().Slice()
+	for k, f := range pfsl {
+		failed |= verifyParamResultOffset(t, f, emptyRes.InParams()[k+poff], "param", k)
+	}
+	// results
+	ofsl := ft.Results().Fields().Slice()
+	for k, f := range ofsl {
+		failed |= verifyParamResultOffset(t, f, emptyRes.OutParams()[k], "result", k)
+	}
+
+	if failed != 0 {
+		t.Logf("emptyres:\n%s\n", emptyResString)
+	}
+}
diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/test/bench_test.go
similarity index 98%
rename from src/cmd/compile/internal/gc/bench_test.go
rename to src/cmd/compile/internal/test/bench_test.go
index 8c42881..3fffe57 100644
--- a/src/cmd/compile/internal/gc/bench_test.go
+++ b/src/cmd/compile/internal/test/bench_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import "testing"
 
diff --git a/src/cmd/compile/internal/gc/constFold_test.go b/src/cmd/compile/internal/test/constFold_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/constFold_test.go
rename to src/cmd/compile/internal/test/constFold_test.go
index 59f905d..7159f0e 100644
--- a/src/cmd/compile/internal/gc/constFold_test.go
+++ b/src/cmd/compile/internal/test/constFold_test.go
@@ -1,7 +1,7 @@
 // run
 // Code generated by gen/constFoldGen.go. DO NOT EDIT.
 
-package gc
+package test
 
 import "testing"
 
diff --git a/src/cmd/compile/internal/gc/dep_test.go b/src/cmd/compile/internal/test/dep_test.go
similarity index 91%
rename from src/cmd/compile/internal/gc/dep_test.go
rename to src/cmd/compile/internal/test/dep_test.go
index c1dac93..26122e6 100644
--- a/src/cmd/compile/internal/gc/dep_test.go
+++ b/src/cmd/compile/internal/test/dep_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"internal/testenv"
@@ -18,7 +18,7 @@
 	}
 	for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
 		switch dep {
-		case "go/build", "go/token":
+		case "go/build", "go/scanner":
 			t.Errorf("undesired dependency on %q", dep)
 		}
 	}
diff --git a/src/cmd/compile/internal/gc/fixedbugs_test.go b/src/cmd/compile/internal/test/fixedbugs_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/fixedbugs_test.go
rename to src/cmd/compile/internal/test/fixedbugs_test.go
index 8ac4436..e7e2f7e 100644
--- a/src/cmd/compile/internal/gc/fixedbugs_test.go
+++ b/src/cmd/compile/internal/test/fixedbugs_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"internal/testenv"
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/test/float_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/float_test.go
rename to src/cmd/compile/internal/test/float_test.go
index c619d25..884a983 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/test/float_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"math"
diff --git a/src/cmd/compile/internal/gc/global_test.go b/src/cmd/compile/internal/test/global_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/global_test.go
rename to src/cmd/compile/internal/test/global_test.go
index edad6d0..5f5f7d6 100644
--- a/src/cmd/compile/internal/gc/global_test.go
+++ b/src/cmd/compile/internal/test/global_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"bytes"
diff --git a/src/cmd/compile/internal/gc/iface_test.go b/src/cmd/compile/internal/test/iface_test.go
similarity index 98%
rename from src/cmd/compile/internal/gc/iface_test.go
rename to src/cmd/compile/internal/test/iface_test.go
index 21c6587..ebc4f89 100644
--- a/src/cmd/compile/internal/gc/iface_test.go
+++ b/src/cmd/compile/internal/test/iface_test.go
@@ -2,15 +2,13 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
+
+import "testing"
 
 // Test to make sure we make copies of the values we
 // put in interfaces.
 
-import (
-	"testing"
-)
-
 var x int
 
 func TestEfaceConv1(t *testing.T) {
diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/inl_test.go
rename to src/cmd/compile/internal/test/inl_test.go
index 02735e5..9d31975 100644
--- a/src/cmd/compile/internal/gc/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"bufio"
diff --git a/src/cmd/compile/internal/gc/lang_test.go b/src/cmd/compile/internal/test/lang_test.go
similarity index 98%
rename from src/cmd/compile/internal/gc/lang_test.go
rename to src/cmd/compile/internal/test/lang_test.go
index 72e7f07..67c1551 100644
--- a/src/cmd/compile/internal/gc/lang_test.go
+++ b/src/cmd/compile/internal/test/lang_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"internal/testenv"
diff --git a/src/cmd/compile/internal/gc/logic_test.go b/src/cmd/compile/internal/test/logic_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/logic_test.go
rename to src/cmd/compile/internal/test/logic_test.go
index 78d2dd2..1d7043f 100644
--- a/src/cmd/compile/internal/gc/logic_test.go
+++ b/src/cmd/compile/internal/test/logic_test.go
@@ -1,4 +1,4 @@
-package gc
+package test
 
 import "testing"
 
diff --git a/src/cmd/compile/internal/gc/reproduciblebuilds_test.go b/src/cmd/compile/internal/test/reproduciblebuilds_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/reproduciblebuilds_test.go
rename to src/cmd/compile/internal/test/reproduciblebuilds_test.go
index 8101e44..4d84f9c 100644
--- a/src/cmd/compile/internal/gc/reproduciblebuilds_test.go
+++ b/src/cmd/compile/internal/test/reproduciblebuilds_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc_test
+package test
 
 import (
 	"bytes"
diff --git a/src/cmd/compile/internal/gc/shift_test.go b/src/cmd/compile/internal/test/shift_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/shift_test.go
rename to src/cmd/compile/internal/test/shift_test.go
index ce2eedf..ea88f0a 100644
--- a/src/cmd/compile/internal/gc/shift_test.go
+++ b/src/cmd/compile/internal/test/shift_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"reflect"
diff --git a/src/cmd/compile/internal/gc/ssa_test.go b/src/cmd/compile/internal/test/ssa_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/ssa_test.go
rename to src/cmd/compile/internal/test/ssa_test.go
index 7f7c946..2f3e24c 100644
--- a/src/cmd/compile/internal/gc/ssa_test.go
+++ b/src/cmd/compile/internal/test/ssa_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import (
 	"bytes"
diff --git a/src/cmd/compile/internal/gc/testdata/addressed_test.go b/src/cmd/compile/internal/test/testdata/addressed_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/addressed_test.go
rename to src/cmd/compile/internal/test/testdata/addressed_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/append_test.go b/src/cmd/compile/internal/test/testdata/append_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/append_test.go
rename to src/cmd/compile/internal/test/testdata/append_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/arithBoundary_test.go b/src/cmd/compile/internal/test/testdata/arithBoundary_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/arithBoundary_test.go
rename to src/cmd/compile/internal/test/testdata/arithBoundary_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/arithConst_test.go b/src/cmd/compile/internal/test/testdata/arithConst_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/arithConst_test.go
rename to src/cmd/compile/internal/test/testdata/arithConst_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/arith_test.go
rename to src/cmd/compile/internal/test/testdata/arith_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/array_test.go b/src/cmd/compile/internal/test/testdata/array_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/array_test.go
rename to src/cmd/compile/internal/test/testdata/array_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/assert_test.go b/src/cmd/compile/internal/test/testdata/assert_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/assert_test.go
rename to src/cmd/compile/internal/test/testdata/assert_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/break_test.go b/src/cmd/compile/internal/test/testdata/break_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/break_test.go
rename to src/cmd/compile/internal/test/testdata/break_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/chan_test.go b/src/cmd/compile/internal/test/testdata/chan_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/chan_test.go
rename to src/cmd/compile/internal/test/testdata/chan_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/closure_test.go b/src/cmd/compile/internal/test/testdata/closure_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/closure_test.go
rename to src/cmd/compile/internal/test/testdata/closure_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/cmpConst_test.go b/src/cmd/compile/internal/test/testdata/cmpConst_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/cmpConst_test.go
rename to src/cmd/compile/internal/test/testdata/cmpConst_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/cmp_test.go b/src/cmd/compile/internal/test/testdata/cmp_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/cmp_test.go
rename to src/cmd/compile/internal/test/testdata/cmp_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/compound_test.go b/src/cmd/compile/internal/test/testdata/compound_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/compound_test.go
rename to src/cmd/compile/internal/test/testdata/compound_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/copy_test.go b/src/cmd/compile/internal/test/testdata/copy_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/copy_test.go
rename to src/cmd/compile/internal/test/testdata/copy_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/ctl_test.go
rename to src/cmd/compile/internal/test/testdata/ctl_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go b/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go
rename to src/cmd/compile/internal/test/testdata/deferNoReturn_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/divbyzero_test.go b/src/cmd/compile/internal/test/testdata/divbyzero_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/divbyzero_test.go
rename to src/cmd/compile/internal/test/testdata/divbyzero_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/dupLoad_test.go b/src/cmd/compile/internal/test/testdata/dupLoad_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/dupLoad_test.go
rename to src/cmd/compile/internal/test/testdata/dupLoad_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go b/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go
rename to src/cmd/compile/internal/test/testdata/flowgraph_generator1.go
diff --git a/src/cmd/compile/internal/gc/testdata/fp_test.go b/src/cmd/compile/internal/test/testdata/fp_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/fp_test.go
rename to src/cmd/compile/internal/test/testdata/fp_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go b/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go
rename to src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go
diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go
rename to src/cmd/compile/internal/test/testdata/gen/arithConstGen.go
diff --git a/src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go b/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go
rename to src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go
diff --git a/src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go b/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go
rename to src/cmd/compile/internal/test/testdata/gen/constFoldGen.go
diff --git a/src/cmd/compile/internal/gc/testdata/gen/copyGen.go b/src/cmd/compile/internal/test/testdata/gen/copyGen.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/gen/copyGen.go
rename to src/cmd/compile/internal/test/testdata/gen/copyGen.go
diff --git a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go b/src/cmd/compile/internal/test/testdata/gen/zeroGen.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/gen/zeroGen.go
rename to src/cmd/compile/internal/test/testdata/gen/zeroGen.go
diff --git a/src/cmd/compile/internal/gc/testdata/loadstore_test.go b/src/cmd/compile/internal/test/testdata/loadstore_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/loadstore_test.go
rename to src/cmd/compile/internal/test/testdata/loadstore_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/map_test.go b/src/cmd/compile/internal/test/testdata/map_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/map_test.go
rename to src/cmd/compile/internal/test/testdata/map_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/namedReturn_test.go b/src/cmd/compile/internal/test/testdata/namedReturn_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/namedReturn_test.go
rename to src/cmd/compile/internal/test/testdata/namedReturn_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/phi_test.go b/src/cmd/compile/internal/test/testdata/phi_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/phi_test.go
rename to src/cmd/compile/internal/test/testdata/phi_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/regalloc_test.go b/src/cmd/compile/internal/test/testdata/regalloc_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/regalloc_test.go
rename to src/cmd/compile/internal/test/testdata/regalloc_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go b/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go
rename to src/cmd/compile/internal/test/testdata/reproducible/issue20272.go
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go b/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go
rename to src/cmd/compile/internal/test/testdata/reproducible/issue27013.go
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go b/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go
rename to src/cmd/compile/internal/test/testdata/reproducible/issue30202.go
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
similarity index 95%
rename from src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go
rename to src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
index db5ca7d..b87daed 100644
--- a/src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
@@ -53,7 +53,7 @@
 		return
 	}
 	// Address-taken local of type A, which will ensure that the
-	// compiler's dtypesym() routine will create a method wrapper.
+	// compiler's writeType() routine will create a method wrapper.
 	var a, b A
 	a.next = x
 	a.prev = &b
diff --git a/src/cmd/compile/internal/gc/testdata/short_test.go b/src/cmd/compile/internal/test/testdata/short_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/short_test.go
rename to src/cmd/compile/internal/test/testdata/short_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/slice_test.go b/src/cmd/compile/internal/test/testdata/slice_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/slice_test.go
rename to src/cmd/compile/internal/test/testdata/slice_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/sqrtConst_test.go b/src/cmd/compile/internal/test/testdata/sqrtConst_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/sqrtConst_test.go
rename to src/cmd/compile/internal/test/testdata/sqrtConst_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/string_test.go b/src/cmd/compile/internal/test/testdata/string_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/string_test.go
rename to src/cmd/compile/internal/test/testdata/string_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/unsafe_test.go b/src/cmd/compile/internal/test/testdata/unsafe_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/unsafe_test.go
rename to src/cmd/compile/internal/test/testdata/unsafe_test.go
diff --git a/src/cmd/compile/internal/gc/testdata/zero_test.go b/src/cmd/compile/internal/test/testdata/zero_test.go
similarity index 100%
rename from src/cmd/compile/internal/gc/testdata/zero_test.go
rename to src/cmd/compile/internal/test/testdata/zero_test.go
diff --git a/src/cmd/compile/internal/gc/truncconst_test.go b/src/cmd/compile/internal/test/truncconst_test.go
similarity index 98%
rename from src/cmd/compile/internal/gc/truncconst_test.go
rename to src/cmd/compile/internal/test/truncconst_test.go
index d153818..7705042 100644
--- a/src/cmd/compile/internal/gc/truncconst_test.go
+++ b/src/cmd/compile/internal/test/truncconst_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
 import "testing"
 
diff --git a/src/cmd/compile/internal/gc/zerorange_test.go b/src/cmd/compile/internal/test/zerorange_test.go
similarity index 98%
rename from src/cmd/compile/internal/gc/zerorange_test.go
rename to src/cmd/compile/internal/test/zerorange_test.go
index 89f4cb9..cb1a6e0 100644
--- a/src/cmd/compile/internal/gc/zerorange_test.go
+++ b/src/cmd/compile/internal/test/zerorange_test.go
@@ -2,11 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package test
 
-import (
-	"testing"
-)
+import "testing"
 
 var glob = 3
 var globp *int64
diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go
new file mode 100644
index 0000000..1c1b077
--- /dev/null
+++ b/src/cmd/compile/internal/typebits/typebits.go
@@ -0,0 +1,87 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typebits
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/bitvec"
+	"cmd/compile/internal/types"
+)
+
+// NOTE: The bitmap for a specific type t could be cached in t after
+// the first run and then simply copied into bv at the correct offset
+// on future calls with the same type t.
+func Set(t *types.Type, off int64, bv bitvec.BitVec) {
+	if t.Align > 0 && off&int64(t.Align-1) != 0 {
+		base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
+	}
+	if !t.HasPointers() {
+		// Note: this case ensures that pointers to go:notinheap types
+		// are not considered pointers by garbage collection and stack copying.
+		return
+	}
+
+	switch t.Kind() {
+	case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
+		if off&int64(types.PtrSize-1) != 0 {
+			base.Fatalf("typebits.Set: invalid alignment, %v", t)
+		}
+		bv.Set(int32(off / int64(types.PtrSize))) // pointer
+
+	case types.TSTRING:
+		// struct { byte *str; intgo len; }
+		if off&int64(types.PtrSize-1) != 0 {
+			base.Fatalf("typebits.Set: invalid alignment, %v", t)
+		}
+		bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot
+
+	case types.TINTER:
+		// struct { Itab *tab;	void *data; }
+		// or, when isnilinter(t)==true:
+		// struct { Type *type; void *data; }
+		if off&int64(types.PtrSize-1) != 0 {
+			base.Fatalf("typebits.Set: invalid alignment, %v", t)
+		}
+		// The first word of an interface is a pointer, but we don't
+		// treat it as such.
+		// 1. If it is a non-empty interface, the pointer points to an itab
+		//    which is always in persistentalloc space.
+		// 2. If it is an empty interface, the pointer points to a _type.
+		//   a. If it is a compile-time-allocated type, it points into
+		//      the read-only data section.
+		//   b. If it is a reflect-allocated type, it points into the Go heap.
+		//      Reflect is responsible for keeping a reference to
+		//      the underlying type so it won't be GCd.
+		// If we ever have a moving GC, we need to change this for 2b (as
+		// well as scan itabs to update their itab._type fields).
+		bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot
+
+	case types.TSLICE:
+		// struct { byte *array; uintgo len; uintgo cap; }
+		if off&int64(types.PtrSize-1) != 0 {
+			base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t)
+		}
+		bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
+
+	case types.TARRAY:
+		elt := t.Elem()
+		if elt.Width == 0 {
+			// Short-circuit for #20739.
+			break
+		}
+		for i := int64(0); i < t.NumElem(); i++ {
+			Set(elt, off, bv)
+			off += elt.Width
+		}
+
+	case types.TSTRUCT:
+		for _, f := range t.Fields().Slice() {
+			Set(f.Type, off+f.Offset, bv)
+		}
+
+	default:
+		base.Fatalf("typebits.Set: unexpected type, %v", t)
+	}
+}
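+// Sketch of the caching idea from the NOTE above (hypothetical, not
+// implemented here; assumes bitvec.New and a copy loop over set bits):
+//
+//	var cache = map[*types.Type]bitvec.BitVec{}
+//
+//	func setCached(t *types.Type, off int64, bv bitvec.BitVec) {
+//		tb, ok := cache[t]
+//		if !ok {
+//			tb = bitvec.New(int32(t.Width / int64(types.PtrSize)))
+//			Set(t, 0, tb) // compute the type's bitmap once, at offset 0
+//			cache[t] = tb
+//		}
+//		for i := int32(0); i < tb.N; i++ {
+//			if tb.Get(i) {
+//				bv.Set(i + int32(off/int64(types.PtrSize)))
+//			}
+//		}
+//	}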
diff --git a/src/cmd/compile/internal/typecheck/bexport.go b/src/cmd/compile/internal/typecheck/bexport.go
new file mode 100644
index 0000000..4a84bb1
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/bexport.go
@@ -0,0 +1,102 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import "cmd/compile/internal/types"
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+	// Objects
+	packageTag = -(iota + 1)
+	constTag
+	typeTag
+	varTag
+	funcTag
+	endTag
+
+	// Types
+	namedTag
+	arrayTag
+	sliceTag
+	dddTag
+	structTag
+	pointerTag
+	signatureTag
+	interfaceTag
+	mapTag
+	chanTag
+
+	// Values
+	falseTag
+	trueTag
+	int64Tag
+	floatTag
+	fractionTag // not used by gc
+	complexTag
+	stringTag
+	nilTag
+	unknownTag // not used by gc (only appears in packages with errors)
+
+	// Type aliases
+	aliasTag
+)
+
+var predecl []*types.Type // initialized lazily
+
+func predeclared() []*types.Type {
+	if predecl == nil {
+		// initialize lazily to be sure that all
+		// type elements have been initialized before this list is built
+		predecl = []*types.Type{
+			// basic types
+			types.Types[types.TBOOL],
+			types.Types[types.TINT],
+			types.Types[types.TINT8],
+			types.Types[types.TINT16],
+			types.Types[types.TINT32],
+			types.Types[types.TINT64],
+			types.Types[types.TUINT],
+			types.Types[types.TUINT8],
+			types.Types[types.TUINT16],
+			types.Types[types.TUINT32],
+			types.Types[types.TUINT64],
+			types.Types[types.TUINTPTR],
+			types.Types[types.TFLOAT32],
+			types.Types[types.TFLOAT64],
+			types.Types[types.TCOMPLEX64],
+			types.Types[types.TCOMPLEX128],
+			types.Types[types.TSTRING],
+
+			// basic type aliases
+			types.ByteType,
+			types.RuneType,
+
+			// error
+			types.ErrorType,
+
+			// untyped types
+			types.UntypedBool,
+			types.UntypedInt,
+			types.UntypedRune,
+			types.UntypedFloat,
+			types.UntypedComplex,
+			types.UntypedString,
+			types.Types[types.TNIL],
+
+			// package unsafe
+			types.Types[types.TUNSAFEPTR],
+
+			// invalid type (package contains errors)
+			types.Types[types.Txxx],
+
+			// any type, for builtin export data
+			types.Types[types.TANY],
+		}
+	}
+	return predecl
+}
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
new file mode 100644
index 0000000..0dee852
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -0,0 +1,343 @@
+// Code generated by mkbuiltin.go. DO NOT EDIT.
+
+package typecheck
+
+import (
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+var runtimeDecls = [...]struct {
+	name string
+	tag  int
+	typ  int
+}{
+	{"newobject", funcTag, 4},
+	{"mallocgc", funcTag, 8},
+	{"panicdivide", funcTag, 9},
+	{"panicshift", funcTag, 9},
+	{"panicmakeslicelen", funcTag, 9},
+	{"panicmakeslicecap", funcTag, 9},
+	{"throwinit", funcTag, 9},
+	{"panicwrap", funcTag, 9},
+	{"gopanic", funcTag, 11},
+	{"gorecover", funcTag, 14},
+	{"goschedguarded", funcTag, 9},
+	{"goPanicIndex", funcTag, 16},
+	{"goPanicIndexU", funcTag, 18},
+	{"goPanicSliceAlen", funcTag, 16},
+	{"goPanicSliceAlenU", funcTag, 18},
+	{"goPanicSliceAcap", funcTag, 16},
+	{"goPanicSliceAcapU", funcTag, 18},
+	{"goPanicSliceB", funcTag, 16},
+	{"goPanicSliceBU", funcTag, 18},
+	{"goPanicSlice3Alen", funcTag, 16},
+	{"goPanicSlice3AlenU", funcTag, 18},
+	{"goPanicSlice3Acap", funcTag, 16},
+	{"goPanicSlice3AcapU", funcTag, 18},
+	{"goPanicSlice3B", funcTag, 16},
+	{"goPanicSlice3BU", funcTag, 18},
+	{"goPanicSlice3C", funcTag, 16},
+	{"goPanicSlice3CU", funcTag, 18},
+	{"printbool", funcTag, 19},
+	{"printfloat", funcTag, 21},
+	{"printint", funcTag, 23},
+	{"printhex", funcTag, 25},
+	{"printuint", funcTag, 25},
+	{"printcomplex", funcTag, 27},
+	{"printstring", funcTag, 29},
+	{"printpointer", funcTag, 30},
+	{"printuintptr", funcTag, 31},
+	{"printiface", funcTag, 30},
+	{"printeface", funcTag, 30},
+	{"printslice", funcTag, 30},
+	{"printnl", funcTag, 9},
+	{"printsp", funcTag, 9},
+	{"printlock", funcTag, 9},
+	{"printunlock", funcTag, 9},
+	{"concatstring2", funcTag, 34},
+	{"concatstring3", funcTag, 35},
+	{"concatstring4", funcTag, 36},
+	{"concatstring5", funcTag, 37},
+	{"concatstrings", funcTag, 39},
+	{"cmpstring", funcTag, 40},
+	{"intstring", funcTag, 43},
+	{"slicebytetostring", funcTag, 44},
+	{"slicebytetostringtmp", funcTag, 45},
+	{"slicerunetostring", funcTag, 48},
+	{"stringtoslicebyte", funcTag, 50},
+	{"stringtoslicerune", funcTag, 53},
+	{"slicecopy", funcTag, 54},
+	{"decoderune", funcTag, 55},
+	{"countrunes", funcTag, 56},
+	{"convI2I", funcTag, 57},
+	{"convT16", funcTag, 58},
+	{"convT32", funcTag, 58},
+	{"convT64", funcTag, 58},
+	{"convTstring", funcTag, 58},
+	{"convTslice", funcTag, 58},
+	{"convT2E", funcTag, 59},
+	{"convT2Enoptr", funcTag, 59},
+	{"convT2I", funcTag, 59},
+	{"convT2Inoptr", funcTag, 59},
+	{"assertE2I", funcTag, 57},
+	{"assertE2I2", funcTag, 60},
+	{"assertI2I", funcTag, 57},
+	{"assertI2I2", funcTag, 60},
+	{"panicdottypeE", funcTag, 61},
+	{"panicdottypeI", funcTag, 61},
+	{"panicnildottype", funcTag, 62},
+	{"ifaceeq", funcTag, 64},
+	{"efaceeq", funcTag, 64},
+	{"fastrand", funcTag, 66},
+	{"makemap64", funcTag, 68},
+	{"makemap", funcTag, 69},
+	{"makemap_small", funcTag, 70},
+	{"mapaccess1", funcTag, 71},
+	{"mapaccess1_fast32", funcTag, 72},
+	{"mapaccess1_fast64", funcTag, 72},
+	{"mapaccess1_faststr", funcTag, 72},
+	{"mapaccess1_fat", funcTag, 73},
+	{"mapaccess2", funcTag, 74},
+	{"mapaccess2_fast32", funcTag, 75},
+	{"mapaccess2_fast64", funcTag, 75},
+	{"mapaccess2_faststr", funcTag, 75},
+	{"mapaccess2_fat", funcTag, 76},
+	{"mapassign", funcTag, 71},
+	{"mapassign_fast32", funcTag, 72},
+	{"mapassign_fast32ptr", funcTag, 72},
+	{"mapassign_fast64", funcTag, 72},
+	{"mapassign_fast64ptr", funcTag, 72},
+	{"mapassign_faststr", funcTag, 72},
+	{"mapiterinit", funcTag, 77},
+	{"mapdelete", funcTag, 77},
+	{"mapdelete_fast32", funcTag, 78},
+	{"mapdelete_fast64", funcTag, 78},
+	{"mapdelete_faststr", funcTag, 78},
+	{"mapiternext", funcTag, 79},
+	{"mapclear", funcTag, 80},
+	{"makechan64", funcTag, 82},
+	{"makechan", funcTag, 83},
+	{"chanrecv1", funcTag, 85},
+	{"chanrecv2", funcTag, 86},
+	{"chansend1", funcTag, 88},
+	{"closechan", funcTag, 30},
+	{"writeBarrier", varTag, 90},
+	{"typedmemmove", funcTag, 91},
+	{"typedmemclr", funcTag, 92},
+	{"typedslicecopy", funcTag, 93},
+	{"selectnbsend", funcTag, 94},
+	{"selectnbrecv", funcTag, 95},
+	{"selectnbrecv2", funcTag, 97},
+	{"selectsetpc", funcTag, 98},
+	{"selectgo", funcTag, 99},
+	{"block", funcTag, 9},
+	{"makeslice", funcTag, 100},
+	{"makeslice64", funcTag, 101},
+	{"makeslicecopy", funcTag, 102},
+	{"growslice", funcTag, 104},
+	{"memmove", funcTag, 105},
+	{"memclrNoHeapPointers", funcTag, 106},
+	{"memclrHasPointers", funcTag, 106},
+	{"memequal", funcTag, 107},
+	{"memequal0", funcTag, 108},
+	{"memequal8", funcTag, 108},
+	{"memequal16", funcTag, 108},
+	{"memequal32", funcTag, 108},
+	{"memequal64", funcTag, 108},
+	{"memequal128", funcTag, 108},
+	{"f32equal", funcTag, 109},
+	{"f64equal", funcTag, 109},
+	{"c64equal", funcTag, 109},
+	{"c128equal", funcTag, 109},
+	{"strequal", funcTag, 109},
+	{"interequal", funcTag, 109},
+	{"nilinterequal", funcTag, 109},
+	{"memhash", funcTag, 110},
+	{"memhash0", funcTag, 111},
+	{"memhash8", funcTag, 111},
+	{"memhash16", funcTag, 111},
+	{"memhash32", funcTag, 111},
+	{"memhash64", funcTag, 111},
+	{"memhash128", funcTag, 111},
+	{"f32hash", funcTag, 111},
+	{"f64hash", funcTag, 111},
+	{"c64hash", funcTag, 111},
+	{"c128hash", funcTag, 111},
+	{"strhash", funcTag, 111},
+	{"interhash", funcTag, 111},
+	{"nilinterhash", funcTag, 111},
+	{"int64div", funcTag, 112},
+	{"uint64div", funcTag, 113},
+	{"int64mod", funcTag, 112},
+	{"uint64mod", funcTag, 113},
+	{"float64toint64", funcTag, 114},
+	{"float64touint64", funcTag, 115},
+	{"float64touint32", funcTag, 116},
+	{"int64tofloat64", funcTag, 117},
+	{"uint64tofloat64", funcTag, 118},
+	{"uint32tofloat64", funcTag, 119},
+	{"complex128div", funcTag, 120},
+	{"racefuncenter", funcTag, 31},
+	{"racefuncenterfp", funcTag, 9},
+	{"racefuncexit", funcTag, 9},
+	{"raceread", funcTag, 31},
+	{"racewrite", funcTag, 31},
+	{"racereadrange", funcTag, 121},
+	{"racewriterange", funcTag, 121},
+	{"msanread", funcTag, 121},
+	{"msanwrite", funcTag, 121},
+	{"msanmove", funcTag, 122},
+	{"checkptrAlignment", funcTag, 123},
+	{"checkptrArithmetic", funcTag, 125},
+	{"libfuzzerTraceCmp1", funcTag, 127},
+	{"libfuzzerTraceCmp2", funcTag, 129},
+	{"libfuzzerTraceCmp4", funcTag, 130},
+	{"libfuzzerTraceCmp8", funcTag, 131},
+	{"libfuzzerTraceConstCmp1", funcTag, 127},
+	{"libfuzzerTraceConstCmp2", funcTag, 129},
+	{"libfuzzerTraceConstCmp4", funcTag, 130},
+	{"libfuzzerTraceConstCmp8", funcTag, 131},
+	{"x86HasPOPCNT", varTag, 6},
+	{"x86HasSSE41", varTag, 6},
+	{"x86HasFMA", varTag, 6},
+	{"armHasVFPv4", varTag, 6},
+	{"arm64HasATOMICS", varTag, 6},
+}
+
+func runtimeTypes() []*types.Type {
+	var typs [132]*types.Type
+	typs[0] = types.ByteType
+	typs[1] = types.NewPtr(typs[0])
+	typs[2] = types.Types[types.TANY]
+	typs[3] = types.NewPtr(typs[2])
+	typs[4] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[5] = types.Types[types.TUINTPTR]
+	typs[6] = types.Types[types.TBOOL]
+	typs[7] = types.Types[types.TUNSAFEPTR]
+	typs[8] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[6])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[9] = types.NewSignature(types.NoPkg, nil, nil, nil)
+	typs[10] = types.Types[types.TINTER]
+	typs[11] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[10])}, nil)
+	typs[12] = types.Types[types.TINT32]
+	typs[13] = types.NewPtr(typs[12])
+	typs[14] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[13])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[10])})
+	typs[15] = types.Types[types.TINT]
+	typs[16] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15])}, nil)
+	typs[17] = types.Types[types.TUINT]
+	typs[18] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[17]), types.NewField(src.NoXPos, nil, typs[15])}, nil)
+	typs[19] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])}, nil)
+	typs[20] = types.Types[types.TFLOAT64]
+	typs[21] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, nil)
+	typs[22] = types.Types[types.TINT64]
+	typs[23] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])}, nil)
+	typs[24] = types.Types[types.TUINT64]
+	typs[25] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])}, nil)
+	typs[26] = types.Types[types.TCOMPLEX128]
+	typs[27] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[26])}, nil)
+	typs[28] = types.Types[types.TSTRING]
+	typs[29] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])}, nil)
+	typs[30] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])}, nil)
+	typs[31] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[32] = types.NewArray(typs[0], 32)
+	typs[33] = types.NewPtr(typs[32])
+	typs[34] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[35] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[36] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[37] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[38] = types.NewSlice(typs[28])
+	typs[39] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[38])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[40] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[41] = types.NewArray(typs[0], 4)
+	typs[42] = types.NewPtr(typs[41])
+	typs[43] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[42]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[44] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[45] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[46] = types.RuneType
+	typs[47] = types.NewSlice(typs[46])
+	typs[48] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[47])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])})
+	typs[49] = types.NewSlice(typs[0])
+	typs[50] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[33]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[49])})
+	typs[51] = types.NewArray(typs[46], 32)
+	typs[52] = types.NewPtr(typs[51])
+	typs[53] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[52]), types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[47])})
+	typs[54] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[55] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[46]), types.NewField(src.NoXPos, nil, typs[15])})
+	typs[56] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[28])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[57] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])})
+	typs[58] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[59] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[2])})
+	typs[60] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[2]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[61] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[1])}, nil)
+	typs[62] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1])}, nil)
+	typs[63] = types.NewPtr(typs[5])
+	typs[64] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[63]), types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[7])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[65] = types.Types[types.TUINT32]
+	typs[66] = types.NewSignature(types.NoPkg, nil, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[65])})
+	typs[67] = types.NewMap(typs[2], typs[2])
+	typs[68] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[22]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[67])})
+	typs[69] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[67])})
+	typs[70] = types.NewSignature(types.NoPkg, nil, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[67])})
+	typs[71] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[72] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[73] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])})
+	typs[74] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[75] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[2])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[76] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[1])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[77] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[78] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67]), types.NewField(src.NoXPos, nil, typs[2])}, nil)
+	typs[79] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[80] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[67])}, nil)
+	typs[81] = types.NewChan(typs[2], types.Cboth)
+	typs[82] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[81])})
+	typs[83] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[81])})
+	typs[84] = types.NewChan(typs[2], types.Crecv)
+	typs[85] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[84]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[86] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[84]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[87] = types.NewChan(typs[2], types.Csend)
+	typs[88] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[87]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[89] = types.NewArray(typs[0], 3)
+	typs[90] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[89]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+	typs[91] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[92] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3])}, nil)
+	typs[93] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15])})
+	typs[94] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[87]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[95] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[84])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[96] = types.NewPtr(typs[6])
+	typs[97] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[96]), types.NewField(src.NoXPos, nil, typs[84])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[98] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[63])}, nil)
+	typs[99] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[63]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[6])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[6])})
+	typs[100] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[101] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[22]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[102] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[15]), types.NewField(src.NoXPos, nil, typs[7])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[7])})
+	typs[103] = types.NewSlice(typs[2])
+	typs[104] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[103]), types.NewField(src.NoXPos, nil, typs[15])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[103])})
+	typs[105] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[106] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[107] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[108] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[3]), types.NewField(src.NoXPos, nil, typs[3])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[109] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[7])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[6])})
+	typs[110] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[5])})
+	typs[111] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[5])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[5])})
+	typs[112] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[22]), types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])})
+	typs[113] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24]), types.NewField(src.NoXPos, nil, typs[24])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])})
+	typs[114] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])})
+	typs[115] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])})
+	typs[116] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[65])})
+	typs[117] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[22])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])})
+	typs[118] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])})
+	typs[119] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[65])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[20])})
+	typs[120] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[26]), types.NewField(src.NoXPos, nil, typs[26])}, []*types.Field{types.NewField(src.NoXPos, nil, typs[26])})
+	typs[121] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[122] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[123] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[1]), types.NewField(src.NoXPos, nil, typs[5])}, nil)
+	typs[124] = types.NewSlice(typs[7])
+	typs[125] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[7]), types.NewField(src.NoXPos, nil, typs[124])}, nil)
+	typs[126] = types.Types[types.TUINT8]
+	typs[127] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[126]), types.NewField(src.NoXPos, nil, typs[126])}, nil)
+	typs[128] = types.Types[types.TUINT16]
+	typs[129] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[128]), types.NewField(src.NoXPos, nil, typs[128])}, nil)
+	typs[130] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[65]), types.NewField(src.NoXPos, nil, typs[65])}, nil)
+	typs[131] = types.NewSignature(types.NoPkg, nil, []*types.Field{types.NewField(src.NoXPos, nil, typs[24]), types.NewField(src.NoXPos, nil, typs[24])}, nil)
+	return typs[:]
+}
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/typecheck/builtin/runtime.go
similarity index 100%
rename from src/cmd/compile/internal/gc/builtin/runtime.go
rename to src/cmd/compile/internal/typecheck/builtin/runtime.go
diff --git a/src/cmd/compile/internal/gc/builtin_test.go b/src/cmd/compile/internal/typecheck/builtin_test.go
similarity index 96%
rename from src/cmd/compile/internal/gc/builtin_test.go
rename to src/cmd/compile/internal/typecheck/builtin_test.go
index 57f24b2..fb9d3e3 100644
--- a/src/cmd/compile/internal/gc/builtin_test.go
+++ b/src/cmd/compile/internal/typecheck/builtin_test.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc_test
+package typecheck
 
 import (
 	"bytes"
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
new file mode 100644
index 0000000..1a8e583
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -0,0 +1,935 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"fmt"
+	"go/constant"
+	"go/token"
+	"math"
+	"math/big"
+	"strings"
+	"unicode"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+func roundFloat(v constant.Value, sz int64) constant.Value {
+	switch sz {
+	case 4:
+		f, _ := constant.Float32Val(v)
+		return makeFloat64(float64(f))
+	case 8:
+		f, _ := constant.Float64Val(v)
+		return makeFloat64(f)
+	}
+	base.Fatalf("unexpected size: %v", sz)
+	panic("unreachable")
+}
+
+// truncfltlit truncates the float literal v to 32-bit or 64-bit
+// precision according to type t; it returns the truncated value.
+func truncfltlit(v constant.Value, t *types.Type) constant.Value {
+	if t.IsUntyped() || overflow(v, t) {
+		// If there was overflow, simply continuing would set the
+		// value to Inf which in turn would lead to spurious follow-on
+		// errors. Avoid this by returning the existing value.
+		return v
+	}
+
+	return roundFloat(v, t.Size())
+}
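+
+// As an illustrative (non-source) example: an untyped float constant
+// such as 1.0/3.0 carries excess precision, and truncfltlit rounds it
+// to what the target type can hold, e.g.
+//
+//	v32 := truncfltlit(v, types.Types[types.TFLOAT32]) // v rounded to float32 precision
+//
+// so later constant arithmetic sees the same value that executing the
+// program would.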
+
+// trunccmplxlit truncates the real and imaginary parts of the complex
+// literal v to 32-bit or 64-bit precision, according to type t; it
+// returns the truncated value. In case of overflow, it reports an
+// error but does not truncate the input value.
+func trunccmplxlit(v constant.Value, t *types.Type) constant.Value {
+	if t.IsUntyped() || overflow(v, t) {
+		// If there was overflow, simply continuing would set the
+		// value to Inf which in turn would lead to spurious follow-on
+		// errors. Avoid this by returning the existing value.
+		return v
+	}
+
+	fsz := t.Size() / 2
+	return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz))
+}
+
+// TODO(mdempsky): Replace these with better APIs.
+func convlit(n ir.Node, t *types.Type) ir.Node    { return convlit1(n, t, false, nil) }
+func DefaultLit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+
+// convlit1 converts an untyped expression n to type t. If n already
+// has a type, convlit1 has no effect.
+//
+// For explicit conversions, t must be non-nil, and integer-to-string
+// conversions are allowed.
+//
+// For implicit conversions (e.g., assignments), t may be nil; if so,
+// n is converted to its default type.
+//
+// If there's an error converting n to t, context is used in the error
+// message.
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
+	if explicit && t == nil {
+		base.Fatalf("explicit conversion missing type")
+	}
+	if t != nil && t.IsUntyped() {
+		base.Fatalf("bad conversion to untyped: %v", t)
+	}
+
+	if n == nil || n.Type() == nil {
+		// Allow sloppy callers.
+		return n
+	}
+	if !n.Type().IsUntyped() {
+		// Already typed; nothing to do.
+		return n
+	}
+
+	// Nil is technically not a constant, so handle it specially.
+	if n.Type().Kind() == types.TNIL {
+		if n.Op() != ir.ONIL {
+			base.Fatalf("unexpected op: %v (%v)", n, n.Op())
+		}
+		n = ir.Copy(n)
+		if t == nil {
+			base.Errorf("use of untyped nil")
+			n.SetDiag(true)
+			n.SetType(nil)
+			return n
+		}
+
+		if !t.HasNil() {
+			// Leave for caller to handle.
+			return n
+		}
+
+		n.SetType(t)
+		return n
+	}
+
+	if t == nil || !ir.OKForConst[t.Kind()] {
+		t = defaultType(n.Type())
+	}
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected untyped expression: %v", n)
+
+	case ir.OLITERAL:
+		v := convertVal(n.Val(), t, explicit)
+		if v.Kind() == constant.Unknown {
+			n = ir.NewConstExpr(n.Val(), n)
+			break
+		}
+		n = ir.NewConstExpr(v, n)
+		n.SetType(t)
+		return n
+
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
+		ot := operandType(n.Op(), t)
+		if ot == nil {
+			n = DefaultLit(n, nil)
+			break
+		}
+
+		n := n.(*ir.UnaryExpr)
+		n.X = convlit(n.X, ot)
+		if n.X.Type() == nil {
+			n.SetType(nil)
+			return n
+		}
+		n.SetType(t)
+		return n
+
+	case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
+		ot := operandType(n.Op(), t)
+		if ot == nil {
+			n = DefaultLit(n, nil)
+			break
+		}
+
+		var l, r ir.Node
+		switch n := n.(type) {
+		case *ir.BinaryExpr:
+			n.X = convlit(n.X, ot)
+			n.Y = convlit(n.Y, ot)
+			l, r = n.X, n.Y
+		case *ir.LogicalExpr:
+			n.X = convlit(n.X, ot)
+			n.Y = convlit(n.Y, ot)
+			l, r = n.X, n.Y
+		}
+
+		if l.Type() == nil || r.Type() == nil {
+			n.SetType(nil)
+			return n
+		}
+		if !types.Identical(l.Type(), r.Type()) {
+			base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+			n.SetType(nil)
+			return n
+		}
+
+		n.SetType(t)
+		return n
+
+	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		if !t.IsBoolean() {
+			break
+		}
+		n.SetType(t)
+		return n
+
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		n.X = convlit1(n.X, t, explicit, nil)
+		n.SetType(n.X.Type())
+		if n.Type() != nil && !n.Type().IsInteger() {
+			base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
+			n.SetType(nil)
+		}
+		return n
+	}
+
+	if !n.Diag() {
+		if !t.Broke() {
+			if explicit {
+				base.Errorf("cannot convert %L to type %v", n, t)
+			} else if context != nil {
+				base.Errorf("cannot use %L as type %v in %s", n, t, context())
+			} else {
+				base.Errorf("cannot use %L as type %v", n, t)
+			}
+		}
+		n.SetDiag(true)
+	}
+	n.SetType(nil)
+	return n
+}
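+
+// A hedged usage sketch (not part of the original source): for an
+// implicit conversion such as `var x int8 = 1000`, the typechecker
+// would in effect call
+//
+//	n = convlit1(n, types.Types[types.TINT8], false, context)
+//
+// which converts the untyped constant 1000 and reports an overflow
+// error, since 1000 does not fit in int8.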
+
+func operandType(op ir.Op, t *types.Type) *types.Type {
+	switch op {
+	case ir.OCOMPLEX:
+		if t.IsComplex() {
+			return types.FloatForComplex(t)
+		}
+	case ir.OREAL, ir.OIMAG:
+		if t.IsFloat() {
+			return types.ComplexForFloat(t)
+		}
+	default:
+		if okfor[op][t.Kind()] {
+			return t
+		}
+	}
+	return nil
+}
+
+// convertVal converts v into a representation appropriate for t. If
+// no such representation exists, it returns an Unknown constant
+// (constant.MakeUnknown) instead.
+//
+// If explicit is true, then conversions from integer to string are
+// also allowed.
+func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+	switch ct := v.Kind(); ct {
+	case constant.Bool:
+		if t.IsBoolean() {
+			return v
+		}
+
+	case constant.String:
+		if t.IsString() {
+			return v
+		}
+
+	case constant.Int:
+		if explicit && t.IsString() {
+			return tostr(v)
+		}
+		fallthrough
+	case constant.Float, constant.Complex:
+		switch {
+		case t.IsInteger():
+			v = toint(v)
+			overflow(v, t)
+			return v
+		case t.IsFloat():
+			v = toflt(v)
+			v = truncfltlit(v, t)
+			return v
+		case t.IsComplex():
+			v = tocplx(v)
+			v = trunccmplxlit(v, t)
+			return v
+		}
+	}
+
+	return constant.MakeUnknown()
+}
+
+func tocplx(v constant.Value) constant.Value {
+	return constant.ToComplex(v)
+}
+
+func toflt(v constant.Value) constant.Value {
+	if v.Kind() == constant.Complex {
+		if constant.Sign(constant.Imag(v)) != 0 {
+			base.Errorf("constant %v truncated to real", v)
+		}
+		v = constant.Real(v)
+	}
+
+	return constant.ToFloat(v)
+}
+
+func toint(v constant.Value) constant.Value {
+	if v.Kind() == constant.Complex {
+		if constant.Sign(constant.Imag(v)) != 0 {
+			base.Errorf("constant %v truncated to integer", v)
+		}
+		v = constant.Real(v)
+	}
+
+	if v := constant.ToInt(v); v.Kind() == constant.Int {
+		return v
+	}
+
+	// The value of v cannot be represented as an integer,
+	// so we need to print an error message.
+	// Unfortunately some float values cannot be
+	// reasonably formatted for inclusion in an error
+	// message (example: 1 + 1e-100), so first we try to
+	// format the float; if the truncation resulted in
+	// something that looks like an integer we omit the
+	// value from the error message.
+	// (See issue #11371).
+	f := ir.BigFloat(v)
+	if f.MantExp(nil) > 2*ir.ConstPrec {
+		base.Errorf("integer too large")
+	} else {
+		var t big.Float
+		t.Parse(fmt.Sprint(v), 0)
+		if t.IsInt() {
+			base.Errorf("constant truncated to integer")
+		} else {
+			base.Errorf("constant %v truncated to integer", v)
+		}
+	}
+
+	// Prevent follow-on errors.
+	// TODO(mdempsky): Use constant.MakeUnknown() instead.
+	return constant.MakeInt64(1)
+}
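+
+// Illustrative behavior (not from the original source):
+//
+//	toint(constant.MakeFloat64(3))   // yields the integer constant 3
+//	toint(constant.MakeFloat64(3.5)) // error: constant 3.5 truncated to integer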
+
+// overflow reports whether constant value v is too large
+// to represent with type t, and emits an error message if so.
+func overflow(v constant.Value, t *types.Type) bool {
+	// v has already been converted
+	// to appropriate form for t.
+	if t.IsUntyped() {
+		return false
+	}
+	if v.Kind() == constant.Int && constant.BitLen(v) > ir.ConstPrec {
+		base.Errorf("integer too large")
+		return true
+	}
+	if ir.ConstOverflow(v, t) {
+		base.Errorf("constant %v overflows %v", types.FmtConst(v, false), t)
+		return true
+	}
+	return false
+}
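+
+// For instance (illustrative only), a value outside the target type's
+// range trips the ConstOverflow check above:
+//
+//	overflow(constant.MakeInt64(300), types.Types[types.TINT8]) // error: constant 300 overflows int8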
+
+func tostr(v constant.Value) constant.Value {
+	if v.Kind() == constant.Int {
+		r := unicode.ReplacementChar
+		if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune {
+			r = rune(x)
+		}
+		v = constant.MakeString(string(r))
+	}
+	return v
+}
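+
+// Illustrative examples (not from the original source):
+//
+//	tostr(constant.MakeInt64(65))      // "A"
+//	tostr(constant.MakeInt64(1 << 32)) // "\uFFFD" (out of rune range)
+//	tostr(constant.MakeString("hi"))   // returned unchanged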
+
+var tokenForOp = [...]token.Token{
+	ir.OPLUS:   token.ADD,
+	ir.ONEG:    token.SUB,
+	ir.ONOT:    token.NOT,
+	ir.OBITNOT: token.XOR,
+
+	ir.OADD:    token.ADD,
+	ir.OSUB:    token.SUB,
+	ir.OMUL:    token.MUL,
+	ir.ODIV:    token.QUO,
+	ir.OMOD:    token.REM,
+	ir.OOR:     token.OR,
+	ir.OXOR:    token.XOR,
+	ir.OAND:    token.AND,
+	ir.OANDNOT: token.AND_NOT,
+	ir.OOROR:   token.LOR,
+	ir.OANDAND: token.LAND,
+
+	ir.OEQ: token.EQL,
+	ir.ONE: token.NEQ,
+	ir.OLT: token.LSS,
+	ir.OLE: token.LEQ,
+	ir.OGT: token.GTR,
+	ir.OGE: token.GEQ,
+
+	ir.OLSH: token.SHL,
+	ir.ORSH: token.SHR,
+}
+
+// EvalConst returns a constant-evaluated expression equivalent to n.
+// If n is not a constant, EvalConst returns n.
+// Otherwise, EvalConst returns a new OLITERAL with the same value as n,
+// and with .Orig pointing back to n.
+func EvalConst(n ir.Node) ir.Node {
+	// Pick off just the opcodes that can be constant evaluated.
+	switch n.Op() {
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+		n := n.(*ir.UnaryExpr)
+		nl := n.X
+		if nl.Op() == ir.OLITERAL {
+			var prec uint
+			if n.Type().IsUnsigned() {
+				prec = uint(n.Type().Size() * 8)
+			}
+			return OrigConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec))
+		}
+
+	case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT:
+		n := n.(*ir.BinaryExpr)
+		nl, nr := n.X, n.Y
+		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+			rval := nr.Val()
+
+			// check for divisor underflow in complex division (see issue 20227)
+			if n.Op() == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
+				base.Errorf("complex division by zero")
+				n.SetType(nil)
+				return n
+			}
+			if (n.Op() == ir.ODIV || n.Op() == ir.OMOD) && constant.Sign(rval) == 0 {
+				base.Errorf("division by zero")
+				n.SetType(nil)
+				return n
+			}
+
+			tok := tokenForOp[n.Op()]
+			if n.Op() == ir.ODIV && n.Type().IsInteger() {
+				tok = token.QUO_ASSIGN // integer division
+			}
+			return OrigConst(n, constant.BinaryOp(nl.Val(), tok, rval))
+		}
+
+	case ir.OOROR, ir.OANDAND:
+		n := n.(*ir.LogicalExpr)
+		nl, nr := n.X, n.Y
+		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+			return OrigConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
+		}
+
+	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		nl, nr := n.X, n.Y
+		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+			return OrigBool(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
+		}
+
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		nl, nr := n.X, n.Y
+		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+			// shiftBound from go/types; "so we can express smallestFloat64"
+			const shiftBound = 1023 - 1 + 52
+			s, ok := constant.Uint64Val(nr.Val())
+			if !ok || s > shiftBound {
+				base.Errorf("invalid shift count %v", nr)
+				n.SetType(nil)
+				break
+			}
+			return OrigConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s)))
+		}
+
+	case ir.OCONV, ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		nl := n.X
+		if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
+			return OrigConst(n, convertVal(nl.Val(), n.Type(), true))
+		}
+
+	case ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		nl := n.X
+		if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
+			// set so n.Orig gets OCONV instead of OCONVNOP
+			n.SetOp(ir.OCONV)
+			return OrigConst(n, nl.Val())
+		}
+
+	case ir.OADDSTR:
+		// Merge adjacent constants in the argument list.
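+		// For example (illustrative): in "a" + "b" + f() + "c" + "d",
+		// the runs "a"+"b" and "c"+"d" each collapse into a single
+		// constant, so the rewritten list is "ab", f(), "cd" and
+		// need == 3.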
+		n := n.(*ir.AddStringExpr)
+		s := n.List
+		need := 0
+		for i := 0; i < len(s); i++ {
+			if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
+				// Can't merge s[i] into s[i-1]; need a slot in the list.
+				need++
+			}
+		}
+		if need == len(s) {
+			return n
+		}
+		if need == 1 {
+			var strs []string
+			for _, c := range s {
+				strs = append(strs, ir.StringVal(c))
+			}
+			return OrigConst(n, constant.MakeString(strings.Join(strs, "")))
+		}
+		newList := make([]ir.Node, 0, need)
+		for i := 0; i < len(s); i++ {
+			if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
+				// merge from i up to but not including i2
+				var strs []string
+				i2 := i
+				for i2 < len(s) && ir.IsConst(s[i2], constant.String) {
+					strs = append(strs, ir.StringVal(s[i2]))
+					i2++
+				}
+
+				nl := ir.Copy(n).(*ir.AddStringExpr)
+				nl.List = s[i:i2]
+				newList = append(newList, OrigConst(nl, constant.MakeString(strings.Join(strs, ""))))
+				i = i2 - 1
+			} else {
+				newList = append(newList, s[i])
+			}
+		}
+
+		nn := ir.Copy(n).(*ir.AddStringExpr)
+		nn.List = newList
+		return nn
+
+	case ir.OCAP, ir.OLEN:
+		n := n.(*ir.UnaryExpr)
+		nl := n.X
+		switch nl.Type().Kind() {
+		case types.TSTRING:
+			if ir.IsConst(nl, constant.String) {
+				return OrigInt(n, int64(len(ir.StringVal(nl))))
+			}
+		case types.TARRAY:
+			if !anyCallOrChan(nl) {
+				return OrigInt(n, nl.Type().NumElem())
+			}
+		}
+
+	case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+		n := n.(*ir.UnaryExpr)
+		return OrigInt(n, evalunsafe(n))
+
+	case ir.OREAL:
+		n := n.(*ir.UnaryExpr)
+		nl := n.X
+		if nl.Op() == ir.OLITERAL {
+			return OrigConst(n, constant.Real(nl.Val()))
+		}
+
+	case ir.OIMAG:
+		n := n.(*ir.UnaryExpr)
+		nl := n.X
+		if nl.Op() == ir.OLITERAL {
+			return OrigConst(n, constant.Imag(nl.Val()))
+		}
+
+	case ir.OCOMPLEX:
+		n := n.(*ir.BinaryExpr)
+		nl, nr := n.X, n.Y
+		if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+			return OrigConst(n, makeComplex(nl.Val(), nr.Val()))
+		}
+	}
+
+	return n
+}
+
+func makeFloat64(f float64) constant.Value {
+	if math.IsInf(f, 0) {
+		base.Fatalf("infinity is not a valid constant")
+	}
+	return constant.MakeFloat64(f)
+}
+
+func makeComplex(real, imag constant.Value) constant.Value {
+	return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
+}
+
+func square(x constant.Value) constant.Value {
+	return constant.BinaryOp(x, token.MUL, x)
+}
+
+// For matching historical "constant OP overflow" error messages.
+// TODO(mdempsky): Replace with error messages like go/types uses.
+var overflowNames = [...]string{
+	ir.OADD:    "addition",
+	ir.OSUB:    "subtraction",
+	ir.OMUL:    "multiplication",
+	ir.OLSH:    "shift",
+	ir.OXOR:    "bitwise XOR",
+	ir.OBITNOT: "bitwise complement",
+}
+
+// OrigConst returns an OLITERAL with orig n and value v.
+func OrigConst(n ir.Node, v constant.Value) ir.Node {
+	lno := ir.SetPos(n)
+	v = convertVal(v, n.Type(), false)
+	base.Pos = lno
+
+	switch v.Kind() {
+	case constant.Int:
+		if constant.BitLen(v) <= ir.ConstPrec {
+			break
+		}
+		fallthrough
+	case constant.Unknown:
+		what := overflowNames[n.Op()]
+		if what == "" {
+			base.Fatalf("unexpected overflow: %v", n.Op())
+		}
+		base.ErrorfAt(n.Pos(), "constant %v overflow", what)
+		n.SetType(nil)
+		return n
+	}
+
+	return ir.NewConstExpr(v, n)
+}
+
+func OrigBool(n ir.Node, v bool) ir.Node {
+	return OrigConst(n, constant.MakeBool(v))
+}
+
+func OrigInt(n ir.Node, v int64) ir.Node {
+	return OrigConst(n, constant.MakeInt64(v))
+}
+
+// defaultlit2 applies DefaultLit to both nodes simultaneously;
+// if both are ideal (untyped) going in, they must get the same
+// type going out.
+// force means a concrete (non-ideal) type must be assigned.
+// The results of defaultlit2 MUST be assigned back to l and r, e.g.
+// 	n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
+	if l.Type() == nil || r.Type() == nil {
+		return l, r
+	}
+	if !l.Type().IsUntyped() {
+		r = convlit(r, l.Type())
+		return l, r
+	}
+
+	if !r.Type().IsUntyped() {
+		l = convlit(l, r.Type())
+		return l, r
+	}
+
+	if !force {
+		return l, r
+	}
+
+	// Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
+	if l.Type().IsBoolean() != r.Type().IsBoolean() {
+		return l, r
+	}
+	if l.Type().IsString() != r.Type().IsString() {
+		return l, r
+	}
+	if ir.IsNil(l) || ir.IsNil(r) {
+		return l, r
+	}
+
+	t := defaultType(mixUntyped(l.Type(), r.Type()))
+	l = convlit(l, t)
+	r = convlit(r, t)
+	return l, r
+}
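+
+// A hedged example (not from the original source): for 1 + 2.5 with
+// both operands untyped and force set,
+//
+//	l, r = defaultlit2(l, r, true)
+//
+// converts both operands to float64, the default type of their mixed
+// untyped kind.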
+
+func mixUntyped(t1, t2 *types.Type) *types.Type {
+	if t1 == t2 {
+		return t1
+	}
+
+	rank := func(t *types.Type) int {
+		switch t {
+		case types.UntypedInt:
+			return 0
+		case types.UntypedRune:
+			return 1
+		case types.UntypedFloat:
+			return 2
+		case types.UntypedComplex:
+			return 3
+		}
+		base.Fatalf("bad type %v", t)
+		panic("unreachable")
+	}
+
+	if rank(t2) > rank(t1) {
+		return t2
+	}
+	return t1
+}
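+
+// Illustratively, the ranking above promotes toward complex:
+// int < rune < float < complex, so for example
+// mixUntyped(types.UntypedRune, types.UntypedFloat) yields
+// types.UntypedFloat.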
+
+func defaultType(t *types.Type) *types.Type {
+	if !t.IsUntyped() || t.Kind() == types.TNIL {
+		return t
+	}
+
+	switch t {
+	case types.UntypedBool:
+		return types.Types[types.TBOOL]
+	case types.UntypedString:
+		return types.Types[types.TSTRING]
+	case types.UntypedInt:
+		return types.Types[types.TINT]
+	case types.UntypedRune:
+		return types.RuneType
+	case types.UntypedFloat:
+		return types.Types[types.TFLOAT64]
+	case types.UntypedComplex:
+		return types.Types[types.TCOMPLEX128]
+	}
+
+	base.Fatalf("bad type %v", t)
+	return nil
+}
+
+// IndexConst checks if Node n contains a constant expression
+// representable as a non-negative int and returns its value.
+// If n is not a constant expression, not representable as an
+// integer, or negative, it returns -1. If n is too large, it
+// returns -2.
+func IndexConst(n ir.Node) int64 {
+	if n.Op() != ir.OLITERAL {
+		return -1
+	}
+	if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+		return -1
+	}
+
+	v := toint(n.Val())
+	if v.Kind() != constant.Int || constant.Sign(v) < 0 {
+		return -1
+	}
+	if ir.ConstOverflow(v, types.Types[types.TINT]) {
+		return -2
+	}
+	return ir.IntVal(types.Types[types.TINT], v)
+}
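+
+// Hedged examples (illustrative): for an OLITERAL node n,
+//
+//	IndexConst(n) // 7  when n is the constant 7
+//	IndexConst(n) // -1 when n is negative or not an integer
+//	IndexConst(n) // -2 when n overflows int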
+
+// anyCallOrChan reports whether n contains any calls or channel operations.
+func anyCallOrChan(n ir.Node) bool {
+	return ir.Any(n, func(n ir.Node) bool {
+		switch n.Op() {
+		case ir.OAPPEND,
+			ir.OCALL,
+			ir.OCALLFUNC,
+			ir.OCALLINTER,
+			ir.OCALLMETH,
+			ir.OCAP,
+			ir.OCLOSE,
+			ir.OCOMPLEX,
+			ir.OCOPY,
+			ir.ODELETE,
+			ir.OIMAG,
+			ir.OLEN,
+			ir.OMAKE,
+			ir.ONEW,
+			ir.OPANIC,
+			ir.OPRINT,
+			ir.OPRINTN,
+			ir.OREAL,
+			ir.ORECOVER,
+			ir.ORECV:
+			return true
+		}
+		return false
+	})
+}
+
+// A constSet represents a set of Go constant expressions.
+type constSet struct {
+	m map[constSetKey]src.XPos
+}
+
+type constSetKey struct {
+	typ *types.Type
+	val interface{}
+}
+
+// add adds constant expression n to s. If a constant expression of
+// equal value and identical type has already been added, then add
+// reports an error about the duplicate value.
+//
+// pos provides position information for where expression n occurred
+// (in case n does not have its own position information). what and
+// where are used in the error message.
+//
+// n must not be an untyped constant.
+func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
+	if conv := n; conv.Op() == ir.OCONVIFACE {
+		conv := conv.(*ir.ConvExpr)
+		if conv.Implicit() {
+			n = conv.X
+		}
+	}
+
+	if !ir.IsConstNode(n) {
+		return
+	}
+	if n.Type().IsUntyped() {
+		base.Fatalf("%v is untyped", n)
+	}
+
+	// Consts are only duplicates if they have the same value and
+	// identical types.
+	//
+	// In general, we have to use types.Identical to test type
+	// identity, because == gives false negatives for anonymous
+	// types and the byte/uint8 and rune/int32 builtin type
+	// aliases.  However, this is not a problem here, because
+	// constant expressions are always untyped or have a named
+	// type, and we explicitly handle the builtin type aliases
+	// below.
+	//
+	// This approach may need to be revisited though if we fix
+	// #21866 by treating all type aliases like byte/uint8 and
+	// rune/int32.
+
+	typ := n.Type()
+	switch typ {
+	case types.ByteType:
+		typ = types.Types[types.TUINT8]
+	case types.RuneType:
+		typ = types.Types[types.TINT32]
+	}
+	k := constSetKey{typ, ir.ConstValue(n)}
+
+	if ir.HasUniquePos(n) {
+		pos = n.Pos()
+	}
+
+	if s.m == nil {
+		s.m = make(map[constSetKey]src.XPos)
+	}
+
+	if prevPos, isDup := s.m[k]; isDup {
+		base.ErrorfAt(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
+			what, nodeAndVal(n), where,
+			what, base.FmtPos(prevPos))
+	} else {
+		s.m[k] = pos
+	}
+}
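+
+// A typical (illustrative) use is detecting duplicate case values in
+// a switch: the caller adds each case expression to one constSet with
+// what == "case", so a switch containing two `case 1:` clauses
+// reports roughly "duplicate case 1 in expression switch".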
+
+// nodeAndVal reports both an expression and its constant value, if
+// the latter is non-obvious.
+//
+// TODO(mdempsky): This could probably be a fmt.go flag.
+func nodeAndVal(n ir.Node) string {
+	show := fmt.Sprint(n)
+	val := ir.ConstValue(n)
+	if s := fmt.Sprintf("%#v", val); show != s {
+		show += " (value " + s + ")"
+	}
+	return show
+}
+
+// evalunsafe evaluates a package unsafe operation and returns the result.
+func evalunsafe(n ir.Node) int64 {
+	switch n.Op() {
+	case ir.OALIGNOF, ir.OSIZEOF:
+		n := n.(*ir.UnaryExpr)
+		n.X = Expr(n.X)
+		n.X = DefaultLit(n.X, nil)
+		tr := n.X.Type()
+		if tr == nil {
+			return 0
+		}
+		types.CalcSize(tr)
+		if n.Op() == ir.OALIGNOF {
+			return int64(tr.Align)
+		}
+		return tr.Width
+
+	case ir.OOFFSETOF:
+		// must be a selector.
+		n := n.(*ir.UnaryExpr)
+		if n.X.Op() != ir.OXDOT {
+			base.Errorf("invalid expression %v", n)
+			return 0
+		}
+		sel := n.X.(*ir.SelectorExpr)
+
+		// Remember the base of the selector so we can find it again
+		// after dot insertion. Since sel.X may be mutated by
+		// typechecking, typecheck it explicitly first to track it
+		// correctly.
+		sel.X = Expr(sel.X)
+		sbase := sel.X
+
+		tsel := Expr(sel)
+		n.X = tsel
+		if tsel.Type() == nil {
+			return 0
+		}
+		switch tsel.Op() {
+		case ir.ODOT, ir.ODOTPTR:
+			break
+		case ir.OCALLPART:
+			base.Errorf("invalid expression %v: argument is a method value", n)
+			return 0
+		default:
+			base.Errorf("invalid expression %v", n)
+			return 0
+		}
+
+		// Sum offsets for dots until we reach sbase.
+		var v int64
+		var next ir.Node
+		for r := tsel; r != sbase; r = next {
+			switch r.Op() {
+			case ir.ODOTPTR:
+				// For Offsetof(s.f), s may itself be a pointer,
+				// but accessing f must not otherwise involve
+				// indirection via embedded pointer types.
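+				// E.g. (illustrative): with T struct{ *U } and
+				// U struct{ x int }, unsafe.Offsetof(t.x)
+				// resolves to t.U.x; the hidden dereference of
+				// the embedded *U is caught here.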
+				r := r.(*ir.SelectorExpr)
+				if r.X != sbase {
+					base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X)
+					return 0
+				}
+				fallthrough
+			case ir.ODOT:
+				r := r.(*ir.SelectorExpr)
+				v += r.Offset()
+				next = r.X
+			default:
+				ir.Dump("unsafenmagic", tsel)
+				base.Fatalf("impossible %v node after dot insertion", r.Op())
+			}
+		}
+		return v
+	}
+
+	base.Fatalf("unexpected op %v", n.Op())
+	return 0
+}
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
new file mode 100644
index 0000000..eab0bb0
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -0,0 +1,474 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"fmt"
+	"strconv"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+var DeclContext ir.Class = ir.PEXTERN // PEXTERN/PAUTO
+
+func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func {
+	if tfn.Op() != ir.OTFUNC {
+		base.Fatalf("expected OTFUNC node, got %v", tfn)
+	}
+
+	fn := ir.NewFunc(base.Pos)
+	fn.Nname = ir.NewNameAt(base.Pos, sym)
+	fn.Nname.Func = fn
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = tfn
+	ir.MarkFunc(fn.Nname)
+	StartFuncBody(fn)
+	fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype)
+	return fn
+}
+
+// Declare records that Node n declares symbol n.Sym in the specified
+// declaration context.
+func Declare(n *ir.Name, ctxt ir.Class) {
+	if ir.IsBlank(n) {
+		return
+	}
+
+	s := n.Sym()
+
+	// kludgy: TypecheckAllowed means we're past parsing. E.g.,
+	// reflectdata.methodWrapper may declare out-of-package names later.
+	if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
+		base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
+	}
+
+	if ctxt == ir.PEXTERN {
+		if s.Name == "init" {
+			base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
+		}
+		if s.Name == "main" && s.Pkg.Name == "main" {
+			base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
+		}
+		Target.Externs = append(Target.Externs, n)
+	} else {
+		if ir.CurFunc == nil && ctxt == ir.PAUTO {
+			base.Pos = n.Pos()
+			base.Fatalf("automatic outside function")
+		}
+		if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
+			ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+		}
+		types.Pushdcl(s)
+		n.Curfn = ir.CurFunc
+	}
+
+	if ctxt == ir.PAUTO {
+		n.SetFrameOffset(0)
+	}
+
+	if s.Block == types.Block {
+		// functype will print errors about duplicate function arguments.
+		// Don't repeat the error here.
+		if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
+			Redeclared(n.Pos(), s, "in this block")
+		}
+	}
+
+	s.Block = types.Block
+	s.Lastlineno = base.Pos
+	s.Def = n
+	n.Class = ctxt
+	if ctxt == ir.PFUNC {
+		n.Sym().SetFunc(true)
+	}
+
+	autoexport(n, ctxt)
+}
+
+// Export marks n for export (or reexport).
+func Export(n *ir.Name) {
+	if n.Sym().OnExportList() {
+		return
+	}
+	n.Sym().SetOnExportList(true)
+
+	if base.Flag.E != 0 {
+		fmt.Printf("export symbol %v\n", n.Sym())
+	}
+
+	Target.Exports = append(Target.Exports, n)
+}
+
+// Redeclared emits a diagnostic about symbol s being redeclared at pos.
+func Redeclared(pos src.XPos, s *types.Sym, where string) {
+	if !s.Lastlineno.IsKnown() {
+		pkgName := DotImportRefs[s.Def.(*ir.Ident)]
+		base.ErrorfAt(pos, "%v redeclared %s\n"+
+			"\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
+	} else {
+		prevPos := s.Lastlineno
+
+		// When an import and a declaration collide in separate files,
+		// present the import as the "redeclared", because the declaration
+		// is visible where the import is, but not vice versa.
+		// See issue 4510.
+		if s.Def == nil {
+			pos, prevPos = prevPos, pos
+		}
+
+		base.ErrorfAt(pos, "%v redeclared %s\n"+
+			"\t%v: previous declaration", s, where, base.FmtPos(prevPos))
+	}
+}
+
+// StartFuncBody declares the function proper and its arguments.
+// It is called in extern-declaration context and returns in
+// auto-declaration context.
+func StartFuncBody(fn *ir.Func) {
+	// change the declaration context from extern to auto
+	funcStack = append(funcStack, funcStackEnt{ir.CurFunc, DeclContext})
+	ir.CurFunc = fn
+	DeclContext = ir.PAUTO
+
+	types.Markdcl()
+
+	if fn.Nname.Ntype != nil {
+		funcargs(fn.Nname.Ntype.(*ir.FuncType))
+	} else {
+		funcargs2(fn.Type())
+	}
+}
+
+// FinishFuncBody finishes the function body.
+// It is called in auto-declaration context and returns in
+// extern-declaration context.
+func FinishFuncBody() {
+	// change the declaration context from auto to previous context
+	types.Popdcl()
+	var e funcStackEnt
+	funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
+	ir.CurFunc, DeclContext = e.curfn, e.dclcontext
+}
+
+func CheckFuncStack() {
+	if len(funcStack) != 0 {
+		base.Fatalf("funcStack is non-empty: %v", len(funcStack))
+	}
+}
+
+// addmethod adds a method, declared as a function, to the receiver's
+// base type.
+// - msym is the method symbol
+// - t is the function type (with receiver)
+// It returns a pointer to the existing or added Field, or nil if
+// there was an error.
+func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+	if msym == nil {
+		base.Fatalf("no method symbol")
+	}
+
+	// get the receiver's base type symbol
+	rf := t.Recv() // receiver parameter of the method
+	if rf == nil {
+		base.Errorf("missing receiver")
+		return nil
+	}
+
+	mt := types.ReceiverBaseType(rf.Type)
+	if mt == nil || mt.Sym() == nil {
+		pa := rf.Type
+		t := pa
+		if t != nil && t.IsPtr() {
+			if t.Sym() != nil {
+				base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
+				return nil
+			}
+			t = t.Elem()
+		}
+
+		switch {
+		case t == nil || t.Broke():
+			// rely on typecheck having complained before
+		case t.Sym() == nil:
+			base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
+		case t.IsPtr():
+			base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
+		case t.IsInterface():
+			base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
+		default:
+			// Should have picked off all the reasons above,
+			// but just in case, fall back to generic error.
+			base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
+		}
+		return nil
+	}
+
+	if local && mt.Sym().Pkg != types.LocalPkg {
+		base.Errorf("cannot define new methods on non-local type %v", mt)
+		return nil
+	}
+
+	if msym.IsBlank() {
+		return nil
+	}
+
+	if mt.IsStruct() {
+		for _, f := range mt.Fields().Slice() {
+			if f.Sym == msym {
+				base.Errorf("type %v has both field and method named %v", mt, msym)
+				f.SetBroke(true)
+				return nil
+			}
+		}
+	}
+
+	for _, f := range mt.Methods().Slice() {
+		if msym.Name != f.Sym.Name {
+			continue
+		}
+		// types.Identical only checks that incoming and result parameters match,
+		// so explicitly check that the receiver parameters match too.
+		if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
+			base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
+		}
+		return f
+	}
+
+	f := types.NewField(base.Pos, msym, t)
+	f.Nname = n.Nname
+	f.SetNointerface(nointerface)
+
+	mt.Methods().Append(f)
+	return f
+}
+
+func autoexport(n *ir.Name, ctxt ir.Class) {
+	if n.Sym().Pkg != types.LocalPkg {
+		return
+	}
+	if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || DeclContext != ir.PEXTERN {
+		return
+	}
+	if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
+		return
+	}
+
+	if types.IsExported(n.Sym().Name) || n.Sym().Name == "init" {
+		Export(n)
+	}
+	if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
+		n.Sym().SetAsm(true)
+		Target.Asms = append(Target.Asms, n)
+	}
+}
+
+// checkdupfields emits errors for duplicate field or method names in
+// a list of struct or interface types.
+func checkdupfields(what string, fss ...[]*types.Field) {
+	seen := make(map[*types.Sym]bool)
+	for _, fs := range fss {
+		for _, f := range fs {
+			if f.Sym == nil || f.Sym.IsBlank() {
+				continue
+			}
+			if seen[f.Sym] {
+				base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name)
+				continue
+			}
+			seen[f.Sym] = true
+		}
+	}
+}
+
+// checkembeddedtype checks that t is valid as an embedded field type.
+// (These checks don't quite belong here, but it's unclear where they
+// do belong.)
+func checkembeddedtype(t *types.Type) {
+	if t == nil {
+		return
+	}
+
+	if t.Sym() == nil && t.IsPtr() {
+		t = t.Elem()
+		if t.IsInterface() {
+			base.Errorf("embedded type cannot be a pointer to interface")
+		}
+	}
+
+	if t.IsPtr() || t.IsUnsafePtr() {
+		base.Errorf("embedded type cannot be a pointer")
+	} else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+		t.ForwardType().Embedlineno = base.Pos
+	}
+}
+
+// TODO(mdempsky): Move to package types.
+func FakeRecv() *types.Field {
+	return types.NewField(src.NoXPos, nil, types.FakeRecvType())
+}
+
+var fakeRecvField = FakeRecv
+
+var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext
+
+type funcStackEnt struct {
+	curfn      *ir.Func
+	dclcontext ir.Class
+}
+
+func funcarg(n *ir.Field, ctxt ir.Class) {
+	if n.Sym == nil {
+		return
+	}
+
+	name := ir.NewNameAt(n.Pos, n.Sym)
+	n.Decl = name
+	name.Ntype = n.Ntype
+	Declare(name, ctxt)
+}
+
+func funcarg2(f *types.Field, ctxt ir.Class) {
+	if f.Sym == nil {
+		return
+	}
+	n := ir.NewNameAt(f.Pos, f.Sym)
+	f.Nname = n
+	n.SetType(f.Type)
+	Declare(n, ctxt)
+}
+
+func funcargs(nt *ir.FuncType) {
+	if nt.Op() != ir.OTFUNC {
+		base.Fatalf("funcargs %v", nt.Op())
+	}
+
+	// declare the receiver and in arguments.
+	if nt.Recv != nil {
+		funcarg(nt.Recv, ir.PPARAM)
+	}
+	for _, n := range nt.Params {
+		funcarg(n, ir.PPARAM)
+	}
+
+	// declare the out arguments.
+	gen := len(nt.Params)
+	for _, n := range nt.Results {
+		if n.Sym == nil {
+			// Name so that escape analysis can track it. ~r stands for 'result'.
+			n.Sym = LookupNum("~r", gen)
+			gen++
+		}
+		if n.Sym.IsBlank() {
+			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
+			// The name must be different from ~r above because if you have
+			//	func f() (_ int)
+			//	func g() int
+			// f is allowed to use a plain 'return' with no arguments, while g is not.
+			// So the two cases must be distinguished.
+			n.Sym = LookupNum("~b", gen)
+			gen++
+		}
+
+		funcarg(n, ir.PPARAMOUT)
+	}
+}
+
+// funcargs2 is the same as funcargs, except it runs over an already
+// constructed TFUNC. This happens during import, where the function
+// type has already been constructed directly rather than from an
+// ir.FuncType node.
+func funcargs2(t *types.Type) {
+	if t.Kind() != types.TFUNC {
+		base.Fatalf("funcargs2 %v", t)
+	}
+
+	for _, f := range t.Recvs().Fields().Slice() {
+		funcarg2(f, ir.PPARAM)
+	}
+	for _, f := range t.Params().Fields().Slice() {
+		funcarg2(f, ir.PPARAM)
+	}
+	for _, f := range t.Results().Fields().Slice() {
+		funcarg2(f, ir.PPARAMOUT)
+	}
+}
+
+func Temp(t *types.Type) *ir.Name {
+	return TempAt(base.Pos, ir.CurFunc, t)
+}
+
+// TempAt makes a new temporary variable "off the books", i.e. one
+// not declared in any source scope.
+func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
+	if curfn == nil {
+		base.Fatalf("no curfn for TempAt")
+	}
+	if curfn.Op() == ir.OCLOSURE {
+		ir.Dump("TempAt", curfn)
+		base.Fatalf("adding TempAt to wrong closure function")
+	}
+	if t == nil {
+		base.Fatalf("TempAt called with nil type")
+	}
+	if t.Kind() == types.TFUNC && t.Recv() != nil {
+		base.Fatalf("misuse of method type: %v", t)
+	}
+
+	s := &types.Sym{
+		Name: autotmpname(len(curfn.Dcl)),
+		Pkg:  types.LocalPkg,
+	}
+	n := ir.NewNameAt(pos, s)
+	s.Def = n
+	n.SetType(t)
+	n.Class = ir.PAUTO
+	n.SetEsc(ir.EscNever)
+	n.Curfn = curfn
+	n.SetUsed(true)
+	n.SetAutoTemp(true)
+	curfn.Dcl = append(curfn.Dcl, n)
+
+	types.CalcSize(t)
+
+	return n
+}
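+
+// A hedged usage sketch (not from the original source): lowering
+// passes allocate scratch variables through this API, e.g.
+//
+//	tmp := TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+//
+// which yields a fresh ".autotmp_N" int variable recorded in
+// ir.CurFunc.Dcl.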
+
+// autotmpname returns the name for an autotmp variable numbered n.
+func autotmpname(n int) string {
+	// Give each tmp a different name so that they can be registerized.
+	// Add a preceding . to avoid clashing with legal names.
+	const prefix = ".autotmp_"
+	// Start with a buffer big enough to hold a large n.
+	b := []byte(prefix + "      ")[:len(prefix)]
+	b = strconv.AppendInt(b, int64(n), 10)
+	return types.InternString(b)
+}
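+
+// For example, autotmpname(3) returns ".autotmp_3".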
+
+// NewMethodType builds a function type from the method type sig,
+// prepending the receiver recv as the first argument (or omitting it
+// when recv is nil).
+func NewMethodType(sig *types.Type, recv *types.Type) *types.Type {
+	nrecvs := 0
+	if recv != nil {
+		nrecvs++
+	}
+
+	// TODO(mdempsky): Move this function to types.
+	// TODO(mdempsky): Preserve positions, names, and package from sig+recv.
+
+	params := make([]*types.Field, nrecvs+sig.Params().Fields().Len())
+	if recv != nil {
+		params[0] = types.NewField(base.Pos, nil, recv)
+	}
+	for i, param := range sig.Params().Fields().Slice() {
+		d := types.NewField(base.Pos, nil, param.Type)
+		d.SetIsDDD(param.IsDDD())
+		params[nrecvs+i] = d
+	}
+
+	results := make([]*types.Field, sig.Results().Fields().Len())
+	for i, t := range sig.Results().Fields().Slice() {
+		results[i] = types.NewField(base.Pos, nil, t.Type)
+	}
+
+	return types.NewSignature(types.LocalPkg, nil, params, results)
+}
diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
new file mode 100644
index 0000000..63d0a1e
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -0,0 +1,74 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// importalias declares symbol s as an imported type alias with type t.
+// ipkg is the package being imported
+func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+	return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
+}
+
+// importconst declares symbol s as an imported constant with type t and value val.
+// ipkg is the package being imported
+func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
+	n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
+	n.SetVal(val)
+	return n
+}
+
+// importfunc declares symbol s as an imported function with type t.
+// ipkg is the package being imported
+func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+	n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
+	n.Func = ir.NewFunc(pos)
+	n.Func.Nname = n
+	return n
+}
+
+// importobj declares symbol s as an imported object representable by op.
+// ipkg is the package being imported
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
+	n := importsym(ipkg, pos, s, op, ctxt)
+	n.SetType(t)
+	if ctxt == ir.PFUNC {
+		n.Sym().SetFunc(true)
+	}
+	return n
+}
+
+func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
+	if n := s.PkgDef(); n != nil {
+		base.Fatalf("importsym of symbol that already exists: %v", n)
+	}
+
+	n := ir.NewDeclNameAt(pos, op, s)
+	n.Class = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
+	s.SetPkgDef(n)
+	return n
+}
+
+// importtype returns the named type declared by symbol s.
+// If no such type has been declared yet, a forward declaration is returned.
+// ipkg is the package being imported
+func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
+	n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
+	n.SetType(types.NewNamed(n))
+	return n
+}
+
+// importvar declares symbol s as an imported variable with type t.
+// ipkg is the package being imported
+func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+	return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
+}
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
new file mode 100644
index 0000000..339fb00
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -0,0 +1,877 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"fmt"
+	"go/constant"
+	"go/token"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+)
+
+// tcAddr typechecks an OADDR node.
+func tcAddr(n *ir.AddrExpr) ir.Node {
+	n.X = Expr(n.X)
+	if n.X.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	switch n.X.Op() {
+	case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+		n.SetOp(ir.OPTRLIT)
+
+	default:
+		checklvalue(n.X, "take the address of")
+		r := ir.OuterValue(n.X)
+		if r.Op() == ir.ONAME {
+			r := r.(*ir.Name)
+			if ir.Orig(r) != r {
+				base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+			}
+		}
+		n.X = DefaultLit(n.X, nil)
+		if n.X.Type() == nil {
+			n.SetType(nil)
+			return n
+		}
+	}
+
+	n.SetType(types.NewPtr(n.X.Type()))
+	return n
+}
+
+func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+	if l.Type() == nil || r.Type() == nil {
+		return l, r, nil
+	}
+
+	r = DefaultLit(r, types.Types[types.TUINT])
+	t := r.Type()
+	if !t.IsInteger() {
+		base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+		return l, r, nil
+	}
+	if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) {
+		base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
+		return l, r, nil
+	}
+	t = l.Type()
+	if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
+		base.Errorf("invalid operation: %v (shift of type %v)", n, t)
+		return l, r, nil
+	}
+
+	// No DefaultLit for the left operand;
+	// the outer context gives its type.
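+	// For example, in var i int64 = 1 << s (s non-constant), the
+	// untyped 1 takes type int64 from the assignment context.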
+	t = l.Type()
+	if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+		t = types.UntypedInt
+	}
+	return l, r, t
+}
+
+// tcArith typechecks operands of a binary arithmetic expression.
+// The result of tcArith MUST be assigned back to the original operands;
+// t is the type of the expression, and should be set by the caller, e.g.:
+//     n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
+//     n.SetType(t)
+func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+	l, r = defaultlit2(l, r, false)
+	if l.Type() == nil || r.Type() == nil {
+		return l, r, nil
+	}
+	t := l.Type()
+	if t.Kind() == types.TIDEAL {
+		t = r.Type()
+	}
+	aop := ir.OXXX
+	if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+		// Comparison is okay as long as one side is
+		// assignable to the other. Convert so they have
+		// the same type.
+		//
+		// the only conversion that isn't a no-op is concrete == interface.
+		// in that case, check comparability of the concrete type.
+		// The conversion allocates, so only do it if the concrete type is huge.
+		converted := false
+		if r.Type().Kind() != types.TBLANK {
+			aop, _ = assignop(l.Type(), r.Type())
+			if aop != ir.OXXX {
+				if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) {
+					base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+					return l, r, nil
+				}
+
+				types.CalcSize(l.Type())
+				if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
+					l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
+					l.SetTypecheck(1)
+				}
+
+				t = r.Type()
+				converted = true
+			}
+		}
+
+		if !converted && l.Type().Kind() != types.TBLANK {
+			aop, _ = assignop(r.Type(), l.Type())
+			if aop != ir.OXXX {
+				if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) {
+					base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+					return l, r, nil
+				}
+
+				types.CalcSize(r.Type())
+				if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
+					r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
+					r.SetTypecheck(1)
+				}
+
+				t = l.Type()
+			}
+		}
+	}
+
+	if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+		l, r = defaultlit2(l, r, true)
+		if l.Type() == nil || r.Type() == nil {
+			return l, r, nil
+		}
+		if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+			base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+			return l, r, nil
+		}
+	}
+
+	if t.Kind() == types.TIDEAL {
+		t = mixUntyped(l.Type(), r.Type())
+	}
+	if dt := defaultType(t); !okfor[op][dt.Kind()] {
+		base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+		return l, r, nil
+	}
+
+	// okfor allows any array == array, map == map, func == func.
+	// restrict to slice/map/func == nil and nil == slice/map/func.
+	if l.Type().IsArray() && !types.IsComparable(l.Type()) {
+		base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+		return l, r, nil
+	}
+
+	if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+		base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
+		return l, r, nil
+	}
+
+	if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+		base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
+		return l, r, nil
+	}
+
+	if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+		base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
+		return l, r, nil
+	}
+
+	if l.Type().IsStruct() {
+		if f := types.IncomparableField(l.Type()); f != nil {
+			base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+			return l, r, nil
+		}
+	}
+
+	if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
+		if constant.Sign(r.Val()) == 0 {
+			base.Errorf("division by zero")
+			return l, r, nil
+		}
+	}
+
+	return l, r, t
+}
+
+// The result of tcCompLit MUST be assigned back to n, e.g.
+// 	n.Left = tcCompLit(n.Left)
+func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("tcCompLit", n)(&res)
+	}
+
+	lno := base.Pos
+	defer func() {
+		base.Pos = lno
+	}()
+
+	if n.Ntype == nil {
+		base.ErrorfAt(n.Pos(), "missing type in composite literal")
+		n.SetType(nil)
+		return n
+	}
+
+	// Save original node (including n.Right)
+	n.SetOrig(ir.Copy(n))
+
+	ir.SetPos(n.Ntype)
+
+	// Need to handle [...]T arrays specially.
+	if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil {
+		array.Elem = typecheckNtype(array.Elem)
+		elemType := array.Elem.Type()
+		if elemType == nil {
+			n.SetType(nil)
+			return n
+		}
+		length := typecheckarraylit(elemType, -1, n.List, "array literal")
+		n.SetOp(ir.OARRAYLIT)
+		n.SetType(types.NewArray(elemType, length))
+		n.Ntype = nil
+		return n
+	}
+
+	n.Ntype = typecheckNtype(n.Ntype)
+	t := n.Ntype.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	n.SetType(t)
+
+	switch t.Kind() {
+	default:
+		base.Errorf("invalid composite literal type %v", t)
+		n.SetType(nil)
+
+	case types.TARRAY:
+		typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal")
+		n.SetOp(ir.OARRAYLIT)
+		n.Ntype = nil
+
+	case types.TSLICE:
+		length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal")
+		n.SetOp(ir.OSLICELIT)
+		n.Ntype = nil
+		n.Len = length
+
+	case types.TMAP:
+		var cs constSet
+		for i3, l := range n.List {
+			ir.SetPos(l)
+			if l.Op() != ir.OKEY {
+				n.List[i3] = Expr(l)
+				base.Errorf("missing key in map literal")
+				continue
+			}
+			l := l.(*ir.KeyExpr)
+
+			r := l.Key
+			r = pushtype(r, t.Key())
+			r = Expr(r)
+			l.Key = AssignConv(r, t.Key(), "map key")
+			cs.add(base.Pos, l.Key, "key", "map literal")
+
+			r = l.Value
+			r = pushtype(r, t.Elem())
+			r = Expr(r)
+			l.Value = AssignConv(r, t.Elem(), "map value")
+		}
+
+		n.SetOp(ir.OMAPLIT)
+		n.Ntype = nil
+
+	case types.TSTRUCT:
+		// Need valid field offsets for Xoffset below.
+		types.CalcSize(t)
+
+		errored := false
+		if len(n.List) != 0 && nokeys(n.List) {
+			// simple list of variables
+			ls := n.List
+			for i, n1 := range ls {
+				ir.SetPos(n1)
+				n1 = Expr(n1)
+				ls[i] = n1
+				if i >= t.NumFields() {
+					if !errored {
+						base.Errorf("too many values in %v", n)
+						errored = true
+					}
+					continue
+				}
+
+				f := t.Field(i)
+				s := f.Sym
+				if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+					base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+				}
+				// No pushtype allowed here. Must name fields for that.
+				n1 = AssignConv(n1, f.Type, "field value")
+				sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
+				sk.Offset = f.Offset
+				ls[i] = sk
+			}
+			if len(ls) < t.NumFields() {
+				base.Errorf("too few values in %v", n)
+			}
+		} else {
+			hash := make(map[string]bool)
+
+			// keyed list
+			ls := n.List
+			for i, l := range ls {
+				ir.SetPos(l)
+
+				if l.Op() == ir.OKEY {
+					kv := l.(*ir.KeyExpr)
+					key := kv.Key
+
+					// Sym might have resolved to name in other top-level
+					// package, because of import dot. Redirect to correct sym
+					// before we do the lookup.
+					s := key.Sym()
+					if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
+						s = Lookup(s.Name)
+					}
+
+					// An OXDOT uses the Sym field to hold
+					// the field to the right of the dot,
+					// so s will be non-nil, but an OXDOT
+					// is never a valid struct literal key.
+					if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
+						base.Errorf("invalid field name %v in struct initializer", key)
+						continue
+					}
+
+					l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
+					ls[i] = l
+				}
+
+				if l.Op() != ir.OSTRUCTKEY {
+					if !errored {
+						base.Errorf("mixture of field:value and value initializers")
+						errored = true
+					}
+					ls[i] = Expr(ls[i])
+					continue
+				}
+				l := l.(*ir.StructKeyExpr)
+
+				f := lookdot1(nil, l.Field, t, t.Fields(), 0)
+				if f == nil {
+					if ci := lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
+						if visible(ci.Sym) {
+							base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym)
+						} else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+							base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t)
+						} else {
+							base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+						}
+						continue
+					}
+					var f *types.Field
+					p, _ := dotpath(l.Field, t, &f, true)
+					if p == nil || f.IsMethod() {
+						base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+						continue
+					}
+					// dotpath returns the parent embedded types in reverse order.
+					var ep []string
+					for ei := len(p) - 1; ei >= 0; ei-- {
+						ep = append(ep, p[ei].field.Sym.Name)
+					}
+					ep = append(ep, l.Field.Name)
+					base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
+					continue
+				}
+				fielddup(f.Sym.Name, hash)
+				l.Offset = f.Offset
+
+				// No pushtype allowed here. Tried and rejected.
+				l.Value = Expr(l.Value)
+				l.Value = AssignConv(l.Value, f.Type, "field value")
+			}
+		}
+
+		n.SetOp(ir.OSTRUCTLIT)
+		n.Ntype = nil
+	}
+
+	return n
+}
+
+// tcConv typechecks an OCONV node.
+func tcConv(n *ir.ConvExpr) ir.Node {
+	types.CheckSize(n.Type()) // ensure width is calculated for backend
+	n.X = Expr(n.X)
+	n.X = convlit1(n.X, n.Type(), true, nil)
+	t := n.X.Type()
+	if t == nil || n.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	op, why := convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+	if op == ir.OXXX {
+		if !n.Diag() && !n.Type().Broke() && !n.X.Diag() {
+			base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why)
+			n.SetDiag(true)
+		}
+		n.SetOp(ir.OCONV)
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetOp(op)
+	switch n.Op() {
+	case ir.OCONVNOP:
+		if t.Kind() == n.Type().Kind() {
+			switch t.Kind() {
+			case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+				// Floating point casts imply rounding and
+				// so the conversion must be kept.
+				n.SetOp(ir.OCONV)
+			}
+		}
+
+	// Do not convert to []byte literal. See CL 125796.
+	// Generated code and compiler memory footprint are better without it.
+	case ir.OSTR2BYTES:
+		// ok
+
+	case ir.OSTR2RUNES:
+		if n.X.Op() == ir.OLITERAL {
+			return stringtoruneslit(n)
+		}
+	}
+	return n
+}
+
+// tcDot typechecks an OXDOT or ODOT node.
+func tcDot(n *ir.SelectorExpr, top int) ir.Node {
+	if n.Op() == ir.OXDOT {
+		n = AddImplicitDots(n)
+		n.SetOp(ir.ODOT)
+		if n.X == nil {
+			n.SetType(nil)
+			return n
+		}
+	}
+
+	n.X = typecheck(n.X, ctxExpr|ctxType)
+	n.X = DefaultLit(n.X, nil)
+
+	t := n.X.Type()
+	if t == nil {
+		base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n))
+		n.SetType(nil)
+		return n
+	}
+
+	if n.X.Op() == ir.OTYPE {
+		return typecheckMethodExpr(n)
+	}
+
+	if t.IsPtr() && !t.Elem().IsInterface() {
+		t = t.Elem()
+		if t == nil {
+			n.SetType(nil)
+			return n
+		}
+		n.SetOp(ir.ODOTPTR)
+		types.CheckSize(t)
+	}
+
+	if n.Sel.IsBlank() {
+		base.Errorf("cannot refer to blank field or method")
+		n.SetType(nil)
+		return n
+	}
+
+	if lookdot(n, t, 0) == nil {
+		// Legitimate field or method lookup failed, try to explain the error
+		switch {
+		case t.IsEmptyInterface():
+			base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type())
+
+		case t.IsPtr() && t.Elem().IsInterface():
+			// Pointer to interface is almost always a mistake.
+			base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type())
+
+		case lookdot(n, t, 1) != nil:
+			// Field or method matches by name, but it is not exported.
+			base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel)
+
+		default:
+			if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
+				base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym)
+			} else {
+				base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel)
+			}
+		}
+		n.SetType(nil)
+		return n
+	}
+
+	if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
+		n.SetOp(ir.OCALLPART)
+		n.SetType(MethodValueWrapper(n).Type())
+	}
+	return n
+}
+
+// tcDotType typechecks an ODOTTYPE node.
+func tcDotType(n *ir.TypeAssertExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsInterface() {
+		base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	if n.Ntype != nil {
+		n.Ntype = typecheckNtype(n.Ntype)
+		n.SetType(n.Ntype.Type())
+		n.Ntype = nil
+		if n.Type() == nil {
+			return n
+		}
+	}
+
+	if n.Type() != nil && !n.Type().IsInterface() {
+		var missing, have *types.Field
+		var ptr int
+		if !implements(n.Type(), t, &missing, &have, &ptr) {
+			if have != nil && have.Sym == missing.Sym {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
+					"\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+			} else if ptr != 0 {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
+			} else if have != nil {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
+					"\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+			} else {
+				base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
+			}
+			n.SetType(nil)
+			return n
+		}
+	}
+	return n
+}
+
+// tcITab typechecks an OITAB node.
+func tcITab(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	t := n.X.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsInterface() {
+		base.Fatalf("OITAB of %v", t)
+	}
+	n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
+	return n
+}
+
+// tcIndex typechecks an OINDEX node.
+func tcIndex(n *ir.IndexExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	n.X = implicitstar(n.X)
+	l := n.X
+	n.Index = Expr(n.Index)
+	r := n.Index
+	t := l.Type()
+	if t == nil || r.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	switch t.Kind() {
+	default:
+		base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
+		n.SetType(nil)
+		return n
+
+	case types.TSTRING, types.TARRAY, types.TSLICE:
+		n.Index = indexlit(n.Index)
+		if t.IsString() {
+			n.SetType(types.ByteType)
+		} else {
+			n.SetType(t.Elem())
+		}
+		why := "string"
+		if t.IsArray() {
+			why = "array"
+		} else if t.IsSlice() {
+			why = "slice"
+		}
+
+		if n.Index.Type() != nil && !n.Index.Type().IsInteger() {
+			base.Errorf("non-integer %s index %v", why, n.Index)
+			return n
+		}
+
+		if !n.Bounded() && ir.IsConst(n.Index, constant.Int) {
+			x := n.Index.Val()
+			if constant.Sign(x) < 0 {
+				base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index)
+			} else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
+				base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem())
+			} else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) {
+				base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X)))
+			} else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+				base.Errorf("invalid %s index %v (index too large)", why, n.Index)
+			}
+		}
+
+	case types.TMAP:
+		n.Index = AssignConv(n.Index, t.Key(), "map index")
+		n.SetType(t.Elem())
+		n.SetOp(ir.OINDEXMAP)
+		n.Assigned = false
+	}
+	return n
+}
+
+// tcLenCap typechecks an OLEN or OCAP node.
+func tcLenCap(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	n.X = implicitstar(n.X)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	var ok bool
+	if n.Op() == ir.OLEN {
+		ok = okforlen[t.Kind()]
+	} else {
+		ok = okforcap[t.Kind()]
+	}
+	if !ok {
+		base.Errorf("invalid argument %L for %v", l, n.Op())
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(types.Types[types.TINT])
+	return n
+}
+
+// tcRecv typechecks an ORECV node.
+func tcRecv(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	if !t.ChanDir().CanRecv() {
+		base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(t.Elem())
+	return n
+}
+
+// tcSPtr typechecks an OSPTR node.
+func tcSPtr(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	t := n.X.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsSlice() && !t.IsString() {
+		base.Fatalf("OSPTR of %v", t)
+	}
+	if t.IsString() {
+		n.SetType(types.NewPtr(types.Types[types.TUINT8]))
+	} else {
+		n.SetType(types.NewPtr(t.Elem()))
+	}
+	return n
+}
+
+// tcSlice typechecks an OSLICE or OSLICE3 node.
+func tcSlice(n *ir.SliceExpr) ir.Node {
+	n.X = DefaultLit(Expr(n.X), nil)
+	n.Low = indexlit(Expr(n.Low))
+	n.High = indexlit(Expr(n.High))
+	n.Max = indexlit(Expr(n.Max))
+	hasmax := n.Op().IsSlice3()
+	l := n.X
+	if l.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	if l.Type().IsArray() {
+		if !ir.IsAddressable(n.X) {
+			base.Errorf("invalid operation %v (slice of unaddressable value)", n)
+			n.SetType(nil)
+			return n
+		}
+
+		addr := NodAddr(n.X)
+		addr.SetImplicit(true)
+		n.X = Expr(addr)
+		l = n.X
+	}
+	t := l.Type()
+	var tp *types.Type
+	if t.IsString() {
+		if hasmax {
+			base.Errorf("invalid operation %v (3-index slice of string)", n)
+			n.SetType(nil)
+			return n
+		}
+		n.SetType(t)
+		n.SetOp(ir.OSLICESTR)
+	} else if t.IsPtr() && t.Elem().IsArray() {
+		tp = t.Elem()
+		n.SetType(types.NewSlice(tp.Elem()))
+		types.CalcSize(n.Type())
+		if hasmax {
+			n.SetOp(ir.OSLICE3ARR)
+		} else {
+			n.SetOp(ir.OSLICEARR)
+		}
+	} else if t.IsSlice() {
+		n.SetType(t)
+	} else {
+		base.Errorf("cannot slice %v (type %v)", l, t)
+		n.SetType(nil)
+		return n
+	}
+
+	if n.Low != nil && !checksliceindex(l, n.Low, tp) {
+		n.SetType(nil)
+		return n
+	}
+	if n.High != nil && !checksliceindex(l, n.High, tp) {
+		n.SetType(nil)
+		return n
+	}
+	if n.Max != nil && !checksliceindex(l, n.Max, tp) {
+		n.SetType(nil)
+		return n
+	}
+	if !checksliceconst(n.Low, n.High) || !checksliceconst(n.Low, n.Max) || !checksliceconst(n.High, n.Max) {
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcSliceHeader typechecks an OSLICEHEADER node.
+func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node {
+	// Errors here are Fatalf instead of Errorf because only the compiler
+	// can construct an OSLICEHEADER node.
+	// Components used in OSLICEHEADER that are supplied by parsed source code
+	// have already been typechecked in e.g. OMAKESLICE earlier.
+	t := n.Type()
+	if t == nil {
+		base.Fatalf("no type specified for OSLICEHEADER")
+	}
+
+	if !t.IsSlice() {
+		base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
+	}
+
+	if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() {
+		base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
+	}
+
+	n.Ptr = Expr(n.Ptr)
+	n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT])
+	n.Cap = DefaultLit(Expr(n.Cap), types.Types[types.TINT])
+
+	if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 {
+		base.Fatalf("len for OSLICEHEADER must be non-negative")
+	}
+
+	if ir.IsConst(n.Cap, constant.Int) && ir.Int64Val(n.Cap) < 0 {
+		base.Fatalf("cap for OSLICEHEADER must be non-negative")
+	}
+
+	if ir.IsConst(n.Len, constant.Int) && ir.IsConst(n.Cap, constant.Int) && constant.Compare(n.Len.Val(), token.GTR, n.Cap.Val()) {
+		base.Fatalf("len larger than cap for OSLICEHEADER")
+	}
+
+	return n
+}
+
+// tcStar typechecks an ODEREF node, which may be an expression or a type.
+func tcStar(n *ir.StarExpr, top int) ir.Node {
+	n.X = typecheck(n.X, ctxExpr|ctxType)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if l.Op() == ir.OTYPE {
+		n.SetOTYPE(types.NewPtr(l.Type()))
+		// Ensure l.Type gets CalcSize'd for the backend. Issue 20174.
+		types.CheckSize(l.Type())
+		return n
+	}
+
+	if !t.IsPtr() {
+		if top&(ctxExpr|ctxStmt) != 0 {
+			base.Errorf("invalid indirect of %L", n.X)
+			n.SetType(nil)
+			return n
+		}
+		base.Errorf("%v is not a type", l)
+		return n
+	}
+
+	n.SetType(t.Elem())
+	return n
+}
+
+// tcUnaryArith typechecks a unary arithmetic expression.
+func tcUnaryArith(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !okfor[n.Op()][defaultType(t).Kind()] {
+		base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(t)
+	return n
+}
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
new file mode 100644
index 0000000..7ab5f68
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -0,0 +1,972 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+
+	"fmt"
+	"go/constant"
+	"go/token"
+)
+
+// MakeDotArgs packages all the arguments that match a ...T parameter
+// into a []T slice.
+func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node {
+	var n ir.Node
+	if len(args) == 0 {
+		n = NodNil()
+		n.SetType(typ)
+	} else {
+		lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
+		lit.List.Append(args...)
+		lit.SetImplicit(true)
+		n = lit
+	}
+
+	n = Expr(n)
+	if n.Type() == nil {
+		base.Fatalf("mkdotargslice: typecheck failed")
+	}
+	return n
+}
+
+// FixVariadicCall rewrites calls to variadic functions to use an
+// explicit ... argument if one is not already present.
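+//
+// For example, given func f(x int, ys ...int), a call f(a, b, c) is
+// rewritten to f(a, []int{b, c}...).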
+func FixVariadicCall(call *ir.CallExpr) {
+	fntype := call.X.Type()
+	if !fntype.IsVariadic() || call.IsDDD {
+		return
+	}
+
+	vi := fntype.NumParams() - 1
+	vt := fntype.Params().Field(vi).Type
+
+	args := call.Args
+	extra := args[vi:]
+	slice := MakeDotArgs(vt, extra)
+	for i := range extra {
+		extra[i] = nil // allow GC
+	}
+
+	call.Args = append(args[:vi], slice)
+	call.IsDDD = true
+}
+
+// ClosureType returns the struct type used to hold all the information
+// needed in the closure for clo (clo must be a OCLOSURE node).
+// The address of a variable of the returned type can be cast to a func.
+func ClosureType(clo *ir.ClosureExpr) *types.Type {
+	// Create closure in the form of a composite literal.
+	// supposing the closure captures an int i and a string s
+	// and has one float64 argument and no results,
+	// the generated code looks like:
+	//
+	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
+	//
+	// The use of the struct provides type information to the garbage
+	// collector so that it can walk the closure. We could use (in this case)
+	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+	// The information appears in the binary in the form of type descriptors;
+	// the struct is unnamed so that closures in multiple packages with the
+	// same struct type can share the descriptor.
+	fields := []*types.Field{
+		types.NewField(base.Pos, Lookup(".F"), types.Types[types.TUINTPTR]),
+	}
+	for _, v := range clo.Func.ClosureVars {
+		typ := v.Type()
+		if !v.Byval() {
+			typ = types.NewPtr(typ)
+		}
+		fields = append(fields, types.NewField(base.Pos, v.Sym(), typ))
+	}
+	typ := types.NewStruct(types.NoPkg, fields)
+	typ.SetNoalg(true)
+	return typ
+}
+
+// PartialCallType returns the struct type used to hold all the information
+// needed in the closure for n (n must be a OCALLPART node).
+// The address of a variable of the returned type can be cast to a func.
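+//
+// For example, for a method value x.M where x has type T, the struct is
+// struct{ F uintptr; R T }.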
+func PartialCallType(n *ir.SelectorExpr) *types.Type {
+	t := types.NewStruct(types.NoPkg, []*types.Field{
+		types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]),
+		types.NewField(base.Pos, Lookup("R"), n.X.Type()),
+	})
+	t.SetNoalg(true)
+	return t
+}
+
+// ImportedBody lazily typechecks imported function bodies. For local
+// functions, CanInline will set ->typecheck, because they're a copy of
+// an already checked body.
+func ImportedBody(fn *ir.Func) {
+	lno := ir.SetPos(fn.Nname)
+
+	// When we load an inlined body, we need to allow OADDR
+	// operations on untyped expressions. We will fix the
+	// addrtaken flags on all the arguments of the OADDR with the
+	// computeAddrtaken call below (after we typecheck the body).
+	// TODO: export/import types and addrtaken marks along with inlined bodies,
+	// so this will be unnecessary.
+	IncrementalAddrtaken = false
+	defer func() {
+		if DirtyAddrtaken {
+			ComputeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available
+			DirtyAddrtaken = false
+		}
+		IncrementalAddrtaken = true
+	}()
+
+	ImportBody(fn)
+
+	// Stmts(fn.Inl.Body) below is only for imported functions;
+	// their bodies may refer to unsafe as long as the package
+	// was marked safe during import (which was checked then).
+	// the ->inl of a local function has been typechecked before CanInline copied it.
+	pkg := fnpkg(fn.Nname)
+
+	if pkg == types.LocalPkg || pkg == nil {
+		return // ImportedBody on local function
+	}
+
+	if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
+		fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body))
+	}
+
+	savefn := ir.CurFunc
+	ir.CurFunc = fn
+	Stmts(fn.Inl.Body)
+	ir.CurFunc = savefn
+
+	// During ImportBody (which imports fn.Func.Inl.Body),
+	// declarations are added to fn.Func.Dcl by funcBody(). Move them
+	// to fn.Func.Inl.Dcl for consistency with how local functions
+	// behave. (Append because ImportedBody may be called multiple
+	// times on same fn.)
+	fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
+	fn.Dcl = nil
+
+	base.Pos = lno
+}
+
+// fnpkg returns the function's package. For ordinary functions it's on
+// the ->sym, but for imported methods the ->sym can be re-used in the
+// local package, so peel it off the receiver's type instead.
+func fnpkg(fn *ir.Name) *types.Pkg {
+	if ir.IsMethod(fn) {
+		// method
+		rcvr := fn.Type().Recv().Type
+
+		if rcvr.IsPtr() {
+			rcvr = rcvr.Elem()
+		}
+		if rcvr.Sym() == nil {
+			base.Fatalf("receiver with no sym: [%v] %L  (%v)", fn.Sym(), fn, rcvr)
+		}
+		return rcvr.Sym().Pkg
+	}
+
+	// non-method
+	return fn.Sym().Pkg
+}
+
+// ClosureName generates a new unique name for a closure within
+// outerfunc.
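+//
+// For example, the first closure in function Foo is named Foo.func1,
+// and a closure at package scope is named glob..func1.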
+func ClosureName(outerfunc *ir.Func) *types.Sym {
+	outer := "glob."
+	prefix := "func"
+	gen := &globClosgen
+
+	if outerfunc != nil {
+		if outerfunc.OClosure != nil {
+			prefix = ""
+		}
+
+		outer = ir.FuncName(outerfunc)
+
+		// There may be multiple functions named "_". In those
+		// cases, we can't use their individual Closgens as it
+		// would lead to name clashes.
+		if !ir.IsBlank(outerfunc.Nname) {
+			gen = &outerfunc.Closgen
+		}
+	}
+
+	*gen++
+	return Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int32
+
+// MethodValueWrapper returns the DCLFUNC node representing the
+// wrapper function (*-fm) needed for the given method value. If the
+// wrapper function hasn't been created yet, it's created and
+// added to Target.Decls.
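+//
+// For example, for a method value t.M with receiver type T, the wrapper
+// is named T.M-fm; it captures t and calls the method on it with the
+// wrapper's own arguments.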
+//
+// TODO(mdempsky): Move into walk. This isn't part of type checking.
+func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func {
+	if dot.Op() != ir.OCALLPART {
+		base.Fatalf("MethodValueWrapper: unexpected %v (%v)", dot, dot.Op())
+	}
+
+	t0 := dot.Type()
+	meth := dot.Sel
+	rcvrtype := dot.X.Type()
+	sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
+
+	if sym.Uniq() {
+		return sym.Def.(*ir.Func)
+	}
+	sym.SetUniq(true)
+
+	savecurfn := ir.CurFunc
+	saveLineNo := base.Pos
+	ir.CurFunc = nil
+
+	// Set line number equal to the line number where the method is declared.
+	if pos := dot.Selection.Pos; pos.IsKnown() {
+		base.Pos = pos
+	}
+	// Note: !dot.Selection.Pos.IsKnown() happens for method expressions where
+	// the method is implicitly declared. The Error method of the
+	// built-in error type is one such method.  We leave the line
+	// number at the use of the method expression in this
+	// case. See issue 29389.
+
+	tfn := ir.NewFuncType(base.Pos, nil,
+		NewFuncParams(t0.Params(), true),
+		NewFuncParams(t0.Results(), false))
+
+	fn := DeclFunc(sym, tfn)
+	fn.SetDupok(true)
+	fn.SetNeedctxt(true)
+	fn.SetWrapper(true)
+
+	// Declare and initialize variable holding receiver.
+	ptr := ir.NewNameAt(base.Pos, Lookup(".this"))
+	ptr.Class = ir.PAUTOHEAP
+	ptr.SetType(rcvrtype)
+	ptr.Curfn = fn
+	ptr.SetIsClosureVar(true)
+	ptr.SetByval(true)
+	fn.ClosureVars = append(fn.ClosureVars, ptr)
+
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
+	call.Args = ir.ParamNames(tfn.Type())
+	call.IsDDD = tfn.Type().IsVariadic()
+
+	var body ir.Node = call
+	if t0.NumResults() != 0 {
+		ret := ir.NewReturnStmt(base.Pos, nil)
+		ret.Results = []ir.Node{call}
+		body = ret
+	}
+
+	fn.Body = []ir.Node{body}
+	FinishFuncBody()
+
+	Func(fn)
+	// Need to typecheck the body of the just-generated wrapper.
+	// typecheckslice() requires that Curfn is set when processing an ORETURN.
+	ir.CurFunc = fn
+	Stmts(fn.Body)
+	sym.Def = fn
+	Target.Decls = append(Target.Decls, fn)
+	ir.CurFunc = savecurfn
+	base.Pos = saveLineNo
+
+	return fn
+}
+
+// tcClosure typechecks an OCLOSURE node. It also creates the named
+// function associated with the closure.
+// TODO: This creation of the named function should probably really be done in a
+// separate pass from type-checking.
+func tcClosure(clo *ir.ClosureExpr, top int) {
+	fn := clo.Func
+	// Set current associated iota value, so iota can be used inside
+	// function in ConstSpec, see issue #22344
+	if x := getIotaValue(); x >= 0 {
+		fn.Iota = x
+	}
+
+	fn.SetClosureCalled(top&ctxCallee != 0)
+
+	// Do not typecheck fn twice, otherwise we will end up pushing
+	// fn to Target.Decls multiple times, causing InitLSym to be
+	// called twice. See #30709.
+	if fn.Typecheck() == 1 {
+		clo.SetType(fn.Type())
+		return
+	}
+
+	// Don't give the closure a name or add it to Target.Decls if we are
+	// typechecking an inlined body in ImportedBody(), since we only want
+	// to create the named function when the closure is actually inlined
+	// (and then we force a typecheck explicitly in (*inlsubst).node()).
+	inTypeCheckInl := ir.CurFunc != nil && ir.CurFunc.Body == nil
+	if !inTypeCheckInl {
+		fn.Nname.SetSym(ClosureName(ir.CurFunc))
+		ir.MarkFunc(fn.Nname)
+	}
+	Func(fn)
+	clo.SetType(fn.Type())
+
+	// Type check the body now, but only if we're inside a function.
+	// At top level (in a variable initialization: curfn==nil) we're not
+	// ready to type check code yet; we'll check it later, because the
+	// underlying closure function we create is added to Target.Decls.
+	if ir.CurFunc != nil && clo.Type() != nil {
+		oldfn := ir.CurFunc
+		ir.CurFunc = fn
+		Stmts(fn.Body)
+		ir.CurFunc = oldfn
+	}
+
+	out := 0
+	for _, v := range fn.ClosureVars {
+		if v.Type() == nil {
+			// If v.Type is nil, it means v looked like it was going to be
+			// used in the closure, but isn't. This happens in struct
+			// literals like s{f: x} where we can't distinguish whether f is
+			// a field identifier or expression until resolving s.
+			continue
+		}
+
+		// type check closed variables outside the closure, so that the
+		// outer frame also captures them.
+		Expr(v.Outer)
+
+		fn.ClosureVars[out] = v
+		out++
+	}
+	fn.ClosureVars = fn.ClosureVars[:out]
+
+	if base.Flag.W > 1 {
+		s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn))
+		ir.Dump(s, fn)
+	}
+	if !inTypeCheckInl {
+		// Add the function to Target.Decls only once, when we give it a name.
+		Target.Decls = append(Target.Decls, fn)
+	}
+}
+
+// tcFunc typechecks a function definition.
+// It is to be called by typecheck, not directly.
+// (Call typecheck.Func instead.)
+func tcFunc(n *ir.Func) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("tcFunc", n)(nil)
+	}
+
+	n.Nname = AssignExpr(n.Nname).(*ir.Name)
+	t := n.Nname.Type()
+	if t == nil {
+		return
+	}
+	rcvr := t.Recv()
+	if rcvr != nil && n.Shortname != nil {
+		m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
+		if m == nil {
+			return
+		}
+
+		n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname))
+		Declare(n.Nname, ir.PFUNC)
+	}
+}
+
+// tcCall typechecks an OCALL node.
+func tcCall(n *ir.CallExpr, top int) ir.Node {
+	n.Use = ir.CallUseExpr
+	if top == ctxStmt {
+		n.Use = ir.CallUseStmt
+	}
+	Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
+	n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee)
+	if n.X.Diag() {
+		n.SetDiag(true)
+	}
+
+	l := n.X
+
+	if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 {
+		l := l.(*ir.Name)
+		if n.IsDDD && l.BuiltinOp != ir.OAPPEND {
+			base.Errorf("invalid use of ... with builtin %v", l)
+		}
+
+		// builtin: OLEN, OCAP, etc.
+		switch l.BuiltinOp {
+		default:
+			base.Fatalf("unknown builtin %v", l)
+
+		case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+			n.SetOp(l.BuiltinOp)
+			n.X = nil
+			n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
+			return typecheck(n, top)
+
+		case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
+			typecheckargs(n)
+			fallthrough
+		case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+			arg, ok := needOneArg(n, "%v", n.Op())
+			if !ok {
+				n.SetType(nil)
+				return n
+			}
+			u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg)
+			return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init
+
+		case ir.OCOMPLEX, ir.OCOPY:
+			typecheckargs(n)
+			arg1, arg2, ok := needTwoArgs(n)
+			if !ok {
+				n.SetType(nil)
+				return n
+			}
+			b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2)
+			return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init
+		}
+		panic("unreachable")
+	}
+
+	n.X = DefaultLit(n.X, nil)
+	l = n.X
+	if l.Op() == ir.OTYPE {
+		if n.IsDDD {
+			if !l.Type().Broke() {
+				base.Errorf("invalid use of ... in type conversion to %v", l.Type())
+			}
+			n.SetDiag(true)
+		}
+
+		// pick off before type-checking arguments
+		arg, ok := needOneArg(n, "conversion to %v", l.Type())
+		if !ok {
+			n.SetType(nil)
+			return n
+		}
+
+		n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg)
+		n.SetType(l.Type())
+		return typecheck1(n, top)
+	}
+
+	typecheckargs(n)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	types.CheckSize(t)
+
+	switch l.Op() {
+	case ir.ODOTINTER:
+		n.SetOp(ir.OCALLINTER)
+
+	case ir.ODOTMETH:
+		l := l.(*ir.SelectorExpr)
+		n.SetOp(ir.OCALLMETH)
+
+		// typecheckaste was used here but there wasn't enough
+		// information further down the call chain to know if we
+		// were testing a method receiver for unexported fields.
+		// It isn't necessary, so just do a sanity check.
+		tp := t.Recv().Type
+
+		if l.X == nil || !types.Identical(l.X.Type(), tp) {
+			base.Fatalf("method receiver")
+		}
+
+	default:
+		n.SetOp(ir.OCALLFUNC)
+		if t.Kind() != types.TFUNC {
+			if o := ir.Orig(l); o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+				// be more specific when the non-function
+				// name matches a predeclared function
+				base.Errorf("cannot call non-function %L, declared at %s",
+					l, base.FmtPos(o.Name().Pos()))
+			} else {
+				base.Errorf("cannot call non-function %L", l)
+			}
+			n.SetType(nil)
+			return n
+		}
+	}
+
+	typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+	if t.NumResults() == 0 {
+		return n
+	}
+	if t.NumResults() == 1 {
+		n.SetType(l.Type().Results().Field(0).Type)
+
+		if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
+			if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+				// Emit code for runtime.getg() directly instead of calling function.
+				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+				// so that the ordering pass can make sure to preserve the semantics of the original code
+				// (in particular, the exact time of the function call) by introducing temporaries.
+				// In this case, we know getg() always returns the same result within a given function
+				// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+				n.SetOp(ir.OGETG)
+			}
+		}
+		return n
+	}
+
+	// multiple return
+	if top&(ctxMultiOK|ctxStmt) == 0 {
+		base.Errorf("multiple-value %v() in single-value context", l)
+		return n
+	}
+
+	n.SetType(l.Type().Results())
+	return n
+}
+
+// tcAppend typechecks an OAPPEND node.
+func tcAppend(n *ir.CallExpr) ir.Node {
+	typecheckargs(n)
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing arguments to append")
+		n.SetType(nil)
+		return n
+	}
+
+	t := args[0].Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(t)
+	if !t.IsSlice() {
+		if ir.IsNil(args[0]) {
+			base.Errorf("first argument to append must be typed slice; have untyped nil")
+			n.SetType(nil)
+			return n
+		}
+
+		base.Errorf("first argument to append must be slice; have %L", t)
+		n.SetType(nil)
+		return n
+	}
+
+	if n.IsDDD {
+		if len(args) == 1 {
+			base.Errorf("cannot use ... on first argument to append")
+			n.SetType(nil)
+			return n
+		}
+
+		if len(args) != 2 {
+			base.Errorf("too many arguments to append")
+			n.SetType(nil)
+			return n
+		}
+
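+		// Special case: appending a string to a []byte, as in
+		// append(b, "hello"...).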
+		if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() {
+			args[1] = DefaultLit(args[1], types.Types[types.TSTRING])
+			return n
+		}
+
+		args[1] = AssignConv(args[1], t.Underlying(), "append")
+		return n
+	}
+
+	as := args[1:]
+	for i, n := range as {
+		if n.Type() == nil {
+			continue
+		}
+		as[i] = AssignConv(n, t.Elem(), "append")
+		types.CheckSize(as[i].Type()) // ensure width is calculated for backend
+	}
+	return n
+}
+
+// tcClose typechecks an OCLOSE node.
+func tcClose(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
+		n.SetType(nil)
+		return n
+	}
+
+	if !t.ChanDir().CanSend() {
+		base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcComplex typechecks an OCOMPLEX node.
+func tcComplex(n *ir.BinaryExpr) ir.Node {
+	l := Expr(n.X)
+	r := Expr(n.Y)
+	if l.Type() == nil || r.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	l, r = defaultlit2(l, r, false)
+	if l.Type() == nil || r.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	n.X = l
+	n.Y = r
+
+	if !types.Identical(l.Type(), r.Type()) {
+		base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+		n.SetType(nil)
+		return n
+	}
+
+	var t *types.Type
+	switch l.Type().Kind() {
+	default:
+		base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+		n.SetType(nil)
+		return n
+
+	case types.TIDEAL:
+		t = types.UntypedComplex
+
+	case types.TFLOAT32:
+		t = types.Types[types.TCOMPLEX64]
+
+	case types.TFLOAT64:
+		t = types.Types[types.TCOMPLEX128]
+	}
+	n.SetType(t)
+	return n
+}
+
+// tcCopy typechecks an OCOPY node.
+func tcCopy(n *ir.BinaryExpr) ir.Node {
+	n.SetType(types.Types[types.TINT])
+	n.X = Expr(n.X)
+	n.X = DefaultLit(n.X, nil)
+	n.Y = Expr(n.Y)
+	n.Y = DefaultLit(n.Y, nil)
+	if n.X.Type() == nil || n.Y.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	// copy([]byte, string)
+	if n.X.Type().IsSlice() && n.Y.Type().IsString() {
+		if types.Identical(n.X.Type().Elem(), types.ByteType) {
+			return n
+		}
+		base.Errorf("arguments to copy have different element types: %L and string", n.X.Type())
+		n.SetType(nil)
+		return n
+	}
+
+	if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() {
+		if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() {
+			base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type())
+		} else if !n.X.Type().IsSlice() {
+			base.Errorf("first argument to copy should be slice; have %L", n.X.Type())
+		} else {
+			base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type())
+		}
+		n.SetType(nil)
+		return n
+	}
+
+	if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) {
+		base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type())
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcDelete typechecks an ODELETE node.
+func tcDelete(n *ir.CallExpr) ir.Node {
+	typecheckargs(n)
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing arguments to delete")
+		n.SetType(nil)
+		return n
+	}
+
+	if len(args) == 1 {
+		base.Errorf("missing second (key) argument to delete")
+		n.SetType(nil)
+		return n
+	}
+
+	if len(args) != 2 {
+		base.Errorf("too many arguments to delete")
+		n.SetType(nil)
+		return n
+	}
+
+	l := args[0]
+	r := args[1]
+	if l.Type() != nil && !l.Type().IsMap() {
+		base.Errorf("first argument to delete must be map; have %L", l.Type())
+		n.SetType(nil)
+		return n
+	}
+
+	args[1] = AssignConv(r, l.Type().Key(), "delete")
+	return n
+}
+
+// tcMake typechecks an OMAKE node.
+func tcMake(n *ir.CallExpr) ir.Node {
+	args := n.Args
+	if len(args) == 0 {
+		base.Errorf("missing argument to make")
+		n.SetType(nil)
+		return n
+	}
+
+	n.Args = nil
+	l := args[0]
+	l = typecheck(l, ctxType)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	i := 1
+	var nn ir.Node
+	switch t.Kind() {
+	default:
+		base.Errorf("cannot make type %v", t)
+		n.SetType(nil)
+		return n
+
+	case types.TSLICE:
+		if i >= len(args) {
+			base.Errorf("missing len argument to make(%v)", t)
+			n.SetType(nil)
+			return n
+		}
+
+		l = args[i]
+		i++
+		l = Expr(l)
+		var r ir.Node
+		if i < len(args) {
+			r = args[i]
+			i++
+			r = Expr(r)
+		}
+
+		if l.Type() == nil || (r != nil && r.Type() == nil) {
+			n.SetType(nil)
+			return n
+		}
+		if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
+			n.SetType(nil)
+			return n
+		}
+		if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+			base.Errorf("len larger than cap in make(%v)", t)
+			n.SetType(nil)
+			return n
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r)
+
+	case types.TMAP:
+		if i < len(args) {
+			l = args[i]
+			i++
+			l = Expr(l)
+			l = DefaultLit(l, types.Types[types.TINT])
+			if l.Type() == nil {
+				n.SetType(nil)
+				return n
+			}
+			if !checkmake(t, "size", &l) {
+				n.SetType(nil)
+				return n
+			}
+		} else {
+			l = ir.NewInt(0)
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil)
+		nn.SetEsc(n.Esc())
+
+	case types.TCHAN:
+		l = nil
+		if i < len(args) {
+			l = args[i]
+			i++
+			l = Expr(l)
+			l = DefaultLit(l, types.Types[types.TINT])
+			if l.Type() == nil {
+				n.SetType(nil)
+				return n
+			}
+			if !checkmake(t, "buffer", &l) {
+				n.SetType(nil)
+				return n
+			}
+		} else {
+			l = ir.NewInt(0)
+		}
+		nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil)
+	}
+
+	if i < len(args) {
+		base.Errorf("too many arguments to make(%v)", t)
+		n.SetType(nil)
+		return n
+	}
+
+	nn.SetType(t)
+	return nn
+}
+
+// tcMakeSliceCopy typechecks an OMAKESLICECOPY node.
+func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node {
+	// Errors here are Fatalf instead of Errorf because only the compiler
+	// can construct an OMAKESLICECOPY node.
+	// Components used in OMAKESLICECOPY that are supplied by parsed source code
+	// have already been typechecked in OMAKE and OCOPY earlier.
+	t := n.Type()
+
+	if t == nil {
+		base.Fatalf("no type specified for OMAKESLICECOPY")
+	}
+
+	if !t.IsSlice() {
+		base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
+	}
+
+	if n.Len == nil {
+		base.Fatalf("missing len argument for OMAKESLICECOPY")
+	}
+
+	if n.Cap == nil {
+		base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+	}
+
+	n.Len = Expr(n.Len)
+	n.Cap = Expr(n.Cap)
+
+	n.Len = DefaultLit(n.Len, types.Types[types.TINT])
+
+	if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+		base.Errorf("non-integer len argument in OMAKESLICECOPY")
+	}
+
+	if ir.IsConst(n.Len, constant.Int) {
+		if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) {
+			base.Fatalf("len for OMAKESLICECOPY too large")
+		}
+		if constant.Sign(n.Len.Val()) < 0 {
+			base.Fatalf("len for OMAKESLICECOPY must be non-negative")
+		}
+	}
+	return n
+}
+
+// tcNew typechecks an ONEW node.
+func tcNew(n *ir.UnaryExpr) ir.Node {
+	if n.X == nil {
+		// Fatalf because the OCALL above checked for us,
+		// so this must be an internally-generated mistake.
+		base.Fatalf("missing argument to new")
+	}
+	l := n.X
+	l = typecheck(l, ctxType)
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+	n.X = l
+	n.SetType(types.NewPtr(t))
+	return n
+}
+
+// tcPanic typechecks an OPANIC node.
+func tcPanic(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic")
+	if n.X.Type() == nil {
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcPrint typechecks an OPRINT or OPRINTN node.
+func tcPrint(n *ir.CallExpr) ir.Node {
+	typecheckargs(n)
+	ls := n.Args
+	for i1, n1 := range ls {
+		// Special case for print: int constant is int64, not int.
+		if ir.IsConst(n1, constant.Int) {
+			ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64])
+		} else {
+			ls[i1] = DefaultLit(ls[i1], nil)
+		}
+	}
+	return n
+}
+
+// tcRealImag typechecks an OREAL or OIMAG node.
+func tcRealImag(n *ir.UnaryExpr) ir.Node {
+	n.X = Expr(n.X)
+	l := n.X
+	t := l.Type()
+	if t == nil {
+		n.SetType(nil)
+		return n
+	}
+
+	// Determine result type.
+	switch t.Kind() {
+	case types.TIDEAL:
+		n.SetType(types.UntypedFloat)
+	case types.TCOMPLEX64:
+		n.SetType(types.Types[types.TFLOAT32])
+	case types.TCOMPLEX128:
+		n.SetType(types.Types[types.TFLOAT64])
+	default:
+		base.Errorf("invalid argument %L for %v", l, n.Op())
+		n.SetType(nil)
+		return n
+	}
+	return n
+}
+
+// tcRecover typechecks an ORECOVER node.
+func tcRecover(n *ir.CallExpr) ir.Node {
+	if len(n.Args) != 0 {
+		base.Errorf("too many arguments to recover")
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetType(types.Types[types.TINTER])
+	return n
+}
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
new file mode 100644
index 0000000..6fab74e
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -0,0 +1,1673 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+//     Header struct {
+//         Tag        byte   // 'i'
+//         Version    uvarint
+//         StringSize uvarint
+//         DataSize   uvarint
+//     }
+//
+//     Strings [StringSize]byte
+//     Data    [DataSize]byte
+//
+//     MainIndex []struct{
+//         PkgPath   stringOff
+//         PkgName   stringOff
+//         PkgHeight uvarint
+//
+//         Decls []struct{
+//             Name   stringOff
+//             Offset declOff
+//         }
+//     }
+//
+//     Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+//     Len   uvarint
+//     Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
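+//
+// For example, decoding a string reference might look like this
+// (a minimal sketch; the reader type and its helpers are illustrative,
+// not the actual decoder):
+//
+//     off := r.uint64()            // stringOff
+//     sr := r.p.stringReaderAt(off)
+//     n := sr.uint64()             // string length
+//     s := string(sr.bytes(n))     // the string value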
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+//     type Var struct {
+//         Tag  byte // 'V'
+//         Pos  Pos
+//         Type typeOff
+//     }
+//
+//     type Func struct {
+//         Tag       byte // 'F'
+//         Pos       Pos
+//         Signature Signature
+//     }
+//
+//     type Const struct {
+//         Tag   byte // 'C'
+//         Pos   Pos
+//         Value Value
+//     }
+//
+//     type Type struct {
+//         Tag        byte // 'T'
+//         Pos        Pos
+//         Underlying typeOff
+//
+//         Methods []struct{  // omitted if Underlying is an interface type
+//             Pos       Pos
+//             Name      stringOff
+//             Recv      Param
+//             Signature Signature
+//         }
+//     }
+//
+//     type Alias struct {
+//         Tag  byte // 'A'
+//         Pos  Pos
+//         Type typeOff
+//     }
+//
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
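+//
+// For example, a typeOff of 6 denotes the predeclared type at index 6,
+// while a typeOff of predeclReserved+40 denotes the type descriptor at
+// offset 40 in the Data section.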
+//
+// Value means a type and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are nine kinds of type descriptors, distinguished by an itag:
+//
+//     type DefinedType struct {
+//         Tag     itag // definedType
+//         Name    stringOff
+//         PkgPath stringOff
+//     }
+//
+//     type PointerType struct {
+//         Tag  itag // pointerType
+//         Elem typeOff
+//     }
+//
+//     type SliceType struct {
+//         Tag  itag // sliceType
+//         Elem typeOff
+//     }
+//
+//     type ArrayType struct {
+//         Tag  itag // arrayType
+//         Len  uint64
+//         Elem typeOff
+//     }
+//
+//     type ChanType struct {
+//         Tag  itag   // chanType
+//         Dir  uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+//         Elem typeOff
+//     }
+//
+//     type MapType struct {
+//         Tag  itag // mapType
+//         Key  typeOff
+//         Elem typeOff
+//     }
+//
+//     type FuncType struct {
+//         Tag       itag // signatureType
+//         PkgPath   stringOff
+//         Signature Signature
+//     }
+//
+//     type StructType struct {
+//         Tag     itag // structType
+//         PkgPath stringOff
+//         Fields []struct {
+//             Pos      Pos
+//             Name     stringOff
+//             Type     typeOff
+//             Embedded bool
+//             Note     stringOff
+//         }
+//     }
+//
+//     type InterfaceType struct {
+//         Tag     itag // interfaceType
+//         PkgPath stringOff
+//         Embeddeds []struct {
+//             Pos  Pos
+//             Type typeOff
+//         }
+//         Methods []struct {
+//             Pos       Pos
+//             Name      stringOff
+//             Signature Signature
+//         }
+//     }
+//
+//
+//     type Signature struct {
+//         Params   []Param
+//         Results  []Param
+//         Variadic bool  // omitted if Params is empty
+//     }
+//
+//     type Param struct {
+//         Pos  Pos
+//         Name stringOff
+//         Type typeOff
+//     }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+//
+//
+// Compiler-specific details.
+//
+// cmd/compile writes out a second index for inline bodies and also
+// appends additional compiler-specific details after declarations.
+// Third-party tools are not expected to depend on these details and
+// they're expected to change much more rapidly, so they're omitted
+// here. See exportWriter's varExt/funcExt/etc methods for details.
+
+package typecheck
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"io"
+	"math/big"
+	"sort"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/goobj"
+	"cmd/internal/src"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 1: added column details to Pos
+// 0: Go1.11 encoding
+const iexportVersion = 1
+
+// predeclReserved is the number of type offsets reserved for types
+// implicitly declared in the universe block.
+const predeclReserved = 32
+
+// An itag distinguishes the kind of type that was written into the
+// indexed export format.
+type itag uint64
+
+const (
+	// Types
+	definedType itag = iota
+	pointerType
+	sliceType
+	arrayType
+	chanType
+	mapType
+	signatureType
+	structType
+	interfaceType
+)
+
+func WriteExports(out *bufio.Writer) {
+	p := iexporter{
+		allPkgs:     map[*types.Pkg]bool{},
+		stringIndex: map[string]uint64{},
+		declIndex:   map[*types.Sym]uint64{},
+		inlineIndex: map[*types.Sym]uint64{},
+		typIndex:    map[*types.Type]uint64{},
+	}
+
+	for i, pt := range predeclared() {
+		p.typIndex[pt] = uint64(i)
+	}
+	if len(p.typIndex) > predeclReserved {
+		base.Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
+	}
+
+	// Initialize work queue with exported declarations.
+	for _, n := range Target.Exports {
+		p.pushDecl(n)
+	}
+
+	// Loop until no more work. We use a queue because while
+	// writing out inline bodies, we may discover additional
+	// declarations that are needed.
+	for !p.declTodo.Empty() {
+		p.doDecl(p.declTodo.PopLeft())
+	}
+
+	// Append indices to data0 section.
+	dataLen := uint64(p.data0.Len())
+	w := p.newWriter()
+	w.writeIndex(p.declIndex, true)
+	w.writeIndex(p.inlineIndex, false)
+	w.flush()
+
+	if *base.Flag.LowerV {
+		fmt.Printf("export: hdr strings %v, data %v, index %v\n", p.strings.Len(), dataLen, p.data0.Len())
+	}
+
+	// Assemble header.
+	var hdr intWriter
+	hdr.WriteByte('i')
+	hdr.uint64(iexportVersion)
+	hdr.uint64(uint64(p.strings.Len()))
+	hdr.uint64(dataLen)
+
+	// Flush output.
+	h := md5.New()
+	wr := io.MultiWriter(out, h)
+	io.Copy(wr, &hdr)
+	io.Copy(wr, &p.strings)
+	io.Copy(wr, &p.data0)
+
+	// Add fingerprint (used by linker object file).
	// Attach it to the end, so tools (e.g. gcimporter) that don't care
	// about it can simply ignore it.
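	// Only the leading bytes of the MD5 sum that fit in the
	// fingerprint are kept.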
+	copy(base.Ctxt.Fingerprint[:], h.Sum(nil)[:])
+	out.Write(base.Ctxt.Fingerprint[:])
+}
+
+// writeIndex writes out a symbol index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) {
+	// Build a map from packages to symbols from that package.
+	pkgSyms := map[*types.Pkg][]*types.Sym{}
+
+	// For the main index, make sure to include every package that
+	// we reference, even if we're not exporting (or reexporting)
+	// any symbols from it.
+	if mainIndex {
+		pkgSyms[types.LocalPkg] = nil
+		for pkg := range w.p.allPkgs {
+			pkgSyms[pkg] = nil
+		}
+	}
+
+	// Group symbols by package.
+	for sym := range index {
+		pkgSyms[sym.Pkg] = append(pkgSyms[sym.Pkg], sym)
+	}
+
+	// Sort packages by path.
+	var pkgs []*types.Pkg
+	for pkg := range pkgSyms {
+		pkgs = append(pkgs, pkg)
+	}
+	sort.Slice(pkgs, func(i, j int) bool {
+		return pkgs[i].Path < pkgs[j].Path
+	})
+
+	w.uint64(uint64(len(pkgs)))
+	for _, pkg := range pkgs {
+		w.string(pkg.Path)
+		if mainIndex {
+			w.string(pkg.Name)
+			w.uint64(uint64(pkg.Height))
+		}
+
+		// Sort symbols within a package by name.
+		syms := pkgSyms[pkg]
+		sort.Slice(syms, func(i, j int) bool {
+			return syms[i].Name < syms[j].Name
+		})
+
+		w.uint64(uint64(len(syms)))
+		for _, sym := range syms {
+			w.string(sym.Name)
+			w.uint64(index[sym])
+		}
+	}
+}
+
+type iexporter struct {
+	// allPkgs tracks all packages that have been referenced by
+	// the export data, so we can ensure to include them in the
+	// main index.
+	allPkgs map[*types.Pkg]bool
+
+	declTodo ir.NameQueue
+
+	strings     intWriter
+	stringIndex map[string]uint64
+
+	data0       intWriter
+	declIndex   map[*types.Sym]uint64
+	inlineIndex map[*types.Sym]uint64
+	typIndex    map[*types.Type]uint64
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+	off, ok := p.stringIndex[s]
+	if !ok {
+		off = uint64(p.strings.Len())
+		p.stringIndex[s] = off
+
+		if *base.Flag.LowerV {
+			fmt.Printf("export: str %v %.40q\n", off, s)
+		}
+
+		p.strings.uint64(uint64(len(s)))
+		p.strings.WriteString(s)
+	}
+	return off
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(n *ir.Name) {
+	if n.Sym() == nil || n.Sym().Def != n && n.Op() != ir.OTYPE {
+		base.Fatalf("weird Sym: %v, %v", n, n.Sym())
+	}
+
+	// Don't export predeclared declarations.
+	if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == ir.Pkgs.Unsafe {
+		return
+	}
+
+	if _, ok := p.declIndex[n.Sym()]; ok {
+		return
+	}
+
+	p.declIndex[n.Sym()] = ^uint64(0) // mark n present in work queue
+	p.declTodo.PushRight(n)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+	p *iexporter
+
+	data       intWriter
+	currPkg    *types.Pkg
+	prevFile   string
+	prevLine   int64
+	prevColumn int64
+
+	// dclIndex maps function-scoped declarations to an int used to refer to
+	// them later in the function. For local variables/params, the int is
+	// non-negative and in order of the appearance in the Func's Dcl list. For
+	// closure variables, the index is negative starting at -2.
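	// For example, with no closures encountered yet, the first closure
	// variable of a function gets index -2 and the second -3; -1 is
	// reserved for the blank identifier.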
+	dclIndex           map[*ir.Name]int
+	maxDclIndex        int
+	maxClosureVarIndex int
+}
+
+func (p *iexporter) doDecl(n *ir.Name) {
+	w := p.newWriter()
+	w.setPkg(n.Sym().Pkg, false)
+
+	switch n.Op() {
+	case ir.ONAME:
+		switch n.Class {
+		case ir.PEXTERN:
+			// Variable.
+			w.tag('V')
+			w.pos(n.Pos())
+			w.typ(n.Type())
+			w.varExt(n)
+
+		case ir.PFUNC:
+			if ir.IsMethod(n) {
+				base.Fatalf("unexpected method: %v", n)
+			}
+
+			// Function.
+			w.tag('F')
+			w.pos(n.Pos())
+			w.signature(n.Type())
+			w.funcExt(n)
+
+		default:
+			base.Fatalf("unexpected class: %v, %v", n, n.Class)
+		}
+
+	case ir.OLITERAL:
+		// TODO(mdempsky): Extend check to all declarations.
+		if n.Typecheck() == 0 {
+			base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
+		}
+
+		// Constant.
+		w.tag('C')
+		w.pos(n.Pos())
+		w.value(n.Type(), n.Val())
+		w.constExt(n)
+
+	case ir.OTYPE:
+		if types.IsDotAlias(n.Sym()) {
+			// Alias.
+			w.tag('A')
+			w.pos(n.Pos())
+			w.typ(n.Type())
+			break
+		}
+
+		// Defined type.
+		w.tag('T')
+		w.pos(n.Pos())
+
+		underlying := n.Type().Underlying()
+		if underlying == types.ErrorType.Underlying() {
+			// For "type T error", use error as the
+			// underlying type instead of error's own
+			// underlying anonymous interface. This
+			// ensures consistency with how importers may
+			// declare error (e.g., go/types uses nil Pkg
+			// for predeclared objects).
+			underlying = types.ErrorType
+		}
+		w.typ(underlying)
+
+		t := n.Type()
+		if t.IsInterface() {
+			w.typeExt(t)
+			break
+		}
+
+		ms := t.Methods()
+		w.uint64(uint64(ms.Len()))
+		for _, m := range ms.Slice() {
+			w.pos(m.Pos)
+			w.selector(m.Sym)
+			w.param(m.Type.Recv())
+			w.signature(m.Type)
+		}
+
+		w.typeExt(t)
+		for _, m := range ms.Slice() {
+			w.methExt(m)
+		}
+
+	default:
+		base.Fatalf("unexpected node: %v", n)
+	}
+
+	w.finish("dcl", p.declIndex, n.Sym())
+}
+
+func (w *exportWriter) tag(tag byte) {
+	w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) finish(what string, index map[*types.Sym]uint64, sym *types.Sym) {
+	off := w.flush()
+	if *base.Flag.LowerV {
+		fmt.Printf("export: %v %v %v\n", what, off, sym)
+	}
+	index[sym] = off
+}
+
+func (p *iexporter) doInline(f *ir.Name) {
+	w := p.newWriter()
+	w.setPkg(fnpkg(f), false)
+
+	w.dclIndex = make(map[*ir.Name]int, len(f.Func.Inl.Dcl))
+	w.funcBody(f.Func)
+
+	w.finish("inl", p.inlineIndex, f.Sym())
+}
+
+func (w *exportWriter) pos(pos src.XPos) {
+	p := base.Ctxt.PosTable.Pos(pos)
+	file := p.Base().AbsFilename()
+	line := int64(p.RelLine())
+	column := int64(p.RelCol())
+
+	// Encode position relative to the last position: column
+	// delta, then line delta, then file name. We reserve the
+	// bottom bit of the column and line deltas to encode whether
+	// the remaining fields are present.
+	//
+	// Note: Because data objects may be read out of order (or not
+	// at all), we can only apply delta encoding within a single
+	// object. This is handled implicitly by tracking prevFile,
+	// prevLine, and prevColumn as fields of exportWriter.
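	//
	// For example, a position in the same file and on the same line as
	// the previous one, but two columns later, is encoded as the single
	// delta value 4: the column delta 2 shifted left, with the bit that
	// would signal a following line delta left clear.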
+
+	deltaColumn := (column - w.prevColumn) << 1
+	deltaLine := (line - w.prevLine) << 1
+
+	if file != w.prevFile {
+		deltaLine |= 1
+	}
+	if deltaLine != 0 {
+		deltaColumn |= 1
+	}
+
+	w.int64(deltaColumn)
+	if deltaColumn&1 != 0 {
+		w.int64(deltaLine)
+		if deltaLine&1 != 0 {
+			w.string(file)
+		}
+	}
+
+	w.prevFile = file
+	w.prevLine = line
+	w.prevColumn = column
+}
+
+func (w *exportWriter) pkg(pkg *types.Pkg) {
+	// TODO(mdempsky): Add flag to types.Pkg to mark pseudo-packages.
+	if pkg == ir.Pkgs.Go {
+		base.Fatalf("export of pseudo-package: %q", pkg.Path)
+	}
+
+	// Ensure any referenced packages are declared in the main index.
+	w.p.allPkgs[pkg] = true
+
+	w.string(pkg.Path)
+}
+
+func (w *exportWriter) qualifiedIdent(n ir.Node) {
+	// Ensure any referenced declarations are written out too.
+	w.p.pushDecl(n.Name())
+
+	s := n.Sym()
+	w.string(s.Name)
+	w.pkg(s.Pkg)
+}
+
+func (w *exportWriter) selector(s *types.Sym) {
+	if w.currPkg == nil {
+		base.Fatalf("missing currPkg")
+	}
+
+	pkg := w.currPkg
+	if types.IsExported(s.Name) {
+		pkg = types.LocalPkg
+	}
+	if s.Pkg != pkg {
+		base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
+	}
+
+	w.string(s.Name)
+}
+
+func (w *exportWriter) typ(t *types.Type) {
+	w.data.uint64(w.p.typOff(t))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+	return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+	off := uint64(w.p.data0.Len())
+	io.Copy(&w.p.data0, &w.data)
+	return off
+}
+
+func (p *iexporter) typOff(t *types.Type) uint64 {
+	off, ok := p.typIndex[t]
+	if !ok {
+		w := p.newWriter()
+		w.doTyp(t)
+		rawOff := w.flush()
+		if *base.Flag.LowerV {
+			fmt.Printf("export: typ %v %v\n", rawOff, t)
+		}
+		off = predeclReserved + rawOff
+		p.typIndex[t] = off
+	}
+	return off
+}
+
+func (w *exportWriter) startType(k itag) {
+	w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t *types.Type) {
+	if t.Sym() != nil {
+		if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == ir.Pkgs.Unsafe {
+			base.Fatalf("builtin type missing from typIndex: %v", t)
+		}
+
+		w.startType(definedType)
+		w.qualifiedIdent(t.Obj().(*ir.Name))
+		return
+	}
+
+	switch t.Kind() {
+	case types.TPTR:
+		w.startType(pointerType)
+		w.typ(t.Elem())
+
+	case types.TSLICE:
+		w.startType(sliceType)
+		w.typ(t.Elem())
+
+	case types.TARRAY:
+		w.startType(arrayType)
+		w.uint64(uint64(t.NumElem()))
+		w.typ(t.Elem())
+
+	case types.TCHAN:
+		w.startType(chanType)
+		w.uint64(uint64(t.ChanDir()))
+		w.typ(t.Elem())
+
+	case types.TMAP:
+		w.startType(mapType)
+		w.typ(t.Key())
+		w.typ(t.Elem())
+
+	case types.TFUNC:
+		w.startType(signatureType)
+		w.setPkg(t.Pkg(), true)
+		w.signature(t)
+
+	case types.TSTRUCT:
+		w.startType(structType)
+		w.setPkg(t.Pkg(), true)
+
+		w.uint64(uint64(t.NumFields()))
+		for _, f := range t.FieldSlice() {
+			w.pos(f.Pos)
+			w.selector(f.Sym)
+			w.typ(f.Type)
+			w.bool(f.Embedded != 0)
+			w.string(f.Note)
+		}
+
+	case types.TINTER:
+		var embeddeds, methods []*types.Field
+		for _, m := range t.Methods().Slice() {
+			if m.Sym != nil {
+				methods = append(methods, m)
+			} else {
+				embeddeds = append(embeddeds, m)
+			}
+		}
+
+		w.startType(interfaceType)
+		w.setPkg(t.Pkg(), true)
+
+		w.uint64(uint64(len(embeddeds)))
+		for _, f := range embeddeds {
+			w.pos(f.Pos)
+			w.typ(f.Type)
+		}
+
+		w.uint64(uint64(len(methods)))
+		for _, f := range methods {
+			w.pos(f.Pos)
+			w.selector(f.Sym)
+			w.signature(f.Type)
+		}
+
+	default:
+		base.Fatalf("unexpected type: %v", t)
+	}
+}
+
+func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) {
+	if pkg == types.NoPkg {
+		base.Fatalf("missing pkg")
+	}
+
+	if write {
+		w.pkg(pkg)
+	}
+
+	w.currPkg = pkg
+}
+
+func (w *exportWriter) signature(t *types.Type) {
+	w.paramList(t.Params().FieldSlice())
+	w.paramList(t.Results().FieldSlice())
+	if n := t.Params().NumFields(); n > 0 {
+		w.bool(t.Params().Field(n - 1).IsDDD())
+	}
+}
+
+func (w *exportWriter) paramList(fs []*types.Field) {
+	w.uint64(uint64(len(fs)))
+	for _, f := range fs {
+		w.param(f)
+	}
+}
+
+func (w *exportWriter) param(f *types.Field) {
+	w.pos(f.Pos)
+	w.localIdent(types.OrigSym(f.Sym))
+	w.typ(f.Type)
+}
+
+func constTypeOf(typ *types.Type) constant.Kind {
+	switch typ {
+	case types.UntypedInt, types.UntypedRune:
+		return constant.Int
+	case types.UntypedFloat:
+		return constant.Float
+	case types.UntypedComplex:
+		return constant.Complex
+	}
+
+	switch typ.Kind() {
+	case types.TBOOL:
+		return constant.Bool
+	case types.TSTRING:
+		return constant.String
+	case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+		types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+		return constant.Int
+	case types.TFLOAT32, types.TFLOAT64:
+		return constant.Float
+	case types.TCOMPLEX64, types.TCOMPLEX128:
+		return constant.Complex
+	}
+
+	base.Fatalf("unexpected constant type: %v", typ)
+	return 0
+}
+
+func (w *exportWriter) value(typ *types.Type, v constant.Value) {
+	ir.AssertValidTypeForConst(typ, v)
+	w.typ(typ)
+
	// Each type has only one admissible constant representation,
	// so we could type switch directly on v here. However,
	// switching on the type increases symmetry with import logic
	// and provides a useful consistency check.
+
+	switch constTypeOf(typ) {
+	case constant.Bool:
+		w.bool(constant.BoolVal(v))
+	case constant.String:
+		w.string(constant.StringVal(v))
+	case constant.Int:
+		w.mpint(v, typ)
+	case constant.Float:
+		w.mpfloat(v, typ)
+	case constant.Complex:
+		w.mpfloat(constant.Real(v), typ)
+		w.mpfloat(constant.Imag(v), typ)
+	}
+}
+
+func intSize(typ *types.Type) (signed bool, maxBytes uint) {
+	if typ.IsUntyped() {
+		return true, ir.ConstPrec / 8
+	}
+
+	switch typ.Kind() {
+	case types.TFLOAT32, types.TCOMPLEX64:
+		return true, 3
+	case types.TFLOAT64, types.TCOMPLEX128:
+		return true, 7
+	}
+
+	signed = typ.IsSigned()
+	maxBytes = uint(typ.Size())
+
+	// The go/types API doesn't expose sizes to importers, so they
+	// don't know how big these types are.
+	switch typ.Kind() {
+	case types.TINT, types.TUINT, types.TUINTPTR:
+		maxBytes = 8
+	}
+
+	return
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2, while bytes 255, 254, and 253 indicate that a
+// 1-, 2-, or 3-byte big-endian byte string follows.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except that small values use zig-zag encoding and,
+// for large values, the bottom bit of the length prefix byte is
+// reserved as a sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
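+//
+// For example, for a signed 8-byte type (so maxSmall = 240): the value
+// 1 is zig-zag encoded as the single byte 2 and -1 as the single byte
+// 1, while 1000 is written as the prefix byte 252 (two bytes follow,
+// sign bit clear) followed by the big-endian bytes 0x03 0xE8.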
+func (w *exportWriter) mpint(x constant.Value, typ *types.Type) {
+	signed, maxBytes := intSize(typ)
+
+	negative := constant.Sign(x) < 0
+	if !signed && negative {
+		base.Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+	}
+
+	b := constant.Bytes(x) // little endian
+	for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
+		b[i], b[j] = b[j], b[i]
+	}
+
+	if len(b) > 0 && b[0] == 0 {
+		base.Fatalf("leading zeros")
+	}
+	if uint(len(b)) > maxBytes {
+		base.Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
+	}
+
+	maxSmall := 256 - maxBytes
+	if signed {
+		maxSmall = 256 - 2*maxBytes
+	}
+	if maxBytes == 1 {
+		maxSmall = 256
+	}
+
+	// Check if x can use small value encoding.
+	if len(b) <= 1 {
+		var ux uint
+		if len(b) == 1 {
+			ux = uint(b[0])
+		}
+		if signed {
+			ux <<= 1
+			if negative {
+				ux--
+			}
+		}
+		if ux < maxSmall {
+			w.data.WriteByte(byte(ux))
+			return
+		}
+	}
+
+	n := 256 - uint(len(b))
+	if signed {
+		n = 256 - 2*uint(len(b))
+		if negative {
+			n |= 1
+		}
+	}
+	if n < maxSmall || n >= 256 {
+		base.Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
+	}
+
+	w.data.WriteByte(byte(n))
+	w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
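+//
+// For example, 1.5 decomposes as 3 × 2**-1, so it is written as the
+// mantissa 3 (as an mpint) followed by the exponent -1.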
+func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) {
+	f := ir.BigFloat(v)
+	if f.IsInf() {
+		base.Fatalf("infinite constant")
+	}
+
+	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+	var mant big.Float
+	exp := int64(f.MantExp(&mant))
+
+	// Scale so that mant is an integer.
+	prec := mant.MinPrec()
+	mant.SetMantExp(&mant, int(prec))
+	exp -= int64(prec)
+
+	manti, acc := mant.Int(nil)
+	if acc != big.Exact {
+		base.Fatalf("mantissa scaling failed for %f (%s)", f, acc)
+	}
+	w.mpint(constant.Make(manti), typ)
+	if manti.Sign() != 0 {
+		w.int64(exp)
+	}
+}
+
+func (w *exportWriter) mprat(v constant.Value) {
+	r, ok := constant.Val(v).(*big.Rat)
+	if !w.bool(ok) {
+		return
+	}
+	// TODO(mdempsky): Come up with a more efficient binary
+	// encoding before bumping iexportVersion to expose to
+	// gcimporter.
+	w.string(r.String())
+}
+
+func (w *exportWriter) bool(b bool) bool {
+	var x uint64
+	if b {
+		x = 1
+	}
+	w.uint64(x)
+	return b
+}
+
+func (w *exportWriter) int64(x int64)   { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+// Compiler-specific extensions.
+
+func (w *exportWriter) constExt(n *ir.Name) {
+	// Internally, we now represent untyped float and complex
+	// constants with infinite-precision rational numbers using
+	// go/constant, but the "public" export data format known to
+	// gcimporter only supports 512-bit floating point constants.
+	// In case rationals turn out to be a bad idea and we want to
+	// switch back to fixed-precision constants, for now we
+	// continue writing out the 512-bit truncation in the public
+	// data section, and write the exact, rational constant in the
+	// compiler's extension data. Also, we only need to worry
+	// about exporting rationals for declared constants, because
+	// constants that appear in an expression will already have
+	// been coerced to a concrete, fixed-precision type.
+	//
+	// Eventually, assuming we stick with using rationals, we
+	// should bump iexportVersion to support rationals, and do the
+	// whole gcimporter update song-and-dance.
+	//
+	// TODO(mdempsky): Prepare vocals for that.
+
+	switch n.Type() {
+	case types.UntypedFloat:
+		w.mprat(n.Val())
+	case types.UntypedComplex:
+		v := n.Val()
+		w.mprat(constant.Real(v))
+		w.mprat(constant.Imag(v))
+	}
+}
+
+func (w *exportWriter) varExt(n *ir.Name) {
+	w.linkname(n.Sym())
+	w.symIdx(n.Sym())
+}
+
+func (w *exportWriter) funcExt(n *ir.Name) {
+	w.linkname(n.Sym())
+	w.symIdx(n.Sym())
+
	// TODO: remove after the register ABI is working.
+	w.uint64(uint64(n.Func.Pragma))
+
+	// Escape analysis.
+	for _, fs := range &types.RecvsParams {
+		for _, f := range fs(n.Type()).FieldSlice() {
+			w.string(f.Note)
+		}
+	}
+
+	// Inline body.
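	// A value of 0 means there is no inline body; otherwise the value
	// is 1 + the inlining cost, so the importer can recover the cost
	// by subtracting 1.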
+	if n.Func.Inl != nil {
+		w.uint64(1 + uint64(n.Func.Inl.Cost))
+		if n.Func.ExportInline() {
+			w.p.doInline(n)
+		}
+
+		// Endlineno for inlined function.
+		w.pos(n.Func.Endlineno)
+	} else {
+		w.uint64(0)
+	}
+}
+
+func (w *exportWriter) methExt(m *types.Field) {
+	w.bool(m.Nointerface())
+	w.funcExt(m.Nname.(*ir.Name))
+}
+
+func (w *exportWriter) linkname(s *types.Sym) {
+	w.string(s.Linkname)
+}
+
+func (w *exportWriter) symIdx(s *types.Sym) {
+	lsym := s.Linksym()
+	if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || s.Linkname != "" {
+		// Don't export index for non-package symbols, linkname'd symbols,
+		// and symbols without an index. They can only be referenced by
+		// name.
+		w.int64(-1)
+	} else {
+		// For a defined symbol, export its index.
+		// For re-exporting an imported symbol, pass its index through.
+		w.int64(int64(lsym.SymIdx))
+	}
+}
+
+func (w *exportWriter) typeExt(t *types.Type) {
+	// Export whether this type is marked notinheap.
+	w.bool(t.NotInHeap())
+	// For type T, export the index of type descriptor symbols of T and *T.
+	if i, ok := typeSymIdx[t]; ok {
+		w.int64(i[0])
+		w.int64(i[1])
+		return
+	}
+	w.symIdx(types.TypeSym(t))
+	w.symIdx(types.TypeSym(t.PtrTo()))
+}
+
+// Inline bodies.
+
+func (w *exportWriter) writeNames(dcl []*ir.Name) {
+	w.int64(int64(len(dcl)))
+	for i, n := range dcl {
+		w.pos(n.Pos())
+		w.localIdent(n.Sym())
+		w.typ(n.Type())
+		w.dclIndex[n] = w.maxDclIndex + i
+	}
+	w.maxDclIndex += len(dcl)
+}
+
+func (w *exportWriter) funcBody(fn *ir.Func) {
+	w.writeNames(fn.Inl.Dcl)
+
+	w.stmtList(fn.Inl.Body)
+}
+
+func (w *exportWriter) stmtList(list []ir.Node) {
+	for _, n := range list {
+		w.node(n)
+	}
+	w.op(ir.OEND)
+}
+
+func (w *exportWriter) node(n ir.Node) {
+	if ir.OpPrec[n.Op()] < 0 {
+		w.stmt(n)
+	} else {
+		w.expr(n)
+	}
+}
+
+// Caution: stmt will emit more than one node for a statement node n that has a
+// non-empty n.Ninit but cannot hold a natural init section of its own (unlike
+// "if", "for", etc.).
+func (w *exportWriter) stmt(n ir.Node) {
+	if len(n.Init()) > 0 && !ir.StmtWithInit(n.Op()) {
+		// can't use stmtList here since we don't want the final OEND
+		for _, n := range n.Init() {
+			w.stmt(n)
+		}
+	}
+
+	switch n.Op() {
+	case ir.OBLOCK:
+		// No OBLOCK in export data.
+		// Inline content into this statement list,
+		// like the init list above.
+		// (At the moment neither the parser nor the typechecker
+		// generate OBLOCK nodes except to denote an empty
+		// function body, although that may change.)
+		n := n.(*ir.BlockStmt)
+		for _, n := range n.List {
+			w.stmt(n)
+		}
+
+	case ir.ODCL:
+		n := n.(*ir.Decl)
+		if ir.IsBlank(n.X) {
+			return // blank declarations not useful to importers
+		}
+		w.op(ir.ODCL)
+		w.localName(n.X)
+
+	case ir.OAS:
		// Don't export "v = <N>" initializing statements; we expect them
		// always to be preceded by the ODCL, which will be re-parsed and
		// typechecked to reproduce the "v = <N>" again.
+		n := n.(*ir.AssignStmt)
+		if n.Y != nil {
+			w.op(ir.OAS)
+			w.pos(n.Pos())
+			w.expr(n.X)
+			w.expr(n.Y)
+		}
+
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		w.op(ir.OASOP)
+		w.pos(n.Pos())
+		w.op(n.AsOp)
+		w.expr(n.X)
+		if w.bool(!n.IncDec) {
+			w.expr(n.Y)
+		}
+
+	case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+		n := n.(*ir.AssignListStmt)
+		w.op(ir.OAS2)
+		w.pos(n.Pos())
+		w.exprList(n.Lhs)
+		w.exprList(n.Rhs)
+
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		w.op(ir.ORETURN)
+		w.pos(n.Pos())
+		w.exprList(n.Results)
+
+	// case ORETJMP:
	// 	unreachable - generated by the compiler for trampoline routines
+
+	case ir.OGO, ir.ODEFER:
+		n := n.(*ir.GoDeferStmt)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.Call)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		w.op(ir.OIF)
+		w.pos(n.Pos())
+		w.stmtList(n.Init())
+		w.expr(n.Cond)
+		w.stmtList(n.Body)
+		w.stmtList(n.Else)
+
+	case ir.OFOR:
+		n := n.(*ir.ForStmt)
+		w.op(ir.OFOR)
+		w.pos(n.Pos())
+		w.stmtList(n.Init())
+		w.exprsOrNil(n.Cond, n.Post)
+		w.stmtList(n.Body)
+
+	case ir.ORANGE:
+		n := n.(*ir.RangeStmt)
+		w.op(ir.ORANGE)
+		w.pos(n.Pos())
+		w.exprsOrNil(n.Key, n.Value)
+		w.expr(n.X)
+		w.stmtList(n.Body)
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.stmtList(n.Init())
+		w.commList(n.Cases)
+
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.stmtList(n.Init())
+		w.exprsOrNil(n.Tag, nil)
+		w.caseList(n.Cases, isNamedTypeSwitch(n.Tag))
+
+	// case OCASE:
+	//	handled by caseList
+
+	case ir.OFALL:
+		n := n.(*ir.BranchStmt)
+		w.op(ir.OFALL)
+		w.pos(n.Pos())
+
+	case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL:
+		w.op(n.Op())
+		w.pos(n.Pos())
+		label := ""
+		if sym := n.Sym(); sym != nil {
+			label = sym.Name
+		}
+		w.string(label)
+
+	default:
+		base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op())
+	}
+}
+
+func isNamedTypeSwitch(x ir.Node) bool {
+	guard, ok := x.(*ir.TypeSwitchGuard)
+	return ok && guard.Tag != nil
+}
+
+func (w *exportWriter) caseList(cases []*ir.CaseClause, namedTypeSwitch bool) {
+	w.uint64(uint64(len(cases)))
+	for _, cas := range cases {
+		w.pos(cas.Pos())
+		w.stmtList(cas.List)
+		if namedTypeSwitch {
+			w.localName(cas.Var)
+		}
+		w.stmtList(cas.Body)
+	}
+}
+
+func (w *exportWriter) commList(cases []*ir.CommClause) {
+	w.uint64(uint64(len(cases)))
+	for _, cas := range cases {
+		w.pos(cas.Pos())
+		w.node(cas.Comm)
+		w.stmtList(cas.Body)
+	}
+}
+
+func (w *exportWriter) exprList(list ir.Nodes) {
+	for _, n := range list {
+		w.expr(n)
+	}
+	w.op(ir.OEND)
+}
+
+func simplifyForExport(n ir.Node) ir.Node {
+	switch n.Op() {
+	case ir.OPAREN:
+		n := n.(*ir.ParenExpr)
+		return simplifyForExport(n.X)
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		if n.Implicit() {
+			return simplifyForExport(n.X)
+		}
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		if n.Implicit() {
+			return simplifyForExport(n.X)
+		}
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		if n.Implicit() {
+			return simplifyForExport(n.X)
+		}
+	}
+	return n
+}
+
+func (w *exportWriter) expr(n ir.Node) {
+	n = simplifyForExport(n)
+	switch n.Op() {
+	// expressions
+	// (somewhat closely following the structure of exprfmt in fmt.go)
+	case ir.ONIL:
+		n := n.(*ir.NilExpr)
+		if !n.Type().HasNil() {
+			base.Fatalf("unexpected type for nil: %v", n.Type())
+		}
+		w.op(ir.ONIL)
+		w.pos(n.Pos())
+		w.typ(n.Type())
+
+	case ir.OLITERAL:
+		w.op(ir.OLITERAL)
+		w.pos(n.Pos())
+		w.value(n.Type(), n.Val())
+
+	case ir.ONAME:
+		// Package scope name.
+		n := n.(*ir.Name)
+		if (n.Class == ir.PEXTERN || n.Class == ir.PFUNC) && !ir.IsBlank(n) {
+			w.op(ir.ONONAME)
+			w.qualifiedIdent(n)
+			break
+		}
+
+		// Function scope name.
+		w.op(ir.ONAME)
+		w.localName(n)
+
+	// case OPACK, ONONAME:
+	// 	should have been resolved by typechecking - handled by default case
+
+	case ir.OTYPE:
+		w.op(ir.OTYPE)
+		w.typ(n.Type())
+
+	case ir.OTYPESW:
+		n := n.(*ir.TypeSwitchGuard)
+		w.op(ir.OTYPESW)
+		w.pos(n.Pos())
+		var s *types.Sym
+		if n.Tag != nil {
+			if n.Tag.Op() != ir.ONONAME {
+				base.Fatalf("expected ONONAME, got %v", n.Tag)
+			}
+			s = n.Tag.Sym()
+		}
+		w.localIdent(s) // declared pseudo-variable, if any
+		w.expr(n.X)
+
+	// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
+	// 	should have been resolved by typechecking - handled by default case
+
+	case ir.OCLOSURE:
+		n := n.(*ir.ClosureExpr)
+		w.op(ir.OCLOSURE)
+		w.pos(n.Pos())
+		w.signature(n.Type())
+
		// Write out the id of the Outer of each closure variable. The
		// closure variable itself for this closure will be re-created
		// during import.
+		w.int64(int64(len(n.Func.ClosureVars)))
+		for i, cv := range n.Func.ClosureVars {
+			w.pos(cv.Pos())
+			w.localName(cv.Outer)
			// Each closure variable (which will be re-created during
			// import) is given a negative id, starting at -2, which is
			// used to refer to it later in the function during export.
			// -1 represents blanks.
+			w.dclIndex[cv] = -(i + 2) - w.maxClosureVarIndex
+		}
+		w.maxClosureVarIndex += len(n.Func.ClosureVars)
+
+		// like w.funcBody(n.Func), but not for .Inl
+		w.writeNames(n.Func.Dcl)
+		w.stmtList(n.Func.Body)
+
+	// case OCOMPLIT:
+	// 	should have been resolved by typechecking - handled by default case
+
+	case ir.OPTRLIT:
+		n := n.(*ir.AddrExpr)
+		w.op(ir.OADDR)
+		w.pos(n.Pos())
+		w.expr(n.X)
+
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		w.op(ir.OSTRUCTLIT)
+		w.pos(n.Pos())
+		w.typ(n.Type())
+		w.fieldList(n.List) // special handling of field names
+
+	case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		w.op(ir.OCOMPLIT)
+		w.pos(n.Pos())
+		w.typ(n.Type())
+		w.exprList(n.List)
+
+	case ir.OKEY:
+		n := n.(*ir.KeyExpr)
+		w.op(ir.OKEY)
+		w.pos(n.Pos())
+		w.expr(n.Key)
+		w.expr(n.Value)
+
+	// case OSTRUCTKEY:
+	//	unreachable - handled in case OSTRUCTLIT by elemList
+
+	case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+		n := n.(*ir.SelectorExpr)
+		w.op(ir.OXDOT)
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.selector(n.Sel)
+
+	case ir.ODOTTYPE, ir.ODOTTYPE2:
+		n := n.(*ir.TypeAssertExpr)
+		w.op(ir.ODOTTYPE)
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.typ(n.Type())
+
+	case ir.OINDEX, ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		w.op(ir.OINDEX)
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.expr(n.Index)
+
+	case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR:
+		n := n.(*ir.SliceExpr)
+		w.op(ir.OSLICE)
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.exprsOrNil(n.Low, n.High)
+
+	case ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+		w.op(ir.OSLICE3)
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.exprsOrNil(n.Low, n.High)
+		w.expr(n.Max)
+
+	case ir.OCOPY, ir.OCOMPLEX:
+		// treated like other builtin calls (see e.g., OREAL)
+		n := n.(*ir.BinaryExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.expr(n.Y)
+		w.op(ir.OEND)
+
+	case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		w.op(ir.OCONV)
+		w.pos(n.Pos())
+		w.typ(n.Type())
+		w.expr(n.X)
+
+	case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
+		n := n.(*ir.UnaryExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.op(ir.OEND)
+
+	case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+		n := n.(*ir.CallExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.exprList(n.Args) // emits terminating OEND
+		// only append() calls may contain '...' arguments
+		if n.Op() == ir.OAPPEND {
+			w.bool(n.IsDDD)
+		} else if n.IsDDD {
+			base.Fatalf("exporter: unexpected '...' with %v call", n.Op())
+		}
+
+	case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
+		n := n.(*ir.CallExpr)
+		w.op(ir.OCALL)
+		w.pos(n.Pos())
+		w.stmtList(n.Init())
+		w.expr(n.X)
+		w.exprList(n.Args)
+		w.bool(n.IsDDD)
+
+	case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+		n := n.(*ir.MakeExpr)
+		w.op(n.Op()) // must keep separate from OMAKE for importer
+		w.pos(n.Pos())
+		w.typ(n.Type())
+		switch {
+		default:
+			// empty list
+			w.op(ir.OEND)
+		case n.Cap != nil:
+			w.expr(n.Len)
+			w.expr(n.Cap)
+			w.op(ir.OEND)
+		case n.Len != nil && (n.Op() == ir.OMAKESLICE || !n.Len.Type().IsUntyped()):
+			w.expr(n.Len)
+			w.op(ir.OEND)
+		}
+
+	// unary expressions
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.Chan)
+		w.expr(n.Value)
+
+	// binary expressions
+	case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+		ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+		n := n.(*ir.BinaryExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.expr(n.Y)
+
+	case ir.OANDAND, ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		w.op(n.Op())
+		w.pos(n.Pos())
+		w.expr(n.X)
+		w.expr(n.Y)
+
+	case ir.OADDSTR:
+		n := n.(*ir.AddStringExpr)
+		w.op(ir.OADDSTR)
+		w.pos(n.Pos())
+		w.exprList(n.List)
+
+	case ir.ODCLCONST:
		// ODCLCONST nodes are simply dropped when exporting: their
		// uses have already been replaced with literals.
+
+	default:
+		base.Fatalf("cannot export %v (%d) node\n"+
+			"\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
+	}
+}
+
+func (w *exportWriter) op(op ir.Op) {
+	w.uint64(uint64(op))
+}
+
+func (w *exportWriter) exprsOrNil(a, b ir.Node) {
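	// ab is a two-bit mask recording which operands are present:
	// bit 0 for a, bit 1 for b.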
+	ab := 0
+	if a != nil {
+		ab |= 1
+	}
+	if b != nil {
+		ab |= 2
+	}
+	w.uint64(uint64(ab))
+	if ab&1 != 0 {
+		w.expr(a)
+	}
+	if ab&2 != 0 {
+		w.node(b)
+	}
+}
+
+func (w *exportWriter) fieldList(list ir.Nodes) {
+	w.uint64(uint64(len(list)))
+	for _, n := range list {
+		n := n.(*ir.StructKeyExpr)
+		w.pos(n.Pos())
+		w.selector(n.Field)
+		w.expr(n.Value)
+	}
+}
+
+func (w *exportWriter) localName(n *ir.Name) {
+	if ir.IsBlank(n) {
+		w.int64(-1)
+		return
+	}
+
+	i, ok := w.dclIndex[n]
+	if !ok {
+		base.FatalfAt(n.Pos(), "missing from dclIndex: %+v", n)
+	}
+	w.int64(int64(i))
+}
+
+func (w *exportWriter) localIdent(s *types.Sym) {
+	if w.currPkg == nil {
+		base.Fatalf("missing currPkg")
+	}
+
+	// Anonymous parameters.
+	if s == nil {
+		w.string("")
+		return
+	}
+
+	name := s.Name
+	if name == "_" {
+		w.string("_")
+		return
+	}
+
+	// TODO(mdempsky): Fix autotmp hack.
+	if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
+		base.Fatalf("unexpected dot in identifier: %v", name)
+	}
+
+	if s.Pkg != w.currPkg {
+		base.Fatalf("weird package in name: %v => %v from %q, not %q", s, name, s.Pkg.Path, w.currPkg.Path)
+	}
+
+	w.string(name)
+}
+
+type intWriter struct {
+	bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+	var buf [binary.MaxVarintLen64]byte
+	n := binary.PutVarint(buf[:], x)
+	w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+	var buf [binary.MaxVarintLen64]byte
+	n := binary.PutUvarint(buf[:], x)
+	w.Write(buf[:n])
+}
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
new file mode 100644
index 0000000..b73ef51
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -0,0 +1,1240 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package typecheck
+
+import (
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"io"
+	"math/big"
+	"os"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/bio"
+	"cmd/internal/goobj"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+// An iimporterAndOffset identifies an importer and an offset within
+// its data section.
+type iimporterAndOffset struct {
+	p   *iimporter
+	off uint64
+}
+
+var (
+	// DeclImporter maps from imported identifiers to an importer
+	// and offset where that identifier's declaration can be read.
+	DeclImporter = map[*types.Sym]iimporterAndOffset{}
+
+	// inlineImporter is like DeclImporter, but for inline bodies
+	// for function and method symbols.
+	inlineImporter = map[*types.Sym]iimporterAndOffset{}
+)
+
+func expandDecl(n ir.Node) ir.Node {
+	if n, ok := n.(*ir.Name); ok {
+		return n
+	}
+
+	id := n.(*ir.Ident)
+	if n := id.Sym().PkgDef(); n != nil {
+		return n.(*ir.Name)
+	}
+
+	r := importReaderFor(id.Sym(), DeclImporter)
+	if r == nil {
+		// Can happen if user tries to reference an undeclared name.
+		return n
+	}
+
+	return r.doDecl(n.Sym())
+}
+
+func ImportBody(fn *ir.Func) {
+	if fn.Inl.Body != nil {
+		return
+	}
+
+	r := importReaderFor(fn.Nname.Sym(), inlineImporter)
+	if r == nil {
+		base.Fatalf("missing import reader for %v", fn)
+	}
+
+	if inimport {
+		base.Fatalf("recursive inimport")
+	}
+	inimport = true
+	r.doInline(fn)
+	inimport = false
+}
+
+func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset) *importReader {
+	x, ok := importers[sym]
+	if !ok {
+		return nil
+	}
+
+	return x.p.newReader(x.off, sym.Pkg)
+}
+
+type intReader struct {
+	*bio.Reader
+	pkg *types.Pkg
+}
+
+func (r *intReader) int64() int64 {
+	i, err := binary.ReadVarint(r.Reader)
+	if err != nil {
+		base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+		base.ErrorExit()
+	}
+	return i
+}
+
+func (r *intReader) uint64() uint64 {
+	i, err := binary.ReadUvarint(r.Reader)
+	if err != nil {
+		base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+		base.ErrorExit()
+	}
+	return i
+}
+
+func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
+	ird := &intReader{in, pkg}
+
+	version := ird.uint64()
+	if version != iexportVersion {
+		base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+		base.ErrorExit()
+	}
+
+	sLen := ird.uint64()
+	dLen := ird.uint64()
+
+	// Map string (and data) section into memory as a single large
+	// string. This reduces heap fragmentation and allows
+	// returning individual substrings very efficiently.
+	data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
+	if err != nil {
+		base.Errorf("import %q: mapping input: %v", pkg.Path, err)
+		base.ErrorExit()
+	}
+	stringData := data[:sLen]
+	declData := data[sLen:]
+
+	in.MustSeek(int64(sLen+dLen), os.SEEK_CUR)
+
+	p := &iimporter{
+		ipkg: pkg,
+
+		pkgCache:     map[uint64]*types.Pkg{},
+		posBaseCache: map[uint64]*src.PosBase{},
+		typCache:     map[uint64]*types.Type{},
+
+		stringData: stringData,
+		declData:   declData,
+	}
+
+	for i, pt := range predeclared() {
+		p.typCache[uint64(i)] = pt
+	}
+
+	// Declaration index.
+	for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+		pkg := p.pkgAt(ird.uint64())
+		pkgName := p.stringAt(ird.uint64())
+		pkgHeight := int(ird.uint64())
+		if pkg.Name == "" {
+			pkg.Name = pkgName
+			pkg.Height = pkgHeight
+			types.NumImport[pkgName]++
+
+			// TODO(mdempsky): This belongs somewhere else.
+			pkg.Lookup("_").Def = ir.BlankNode
+		} else {
+			if pkg.Name != pkgName {
+				base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
+			}
+			if pkg.Height != pkgHeight {
+				base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
+			}
+		}
+
+		for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+			s := pkg.Lookup(p.stringAt(ird.uint64()))
+			off := ird.uint64()
+
+			if _, ok := DeclImporter[s]; !ok {
+				DeclImporter[s] = iimporterAndOffset{p, off}
+			}
+		}
+	}
+
+	// Inline body index.
+	for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+		pkg := p.pkgAt(ird.uint64())
+
+		for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+			s := pkg.Lookup(p.stringAt(ird.uint64()))
+			off := ird.uint64()
+
+			if _, ok := inlineImporter[s]; !ok {
+				inlineImporter[s] = iimporterAndOffset{p, off}
+			}
+		}
+	}
+
+	// Fingerprint.
+	_, err = io.ReadFull(in, fingerprint[:])
+	if err != nil {
+		base.Errorf("import %s: error reading fingerprint", pkg.Path)
+		base.ErrorExit()
+	}
+	return fingerprint
+}
+
+type iimporter struct {
+	ipkg *types.Pkg
+
+	pkgCache     map[uint64]*types.Pkg
+	posBaseCache map[uint64]*src.PosBase
+	typCache     map[uint64]*types.Type
+
+	stringData string
+	declData   string
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+	var x [binary.MaxVarintLen64]byte
+	n := copy(x[:], p.stringData[off:])
+
+	slen, n := binary.Uvarint(x[:n])
+	if n <= 0 {
+		base.Fatalf("varint failed")
+	}
+	spos := off + uint64(n)
+	return p.stringData[spos : spos+slen]
+}
+
+func (p *iimporter) posBaseAt(off uint64) *src.PosBase {
+	if posBase, ok := p.posBaseCache[off]; ok {
+		return posBase
+	}
+
+	file := p.stringAt(off)
+	posBase := src.NewFileBase(file, file)
+	p.posBaseCache[off] = posBase
+	return posBase
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Pkg {
+	if pkg, ok := p.pkgCache[off]; ok {
+		return pkg
+	}
+
+	pkg := p.ipkg
+	if pkgPath := p.stringAt(off); pkgPath != "" {
+		pkg = types.NewPkg(pkgPath, "")
+	}
+	p.pkgCache[off] = pkg
+	return pkg
+}
+
+// An importReader keeps state for reading an individual imported
+// object (declaration or inline body).
+type importReader struct {
+	strings.Reader
+	p *iimporter
+
+	currPkg    *types.Pkg
+	prevBase   *src.PosBase
+	prevLine   int64
+	prevColumn int64
+
+	// curfn is the current function we're importing into.
+	curfn *ir.Func
+	// Slice of all dcls for function, including any interior closures
+	allDcls        []*ir.Name
+	allClosureVars []*ir.Name
+}
+
+func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
+	r := &importReader{
+		p:       p,
+		currPkg: pkg,
+	}
+	// (*strings.Reader).Reset wasn't added until Go 1.7, and we
+	// need to build with Go 1.4.
+	r.Reader = *strings.NewReader(p.declData[off:])
+	return r
+}
+
+func (r *importReader) string() string        { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *src.PosBase { return r.p.posBaseAt(r.uint64()) }
+func (r *importReader) pkg() *types.Pkg       { return r.p.pkgAt(r.uint64()) }
+
+func (r *importReader) setPkg() {
+	r.currPkg = r.pkg()
+}
+
+func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
+	tag := r.byte()
+	pos := r.pos()
+
+	switch tag {
+	case 'A':
+		typ := r.typ()
+
+		return importalias(r.p.ipkg, pos, sym, typ)
+
+	case 'C':
+		typ := r.typ()
+		val := r.value(typ)
+
+		n := importconst(r.p.ipkg, pos, sym, typ, val)
+		r.constExt(n)
+		return n
+
+	case 'F':
+		typ := r.signature(nil)
+
+		n := importfunc(r.p.ipkg, pos, sym, typ)
+		r.funcExt(n)
+		return n
+
+	case 'T':
		// Types can be recursive. We need to set up a stub
+		// declaration before recursing.
+		n := importtype(r.p.ipkg, pos, sym)
+		t := n.Type()
+
+		// We also need to defer width calculations until
+		// after the underlying type has been assigned.
+		types.DeferCheckSize()
+		underlying := r.typ()
+		t.SetUnderlying(underlying)
+		types.ResumeCheckSize()
+
+		if underlying.IsInterface() {
+			r.typeExt(t)
+			return n
+		}
+
+		ms := make([]*types.Field, r.uint64())
+		for i := range ms {
+			mpos := r.pos()
+			msym := r.selector()
+			recv := r.param()
+			mtyp := r.signature(recv)
+
+			// MethodSym already marked m.Sym as a function.
+			m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
+			m.Class = ir.PFUNC
+			m.SetType(mtyp)
+
+			m.Func = ir.NewFunc(mpos)
+			m.Func.Nname = m
+
+			f := types.NewField(mpos, msym, mtyp)
+			f.Nname = m
+			ms[i] = f
+		}
+		t.Methods().Set(ms)
+
+		r.typeExt(t)
+		for _, m := range ms {
+			r.methExt(m)
+		}
+		return n
+
+	case 'V':
+		typ := r.typ()
+
+		n := importvar(r.p.ipkg, pos, sym, typ)
+		r.varExt(n)
+		return n
+
+	default:
+		base.Fatalf("unexpected tag: %v", tag)
+		panic("unreachable")
+	}
+}
+
+func (p *importReader) value(typ *types.Type) constant.Value {
+	switch constTypeOf(typ) {
+	case constant.Bool:
+		return constant.MakeBool(p.bool())
+	case constant.String:
+		return constant.MakeString(p.string())
+	case constant.Int:
+		var i big.Int
+		p.mpint(&i, typ)
+		return constant.Make(&i)
+	case constant.Float:
+		return p.float(typ)
+	case constant.Complex:
+		return makeComplex(p.float(typ), p.float(typ))
+	}
+
+	base.Fatalf("unexpected value type: %v", typ)
+	panic("unreachable")
+}
+
+func (p *importReader) mpint(x *big.Int, typ *types.Type) {
+	signed, maxBytes := intSize(typ)
+
+	maxSmall := 256 - maxBytes
+	if signed {
+		maxSmall = 256 - 2*maxBytes
+	}
+	if maxBytes == 1 {
+		maxSmall = 256
+	}
+
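	// Mirror of exportWriter.mpint: a first byte below maxSmall is the
	// value itself (zig-zag decoded for signed types); otherwise it
	// encodes the length of the big-endian bytes that follow.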
+	n, _ := p.ReadByte()
+	if uint(n) < maxSmall {
+		v := int64(n)
+		if signed {
+			v >>= 1
+			if n&1 != 0 {
+				v = ^v
+			}
+		}
+		x.SetInt64(v)
+		return
+	}
+
+	v := -n
+	if signed {
+		v = -(n &^ 1) >> 1
+	}
+	if v < 1 || uint(v) > maxBytes {
+		base.Fatalf("weird decoding: %v, %v => %v", n, signed, v)
+	}
+	b := make([]byte, v)
+	p.Read(b)
+	x.SetBytes(b)
+	if signed && n&1 != 0 {
+		x.Neg(x)
+	}
+}
+
+func (p *importReader) float(typ *types.Type) constant.Value {
+	var mant big.Int
+	p.mpint(&mant, typ)
+	var f big.Float
+	f.SetInt(&mant)
+	if f.Sign() != 0 {
+		f.SetMantExp(&f, int(p.int64()))
+	}
+	return constant.Make(&f)
+}
+
+func (p *importReader) mprat(orig constant.Value) constant.Value {
+	if !p.bool() {
+		return orig
+	}
+	var rat big.Rat
+	rat.SetString(p.string())
+	return constant.Make(&rat)
+}
+
+func (r *importReader) ident(selector bool) *types.Sym {
+	name := r.string()
+	if name == "" {
+		return nil
+	}
+	pkg := r.currPkg
+	if selector && types.IsExported(name) {
+		pkg = types.LocalPkg
+	}
+	return pkg.Lookup(name)
+}
+
+func (r *importReader) localIdent() *types.Sym { return r.ident(false) }
+func (r *importReader) selector() *types.Sym   { return r.ident(true) }
+
+func (r *importReader) qualifiedIdent() *ir.Ident {
+	name := r.string()
+	pkg := r.pkg()
+	sym := pkg.Lookup(name)
+	return ir.NewIdent(src.NoXPos, sym)
+}
+
+func (r *importReader) pos() src.XPos {
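	// Mirror of exportWriter.pos: the bottom bit of the column delta
	// says whether a line delta follows, and the bottom bit of the
	// line delta says whether a new position base (file) follows.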
+	delta := r.int64()
+	r.prevColumn += delta >> 1
+	if delta&1 != 0 {
+		delta = r.int64()
+		r.prevLine += delta >> 1
+		if delta&1 != 0 {
+			r.prevBase = r.posBase()
+		}
+	}
+
+	if (r.prevBase == nil || r.prevBase.AbsFilename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
+		// TODO(mdempsky): Remove once we reliably write
+		// position information for all nodes.
+		return src.NoXPos
+	}
+
+	if r.prevBase == nil {
+		base.Fatalf("missing posbase")
+	}
+	pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn))
+	return base.Ctxt.PosTable.XPos(pos)
+}
+
+func (r *importReader) typ() *types.Type {
+	return r.p.typAt(r.uint64())
+}
+
+func (p *iimporter) typAt(off uint64) *types.Type {
+	t, ok := p.typCache[off]
+	if !ok {
+		if off < predeclReserved {
+			base.Fatalf("predeclared type missing from cache: %d", off)
+		}
+		t = p.newReader(off-predeclReserved, nil).typ1()
+		p.typCache[off] = t
+	}
+	return t
+}
+
+func (r *importReader) typ1() *types.Type {
+	switch k := r.kind(); k {
+	default:
+		base.Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
+		return nil
+
+	case definedType:
+		// We might be called from within doInline, in which
+		// case Sym.Def can point to declared parameters
+		// instead of the top-level types. Also, we don't
+		// support inlining functions with local defined
+		// types. Therefore, this must be a package-scope
+		// type.
+		n := expandDecl(r.qualifiedIdent())
+		if n.Op() != ir.OTYPE {
+			base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
+		}
+		return n.Type()
+	case pointerType:
+		return types.NewPtr(r.typ())
+	case sliceType:
+		return types.NewSlice(r.typ())
+	case arrayType:
+		n := r.uint64()
+		return types.NewArray(r.typ(), int64(n))
+	case chanType:
+		dir := types.ChanDir(r.uint64())
+		return types.NewChan(r.typ(), dir)
+	case mapType:
+		return types.NewMap(r.typ(), r.typ())
+
+	case signatureType:
+		r.setPkg()
+		return r.signature(nil)
+
+	case structType:
+		r.setPkg()
+
+		fs := make([]*types.Field, r.uint64())
+		for i := range fs {
+			pos := r.pos()
+			sym := r.selector()
+			typ := r.typ()
+			emb := r.bool()
+			note := r.string()
+
+			f := types.NewField(pos, sym, typ)
+			if emb {
+				f.Embedded = 1
+			}
+			f.Note = note
+			fs[i] = f
+		}
+
+		return types.NewStruct(r.currPkg, fs)
+
+	case interfaceType:
+		r.setPkg()
+
+		embeddeds := make([]*types.Field, r.uint64())
+		for i := range embeddeds {
+			pos := r.pos()
+			typ := r.typ()
+
+			embeddeds[i] = types.NewField(pos, nil, typ)
+		}
+
+		methods := make([]*types.Field, r.uint64())
+		for i := range methods {
+			pos := r.pos()
+			sym := r.selector()
+			typ := r.signature(fakeRecvField())
+
+			methods[i] = types.NewField(pos, sym, typ)
+		}
+
+		t := types.NewInterface(r.currPkg, append(embeddeds, methods...))
+
+		// Ensure we expand the interface in the frontend (#25055).
+		types.CheckSize(t)
+		return t
+	}
+}
+
+func (r *importReader) kind() itag {
+	return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Field) *types.Type {
+	params := r.paramList()
+	results := r.paramList()
+	if n := len(params); n > 0 {
+		params[n-1].SetIsDDD(r.bool())
+	}
+	return types.NewSignature(r.currPkg, recv, params, results)
+}
+
+func (r *importReader) paramList() []*types.Field {
+	fs := make([]*types.Field, r.uint64())
+	for i := range fs {
+		fs[i] = r.param()
+	}
+	return fs
+}
+
+func (r *importReader) param() *types.Field {
+	return types.NewField(r.pos(), r.localIdent(), r.typ())
+}
+
+func (r *importReader) bool() bool {
+	return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+	n, err := binary.ReadVarint(r)
+	if err != nil {
+		base.Fatalf("readVarint: %v", err)
+	}
+	return n
+}
+
+func (r *importReader) uint64() uint64 {
+	n, err := binary.ReadUvarint(r)
+	if err != nil {
+		base.Fatalf("readVarint: %v", err)
+	}
+	return n
+}
+
+func (r *importReader) byte() byte {
+	x, err := r.ReadByte()
+	if err != nil {
+		base.Fatalf("declReader.ReadByte: %v", err)
+	}
+	return x
+}
+
+// Compiler-specific extensions.
+
+func (r *importReader) constExt(n *ir.Name) {
+	switch n.Type() {
+	case types.UntypedFloat:
+		n.SetVal(r.mprat(n.Val()))
+	case types.UntypedComplex:
+		v := n.Val()
+		re := r.mprat(constant.Real(v))
+		im := r.mprat(constant.Imag(v))
+		n.SetVal(makeComplex(re, im))
+	}
+}
+
+func (r *importReader) varExt(n *ir.Name) {
+	r.linkname(n.Sym())
+	r.symIdx(n.Sym())
+}
+
+func (r *importReader) funcExt(n *ir.Name) {
+	r.linkname(n.Sym())
+	r.symIdx(n.Sym())
+
+	// TODO remove after register abi is working
+	n.SetPragma(ir.PragmaFlag(r.uint64()))
+
+	// Escape analysis.
+	for _, fs := range &types.RecvsParams {
+		for _, f := range fs(n.Type()).FieldSlice() {
+			f.Note = r.string()
+		}
+	}
+
+	// Inline body.
+	if u := r.uint64(); u > 0 {
+		n.Func.Inl = &ir.Inline{
+			Cost: int32(u - 1),
+		}
+		n.Func.Endlineno = r.pos()
+	}
+}
+
+func (r *importReader) methExt(m *types.Field) {
+	if r.bool() {
+		m.SetNointerface(true)
+	}
+	r.funcExt(m.Nname.(*ir.Name))
+}
+
+func (r *importReader) linkname(s *types.Sym) {
+	s.Linkname = r.string()
+}
+
+func (r *importReader) symIdx(s *types.Sym) {
+	lsym := s.Linksym()
+	idx := int32(r.int64())
+	if idx != -1 {
+		if s.Linkname != "" {
+			base.Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
+		}
+		lsym.SymIdx = idx
+		lsym.Set(obj.AttrIndexed, true)
+	}
+}
+
+func (r *importReader) typeExt(t *types.Type) {
+	t.SetNotInHeap(r.bool())
+	i, pi := r.int64(), r.int64()
+	if i != -1 && pi != -1 {
+		typeSymIdx[t] = [2]int64{i, pi}
+	}
+}
+
+// Map imported type T to the index of type descriptor symbols of T and *T,
+// so we can use index to reference the symbol.
+var typeSymIdx = make(map[*types.Type][2]int64)
+
+func BaseTypeIndex(t *types.Type) int64 {
+	tbase := t
+	if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+		tbase = t.Elem()
+	}
+	i, ok := typeSymIdx[tbase]
+	if !ok {
+		return -1
+	}
+	if t != tbase {
+		return i[1]
+	}
+	return i[0]
+}
+
+func (r *importReader) doInline(fn *ir.Func) {
+	if len(fn.Inl.Body) != 0 {
+		base.Fatalf("%v already has inline body", fn)
+	}
+
+	//fmt.Printf("Importing %v\n", n)
+	r.funcBody(fn)
+
+	importlist = append(importlist, fn)
+
+	if base.Flag.E > 0 && base.Flag.LowerM > 2 {
+		if base.Flag.LowerM > 3 {
+			fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body))
+		} else {
+			fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body))
+		}
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Inlined function bodies
+
+// Approach: Read nodes and use them to create/declare the same data structures
+// as done originally by the (hidden) parser by closely following the parser's
+// original code. In other words, "parsing" the import data (which happens to
+// be encoded in binary rather textual form) is the best way at the moment to
+// re-establish the syntax tree's invariants. At some future point we might be
+// able to avoid this roundabout way and create the rewritten nodes directly,
+// possibly avoiding a lot of duplicate work (name resolution, type checking).
+//
+// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their
+// unrefined nodes (since this is what the importer uses). The respective case
+// entries are unreachable in the importer.
+
+func (r *importReader) funcBody(fn *ir.Func) {
+	outerfn := r.curfn
+	r.curfn = fn
+
+	// Import local declarations.
+	fn.Inl.Dcl = r.readFuncDcls(fn)
+
+	// Import function body.
+	body := r.stmtList()
+	if body == nil {
		// Make sure an empty body is not interpreted as
		// "no inlineable body" (see also parser.fnbody);
		// otherwise calls to empty functions would not be
		// inlined, causing significant performance
		// degradation.
+		body = []ir.Node{}
+	}
+	fn.Inl.Body = body
+
+	r.curfn = outerfn
+}
+
+func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
+	dcls := make([]*ir.Name, r.int64())
+	for i := range dcls {
+		n := ir.NewDeclNameAt(r.pos(), ir.ONAME, r.localIdent())
+		n.Class = ir.PAUTO // overwritten below for parameters/results
+		n.Curfn = fn
+		n.SetType(r.typ())
+		dcls[i] = n
+	}
+	r.allDcls = append(r.allDcls, dcls...)
+	return dcls
+}
+
+func (r *importReader) readFuncDcls(fn *ir.Func) []*ir.Name {
+	dcls := r.readNames(fn)
+
+	// Fixup parameter classes and associate with their
+	// signature's type fields.
+	i := 0
+	fix := func(f *types.Field, class ir.Class) {
+		if class == ir.PPARAM && (f.Sym == nil || f.Sym.Name == "_") {
+			return
+		}
+		n := dcls[i]
+		n.Class = class
+		f.Nname = n
+		i++
+	}
+
+	typ := fn.Type()
+	if recv := typ.Recv(); recv != nil {
+		fix(recv, ir.PPARAM)
+	}
+	for _, f := range typ.Params().FieldSlice() {
+		fix(f, ir.PPARAM)
+	}
+	for _, f := range typ.Results().FieldSlice() {
+		fix(f, ir.PPARAMOUT)
+	}
+	return dcls
+}
+
+func (r *importReader) localName() *ir.Name {
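	// The index mirrors exportWriter.dclIndex: -1 denotes the blank
	// identifier, other negative values (starting at -2) denote
	// closure variables, and non-negative values index allDcls.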
+	i := r.int64()
+	if i == -1 {
+		return ir.BlankNode.(*ir.Name)
+	}
+	if i < 0 {
+		return r.allClosureVars[-i-2]
+	}
+	return r.allDcls[i]
+}
+
+func (r *importReader) stmtList() []ir.Node {
+	var list []ir.Node
+	for {
+		n := r.node()
+		if n == nil {
+			break
+		}
+		// OBLOCK nodes are not written to the import data directly,
+		// but the handling of ODCL calls liststmt, which creates one.
+		// Inline them into the statement list.
+		if n.Op() == ir.OBLOCK {
+			n := n.(*ir.BlockStmt)
+			list = append(list, n.List...)
+		} else {
+			list = append(list, n)
+		}
	}
+	return list
+}
+
+func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause {
+	namedTypeSwitch := isNamedTypeSwitch(switchExpr)
+
+	cases := make([]*ir.CaseClause, r.uint64())
+	for i := range cases {
+		cas := ir.NewCaseStmt(r.pos(), nil, nil)
+		cas.List = r.stmtList()
+		if namedTypeSwitch {
+			cas.Var = r.localName()
+			cas.Var.Defn = switchExpr
+		}
+		cas.Body = r.stmtList()
+		cases[i] = cas
+	}
+	return cases
+}
+
+func (r *importReader) commList() []*ir.CommClause {
+	cases := make([]*ir.CommClause, r.uint64())
+	for i := range cases {
+		cases[i] = ir.NewCommStmt(r.pos(), r.node(), r.stmtList())
+	}
+	return cases
+}
+
+func (r *importReader) exprList() []ir.Node {
+	var list []ir.Node
+	for {
+		n := r.expr()
+		if n == nil {
+			break
+		}
+		list = append(list, n)
+	}
+	return list
+}
+
+func (r *importReader) expr() ir.Node {
+	n := r.node()
+	if n != nil && n.Op() == ir.OBLOCK {
+		n := n.(*ir.BlockStmt)
+		base.Fatalf("unexpected block node: %v", n)
+	}
+	return n
+}
+
+// TODO(gri) split into expr and stmt
+func (r *importReader) node() ir.Node {
+	switch op := r.op(); op {
+	// expressions
+	// case OPAREN:
+	// 	unreachable - unpacked by exporter
+
+	case ir.ONIL:
+		pos := r.pos()
+		typ := r.typ()
+
+		n := ir.NewNilExpr(pos)
+		n.SetType(typ)
+		return n
+
+	case ir.OLITERAL:
+		pos := r.pos()
+		typ := r.typ()
+
+		n := ir.NewBasicLit(pos, r.value(typ))
+		n.SetType(typ)
+		return n
+
+	case ir.ONONAME:
+		return r.qualifiedIdent()
+
+	case ir.ONAME:
+		return r.localName()
+
+	// case OPACK, ONONAME:
+	// 	unreachable - should have been resolved by typechecking
+
+	case ir.OTYPE:
+		return ir.TypeNode(r.typ())
+
+	case ir.OTYPESW:
+		pos := r.pos()
+		var tag *ir.Ident
+		if s := r.localIdent(); s != nil {
+			tag = ir.NewIdent(pos, s)
+		}
+		return ir.NewTypeSwitchGuard(pos, tag, r.expr())
+
+	// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
+	//      unreachable - should have been resolved by typechecking
+
+	case ir.OCLOSURE:
+		pos := r.pos()
+		typ := r.signature(nil)
+
+		// All the remaining code below is similar to (*noder).funcLit(), but
+		// with Dcls and ClosureVars lists already set up
+		fn := ir.NewFunc(pos)
+		fn.SetIsHiddenClosure(true)
+		fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym())
+		fn.Nname.Func = fn
+		fn.Nname.Ntype = ir.TypeNode(typ)
+		fn.Nname.Defn = fn
+		fn.Nname.SetType(typ)
+
+		cvars := make([]*ir.Name, r.int64())
+		for i := range cvars {
+			cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical())
+		}
+		fn.ClosureVars = cvars
+		r.allClosureVars = append(r.allClosureVars, cvars...)
+
+		fn.Dcl = r.readFuncDcls(fn)
+		body := r.stmtList()
+		ir.FinishCaptureNames(pos, r.curfn, fn)
+
+		clo := ir.NewClosureExpr(pos, fn)
+		fn.OClosure = clo
+
+		fn.Body = body
+
+		return clo
+
+	// case OPTRLIT:
+	//	unreachable - mapped to case OADDR below by exporter
+
+	case ir.OSTRUCTLIT:
+		return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.fieldList())
+
+	// case OARRAYLIT, OSLICELIT, OMAPLIT:
+	// 	unreachable - mapped to case OCOMPLIT below by exporter
+
+	case ir.OCOMPLIT:
+		return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.exprList())
+
+	case ir.OKEY:
+		return ir.NewKeyExpr(r.pos(), r.expr(), r.expr())
+
+	// case OSTRUCTKEY:
+	//	unreachable - handled in case OSTRUCTLIT by elemList
+
+	// case OCALLPART:
+	//	unreachable - mapped to case OXDOT below by exporter
+
+	// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
+	// 	unreachable - mapped to case OXDOT below by exporter
+
+	case ir.OXDOT:
+		// see parser.new_dotname
+		return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.selector())
+
+	// case ODOTTYPE, ODOTTYPE2:
+	// 	unreachable - mapped to case ODOTTYPE below by exporter
+
+	case ir.ODOTTYPE:
+		n := ir.NewTypeAssertExpr(r.pos(), r.expr(), nil)
+		n.SetType(r.typ())
+		return n
+
+	// case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+	// 	unreachable - mapped to cases below by exporter
+
+	case ir.OINDEX:
+		return ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
+
+	case ir.OSLICE, ir.OSLICE3:
+		pos, x := r.pos(), r.expr()
+		low, high := r.exprsOrNil()
+		var max ir.Node
+		if op.IsSlice3() {
+			max = r.expr()
+		}
+		return ir.NewSliceExpr(pos, op, x, low, high, max)
+
+	// case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
+	// 	unreachable - mapped to OCONV case below by exporter
+
+	case ir.OCONV:
+		return ir.NewConvExpr(r.pos(), ir.OCONV, r.typ(), r.expr())
+
+	case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+		n := builtinCall(r.pos(), op)
+		n.Args = r.exprList()
+		if op == ir.OAPPEND {
+			n.IsDDD = r.bool()
+		}
+		return n
+
+	// case OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
+	// 	unreachable - mapped to OCALL case below by exporter
+
+	case ir.OCALL:
+		pos := r.pos()
+		init := r.stmtList()
+		n := ir.NewCallExpr(pos, ir.OCALL, r.expr(), r.exprList())
+		*n.PtrInit() = init
+		n.IsDDD = r.bool()
+		return n
+
+	case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+		n := builtinCall(r.pos(), ir.OMAKE)
+		n.Args.Append(ir.TypeNode(r.typ()))
+		n.Args.Append(r.exprList()...)
+		return n
+
+	// unary expressions
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+		return ir.NewUnaryExpr(r.pos(), op, r.expr())
+
+	case ir.OADDR:
+		return NodAddrAt(r.pos(), r.expr())
+
+	case ir.ODEREF:
+		return ir.NewStarExpr(r.pos(), r.expr())
+
+	// binary expressions
+	case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+		ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+		return ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr())
+
+	case ir.OANDAND, ir.OOROR:
+		return ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr())
+
+	case ir.OSEND:
+		return ir.NewSendStmt(r.pos(), r.expr(), r.expr())
+
+	case ir.OADDSTR:
+		pos := r.pos()
+		list := r.exprList()
+		x := list[0]
+		for _, y := range list[1:] {
+			x = ir.NewBinaryExpr(pos, ir.OADD, x, y)
+		}
+		return x
+
+	// --------------------------------------------------------------------
+	// statements
+	case ir.ODCL:
+		var stmts ir.Nodes
+		n := r.localName()
+		stmts.Append(ir.NewDecl(n.Pos(), ir.ODCL, n))
+		stmts.Append(ir.NewAssignStmt(n.Pos(), n, nil))
+		return ir.NewBlockStmt(n.Pos(), stmts)
+
+	// case OAS, OASWB:
+	// 	unreachable - mapped to OAS case below by exporter
+
+	case ir.OAS:
+		return ir.NewAssignStmt(r.pos(), r.expr(), r.expr())
+
+	case ir.OASOP:
+		n := ir.NewAssignOpStmt(r.pos(), r.op(), r.expr(), nil)
+		if !r.bool() {
+			n.Y = ir.NewInt(1)
+			n.IncDec = true
+		} else {
+			n.Y = r.expr()
+		}
+		return n
+
+	// case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+	// 	unreachable - mapped to OAS2 case below by exporter
+
+	case ir.OAS2:
+		return ir.NewAssignListStmt(r.pos(), ir.OAS2, r.exprList(), r.exprList())
+
+	case ir.ORETURN:
+		return ir.NewReturnStmt(r.pos(), r.exprList())
+
+	// case ORETJMP:
+	// 	unreachable - generated by compiler for trampoline routines (not exported)
+
+	case ir.OGO, ir.ODEFER:
+		return ir.NewGoDeferStmt(r.pos(), op, r.expr())
+
+	case ir.OIF:
+		pos, init := r.pos(), r.stmtList()
+		n := ir.NewIfStmt(pos, r.expr(), r.stmtList(), r.stmtList())
+		*n.PtrInit() = init
+		return n
+
+	case ir.OFOR:
+		pos, init := r.pos(), r.stmtList()
+		cond, post := r.exprsOrNil()
+		n := ir.NewForStmt(pos, nil, cond, post, r.stmtList())
+		*n.PtrInit() = init
+		return n
+
+	case ir.ORANGE:
+		pos := r.pos()
+		k, v := r.exprsOrNil()
+		return ir.NewRangeStmt(pos, k, v, r.expr(), r.stmtList())
+
+	case ir.OSELECT:
+		pos := r.pos()
+		init := r.stmtList()
+		n := ir.NewSelectStmt(pos, r.commList())
+		*n.PtrInit() = init
+		return n
+
+	case ir.OSWITCH:
+		pos := r.pos()
+		init := r.stmtList()
+		x, _ := r.exprsOrNil()
+		n := ir.NewSwitchStmt(pos, x, r.caseList(x))
+		*n.PtrInit() = init
+		return n
+
+	// case OCASE:
+	//	handled by caseList
+
+	case ir.OFALL:
+		return ir.NewBranchStmt(r.pos(), ir.OFALL, nil)
+
+	// case OEMPTY:
+	// 	unreachable - not emitted by exporter
+
+	case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+		pos := r.pos()
+		var sym *types.Sym
+		if label := r.string(); label != "" {
+			sym = Lookup(label)
+		}
+		return ir.NewBranchStmt(pos, op, sym)
+
+	case ir.OLABEL:
+		return ir.NewLabelStmt(r.pos(), Lookup(r.string()))
+
+	case ir.OEND:
+		return nil
+
+	default:
+		base.Fatalf("cannot import %v (%d) node\n"+
+			"\t==> please file an issue and assign to gri@", op, int(op))
+		panic("unreachable") // satisfy compiler
+	}
+}
+
+func (r *importReader) op() ir.Op {
+	return ir.Op(r.uint64())
+}
+
+func (r *importReader) fieldList() []ir.Node {
+	list := make([]ir.Node, r.uint64())
+	for i := range list {
+		list[i] = ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr())
+	}
+	return list
+}
+
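+// exprsOrNil reads a presence mask followed by up to two nodes:
+// bit 0 of the mask says whether a follows (read as an expression),
+// bit 1 whether b does (read as a node).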
+func (r *importReader) exprsOrNil() (a, b ir.Node) {
+	ab := r.uint64()
+	if ab&1 != 0 {
+		a = r.expr()
+	}
+	if ab&2 != 0 {
+		b = r.node()
+	}
+	return
+}
+
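+// builtinCall constructs an unresolved OCALL to the built-in function
+// named by op (e.g. OMAKE, OAPPEND); the caller fills in the arguments.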
+func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
+	return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
+}
diff --git a/src/cmd/compile/internal/gc/mapfile_mmap.go b/src/cmd/compile/internal/typecheck/mapfile_mmap.go
similarity index 98%
rename from src/cmd/compile/internal/gc/mapfile_mmap.go
rename to src/cmd/compile/internal/typecheck/mapfile_mmap.go
index 9483688..2f3aa16 100644
--- a/src/cmd/compile/internal/gc/mapfile_mmap.go
+++ b/src/cmd/compile/internal/typecheck/mapfile_mmap.go
@@ -4,7 +4,7 @@
 
 // +build darwin dragonfly freebsd linux netbsd openbsd
 
-package gc
+package typecheck
 
 import (
 	"os"
diff --git a/src/cmd/compile/internal/gc/mapfile_read.go b/src/cmd/compile/internal/typecheck/mapfile_read.go
similarity index 96%
rename from src/cmd/compile/internal/gc/mapfile_read.go
rename to src/cmd/compile/internal/typecheck/mapfile_read.go
index c6f68ed..4059f26 100644
--- a/src/cmd/compile/internal/gc/mapfile_read.go
+++ b/src/cmd/compile/internal/typecheck/mapfile_read.go
@@ -4,7 +4,7 @@
 
 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
 
-package gc
+package typecheck
 
 import (
 	"io"
diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go
similarity index 83%
rename from src/cmd/compile/internal/gc/mkbuiltin.go
rename to src/cmd/compile/internal/typecheck/mkbuiltin.go
index 63d2a12..07f4b76 100644
--- a/src/cmd/compile/internal/gc/mkbuiltin.go
+++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go
@@ -33,9 +33,12 @@
 	var b bytes.Buffer
 	fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
 	fmt.Fprintln(&b)
-	fmt.Fprintln(&b, "package gc")
+	fmt.Fprintln(&b, "package typecheck")
 	fmt.Fprintln(&b)
-	fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)
+	fmt.Fprintln(&b, `import (`)
+	fmt.Fprintln(&b, `      "cmd/compile/internal/types"`)
+	fmt.Fprintln(&b, `      "cmd/internal/src"`)
+	fmt.Fprintln(&b, `)`)
 
 	mkbuiltin(&b, "runtime")
 
@@ -140,16 +143,16 @@
 	case *ast.Ident:
 		switch t.Name {
 		case "byte":
-			return "types.Bytetype"
+			return "types.ByteType"
 		case "rune":
-			return "types.Runetype"
+			return "types.RuneType"
 		}
-		return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
+		return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
 	case *ast.SelectorExpr:
 		if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
 			log.Fatalf("unhandled type: %#v", t)
 		}
-		return "types.Types[TUNSAFEPTR]"
+		return "types.Types[types.TUNSAFEPTR]"
 
 	case *ast.ArrayType:
 		if t.Len == nil {
@@ -166,18 +169,18 @@
 		}
 		return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
 	case *ast.FuncType:
-		return fmt.Sprintf("functype(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
+		return fmt.Sprintf("types.NewSignature(types.NoPkg, nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
 	case *ast.InterfaceType:
 		if len(t.Methods.List) != 0 {
 			log.Fatal("non-empty interfaces unsupported")
 		}
-		return "types.Types[TINTER]"
+		return "types.Types[types.TINTER]"
 	case *ast.MapType:
 		return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
 	case *ast.StarExpr:
 		return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X))
 	case *ast.StructType:
-		return fmt.Sprintf("tostruct(%s)", i.fields(t.Fields, true))
+		return fmt.Sprintf("types.NewStruct(types.NoPkg, %s)", i.fields(t.Fields, true))
 
 	default:
 		log.Fatalf("unhandled type: %#v", t)
@@ -193,18 +196,18 @@
 	for _, f := range fl.List {
 		typ := i.subtype(f.Type)
 		if len(f.Names) == 0 {
-			res = append(res, fmt.Sprintf("anonfield(%s)", typ))
+			res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, nil, %s)", typ))
 		} else {
 			for _, name := range f.Names {
 				if keepNames {
-					res = append(res, fmt.Sprintf("namedfield(%q, %s)", name.Name, typ))
+					res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, Lookup(%q), %s)", name.Name, typ))
 				} else {
-					res = append(res, fmt.Sprintf("anonfield(%s)", typ))
+					res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, nil, %s)", typ))
 				}
 			}
 		}
 	}
-	return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
+	return fmt.Sprintf("[]*types.Field{%s}", strings.Join(res, ", "))
 }
 
 func intconst(e ast.Expr) int64 {
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
new file mode 100644
index 0000000..14ed175
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -0,0 +1,669 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
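+// RangeExprType returns the type to range over: for a pointer to an
+// array it is the array type, otherwise it is t itself.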
+func RangeExprType(t *types.Type) *types.Type {
+	if t.IsPtr() && t.Elem().IsArray() {
+		return t.Elem()
+	}
+	return t
+}
+
+func typecheckrangeExpr(n *ir.RangeStmt) {
+	n.X = Expr(n.X)
+	if n.X.Type() == nil {
+		return
+	}
+
+	t := RangeExprType(n.X.Type())
+	// delicate little dance; see tcAssignList
+	if n.Key != nil && !ir.DeclaredBy(n.Key, n) {
+		n.Key = AssignExpr(n.Key)
+	}
+	if n.Value != nil && !ir.DeclaredBy(n.Value, n) {
+		n.Value = AssignExpr(n.Value)
+	}
+
+	var tk, tv *types.Type
+	toomany := false
+	switch t.Kind() {
+	default:
+		base.ErrorfAt(n.Pos(), "cannot range over %L", n.X)
+		return
+
+	case types.TARRAY, types.TSLICE:
+		tk = types.Types[types.TINT]
+		tv = t.Elem()
+
+	case types.TMAP:
+		tk = t.Key()
+		tv = t.Elem()
+
+	case types.TCHAN:
+		if !t.ChanDir().CanRecv() {
+			base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type())
+			return
+		}
+
+		tk = t.Elem()
+		tv = nil
+		if n.Value != nil {
+			toomany = true
+		}
+
+	case types.TSTRING:
+		tk = types.Types[types.TINT]
+		tv = types.RuneType
+	}
+
+	if toomany {
+		base.ErrorfAt(n.Pos(), "too many variables in range")
+	}
+
+	do := func(nn ir.Node, t *types.Type) {
+		if nn != nil {
+			if ir.DeclaredBy(nn, n) {
+				nn.SetType(t)
+			} else if nn.Type() != nil {
+				if op, why := assignop(t, nn.Type()); op == ir.OXXX {
+					base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t, nn, why)
+				}
+			}
+			checkassign(n, nn)
+		}
+	}
+	do(n.Key, tk)
+	do(n.Value, tv)
+}
+
+// tcAssign typechecks an OAS node. If this assignment is the
+// definition of a var on the left side, it fills in the var's type.
+func tcAssign(n *ir.AssignStmt) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("tcAssign", n)(nil)
+	}
+
+	if n.Y == nil {
+		n.X = AssignExpr(n.X)
+		return
+	}
+
+	lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y}
+	assign(n, lhs, rhs)
+	n.X, n.Y = lhs[0], rhs[0]
+
+	// TODO(mdempsky): This seems out of place.
+	if !ir.IsBlank(n.X) {
+		types.CheckSize(n.X.Type()) // ensure width is calculated for backend
+	}
+}
+
+func tcAssignList(n *ir.AssignListStmt) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("tcAssignList", n)(nil)
+	}
+
+	assign(n, n.Lhs, n.Rhs)
+}
+
+func assign(stmt ir.Node, lhs, rhs []ir.Node) {
+	// delicate little dance.
+	// the definition of lhs may refer to this assignment
+	// as its definition, in which case it will call tcAssign.
+	// in that case, do not call typecheck back, or it will cycle.
+	// if the variable has a type (ntype) then typechecking
+	// will not look at defn, so it is okay (and desirable,
+	// so that the conversion below happens).
+
+	checkLHS := func(i int, typ *types.Type) {
+		lhs[i] = Resolve(lhs[i])
+		if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Name().Ntype == nil {
+			if typ.Kind() != types.TNIL {
+				n.SetType(defaultType(typ))
+			} else {
+				base.Errorf("use of untyped nil")
+			}
+		}
+		if lhs[i].Typecheck() == 0 {
+			lhs[i] = AssignExpr(lhs[i])
+		}
+		checkassign(stmt, lhs[i])
+	}
+
+	assignType := func(i int, typ *types.Type) {
+		checkLHS(i, typ)
+		if typ != nil {
+			checkassignto(typ, lhs[i])
+		}
+	}
+
+	cr := len(rhs)
+	if len(rhs) == 1 {
+		rhs[0] = typecheck(rhs[0], ctxExpr|ctxMultiOK)
+		if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() {
+			cr = rtyp.NumFields()
+		}
+	} else {
+		Exprs(rhs)
+	}
+
+	// x, ok = y
+assignOK:
+	for len(lhs) == 2 && cr == 1 {
+		stmt := stmt.(*ir.AssignListStmt)
+		r := rhs[0]
+
+		switch r.Op() {
+		case ir.OINDEXMAP:
+			stmt.SetOp(ir.OAS2MAPR)
+		case ir.ORECV:
+			stmt.SetOp(ir.OAS2RECV)
+		case ir.ODOTTYPE:
+			r := r.(*ir.TypeAssertExpr)
+			stmt.SetOp(ir.OAS2DOTTYPE)
+			r.SetOp(ir.ODOTTYPE2)
+		default:
+			break assignOK
+		}
+
+		assignType(0, r.Type())
+		assignType(1, types.UntypedBool)
+		return
+	}
+
+	if len(lhs) != cr {
+		if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 {
+			if r.Type() != nil {
+				base.ErrorfAt(stmt.Pos(), "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.X, cr, plural(cr))
+			}
+		} else {
+			base.ErrorfAt(stmt.Pos(), "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), plural(len(rhs)))
+		}
+
+		for i := range lhs {
+			checkLHS(i, nil)
+		}
+		return
+	}
+
+	// x,y,z = f()
+	if cr > len(rhs) {
+		stmt := stmt.(*ir.AssignListStmt)
+		stmt.SetOp(ir.OAS2FUNC)
+		r := rhs[0].(*ir.CallExpr)
+		r.Use = ir.CallUseList
+		rtyp := r.Type()
+
+		for i := range lhs {
+			assignType(i, rtyp.Field(i).Type)
+		}
+		return
+	}
+
+	for i, r := range rhs {
+		checkLHS(i, r.Type())
+		if lhs[i].Type() != nil {
+			rhs[i] = AssignConv(r, lhs[i].Type(), "assignment")
+		}
+	}
+}
+
+func plural(n int) string {
+	if n == 1 {
+		return ""
+	}
+	return "s"
+}
+
+// tcFor typechecks an OFOR node.
+func tcFor(n *ir.ForStmt) ir.Node {
+	Stmts(n.Init())
+	n.Cond = Expr(n.Cond)
+	n.Cond = DefaultLit(n.Cond, nil)
+	if n.Cond != nil {
+		t := n.Cond.Type()
+		if t != nil && !t.IsBoolean() {
+			base.Errorf("non-bool %L used as for condition", n.Cond)
+		}
+	}
+	n.Post = Stmt(n.Post)
+	if n.Op() == ir.OFORUNTIL {
+		Stmts(n.Late)
+	}
+	Stmts(n.Body)
+	return n
+}
+
+func tcGoDefer(n *ir.GoDeferStmt) {
+	what := "defer"
+	if n.Op() == ir.OGO {
+		what = "go"
+	}
+
+	switch n.Call.Op() {
+	// ok
+	case ir.OCALLINTER,
+		ir.OCALLMETH,
+		ir.OCALLFUNC,
+		ir.OCLOSE,
+		ir.OCOPY,
+		ir.ODELETE,
+		ir.OPANIC,
+		ir.OPRINT,
+		ir.OPRINTN,
+		ir.ORECOVER:
+		return
+
+	case ir.OAPPEND,
+		ir.OCAP,
+		ir.OCOMPLEX,
+		ir.OIMAG,
+		ir.OLEN,
+		ir.OMAKE,
+		ir.OMAKESLICE,
+		ir.OMAKECHAN,
+		ir.OMAKEMAP,
+		ir.ONEW,
+		ir.OREAL,
+		ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+		if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV {
+			break
+		}
+		base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Call)
+		return
+	}
+
+	// type is broken or missing, most likely a method call on a broken type
+	// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
+	if n.Call.Type() == nil || n.Call.Type().Broke() {
+		return
+	}
+
+	if !n.Diag() {
+		// The syntax made sure it was a call, so this must be
+		// a conversion.
+		n.SetDiag(true)
+		base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
+	}
+}
+
+// tcIf typechecks an OIF node.
+func tcIf(n *ir.IfStmt) ir.Node {
+	Stmts(n.Init())
+	n.Cond = Expr(n.Cond)
+	n.Cond = DefaultLit(n.Cond, nil)
+	if n.Cond != nil {
+		t := n.Cond.Type()
+		if t != nil && !t.IsBoolean() {
+			base.Errorf("non-bool %L used as if condition", n.Cond)
+		}
+	}
+	Stmts(n.Body)
+	Stmts(n.Else)
+	return n
+}
+
+// tcRange typechecks an ORANGE node.
+func tcRange(n *ir.RangeStmt) {
+	// Typechecking order is important here:
+	// 0. first typecheck range expression (slice/map/chan),
+	//	it is evaluated only once and so logically it is not part of the loop.
+	// 1. typecheck produced values,
+	//	this part can declare new vars and so it must be typechecked before body,
+	//	because body can contain a closure that captures the vars.
+	// 2. decldepth++ to denote loop body.
+	// 3. typecheck body.
+	// 4. decldepth--.
+	typecheckrangeExpr(n)
+
+	// second half of dance, the first half being typecheckrangeExpr
+	n.SetTypecheck(1)
+	if n.Key != nil && n.Key.Typecheck() == 0 {
+		n.Key = AssignExpr(n.Key)
+	}
+	if n.Value != nil && n.Value.Typecheck() == 0 {
+		n.Value = AssignExpr(n.Value)
+	}
+
+	Stmts(n.Body)
+}
+
+// tcReturn typechecks an ORETURN node.
+func tcReturn(n *ir.ReturnStmt) ir.Node {
+	typecheckargs(n)
+	if ir.CurFunc == nil {
+		base.Errorf("return outside function")
+		n.SetType(nil)
+		return n
+	}
+
+	if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 {
+		return n
+	}
+	typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
+	return n
+}
+
+// tcSelect typechecks an OSELECT node.
+func tcSelect(sel *ir.SelectStmt) {
+	var def *ir.CommClause
+	lno := ir.SetPos(sel)
+	Stmts(sel.Init())
+	for _, ncase := range sel.Cases {
+		if ncase.Comm == nil {
+			// default
+			if def != nil {
+				base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
+			} else {
+				def = ncase
+			}
+		} else {
+			n := Stmt(ncase.Comm)
+			ncase.Comm = n
+			oselrecv2 := func(dst, recv ir.Node, def bool) {
+				n := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
+				n.Def = def
+				n.SetTypecheck(1)
+				ncase.Comm = n
+			}
+			switch n.Op() {
+			default:
+				pos := n.Pos()
+				if n.Op() == ir.ONAME {
+					// We don't have the right position for ONAME nodes (see #15459 and
+					// others). Using ncase.Pos for now as it will provide the correct
+					// line number (assuming the expression follows the "case" keyword
+					// on the same line). This matches the approach before 1.10.
+					pos = ncase.Pos()
+				}
+				base.ErrorfAt(pos, "select case must be receive, send or assign recv")
+
+			case ir.OAS:
+				// convert x = <-c into x, _ = <-c
+				// remove implicit conversions; the eventual assignment
+				// will reintroduce them.
+				n := n.(*ir.AssignStmt)
+				if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+					r := r.(*ir.ConvExpr)
+					if r.Implicit() {
+						n.Y = r.X
+					}
+				}
+				if n.Y.Op() != ir.ORECV {
+					base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
+					break
+				}
+				oselrecv2(n.X, n.Y, n.Def)
+
+			case ir.OAS2RECV:
+				n := n.(*ir.AssignListStmt)
+				if n.Rhs[0].Op() != ir.ORECV {
+					base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
+					break
+				}
+				n.SetOp(ir.OSELRECV2)
+
+			case ir.ORECV:
+				// convert <-c into _, _ = <-c
+				n := n.(*ir.UnaryExpr)
+				oselrecv2(ir.BlankNode, n, false)
+
+			case ir.OSEND:
+				break
+			}
+		}
+
+		Stmts(ncase.Body)
+	}
+
+	base.Pos = lno
+}
+
+// tcSend typechecks an OSEND node.
+func tcSend(n *ir.SendStmt) ir.Node {
+	n.Chan = Expr(n.Chan)
+	n.Value = Expr(n.Value)
+	n.Chan = DefaultLit(n.Chan, nil)
+	t := n.Chan.Type()
+	if t == nil {
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
+		return n
+	}
+
+	if !t.ChanDir().CanSend() {
+		base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
+		return n
+	}
+
+	n.Value = AssignConv(n.Value, t.Elem(), "send")
+	if n.Value.Type() == nil {
+		return n
+	}
+	return n
+}
+
+// tcSwitch typechecks a switch statement.
+func tcSwitch(n *ir.SwitchStmt) {
+	Stmts(n.Init())
+	if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
+		tcSwitchType(n)
+	} else {
+		tcSwitchExpr(n)
+	}
+}
+
+func tcSwitchExpr(n *ir.SwitchStmt) {
+	t := types.Types[types.TBOOL]
+	if n.Tag != nil {
+		n.Tag = Expr(n.Tag)
+		n.Tag = DefaultLit(n.Tag, nil)
+		t = n.Tag.Type()
+	}
+
+	var nilonly string
+	if t != nil {
+		switch {
+		case t.IsMap():
+			nilonly = "map"
+		case t.Kind() == types.TFUNC:
+			nilonly = "func"
+		case t.IsSlice():
+			nilonly = "slice"
+
+		case !types.IsComparable(t):
+			if t.IsStruct() {
+				base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type)
+			} else {
+				base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag)
+			}
+			t = nil
+		}
+	}
+
+	var defCase ir.Node
+	var cs constSet
+	for _, ncase := range n.Cases {
+		ls := ncase.List
+		if len(ls) == 0 { // default:
+			if defCase != nil {
+				base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
+			} else {
+				defCase = ncase
+			}
+		}
+
+		for i := range ls {
+			ir.SetPos(ncase)
+			ls[i] = Expr(ls[i])
+			ls[i] = DefaultLit(ls[i], t)
+			n1 := ls[i]
+			if t == nil || n1.Type() == nil {
+				continue
+			}
+
+			if nilonly != "" && !ir.IsNil(n1) {
+				base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag)
+			} else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
+				base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
+			} else {
+				op1, _ := assignop(n1.Type(), t)
+				op2, _ := assignop(t, n1.Type())
+				if op1 == ir.OXXX && op2 == ir.OXXX {
+					if n.Tag != nil {
+						base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
+					} else {
+						base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
+					}
+				}
+			}
+
+			// Don't check for duplicate bools. Although the spec allows it,
+			// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
+			// (2) it would disallow useful things like
+			//       case GOARCH == "arm" && GOARM == "5":
+			//       case GOARCH == "arm":
+			//     which would both evaluate to false for non-ARM compiles.
+			if !n1.Type().IsBoolean() {
+				cs.add(ncase.Pos(), n1, "case", "switch")
+			}
+		}
+
+		Stmts(ncase.Body)
+	}
+}
+
+func tcSwitchType(n *ir.SwitchStmt) {
+	guard := n.Tag.(*ir.TypeSwitchGuard)
+	guard.X = Expr(guard.X)
+	t := guard.X.Type()
+	if t != nil && !t.IsInterface() {
+		base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.X)
+		t = nil
+	}
+
+	// We don't actually declare the type switch's guarded
+	// declaration itself. So if there are no cases, we won't
+	// notice that it went unused.
+	if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 {
+		base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
+	}
+
+	var defCase, nilCase ir.Node
+	var ts typeSet
+	for _, ncase := range n.Cases {
+		ls := ncase.List
+		if len(ls) == 0 { // default:
+			if defCase != nil {
+				base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
+			} else {
+				defCase = ncase
+			}
+		}
+
+		for i := range ls {
+			ls[i] = typecheck(ls[i], ctxExpr|ctxType)
+			n1 := ls[i]
+			if t == nil || n1.Type() == nil {
+				continue
+			}
+
+			var missing, have *types.Field
+			var ptr int
+			if ir.IsNil(n1) { // case nil:
+				if nilCase != nil {
+					base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
+				} else {
+					nilCase = ncase
+				}
+				continue
+			}
+			if n1.Op() != ir.OTYPE {
+				base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
+				continue
+			}
+			if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
+				if have != nil && !have.Broke() {
+					base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+						" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.X, n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+				} else if ptr != 0 {
+					base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+						" (%v method has pointer receiver)", guard.X, n1.Type(), missing.Sym)
+				} else {
+					base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+						" (missing %v method)", guard.X, n1.Type(), missing.Sym)
+				}
+				continue
+			}
+
+			ts.add(ncase.Pos(), n1.Type())
+		}
+
+		if ncase.Var != nil {
+			// Assign the clause variable's type.
+			vt := t
+			if len(ls) == 1 {
+				if ls[0].Op() == ir.OTYPE {
+					vt = ls[0].Type()
+				} else if !ir.IsNil(ls[0]) {
+					// Invalid single-type case;
+					// mark variable as broken.
+					vt = nil
+				}
+			}
+
+			nvar := ncase.Var
+			nvar.SetType(vt)
+			if vt != nil {
+				nvar = AssignExpr(nvar).(*ir.Name)
+			} else {
+				// Clause variable is broken; prevent typechecking.
+				nvar.SetTypecheck(1)
+				nvar.SetWalkdef(1)
+			}
+			ncase.Var = nvar
+		}
+
+		Stmts(ncase.Body)
+	}
+}
+
+type typeSet struct {
+	m map[string][]typeSetEntry
+}
+
+type typeSetEntry struct {
+	pos src.XPos
+	typ *types.Type
+}
+
+func (s *typeSet) add(pos src.XPos, typ *types.Type) {
+	if s.m == nil {
+		s.m = make(map[string][]typeSetEntry)
+	}
+
+	// LongString does not uniquely identify types, so we need to
+	// disambiguate collisions with types.Identical.
+	// TODO(mdempsky): Add a method that *is* unique.
+	ls := typ.LongString()
+	prevs := s.m[ls]
+	for _, prev := range prevs {
+		if types.Identical(typ, prev.typ) {
+			base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
+			return
+		}
+	}
+	s.m[ls] = append(prevs, typeSetEntry{pos, typ})
+}
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
new file mode 100644
index 0000000..b88a9f2
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -0,0 +1,843 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+func AssignConv(n ir.Node, t *types.Type, context string) ir.Node {
+	return assignconvfn(n, t, func() string { return context })
+}
+
+// DotImportRefs maps idents introduced by importDot back to the
+// ir.PkgName they were dot-imported through.
+var DotImportRefs map[*ir.Ident]*ir.PkgName
+
+// LookupNum looks up the symbol starting with prefix and ending with
+// the decimal n. If prefix is too long, LookupNum panics.
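+// For example, LookupNum(".s", 3) returns the symbol ".s3" in the
+// local package.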
+func LookupNum(prefix string, n int) *types.Sym {
+	var buf [20]byte // plenty long enough for all current users
+	copy(buf[:], prefix)
+	b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
+	return types.LocalPkg.LookupBytes(b)
+}
+
+// NewFuncParams, given a funarg struct list, returns a list of fn args.
+func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field {
+	var args []*ir.Field
+	gen := 0
+	for _, t := range tl.Fields().Slice() {
+		s := t.Sym
+		if mustname && (s == nil || s.Name == "_") {
+			// invent a name so that we can refer to it in the trampoline
+			s = LookupNum(".anon", gen)
+			gen++
+		} else if s != nil && s.Pkg != types.LocalPkg {
+			// TODO(mdempsky): Preserve original position, name, and package.
+			s = Lookup(s.Name)
+		}
+		a := ir.NewField(base.Pos, s, nil, t.Type)
+		a.Pos = t.Pos
+		a.IsDDD = t.IsDDD()
+		args = append(args, a)
+	}
+
+	return args
+}
+
+// NewName returns a new ONAME Node associated with symbol s.
+func NewName(s *types.Sym) *ir.Name {
+	n := ir.NewNameAt(base.Pos, s)
+	n.Curfn = ir.CurFunc
+	return n
+}
+
+// NodAddr returns a node representing &n at base.Pos.
+func NodAddr(n ir.Node) *ir.AddrExpr {
+	return NodAddrAt(base.Pos, n)
+}
+
+// NodAddrAt returns a node representing &n at position pos.
+func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
+	n = markAddrOf(n)
+	return ir.NewAddrExpr(pos, n)
+}
+
+func markAddrOf(n ir.Node) ir.Node {
+	if IncrementalAddrtaken {
+		// We can only do incremental addrtaken computation when it is ok
+		// to typecheck the argument of the OADDR. That's only safe after the
+		// main typecheck has completed.
+		// The argument to OADDR needs to be typechecked because &x[i] takes
+		// the address of x if x is an array, but not if x is a slice.
+		// Note: OuterValue doesn't work correctly until n is typechecked.
+		n = typecheck(n, ctxExpr)
+		if x := ir.OuterValue(n); x.Op() == ir.ONAME {
+			x.Name().SetAddrtaken(true)
+		}
+	} else {
+		// Remember that we built an OADDR without computing the Addrtaken bit for
+		// its argument. We'll do that later in bulk using computeAddrtaken.
+		DirtyAddrtaken = true
+	}
+	return n
+}
+
+// If IncrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node
+// when it is built. The Addrtaken bits are set in bulk by computeAddrtaken.
+// If IncrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken
+// field of its argument is updated immediately.
+var IncrementalAddrtaken = false
+
+// If DirtyAddrtaken is true, then there are OADDR whose corresponding arguments
+// have not yet been marked as Addrtaken.
+var DirtyAddrtaken = false
+
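+// ComputeAddrtaken marks, in bulk, the Addrtaken bits for the arguments
+// of all OADDR nodes under top. Typical flow: while IncrementalAddrtaken
+// is false (e.g. during import), markAddrOf only sets DirtyAddrtaken;
+// a later call here then marks everything at once.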
+func ComputeAddrtaken(top []ir.Node) {
+	for _, n := range top {
+		ir.Visit(n, func(n ir.Node) {
+			if n.Op() == ir.OADDR {
+				if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME {
+					x.Name().SetAddrtaken(true)
+					if x.Name().IsClosureVar() {
+						// Mark the original variable as Addrtaken so that capturevars
+						// knows not to pass it by value.
+						x.Name().Defn.Name().SetAddrtaken(true)
+					}
+				}
+			}
+		})
+	}
+}
+
+func NodNil() ir.Node {
+	n := ir.NewNilExpr(base.Pos)
+	n.SetType(types.Types[types.TNIL])
+	return n
+}
+
+// AddImplicitDots finds the elided embedded fields in the selector
+// obj.field that give the shortest unique addressing and rewrites the
+// tree to spell out those field names.
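+// For example, given
+//	type Inner struct{ F int }
+//	type Outer struct{ Inner }
+// a selector o.F on an Outer value is rewritten to o.Inner.F.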
+func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
+	n.X = typecheck(n.X, ctxType|ctxExpr)
+	if n.X.Diag() {
+		n.SetDiag(true)
+	}
+	t := n.X.Type()
+	if t == nil {
+		return n
+	}
+
+	if n.X.Op() == ir.OTYPE {
+		return n
+	}
+
+	s := n.Sel
+	if s == nil {
+		return n
+	}
+
+	switch path, ambig := dotpath(s, t, nil, false); {
+	case path != nil:
+		// rebuild elided dots
+		for c := len(path) - 1; c >= 0; c-- {
+			dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym)
+			dot.SetImplicit(true)
+			dot.SetType(path[c].field.Type)
+			n.X = dot
+		}
+	case ambig:
+		base.Errorf("ambiguous selector %v", n)
+		n.X = nil
+	}
+
+	return n
+}
+
+func CalcMethods(t *types.Type) {
+	if t == nil || t.AllMethods().Len() != 0 {
+		return
+	}
+
+	// mark top-level method symbols
+	// so that expand1 doesn't consider them.
+	for _, f := range t.Methods().Slice() {
+		f.Sym.SetUniq(true)
+	}
+
+	// generate all reachable methods
+	slist = slist[:0]
+	expand1(t, true)
+
+	// check each method to be uniquely reachable
+	var ms []*types.Field
+	for i, sl := range slist {
+		slist[i].field = nil
+		sl.field.Sym.SetUniq(false)
+
+		var f *types.Field
+		path, _ := dotpath(sl.field.Sym, t, &f, false)
+		if path == nil {
+			continue
+		}
+
+		// dotpath may have dug out arbitrary fields, we only want methods.
+		if !f.IsMethod() {
+			continue
+		}
+
+		// add it to the base type method list
+		f = f.Copy()
+		f.Embedded = 1 // needs a trampoline
+		for _, d := range path {
+			if d.field.Type.IsPtr() {
+				f.Embedded = 2
+				break
+			}
+		}
+		ms = append(ms, f)
+	}
+
+	for _, f := range t.Methods().Slice() {
+		f.Sym.SetUniq(false)
+	}
+
+	ms = append(ms, t.Methods().Slice()...)
+	sort.Sort(types.MethodsByName(ms))
+	t.AllMethods().Set(ms)
+}
+
+// adddot1 returns the number of fields or methods named s at depth d in Type t.
+// If exactly one exists, it will be returned in *save (if save is not nil),
+// and dotlist will contain the path of embedded fields traversed to find it,
+// in reverse order. If none exist, more will indicate whether t contains any
+// embedded fields at depth d, so callers can decide whether to retry at
+// a greater depth.
+func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
+	if t.Recur() {
+		return
+	}
+	t.SetRecur(true)
+	defer t.SetRecur(false)
+
+	var u *types.Type
+	d--
+	if d < 0 {
+		// We've reached our target depth. If t has any fields/methods
+		// named s, then we're done. Otherwise, we still need to check
+		// below for embedded fields.
+		c = lookdot0(s, t, save, ignorecase)
+		if c != 0 {
+			return c, false
+		}
+	}
+
+	u = t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+	if !u.IsStruct() && !u.IsInterface() {
+		return c, false
+	}
+
+	for _, f := range u.Fields().Slice() {
+		if f.Embedded == 0 || f.Sym == nil {
+			continue
+		}
+		if d < 0 {
+			// Found an embedded field at target depth.
+			return c, true
+		}
+		a, more1 := adddot1(s, f.Type, d, save, ignorecase)
+		if a != 0 && c == 0 {
+			dotlist[d].field = f
+		}
+		c += a
+		if more1 {
+			more = true
+		}
+	}
+
+	return c, more
+}
+
+// dotlist is used by adddot1 to record the path of embedded fields
+// used to access a target field or method.
+// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
+var dotlist = make([]dlist, 10)
+
+// Convert node n for assignment to type t.
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
+	if n == nil || n.Type() == nil || n.Type().Broke() {
+		return n
+	}
+
+	if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
+		base.Errorf("use of untyped nil")
+	}
+
+	n = convlit1(n, t, false, context)
+	if n.Type() == nil {
+		return n
+	}
+	if t.Kind() == types.TBLANK {
+		return n
+	}
+
+	// Convert ideal bool from comparison to plain bool
+	// if the next step is non-bool (like interface{}).
+	if n.Type() == types.UntypedBool && !t.IsBoolean() {
+		if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
+			r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+			r.SetType(types.Types[types.TBOOL])
+			r.SetTypecheck(1)
+			r.SetImplicit(true)
+			n = r
+		}
+	}
+
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+
+	op, why := assignop(n.Type(), t)
+	if op == ir.OXXX {
+		base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
+		op = ir.OCONV
+	}
+
+	r := ir.NewConvExpr(base.Pos, op, t, n)
+	r.SetTypecheck(1)
+	r.SetImplicit(true)
+	return r
+}
+
+// Is type src assignment compatible with type dst?
+// If so, return op code to use in conversion.
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+func assignop(src, dst *types.Type) (ir.Op, string) {
+	if src == dst {
+		return ir.OCONVNOP, ""
+	}
+	if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+		return ir.OXXX, ""
+	}
+
+	// 1. src type is identical to dst.
+	if types.Identical(src, dst) {
+		return ir.OCONVNOP, ""
+	}
+
+	// 2. src and dst have identical underlying types
+	// and either src or dst is not a named type or
+	// both are empty interface types.
+	// For assignable but different non-empty interface types,
+	// we want to recompute the itab. Recomputing the itab ensures
+	// that itabs are unique (thus an interface with a compile-time
+	// type I has an itab with interface type I).
+	if types.Identical(src.Underlying(), dst.Underlying()) {
+		if src.IsEmptyInterface() {
+			// Conversion between two empty interfaces
+			// requires no code.
+			return ir.OCONVNOP, ""
+		}
+		if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
+			// Conversion between two types, at least one unnamed,
+			// needs no conversion. The exception is nonempty interfaces
+			// which need to have their itab updated.
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 3. dst is an interface type and src implements dst.
+	if dst.IsInterface() && src.Kind() != types.TNIL {
+		var missing, have *types.Field
+		var ptr int
+		if implements(src, dst, &missing, &have, &ptr) {
+			// Call NeedITab/ITabAddr so that (src, dst)
+			// gets added to itabs early, which allows
+			// us to de-virtualize calls through this
+			// type/interface pair later. See CompileITabs in reflect.go
+			if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
+				NeedITab(src, dst)
+			}
+
+			return ir.OCONVIFACE, ""
+		}
+
+		// we'll have complained about this method anyway, suppress spurious messages.
+		if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
+			return ir.OCONVIFACE, ""
+		}
+
+		var why string
+		if isptrto(src, types.TINTER) {
+			why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+		} else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+		} else if have != nil && have.Sym == missing.Sym {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
+				"\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+		} else if ptr != 0 {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+		} else if have != nil {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
+				"\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+		} else {
+			why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
+		}
+
+		return ir.OXXX, why
+	}
+
+	if isptrto(dst, types.TINTER) {
+		why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+		return ir.OXXX, why
+	}
+
+	if src.IsInterface() && dst.Kind() != types.TBLANK {
+		var missing, have *types.Field
+		var ptr int
+		var why string
+		if implements(dst, src, &missing, &have, &ptr) {
+			why = ": need type assertion"
+		}
+		return ir.OXXX, why
+	}
+
+	// 4. src is a bidirectional channel value, dst is a channel type,
+	// src and dst have identical element types, and
+	// either src or dst is not a named type.
+	if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
+		if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 5. src is the predeclared identifier nil and dst is a nillable type.
+	if src.Kind() == types.TNIL {
+		switch dst.Kind() {
+		case types.TPTR,
+			types.TFUNC,
+			types.TMAP,
+			types.TCHAN,
+			types.TINTER,
+			types.TSLICE:
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 6. rule about untyped constants - already converted by DefaultLit.
+
+	// 7. Any typed value can be assigned to the blank identifier.
+	if dst.Kind() == types.TBLANK {
+		return ir.OCONVNOP, ""
+	}
+
+	return ir.OXXX, ""
+}
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+// srcConstant indicates whether the value of type src is a constant.
+func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
+	if src == dst {
+		return ir.OCONVNOP, ""
+	}
+	if src == nil || dst == nil {
+		return ir.OXXX, ""
+	}
+
+	// Conversions from regular types to go:notinheap types are not allowed
+	// (unless it's unsafe.Pointer). These are runtime-specific
+	// rules.
+	// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
+	if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
+		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+		return ir.OXXX, why
+	}
+	// (b) Disallow string to []T where T is go:notinheap.
+	if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
+		why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+		return ir.OXXX, why
+	}
+
+	// 1. src can be assigned to dst.
+	op, why := assignop(src, dst)
+	if op != ir.OXXX {
+		return op, why
+	}
+
+	// The rules for interfaces are no different in conversions
+	// than assignments. If interfaces are involved, stop now
+	// with the good message from assignop.
+	// Otherwise clear the error.
+	if src.IsInterface() || dst.IsInterface() {
+		return ir.OXXX, why
+	}
+
+	// 2. Ignoring struct tags, src and dst have identical underlying types.
+	if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
+		return ir.OCONVNOP, ""
+	}
+
+	// 3. src and dst are unnamed pointer types and, ignoring struct tags,
+	// their base types have identical underlying types.
+	if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
+		if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
+			return ir.OCONVNOP, ""
+		}
+	}
+
+	// 4. src and dst are both integer or floating point types.
+	if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
+		if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+			return ir.OCONVNOP, ""
+		}
+		return ir.OCONV, ""
+	}
+
+	// 5. src and dst are both complex types.
+	if src.IsComplex() && dst.IsComplex() {
+		if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+			return ir.OCONVNOP, ""
+		}
+		return ir.OCONV, ""
+	}
+
+	// Special case for constant conversions: any numeric
+	// conversion is potentially okay. We'll validate further
+	// within evconst. See #38117.
+	if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
+		return ir.OCONV, ""
+	}
+
+	// 6. src is an integer or has type []byte or []rune
+	// and dst is a string type.
+	if src.IsInteger() && dst.IsString() {
+		return ir.ORUNESTR, ""
+	}
+
+	if src.IsSlice() && dst.IsString() {
+		if src.Elem().Kind() == types.ByteType.Kind() {
+			return ir.OBYTES2STR, ""
+		}
+		if src.Elem().Kind() == types.RuneType.Kind() {
+			return ir.ORUNES2STR, ""
+		}
+	}
+
+	// 7. src is a string and dst is []byte or []rune.
+	// String to slice.
+	if src.IsString() && dst.IsSlice() {
+		if dst.Elem().Kind() == types.ByteType.Kind() {
+			return ir.OSTR2BYTES, ""
+		}
+		if dst.Elem().Kind() == types.RuneType.Kind() {
+			return ir.OSTR2RUNES, ""
+		}
+	}
+
+	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+	if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
+		return ir.OCONVNOP, ""
+	}
+
+	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+	if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
+		return ir.OCONVNOP, ""
+	}
+
+	// src is a map and dst is a pointer to the corresponding hmap.
+	// This rule is needed for the implementation detail that
+	// Go's gc toolchain implements maps as a pointer to an hmap struct.
+	if src.Kind() == types.TMAP && dst.IsPtr() &&
+		src.MapType().Hmap == dst.Elem() {
+		return ir.OCONVNOP, ""
+	}
+
+	return ir.OXXX, ""
+}
+
+// Code to resolve elided DOTs in embedded types.
+
+// A dlist stores a pointer to a TFIELD Type embedded within
+// a TSTRUCT or TINTER Type.
+type dlist struct {
+	field *types.Field
+}
+
+// dotpath computes the unique shortest explicit selector path to fully qualify
+// a selection expression x.f, where x is of type t and f is the symbol s.
+// If no such path exists, dotpath returns nil.
+// If there are multiple shortest paths to the same depth, ambig is true.
+func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []dlist, ambig bool) {
+	// The embedding of types within structs imposes a tree structure onto
+	// types: structs parent the types they embed, and types parent their
+	// fields or methods. Our goal here is to find the shortest path to
+	// a field or method named s in the subtree rooted at t. To accomplish
+	// that, we iteratively perform depth-first searches of increasing depth
+	// until we either find the named field/method or exhaust the tree.
+	for d := 0; ; d++ {
+		if d > len(dotlist) {
+			dotlist = append(dotlist, dlist{})
+		}
+		if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
+			return dotlist[:d], false
+		} else if c > 1 {
+			return nil, true
+		} else if !more {
+			return nil, false
+		}
+	}
+}
+
+func expand0(t *types.Type) {
+	u := t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+
+	if u.IsInterface() {
+		for _, f := range u.Fields().Slice() {
+			if f.Sym.Uniq() {
+				continue
+			}
+			f.Sym.SetUniq(true)
+			slist = append(slist, symlink{field: f})
+		}
+
+		return
+	}
+
+	u = types.ReceiverBaseType(t)
+	if u != nil {
+		for _, f := range u.Methods().Slice() {
+			if f.Sym.Uniq() {
+				continue
+			}
+			f.Sym.SetUniq(true)
+			slist = append(slist, symlink{field: f})
+		}
+	}
+}
+
+func expand1(t *types.Type, top bool) {
+	if t.Recur() {
+		return
+	}
+	t.SetRecur(true)
+
+	if !top {
+		expand0(t)
+	}
+
+	u := t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+
+	if u.IsStruct() || u.IsInterface() {
+		for _, f := range u.Fields().Slice() {
+			if f.Embedded == 0 {
+				continue
+			}
+			if f.Sym == nil {
+				continue
+			}
+			expand1(f.Type, false)
+		}
+	}
+
+	t.SetRecur(false)
+}
+
+func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
+	if t == nil {
+		return nil, false
+	}
+
+	path, ambig := dotpath(s, t, &m, ignorecase)
+	if path == nil {
+		if ambig {
+			base.Errorf("%v.%v is ambiguous", t, s)
+		}
+		return nil, false
+	}
+
+	for _, d := range path {
+		if d.field.Type.IsPtr() {
+			followptr = true
+			break
+		}
+	}
+
+	if !m.IsMethod() {
+		base.Errorf("%v.%v is a field, not a method", t, s)
+		return nil, followptr
+	}
+
+	return m, followptr
+}
+
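+// implements reports whether t implements the interface iface. On
+// failure it sets *m to the missing method, *samename to a method with
+// the same name but wrong type (if any), and *ptr to 1 when the method
+// exists but has a pointer receiver while t is not a pointer.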
+func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
+	t0 := t
+	if t == nil {
+		return false
+	}
+
+	if t.IsInterface() {
+		i := 0
+		tms := t.Fields().Slice()
+		for _, im := range iface.Fields().Slice() {
+			for i < len(tms) && tms[i].Sym != im.Sym {
+				i++
+			}
+			if i == len(tms) {
+				*m = im
+				*samename = nil
+				*ptr = 0
+				return false
+			}
+			tm := tms[i]
+			if !types.Identical(tm.Type, im.Type) {
+				*m = im
+				*samename = tm
+				*ptr = 0
+				return false
+			}
+		}
+
+		return true
+	}
+
+	t = types.ReceiverBaseType(t)
+	var tms []*types.Field
+	if t != nil {
+		CalcMethods(t)
+		tms = t.AllMethods().Slice()
+	}
+	i := 0
+	for _, im := range iface.Fields().Slice() {
+		if im.Broke() {
+			continue
+		}
+		for i < len(tms) && tms[i].Sym != im.Sym {
+			i++
+		}
+		if i == len(tms) {
+			*m = im
+			*samename, _ = ifacelookdot(im.Sym, t, true)
+			*ptr = 0
+			return false
+		}
+		tm := tms[i]
+		if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
+			*m = im
+			*samename = tm
+			*ptr = 0
+			return false
+		}
+		followptr := tm.Embedded == 2
+
+		// if pointer receiver in method,
+		// the method does not exist for value types.
+		rcvr := tm.Type.Recv().Type
+		if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) {
+			if false && base.Flag.LowerR != 0 {
+				base.Errorf("interface pointer mismatch")
+			}
+
+			*m = im
+			*samename = nil
+			*ptr = 1
+			return false
+		}
+	}
+
+	return true
+}
+
+func isptrto(t *types.Type, et types.Kind) bool {
+	if t == nil {
+		return false
+	}
+	if !t.IsPtr() {
+		return false
+	}
+	t = t.Elem()
+	if t == nil {
+		return false
+	}
+	if t.Kind() != et {
+		return false
+	}
+	return true
+}
+
+// lookdot0 returns the number of fields or methods named s associated
+// with Type t. If exactly one exists, it will be returned in *save
+// (if save is not nil).
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
+	u := t
+	if u.IsPtr() {
+		u = u.Elem()
+	}
+
+	c := 0
+	if u.IsStruct() || u.IsInterface() {
+		for _, f := range u.Fields().Slice() {
+			if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	u = t
+	if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
+		// If t is a defined pointer type, then x.m is shorthand for (*x).m.
+		u = t.Elem()
+	}
+	u = types.ReceiverBaseType(u)
+	if u != nil {
+		for _, f := range u.Methods().Slice() {
+			if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	return c
+}
+
+var slist []symlink
+
+// Code to help generate trampoline functions for methods on embedded
+// types. These are approximately the same as the corresponding AddImplicitDots
+// routines except that they expect to be called with unique tasks and
+// they return the actual methods.
+
+type symlink struct {
+	field *types.Field
+}
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
new file mode 100644
index 0000000..202a932
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+func LookupRuntime(name string) *ir.Name {
+	s := ir.Pkgs.Runtime.Lookup(name)
+	if s == nil || s.Def == nil {
+		base.Fatalf("LookupRuntime: can't find runtime.%s", name)
+	}
+	return ir.AsNode(s.Def).(*ir.Name)
+}
+
+// SubstArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+// The result of SubstArgTypes MUST be assigned back to old, e.g.
+// 	n.Left = SubstArgTypes(n.Left, t1, t2)
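+// For example (illustrative), if old has type func(any, any) int,
+// SubstArgTypes(old, t1, t2) returns a name whose type is
+// func(t1, t2) int.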
+func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+	for _, t := range types_ {
+		types.CalcSize(t)
+	}
+	n := ir.NewNameAt(old.Pos(), old.Sym())
+	n.Class = old.Class
+	n.SetType(types.SubstAny(old.Type(), &types_))
+	if len(types_) > 0 {
+		base.Fatalf("SubstArgTypes: too many argument types")
+	}
+	return n
+}
+
+// AutoLabel generates a new Name node for use with
+// an automatically generated label.
+// prefix is a short mnemonic (e.g. ".s" for switch)
+// to help with debugging.
+// It should begin with "." to avoid conflicts with
+// user labels.
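+// For example, successive calls to AutoLabel(".s") within one function
+// return the symbols ".s0", ".s1", ".s2", and so on.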
+func AutoLabel(prefix string) *types.Sym {
+	if prefix[0] != '.' {
+		base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
+	}
+	fn := ir.CurFunc
+	if ir.CurFunc == nil {
+		base.Fatalf("autolabel outside function")
+	}
+	n := fn.Label
+	fn.Label++
+	return LookupNum(prefix, int(n))
+}
+
+func Lookup(name string) *types.Sym {
+	return types.LocalPkg.Lookup(name)
+}
+
+// InitRuntime loads the definitions for the low-level runtime functions,
+// so that the compiler can generate calls to them,
+// but does not make them visible to user code.
+func InitRuntime() {
+	base.Timer.Start("fe", "loadsys")
+	types.Block = 1
+
+	typs := runtimeTypes()
+	for _, d := range &runtimeDecls {
+		sym := ir.Pkgs.Runtime.Lookup(d.name)
+		typ := typs[d.typ]
+		switch d.tag {
+		case funcTag:
+			importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+		case varTag:
+			importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+		default:
+			base.Fatalf("unhandled declaration tag %v", d.tag)
+		}
+	}
+}
+
+// LookupRuntimeFunc looks up a Go function name in package runtime. This function
+// must follow the internal calling convention.
+func LookupRuntimeFunc(name string) *obj.LSym {
+	return LookupRuntimeABI(name, obj.ABIInternal)
+}
+
+// LookupRuntimeVar looks up a variable (or assembly function) name in package
+// runtime. If this is a function, it may have a special calling
+// convention.
+func LookupRuntimeVar(name string) *obj.LSym {
+	return LookupRuntimeABI(name, obj.ABI0)
+}
+
+// LookupRuntimeABI looks up a name in package runtime using the given ABI.
+func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym {
+	return base.PkgLinksym("runtime", name, abi)
+}
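+
+// For example (editor's sketch): the SSA backend resolves its runtime
+// entry points through these helpers, along the lines of
+//
+//	ir.Syms.Growslice = LookupRuntimeFunc("growslice") // ABIInternal
+//	ir.Syms.Zerobase = LookupRuntimeVar("zerobase")    // ABI0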
diff --git a/src/cmd/compile/internal/typecheck/target.go b/src/cmd/compile/internal/typecheck/target.go
new file mode 100644
index 0000000..018614d
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/target.go
@@ -0,0 +1,12 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package typecheck
+
+import "cmd/compile/internal/ir"
+
+// Target is the package being compiled.
+var Target *ir.Package
diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go
new file mode 100644
index 0000000..6fdafef
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/type.go
@@ -0,0 +1,188 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+)
+
+// tcArrayType typechecks an OTARRAY node.
+func tcArrayType(n *ir.ArrayType) ir.Node {
+	n.Elem = typecheckNtype(n.Elem)
+	if n.Elem.Type() == nil {
+		return n
+	}
+	if n.Len == nil { // [...]T
+		if !n.Diag() {
+			n.SetDiag(true)
+			base.Errorf("use of [...] array outside of array literal")
+		}
+		return n
+	}
+	n.Len = indexlit(Expr(n.Len))
+	size := n.Len
+	if ir.ConstType(size) != constant.Int {
+		switch {
+		case size.Type() == nil:
+			// Error already reported elsewhere.
+		case size.Type().IsInteger() && size.Op() != ir.OLITERAL:
+			base.Errorf("non-constant array bound %v", size)
+		default:
+			base.Errorf("invalid array bound %v", size)
+		}
+		return n
+	}
+
+	v := size.Val()
+	if ir.ConstOverflow(v, types.Types[types.TINT]) {
+		base.Errorf("array bound is too large")
+		return n
+	}
+
+	if constant.Sign(v) < 0 {
+		base.Errorf("array bound must be non-negative")
+		return n
+	}
+
+	bound, _ := constant.Int64Val(v)
+	t := types.NewArray(n.Elem.Type(), bound)
+	n.SetOTYPE(t)
+	types.CheckSize(t)
+	return n
+}
+
+// tcChanType typechecks an OTCHAN node.
+func tcChanType(n *ir.ChanType) ir.Node {
+	n.Elem = typecheckNtype(n.Elem)
+	l := n.Elem
+	if l.Type() == nil {
+		return n
+	}
+	if l.Type().NotInHeap() {
+		base.Errorf("chan of incomplete (or unallocatable) type not allowed")
+	}
+	n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
+	return n
+}
+
+// tcFuncType typechecks an OTFUNC node.
+func tcFuncType(n *ir.FuncType) ir.Node {
+	misc := func(f *types.Field, nf *ir.Field) {
+		f.SetIsDDD(nf.IsDDD)
+		if nf.Decl != nil {
+			nf.Decl.SetType(f.Type)
+			f.Nname = nf.Decl
+		}
+	}
+
+	lno := base.Pos
+
+	var recv *types.Field
+	if n.Recv != nil {
+		recv = tcField(n.Recv, misc)
+	}
+
+	t := types.NewSignature(types.LocalPkg, recv, tcFields(n.Params, misc), tcFields(n.Results, misc))
+	checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
+
+	base.Pos = lno
+
+	n.SetOTYPE(t)
+	return n
+}
+
+// tcInterfaceType typechecks an OTINTER node.
+func tcInterfaceType(n *ir.InterfaceType) ir.Node {
+	if len(n.Methods) == 0 {
+		n.SetOTYPE(types.Types[types.TINTER])
+		return n
+	}
+
+	lno := base.Pos
+	methods := tcFields(n.Methods, nil)
+	base.Pos = lno
+
+	n.SetOTYPE(types.NewInterface(types.LocalPkg, methods))
+	return n
+}
+
+// tcMapType typechecks an OTMAP node.
+func tcMapType(n *ir.MapType) ir.Node {
+	n.Key = typecheckNtype(n.Key)
+	n.Elem = typecheckNtype(n.Elem)
+	l := n.Key
+	r := n.Elem
+	if l.Type() == nil || r.Type() == nil {
+		return n
+	}
+	if l.Type().NotInHeap() {
+		base.Errorf("incomplete (or unallocatable) map key not allowed")
+	}
+	if r.Type().NotInHeap() {
+		base.Errorf("incomplete (or unallocatable) map value not allowed")
+	}
+	n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
+	mapqueue = append(mapqueue, n) // check map keys when all types are settled
+	return n
+}
+
+// tcSliceType typechecks an OTSLICE node.
+func tcSliceType(n *ir.SliceType) ir.Node {
+	n.Elem = typecheckNtype(n.Elem)
+	if n.Elem.Type() == nil {
+		return n
+	}
+	t := types.NewSlice(n.Elem.Type())
+	n.SetOTYPE(t)
+	types.CheckSize(t)
+	return n
+}
+
+// tcStructType typechecks an OTSTRUCT node.
+func tcStructType(n *ir.StructType) ir.Node {
+	lno := base.Pos
+
+	fields := tcFields(n.Fields, func(f *types.Field, nf *ir.Field) {
+		if nf.Embedded {
+			checkembeddedtype(f.Type)
+			f.Embedded = 1
+		}
+		f.Note = nf.Note
+	})
+	checkdupfields("field", fields)
+
+	base.Pos = lno
+	n.SetOTYPE(types.NewStruct(types.LocalPkg, fields))
+	return n
+}
+
+// tcField typechecks a generic Field.
+// misc can be provided to handle specialized typechecking.
+func tcField(n *ir.Field, misc func(*types.Field, *ir.Field)) *types.Field {
+	base.Pos = n.Pos
+	if n.Ntype != nil {
+		n.Type = typecheckNtype(n.Ntype).Type()
+		n.Ntype = nil
+	}
+	f := types.NewField(n.Pos, n.Sym, n.Type)
+	if misc != nil {
+		misc(f, n)
+	}
+	return f
+}
+
+// tcFields typechecks a slice of generic Fields.
+// misc can be provided to handle specialized typechecking.
+func tcFields(l []*ir.Field, misc func(*types.Field, *ir.Field)) []*types.Field {
+	fields := make([]*types.Field, len(l))
+	for i, n := range l {
+		fields[i] = tcField(n, misc)
+	}
+	return fields
+}
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
new file mode 100644
index 0000000..cb43457
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -0,0 +1,2164 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"fmt"
+	"go/constant"
+	"go/token"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+)
+
+// Function collecting autotmps generated during typechecking,
+// to be included in the package-level init function.
+var InitTodoFunc = ir.NewFunc(base.Pos)
+
+var inimport bool // set during import
+
+var TypecheckAllowed bool
+
+var (
+	NeedITab        = func(t, itype *types.Type) {}
+	NeedRuntimeType = func(*types.Type) {}
+)
+
+func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
+func Expr(n ir.Node) ir.Node       { return typecheck(n, ctxExpr) }
+func Stmt(n ir.Node) ir.Node       { return typecheck(n, ctxStmt) }
+
+func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
+func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
+
+func Call(call *ir.CallExpr) {
+	t := call.X.Type()
+	if t == nil {
+		panic("misuse of Call")
+	}
+	ctx := ctxStmt
+	if t.NumResults() > 0 {
+		ctx = ctxExpr | ctxMultiOK
+	}
+	if typecheck(call, ctx) != call {
+		panic("bad typecheck")
+	}
+}
+
+func Callee(n ir.Node) ir.Node {
+	return typecheck(n, ctxExpr|ctxCallee)
+}
+
+func FuncBody(n *ir.Func) {
+	ir.CurFunc = n
+	errorsBefore := base.Errors()
+	Stmts(n.Body)
+	CheckUnused(n)
+	CheckReturn(n)
+	if base.Errors() > errorsBefore {
+		n.Body = nil // type errors; do not compile
+	}
+}
+
+var importlist []*ir.Func
+
+func AllImportedBodies() {
+	for _, n := range importlist {
+		if n.Inl != nil {
+			ImportedBody(n)
+		}
+	}
+}
+
+var traceIndent []byte
+
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
+	indent := traceIndent
+
+	// guard against nil
+	var pos, op string
+	var tc uint8
+	if n != nil {
+		pos = base.FmtPos(n.Pos())
+		op = n.Op().String()
+		tc = n.Typecheck()
+	}
+
+	types.SkipSizeForTracing = true
+	defer func() { types.SkipSizeForTracing = false }()
+	fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
+	traceIndent = append(traceIndent, ". "...)
+
+	return func(np *ir.Node) {
+		traceIndent = traceIndent[:len(traceIndent)-2]
+
+		// if we have a result, use that
+		if np != nil {
+			n = *np
+		}
+
+		// guard against nil
+		// use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
+		var tc uint8
+		var typ *types.Type
+		if n != nil {
+			pos = base.FmtPos(n.Pos())
+			op = n.Op().String()
+			tc = n.Typecheck()
+			typ = n.Type()
+		}
+
+		types.SkipSizeForTracing = true
+		defer func() { types.SkipSizeForTracing = false }()
+		fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ)
+	}
+}
+
+const (
+	ctxStmt    = 1 << iota // evaluated at statement level
+	ctxExpr                // evaluated in value context
+	ctxType                // evaluated in type context
+	ctxCallee              // call-only expressions are ok
+	ctxMultiOK             // multivalue function returns are ok
+	ctxAssign              // assigning to expression
+)
+
+// Typechecking covers the whole tree of an expression: it calculates
+// expression types, evaluates compile-time constants, marks variables
+// that escape the local frame, and rewrites n.Op to be more specific
+// in some cases.
+
+var typecheckdefstack []*ir.Name
+
+// Resolve ONONAME to definition, if any.
+func Resolve(n ir.Node) (res ir.Node) {
+	if n == nil || n.Op() != ir.ONONAME {
+		return n
+	}
+
+	// only trace if there's work to do
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("resolve", n)(&res)
+	}
+
+	if sym := n.Sym(); sym.Pkg != types.LocalPkg {
+		// We might have an ir.Ident from oldname or importDot.
+		if id, ok := n.(*ir.Ident); ok {
+			if pkgName := DotImportRefs[id]; pkgName != nil {
+				pkgName.Used = true
+			}
+		}
+
+		return expandDecl(n)
+	}
+
+	r := ir.AsNode(n.Sym().Def)
+	if r == nil {
+		return n
+	}
+
+	if r.Op() == ir.OIOTA {
+		if x := getIotaValue(); x >= 0 {
+			return ir.NewInt(x)
+		}
+		return n
+	}
+
+	return r
+}
+
+func typecheckslice(l []ir.Node, top int) {
+	for i := range l {
+		l[i] = typecheck(l[i], top)
+	}
+}
+
+var _typekind = []string{
+	types.TINT:        "int",
+	types.TUINT:       "uint",
+	types.TINT8:       "int8",
+	types.TUINT8:      "uint8",
+	types.TINT16:      "int16",
+	types.TUINT16:     "uint16",
+	types.TINT32:      "int32",
+	types.TUINT32:     "uint32",
+	types.TINT64:      "int64",
+	types.TUINT64:     "uint64",
+	types.TUINTPTR:    "uintptr",
+	types.TCOMPLEX64:  "complex64",
+	types.TCOMPLEX128: "complex128",
+	types.TFLOAT32:    "float32",
+	types.TFLOAT64:    "float64",
+	types.TBOOL:       "bool",
+	types.TSTRING:     "string",
+	types.TPTR:        "pointer",
+	types.TUNSAFEPTR:  "unsafe.Pointer",
+	types.TSTRUCT:     "struct",
+	types.TINTER:      "interface",
+	types.TCHAN:       "chan",
+	types.TMAP:        "map",
+	types.TARRAY:      "array",
+	types.TSLICE:      "slice",
+	types.TFUNC:       "func",
+	types.TNIL:        "nil",
+	types.TIDEAL:      "untyped number",
+}
+
+func typekind(t *types.Type) string {
+	if t.IsUntyped() {
+		return fmt.Sprintf("%v", t)
+	}
+	et := t.Kind()
+	if int(et) < len(_typekind) {
+		s := _typekind[et]
+		if s != "" {
+			return s
+		}
+	}
+	return fmt.Sprintf("etype=%d", et)
+}
+
+func cycleFor(start ir.Node) []ir.Node {
+	// Find the start node in typecheck_tcstack.
+	// We know that it must exist because each time we mark
+	// a node with n.SetTypecheck(2) we push it on the stack,
+	// and each time we mark a node with n.SetTypecheck(1) we
+	// pop it from the stack. We hit a cycle when we encounter
+	// a node marked 2, in which case it must be on the stack.
+	i := len(typecheck_tcstack) - 1
+	for i > 0 && typecheck_tcstack[i] != start {
+		i--
+	}
+
+	// collect all nodes with same Op
+	var cycle []ir.Node
+	for _, n := range typecheck_tcstack[i:] {
+		if n.Op() == start.Op() {
+			cycle = append(cycle, n)
+		}
+	}
+
+	return cycle
+}
+
+func cycleTrace(cycle []ir.Node) string {
+	var s string
+	for i, n := range cycle {
+		s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
+	}
+	return s
+}
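+
+// For example (editor's note), a declaration cycle consisting solely of
+// aliases, such as
+//
+//	type A = B
+//	type B = A
+//
+// is reported via cycleFor/cycleTrace as an invalid recursive type alias,
+// whereas "type T *T" is a valid recursive (non-alias) declaration.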
+
+var typecheck_tcstack []ir.Node
+
+func Func(fn *ir.Func) {
+	new := Stmt(fn)
+	if new != fn {
+		base.Fatalf("typecheck changed func")
+	}
+}
+
+func typecheckNtype(n ir.Ntype) ir.Ntype {
+	return typecheck(n, ctxType).(ir.Ntype)
+}
+
+// typecheck type checks node n.
+// The result of typecheck MUST be assigned back to n, e.g.
+// 	n.Left = typecheck(n.Left, top)
+func typecheck(n ir.Node, top int) (res ir.Node) {
+	// cannot type check until all the source has been parsed
+	if !TypecheckAllowed {
+		base.Fatalf("early typecheck")
+	}
+
+	if n == nil {
+		return nil
+	}
+
+	// only trace if there's work to do
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheck", n)(&res)
+	}
+
+	lno := ir.SetPos(n)
+
+	// Skip over parens.
+	for n.Op() == ir.OPAREN {
+		n = n.(*ir.ParenExpr).X
+	}
+
+	// Resolve definition of name and value of iota lazily.
+	n = Resolve(n)
+
+	// Skip typecheck if already done.
+	// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+	if n.Typecheck() == 1 {
+		switch n.Op() {
+		case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
+			break
+
+		default:
+			base.Pos = lno
+			return n
+		}
+	}
+
+	if n.Typecheck() == 2 {
+		// Typechecking loop. Try printing a meaningful message;
+		// otherwise fall back to a stack trace of the typechecking.
+		switch n.Op() {
+		// We can already diagnose variables used as types.
+		case ir.ONAME:
+			n := n.(*ir.Name)
+			if top&(ctxExpr|ctxType) == ctxType {
+				base.Errorf("%v is not a type", n)
+			}
+
+		case ir.OTYPE:
+			// Only report a type cycle if we are expecting a type.
+			// Otherwise let other code report an error.
+			if top&ctxType == ctxType {
+				// A cycle containing only alias types is an error
+				// since it would expand indefinitely when aliases
+				// are substituted.
+				cycle := cycleFor(n)
+				for _, n1 := range cycle {
+					if n1.Name() != nil && !n1.Name().Alias() {
+						// Cycle is ok. But if n is an alias type and doesn't
+						// have a type yet, we have a recursive type declaration
+						// with aliases that we can't handle properly yet.
+						// Report an error rather than crashing later.
+						if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
+							base.Pos = n.Pos()
+							base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
+						}
+						base.Pos = lno
+						return n
+					}
+				}
+				base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+			}
+
+		case ir.OLITERAL:
+			if top&(ctxExpr|ctxType) == ctxType {
+				base.Errorf("%v is not a type", n)
+				break
+			}
+			base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
+		}
+
+		if base.Errors() == 0 {
+			var trace string
+			for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
+				x := typecheck_tcstack[i]
+				trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
+			}
+			base.Errorf("typechecking loop involving %v%s", n, trace)
+		}
+
+		base.Pos = lno
+		return n
+	}
+
+	typecheck_tcstack = append(typecheck_tcstack, n)
+
+	n.SetTypecheck(2)
+	n = typecheck1(n, top)
+	n.SetTypecheck(1)
+
+	last := len(typecheck_tcstack) - 1
+	typecheck_tcstack[last] = nil
+	typecheck_tcstack = typecheck_tcstack[:last]
+
+	_, isExpr := n.(ir.Expr)
+	_, isStmt := n.(ir.Stmt)
+	isMulti := false
+	switch n.Op() {
+	case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		if t := n.X.Type(); t != nil && t.Kind() == types.TFUNC {
+			nr := t.NumResults()
+			isMulti = nr > 1
+			if nr == 0 {
+				isExpr = false
+			}
+		}
+	case ir.OAPPEND:
+		// Must be used (and not BinaryExpr/UnaryExpr).
+		isStmt = false
+	case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE:
+		// Must not be used.
+		isExpr = false
+		isStmt = true
+	case ir.OCOPY, ir.ORECOVER, ir.ORECV:
+		// Can be used or not.
+		isStmt = true
+	}
+
+	t := n.Type()
+	if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
+		switch t.Kind() {
+		case types.TFUNC, // might have TANY; wait until it's called
+			types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
+			break
+
+		default:
+			types.CheckSize(t)
+		}
+	}
+	if t != nil {
+		n = EvalConst(n)
+		t = n.Type()
+	}
+
+	// TODO(rsc): Lots of the complexity here is because typecheck can
+	// see OTYPE, ONAME, and OLITERAL nodes multiple times.
+	// Once we make the IR a proper tree, we should be able to simplify
+	// this code a bit, especially the final case.
+	switch {
+	case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
+		if !n.Diag() {
+			base.Errorf("%v used as value", n)
+			n.SetDiag(true)
+		}
+		if t != nil {
+			n.SetType(nil)
+		}
+
+	case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
+		if !n.Type().Broke() {
+			base.Errorf("type %v is not an expression", n.Type())
+		}
+		n.SetType(nil)
+
+	case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
+		if !n.Diag() {
+			base.Errorf("%v evaluated but not used", n)
+			n.SetDiag(true)
+		}
+		n.SetType(nil)
+
+	case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
+		base.Errorf("%v is not a type", n)
+		if t != nil {
+			n.SetType(nil)
+		}
+
+	}
+
+	base.Pos = lno
+	return n
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is almost equivalent to DefaultLit
+// but also accepts untyped numeric values representable as
+// a value of type int (see also checkmake for comparison).
+// The result of indexlit MUST be assigned back to n, e.g.
+// 	n.Left = indexlit(n.Left)
+func indexlit(n ir.Node) ir.Node {
+	if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+		return DefaultLit(n, types.Types[types.TINT])
+	}
+	return n
+}
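+
+// For instance (editor's note), in
+//
+//	const i = 2.0 // untyped float constant
+//	_ = [4]int{}[i]
+//
+// indexlit gives i the default type int; an index such as 2.5 would then
+// be rejected as not representable by an int.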
+
+// typecheck1 should ONLY be called from typecheck.
+func typecheck1(n ir.Node, top int) ir.Node {
+	if n, ok := n.(*ir.Name); ok {
+		typecheckdef(n)
+	}
+
+	switch n.Op() {
+	default:
+		ir.Dump("typecheck", n)
+		base.Fatalf("typecheck %v", n.Op())
+		panic("unreachable")
+
+	case ir.OLITERAL:
+		if n.Sym() == nil && n.Type() == nil {
+			base.Fatalf("literal missing type: %v", n)
+		}
+		return n
+
+	case ir.ONIL:
+		return n
+
+	// names
+	case ir.ONONAME:
+		if !n.Diag() {
+			// Note: adderrorname looks for this string and
+			// adds context about the outer expression
+			base.ErrorfAt(n.Pos(), "undefined: %v", n.Sym())
+			n.SetDiag(true)
+		}
+		n.SetType(nil)
+		return n
+
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.BuiltinOp != 0 {
+			if top&ctxCallee == 0 {
+				base.Errorf("use of builtin %v not in function call", n.Sym())
+				n.SetType(nil)
+				return n
+			}
+			return n
+		}
+		if top&ctxAssign == 0 {
+			// not a write to the variable
+			if ir.IsBlank(n) {
+				base.Errorf("cannot use _ as value")
+				n.SetType(nil)
+				return n
+			}
+			n.SetUsed(true)
+		}
+		return n
+
+	case ir.OLINKSYMOFFSET:
+		// type already set
+		return n
+
+	case ir.OPACK:
+		n := n.(*ir.PkgName)
+		base.Errorf("use of package %v without selector", n.Sym())
+		n.SetType(nil)
+		return n
+
+	// types (ODEREF is with exprs)
+	case ir.OTYPE:
+		return n
+
+	case ir.OTSLICE:
+		n := n.(*ir.SliceType)
+		return tcSliceType(n)
+
+	case ir.OTARRAY:
+		n := n.(*ir.ArrayType)
+		return tcArrayType(n)
+
+	case ir.OTMAP:
+		n := n.(*ir.MapType)
+		return tcMapType(n)
+
+	case ir.OTCHAN:
+		n := n.(*ir.ChanType)
+		return tcChanType(n)
+
+	case ir.OTSTRUCT:
+		n := n.(*ir.StructType)
+		return tcStructType(n)
+
+	case ir.OTINTER:
+		n := n.(*ir.InterfaceType)
+		return tcInterfaceType(n)
+
+	case ir.OTFUNC:
+		n := n.(*ir.FuncType)
+		return tcFuncType(n)
+	// type or expr
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		return tcStar(n, top)
+
+	// x op= y
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		checkassign(n, n.X)
+		if n.IncDec && !okforarith[n.X.Type().Kind()] {
+			base.Errorf("invalid operation: %v (non-numeric type %v)", n, n.X.Type())
+			return n
+		}
+		switch n.AsOp {
+		case ir.OLSH, ir.ORSH:
+			n.X, n.Y, _ = tcShift(n, n.X, n.Y)
+		case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+			n.X, n.Y, _ = tcArith(n, n.AsOp, n.X, n.Y)
+		default:
+			base.Fatalf("invalid assign op: %v", n.AsOp)
+		}
+		return n
+
+	// logical operators
+	case ir.OANDAND, ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		// For "x == x && len(s)", it's better to report that "len(s)" (type int)
+		// can't be used with "&&" than to report that "x == x" (type untyped bool)
+		// can't be converted to int (see issue #41500).
+		if !n.X.Type().IsBoolean() {
+			base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type()))
+			n.SetType(nil)
+			return n
+		}
+		if !n.Y.Type().IsBoolean() {
+			base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type()))
+			n.SetType(nil)
+			return n
+		}
+		l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+		n.X, n.Y = l, r
+		n.SetType(t)
+		return n
+
+	// shift operators
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		l, r, t := tcShift(n, n.X, n.Y)
+		n.X, n.Y = l, r
+		n.SetType(t)
+		return n
+
+	// comparison operators
+	case ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.ONE:
+		n := n.(*ir.BinaryExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+		if t != nil {
+			n.X, n.Y = l, r
+			n.SetType(types.UntypedBool)
+			if con := EvalConst(n); con.Op() == ir.OLITERAL {
+				return con
+			}
+			n.X, n.Y = defaultlit2(l, r, true)
+		}
+		return n
+
+	// binary operators
+	case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+		n := n.(*ir.BinaryExpr)
+		n.X, n.Y = Expr(n.X), Expr(n.Y)
+		l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+		if t != nil && t.Kind() == types.TSTRING && n.Op() == ir.OADD {
+			// create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+			var add *ir.AddStringExpr
+			if l.Op() == ir.OADDSTR {
+				add = l.(*ir.AddStringExpr)
+				add.SetPos(n.Pos())
+			} else {
+				add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
+			}
+			if r.Op() == ir.OADDSTR {
+				r := r.(*ir.AddStringExpr)
+				add.List.Append(r.List.Take()...)
+			} else {
+				add.List.Append(r)
+			}
+			add.SetType(t)
+			return add
+		}
+		n.X, n.Y = l, r
+		n.SetType(t)
+		return n
+
+	case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
+		n := n.(*ir.UnaryExpr)
+		return tcUnaryArith(n)
+
+	// exprs
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		return tcAddr(n)
+
+	case ir.OCOMPLIT:
+		return tcCompLit(n.(*ir.CompLitExpr))
+
+	case ir.OXDOT, ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		return tcDot(n, top)
+
+	case ir.ODOTTYPE:
+		n := n.(*ir.TypeAssertExpr)
+		return tcDotType(n)
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		return tcIndex(n)
+
+	case ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		return tcRecv(n)
+
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		return tcSend(n)
+
+	case ir.OSLICEHEADER:
+		n := n.(*ir.SliceHeaderExpr)
+		return tcSliceHeader(n)
+
+	case ir.OMAKESLICECOPY:
+		n := n.(*ir.MakeExpr)
+		return tcMakeSliceCopy(n)
+
+	case ir.OSLICE, ir.OSLICE3:
+		n := n.(*ir.SliceExpr)
+		return tcSlice(n)
+
+	// call and call like
+	case ir.OCALL:
+		n := n.(*ir.CallExpr)
+		return tcCall(n, top)
+
+	case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+		n := n.(*ir.UnaryExpr)
+		n.SetType(types.Types[types.TUINTPTR])
+		return n
+
+	case ir.OCAP, ir.OLEN:
+		n := n.(*ir.UnaryExpr)
+		return tcLenCap(n)
+
+	case ir.OREAL, ir.OIMAG:
+		n := n.(*ir.UnaryExpr)
+		return tcRealImag(n)
+
+	case ir.OCOMPLEX:
+		n := n.(*ir.BinaryExpr)
+		return tcComplex(n)
+
+	case ir.OCLOSE:
+		n := n.(*ir.UnaryExpr)
+		return tcClose(n)
+
+	case ir.ODELETE:
+		n := n.(*ir.CallExpr)
+		return tcDelete(n)
+
+	case ir.OAPPEND:
+		n := n.(*ir.CallExpr)
+		return tcAppend(n)
+
+	case ir.OCOPY:
+		n := n.(*ir.BinaryExpr)
+		return tcCopy(n)
+
+	case ir.OCONV:
+		n := n.(*ir.ConvExpr)
+		return tcConv(n)
+
+	case ir.OMAKE:
+		n := n.(*ir.CallExpr)
+		return tcMake(n)
+
+	case ir.ONEW:
+		n := n.(*ir.UnaryExpr)
+		return tcNew(n)
+
+	case ir.OPRINT, ir.OPRINTN:
+		n := n.(*ir.CallExpr)
+		return tcPrint(n)
+
+	case ir.OPANIC:
+		n := n.(*ir.UnaryExpr)
+		return tcPanic(n)
+
+	case ir.ORECOVER:
+		n := n.(*ir.CallExpr)
+		return tcRecover(n)
+
+	case ir.OCLOSURE:
+		n := n.(*ir.ClosureExpr)
+		tcClosure(n, top)
+		return n
+
+	case ir.OITAB:
+		n := n.(*ir.UnaryExpr)
+		return tcITab(n)
+
+	case ir.OIDATA:
+		// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
+		// usually by just having checked the OITAB.
+		n := n.(*ir.UnaryExpr)
+		base.Fatalf("cannot typecheck interface data %v", n)
+		panic("unreachable")
+
+	case ir.OSPTR:
+		n := n.(*ir.UnaryExpr)
+		return tcSPtr(n)
+
+	case ir.OCFUNC:
+		n := n.(*ir.UnaryExpr)
+		n.X = Expr(n.X)
+		n.SetType(types.Types[types.TUINTPTR])
+		return n
+
+	case ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		n.X = Expr(n.X)
+		return n
+
+	// statements
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		tcAssign(n)
+
+		// Code that creates temps does not bother to set defn, so do it here.
+		if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) {
+			n.X.Name().Defn = n
+		}
+		return n
+
+	case ir.OAS2:
+		tcAssignList(n.(*ir.AssignListStmt))
+		return n
+
+	case ir.OBREAK,
+		ir.OCONTINUE,
+		ir.ODCL,
+		ir.OGOTO,
+		ir.OFALL,
+		ir.OVARKILL,
+		ir.OVARLIVE:
+		return n
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		Stmts(n.List)
+		return n
+
+	case ir.OLABEL:
+		if n.Sym().IsBlank() {
+			// Empty identifier is valid but useless.
+			// Eliminate now to simplify life later.
+			// See issues 7538, 11589, 11593.
+			n = ir.NewBlockStmt(n.Pos(), nil)
+		}
+		return n
+
+	case ir.ODEFER, ir.OGO:
+		n := n.(*ir.GoDeferStmt)
+		n.Call = typecheck(n.Call, ctxStmt|ctxExpr)
+		if !n.Call.Diag() {
+			tcGoDefer(n)
+		}
+		return n
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		return tcFor(n)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		return tcIf(n)
+
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		return tcReturn(n)
+
+	case ir.OTAILCALL:
+		n := n.(*ir.TailCallStmt)
+		return n
+
+	case ir.OSELECT:
+		tcSelect(n.(*ir.SelectStmt))
+		return n
+
+	case ir.OSWITCH:
+		tcSwitch(n.(*ir.SwitchStmt))
+		return n
+
+	case ir.ORANGE:
+		tcRange(n.(*ir.RangeStmt))
+		return n
+
+	case ir.OTYPESW:
+		n := n.(*ir.TypeSwitchGuard)
+		base.Errorf("use of .(type) outside type switch")
+		n.SetType(nil)
+		return n
+
+	case ir.ODCLFUNC:
+		tcFunc(n.(*ir.Func))
+		return n
+
+	case ir.ODCLCONST:
+		n := n.(*ir.Decl)
+		n.X = Expr(n.X).(*ir.Name)
+		return n
+
+	case ir.ODCLTYPE:
+		n := n.(*ir.Decl)
+		n.X = typecheck(n.X, ctxType).(*ir.Name)
+		types.CheckSize(n.X.Type())
+		return n
+	}
+
+	// No return n here!
+	// Individual cases can type-assert n, introducing a new one.
+	// Each must execute its own return n.
+}
+
+func typecheckargs(n ir.InitNode) {
+	var list []ir.Node
+	switch n := n.(type) {
+	default:
+		base.Fatalf("typecheckargs %+v", n.Op())
+	case *ir.CallExpr:
+		list = n.Args
+		if n.IsDDD {
+			Exprs(list)
+			return
+		}
+	case *ir.ReturnStmt:
+		list = n.Results
+	}
+	if len(list) != 1 {
+		Exprs(list)
+		return
+	}
+
+	typecheckslice(list, ctxExpr|ctxMultiOK)
+	t := list[0].Type()
+	if t == nil || !t.IsFuncArgStruct() {
+		return
+	}
+
+	// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+
+	// Save n as n.Orig for fmt.go.
+	if ir.Orig(n) == n {
+		n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
+	}
+
+	as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+	as.Rhs.Append(list...)
+
+	// If we're outside of function context, then this call will
+	// be executed during the generated init function. However,
+	// init.go hasn't yet created it. Instead, associate the
+	// temporary variables with InitTodoFunc for now, and init.go
+	// will reassociate them later when it's appropriate.
+	static := ir.CurFunc == nil
+	if static {
+		ir.CurFunc = InitTodoFunc
+	}
+	list = nil
+	for _, f := range t.FieldSlice() {
+		t := Temp(f.Type)
+		as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
+		as.Lhs.Append(t)
+		list = append(list, t)
+	}
+	if static {
+		ir.CurFunc = nil
+	}
+
+	switch n := n.(type) {
+	case *ir.CallExpr:
+		n.Args = list
+	case *ir.ReturnStmt:
+		n.Results = list
+	}
+
+	n.PtrInit().Append(Stmt(as))
+}
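+
+// Editor's sketch of the rewrite above: given
+//
+//	func g() (int, string)
+//
+// a call f(g()) becomes, in effect,
+//
+//	t1, t2 := g()
+//	f(t1, t2)
+//
+// where t1 and t2 are compiler-generated temporaries.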
+
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
+	t := r.Type()
+	if t == nil {
+		return false
+	}
+	if !t.IsInteger() {
+		base.Errorf("invalid slice index %v (type %v)", r, t)
+		return false
+	}
+
+	if r.Op() == ir.OLITERAL {
+		x := r.Val()
+		if constant.Sign(x) < 0 {
+			base.Errorf("invalid slice index %v (index must be non-negative)", r)
+			return false
+		} else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
+			base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+			return false
+		} else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) {
+			base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l)))
+			return false
+		} else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+			base.Errorf("invalid slice index %v (index too large)", r)
+			return false
+		}
+	}
+
+	return true
+}
+
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
+	if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+		base.Errorf("invalid slice index: %v > %v", lo, hi)
+		return false
+	}
+
+	return true
+}
+
+// The result of implicitstar MUST be assigned back to n, e.g.
+// 	n.Left = implicitstar(n.Left)
+func implicitstar(n ir.Node) ir.Node {
+	// insert implicit * if needed for fixed array
+	t := n.Type()
+	if t == nil || !t.IsPtr() {
+		return n
+	}
+	t = t.Elem()
+	if t == nil {
+		return n
+	}
+	if !t.IsArray() {
+		return n
+	}
+	star := ir.NewStarExpr(base.Pos, n)
+	star.SetImplicit(true)
+	return Expr(star)
+}
+
+func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
+	if len(n.Args) == 0 {
+		p := fmt.Sprintf(f, args...)
+		base.Errorf("missing argument to %s: %v", p, n)
+		return nil, false
+	}
+
+	if len(n.Args) > 1 {
+		p := fmt.Sprintf(f, args...)
+		base.Errorf("too many arguments to %s: %v", p, n)
+		return n.Args[0], false
+	}
+
+	return n.Args[0], true
+}
+
+func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
+	if len(n.Args) != 2 {
+		if len(n.Args) < 2 {
+			base.Errorf("not enough arguments in call to %v", n)
+		} else {
+			base.Errorf("too many arguments in call to %v", n)
+		}
+		return nil, nil, false
+	}
+	return n.Args[0], n.Args[1], true
+}
+
+func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+	var r *types.Field
+	for _, f := range fs.Slice() {
+		if dostrcmp != 0 && f.Sym.Name == s.Name {
+			return f
+		}
+		if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
+			return f
+		}
+		if f.Sym != s {
+			continue
+		}
+		if r != nil {
+			if errnode != nil {
+				base.Errorf("ambiguous selector %v", errnode)
+			} else if t.IsPtr() {
+				base.Errorf("ambiguous selector (%v).%v", t, s)
+			} else {
+				base.Errorf("ambiguous selector %v.%v", t, s)
+			}
+			break
+		}
+
+		r = f
+	}
+
+	return r
+}
+
+// typecheckMethodExpr checks selector expressions (ODOT) where the
+// base expression is a type expression (OTYPE).
+func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheckMethodExpr", n)(&res)
+	}
+
+	t := n.X.Type()
+
+	// Compute the method set for t.
+	var ms *types.Fields
+	if t.IsInterface() {
+		ms = t.Fields()
+	} else {
+		mt := types.ReceiverBaseType(t)
+		if mt == nil {
+			base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel)
+			n.SetType(nil)
+			return n
+		}
+		CalcMethods(mt)
+		ms = mt.AllMethods()
+
+		// The method expression T.m requires a wrapper when T
+		// is different from m's declared receiver type. We
+		// normally generate these wrappers while writing out
+		// runtime type descriptors, which is always done for
+		// types declared at package scope. However, we need
+		// to make sure to generate wrappers for anonymous
+		// receiver types too.
+		if mt.Sym() == nil {
+			NeedRuntimeType(t)
+		}
+	}
+
+	s := n.Sel
+	m := lookdot1(n, s, t, ms, 0)
+	if m == nil {
+		if lookdot1(n, s, t, ms, 1) != nil {
+			base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
+		} else if _, ambig := dotpath(s, t, nil, false); ambig {
+			base.Errorf("%v undefined (ambiguous selector)", n) // method or field
+		} else {
+			base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
+		}
+		n.SetType(nil)
+		return n
+	}
+
+	if !types.IsMethodApplicable(t, m) {
+		base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
+		n.SetType(nil)
+		return n
+	}
+
+	n.SetOp(ir.OMETHEXPR)
+	n.Selection = m
+	n.SetType(NewMethodType(m.Type, n.X.Type()))
+	return n
+}
+
+func derefall(t *types.Type) *types.Type {
+	for t != nil && t.IsPtr() {
+		t = t.Elem()
+	}
+	return t
+}
+
+func lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
+	s := n.Sel
+
+	types.CalcSize(t)
+	var f1 *types.Field
+	if t.IsStruct() || t.IsInterface() {
+		f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
+	}
+
+	var f2 *types.Field
+	if n.X.Type() == t || n.X.Type().Sym() == nil {
+		mt := types.ReceiverBaseType(t)
+		if mt != nil {
+			f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
+		}
+	}
+
+	if f1 != nil {
+		if dostrcmp > 1 || f1.Broke() {
+			// Already in the process of diagnosing an error.
+			return f1
+		}
+		if f2 != nil {
+			base.Errorf("%v is both field and method", n.Sel)
+		}
+		if f1.Offset == types.BADWIDTH {
+			base.Fatalf("lookdot badwidth %v %p", f1, f1)
+		}
+		n.Selection = f1
+		n.SetType(f1.Type)
+		if t.IsInterface() {
+			if n.X.Type().IsPtr() {
+				star := ir.NewStarExpr(base.Pos, n.X)
+				star.SetImplicit(true)
+				n.X = Expr(star)
+			}
+
+			n.SetOp(ir.ODOTINTER)
+		}
+		return f1
+	}
+
+	if f2 != nil {
+		if dostrcmp > 1 {
+			// Already in the process of diagnosing an error.
+			return f2
+		}
+		orig := n.X
+		tt := n.X.Type()
+		types.CalcSize(tt)
+		rcvr := f2.Type.Recv().Type
+		if !types.Identical(rcvr, tt) {
+			if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
+				checklvalue(n.X, "call pointer method on")
+				addr := NodAddr(n.X)
+				addr.SetImplicit(true)
+				n.X = typecheck(addr, ctxType|ctxExpr)
+			} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
+				star := ir.NewStarExpr(base.Pos, n.X)
+				star.SetImplicit(true)
+				n.X = typecheck(star, ctxType|ctxExpr)
+			} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
+				base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X)
+				for tt.IsPtr() {
+					// Stop one level early for method with pointer receiver.
+					if rcvr.IsPtr() && !tt.Elem().IsPtr() {
+						break
+					}
+					star := ir.NewStarExpr(base.Pos, n.X)
+					star.SetImplicit(true)
+					n.X = typecheck(star, ctxType|ctxExpr)
+					tt = tt.Elem()
+				}
+			} else {
+				base.Fatalf("method mismatch: %v for %v", rcvr, tt)
+			}
+		}
+
+		// Check that we haven't implicitly dereferenced any defined pointer types.
+		for x := n.X; ; {
+			var inner ir.Node
+			implicit := false
+			switch x := x.(type) {
+			case *ir.AddrExpr:
+				inner, implicit = x.X, x.Implicit()
+			case *ir.SelectorExpr:
+				inner, implicit = x.X, x.Implicit()
+			case *ir.StarExpr:
+				inner, implicit = x.X, x.Implicit()
+			}
+			if !implicit {
+				break
+			}
+			if inner.Type().Sym() != nil && (x.Op() == ir.ODEREF || x.Op() == ir.ODOTPTR) {
+				// Found an implicit dereference of a defined pointer type.
+				// Restore n.X for better error message.
+				n.X = orig
+				return nil
+			}
+			x = inner
+		}
+
+		n.Selection = f2
+		n.SetType(f2.Type)
+		n.SetOp(ir.ODOTMETH)
+
+		return f2
+	}
+
+	return nil
+}
+
+func nokeys(l ir.Nodes) bool {
+	for _, n := range l {
+		if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
+			return false
+		}
+	}
+	return true
+}
+
+func hasddd(t *types.Type) bool {
+	for _, tl := range t.Fields().Slice() {
+		if tl.IsDDD() {
+			return true
+		}
+	}
+
+	return false
+}
+
+// typecheck assignment: type list = expression list
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+	var t *types.Type
+	var i int
+
+	lno := base.Pos
+	defer func() { base.Pos = lno }()
+
+	if tstruct.Broke() {
+		return
+	}
+
+	var n ir.Node
+	if len(nl) == 1 {
+		n = nl[0]
+	}
+
+	n1 := tstruct.NumFields()
+	n2 := len(nl)
+	if !hasddd(tstruct) {
+		if n2 > n1 {
+			goto toomany
+		}
+		if n2 < n1 {
+			goto notenough
+		}
+	} else {
+		if !isddd {
+			if n2 < n1-1 {
+				goto notenough
+			}
+		} else {
+			if n2 > n1 {
+				goto toomany
+			}
+			if n2 < n1 {
+				goto notenough
+			}
+		}
+	}
+
+	i = 0
+	for _, tl := range tstruct.Fields().Slice() {
+		t = tl.Type
+		if tl.IsDDD() {
+			if isddd {
+				if i >= len(nl) {
+					goto notenough
+				}
+				if len(nl)-i > 1 {
+					goto toomany
+				}
+				n = nl[i]
+				ir.SetPos(n)
+				if n.Type() != nil {
+					nl[i] = assignconvfn(n, t, desc)
+				}
+				return
+			}
+
+			// TODO(mdempsky): Make into ... call with implicit slice.
+			for ; i < len(nl); i++ {
+				n = nl[i]
+				ir.SetPos(n)
+				if n.Type() != nil {
+					nl[i] = assignconvfn(n, t.Elem(), desc)
+				}
+			}
+			return
+		}
+
+		if i >= len(nl) {
+			goto notenough
+		}
+		n = nl[i]
+		ir.SetPos(n)
+		if n.Type() != nil {
+			nl[i] = assignconvfn(n, t, desc)
+		}
+		i++
+	}
+
+	if i < len(nl) {
+		goto toomany
+	}
+	if isddd {
+		if call != nil {
+			base.Errorf("invalid use of ... in call to %v", call)
+		} else {
+			base.Errorf("invalid use of ... in %v", op)
+		}
+	}
+	return
+
+notenough:
+	if n == nil || (!n.Diag() && n.Type() != nil) {
+		details := errorDetails(nl, tstruct, isddd)
+		if call != nil {
+			// call is the expression being called, not the overall call.
+			// Method expressions have the form T.M, and the compiler has
+			// rewritten those to ONAME nodes but left T in X.
+			if call.Op() == ir.OMETHEXPR {
+				call := call.(*ir.SelectorExpr)
+				base.Errorf("not enough arguments in call to method expression %v%s", call, details)
+			} else {
+				base.Errorf("not enough arguments in call to %v%s", call, details)
+			}
+		} else {
+			base.Errorf("not enough arguments to %v%s", op, details)
+		}
+		if n != nil {
+			n.SetDiag(true)
+		}
+	}
+	return
+
+toomany:
+	details := errorDetails(nl, tstruct, isddd)
+	if call != nil {
+		base.Errorf("too many arguments in call to %v%s", call, details)
+	} else {
+		base.Errorf("too many arguments to %v%s", op, details)
+	}
+}
+
+func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
+	// If we don't know any type at a call site, suppress any return
+	// message signatures. See issue https://golang.org/issues/19012.
+	if tstruct == nil {
+		return ""
+	}
+	// If any node has an unknown type, suppress it as well
+	for _, n := range nl {
+		if n.Type() == nil {
+			return ""
+		}
+	}
+	return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
+}
+
+// sigrepr is a type's representation to the outside world,
+// in string representations of return signatures,
+// e.g. in error messages about wrong arguments to return.
+func sigrepr(t *types.Type, isddd bool) string {
+	switch t {
+	case types.UntypedString:
+		return "string"
+	case types.UntypedBool:
+		return "bool"
+	}
+
+	if t.Kind() == types.TIDEAL {
+		// "untyped number" is not commonly used
+		// outside of the compiler, so let's use "number".
+		// TODO(mdempsky): Revisit this.
+		return "number"
+	}
+
+	// Turn a []T... argument into ...T for a clearer error message.
+	if isddd {
+		if !t.IsSlice() {
+			base.Fatalf("bad type for ... argument: %v", t)
+		}
+		return "..." + t.Elem().String()
+	}
+	return t.String()
+}
+
+// fmtSignature returns the signature of the types at the call or return.
+func fmtSignature(nl ir.Nodes, isddd bool) string {
+	if len(nl) < 1 {
+		return "()"
+	}
+
+	var typeStrings []string
+	for i, n := range nl {
+		isdddArg := isddd && i == len(nl)-1
+		typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
+	}
+
+	return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
+}
+
+// type check composite
+func fielddup(name string, hash map[string]bool) {
+	if hash[name] {
+		base.Errorf("duplicate field name in struct literal: %s", name)
+		return
+	}
+	hash[name] = true
+}
+
+// iscomptype reports whether type t is a composite literal type.
+func iscomptype(t *types.Type) bool {
+	switch t.Kind() {
+	case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
+		return true
+	default:
+		return false
+	}
+}
+
+// pushtype adds elided type information for composite literals if
+// appropriate, and returns the resulting expression.
+func pushtype(nn ir.Node, t *types.Type) ir.Node {
+	if nn == nil || nn.Op() != ir.OCOMPLIT {
+		return nn
+	}
+	n := nn.(*ir.CompLitExpr)
+	if n.Ntype != nil {
+		return n
+	}
+
+	switch {
+	case iscomptype(t):
+		// For T, return T{...}.
+		n.Ntype = ir.TypeNode(t)
+
+	case t.IsPtr() && iscomptype(t.Elem()):
+		// For *T, return &T{...}.
+		n.Ntype = ir.TypeNode(t.Elem())
+
+		addr := NodAddrAt(n.Pos(), n)
+		addr.SetImplicit(true)
+		return addr
+	}
+	return n
+}
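+
+// For example (editor's note), in
+//
+//	_ = []Point{{1, 2}, {3, 4}}
+//
+// the inner literals carry no explicit type, so pushtype supplies the
+// elided Point; in []*Point{{1, 2}} it supplies &Point{1, 2} instead.
+// (Point stands for a hypothetical struct with two int fields.)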
+
+// typecheckarraylit type-checks a sequence of slice/array literal elements.
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
+	// If there are key/value pairs, create a map to keep seen
+	// keys so we can check for duplicate indices.
+	var indices map[int64]bool
+	for _, elt := range elts {
+		if elt.Op() == ir.OKEY {
+			indices = make(map[int64]bool)
+			break
+		}
+	}
+
+	var key, length int64
+	for i, elt := range elts {
+		ir.SetPos(elt)
+		r := elts[i]
+		var kv *ir.KeyExpr
+		if elt.Op() == ir.OKEY {
+			elt := elt.(*ir.KeyExpr)
+			elt.Key = Expr(elt.Key)
+			key = IndexConst(elt.Key)
+			if key < 0 {
+				if !elt.Key.Diag() {
+					if key == -2 {
+						base.Errorf("index too large")
+					} else {
+						base.Errorf("index must be non-negative integer constant")
+					}
+					elt.Key.SetDiag(true)
+				}
+				key = -(1 << 30) // stay negative for a while
+			}
+			kv = elt
+			r = elt.Value
+		}
+
+		r = pushtype(r, elemType)
+		r = Expr(r)
+		r = AssignConv(r, elemType, ctx)
+		if kv != nil {
+			kv.Value = r
+		} else {
+			elts[i] = r
+		}
+
+		if key >= 0 {
+			if indices != nil {
+				if indices[key] {
+					base.Errorf("duplicate index in %s: %d", ctx, key)
+				} else {
+					indices[key] = true
+				}
+			}
+
+			if bound >= 0 && key >= bound {
+				base.Errorf("array index %d out of bounds [0:%d]", key, bound)
+				bound = -1
+			}
+		}
+
+		key++
+		if key > length {
+			length = key
+		}
+	}
+
+	return length
+}
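+
+// For example (editor's note), both of the following are rejected here:
+//
+//	_ = [...]int{0: 1, 2: 3, 2: 5} // duplicate index in array literal: 2
+//	_ = [2]int{5, 6, 7}            // array index 2 out of bounds [0:2]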
+
+// visible reports whether sym is exported or locally defined.
+func visible(sym *types.Sym) bool {
+	return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg)
+}
+
+// nonexported reports whether sym is an unexported field.
+func nonexported(sym *types.Sym) bool {
+	return sym != nil && !types.IsExported(sym.Name)
+}
+
+func checklvalue(n ir.Node, verb string) {
+	if !ir.IsAddressable(n) {
+		base.Errorf("cannot %s %v", verb, n)
+	}
+}
+
+func checkassign(stmt ir.Node, n ir.Node) {
+	// have already complained about n being invalid
+	if n.Type() == nil {
+		if base.Errors() == 0 {
+			base.Fatalf("expected an error about %v", n)
+		}
+		return
+	}
+
+	if ir.IsAddressable(n) {
+		return
+	}
+	if n.Op() == ir.OINDEXMAP {
+		n := n.(*ir.IndexExpr)
+		n.Assigned = true
+		return
+	}
+
+	switch {
+	case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP:
+		base.Errorf("cannot assign to struct field %v in map", n)
+	case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR:
+		base.Errorf("cannot assign to %v (strings are immutable)", n)
+	case n.Op() == ir.OLITERAL && n.Sym() != nil && ir.IsConstNode(n):
+		base.Errorf("cannot assign to %v (declared const)", n)
+	default:
+		base.Errorf("cannot assign to %v", n)
+	}
+	n.SetType(nil)
+}
+
+func checkassignlist(stmt ir.Node, l ir.Nodes) {
+	for _, n := range l {
+		checkassign(stmt, n)
+	}
+}
+
+func checkassignto(src *types.Type, dst ir.Node) {
+	// TODO(mdempsky): Handle all untyped types correctly.
+	if src == types.UntypedBool && dst.Type().IsBoolean() {
+		return
+	}
+
+	if op, why := assignop(src, dst.Type()); op == ir.OXXX {
+		base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+		return
+	}
+}
+
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+// 	n.Left = stringtoruneslit(n.Left)
+func stringtoruneslit(n *ir.ConvExpr) ir.Node {
+	if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
+		base.Fatalf("stringtoarraylit %v", n)
+	}
+
+	var l []ir.Node
+	i := 0
+	for _, r := range ir.StringVal(n.X) {
+		l = append(l, ir.NewKeyExpr(base.Pos, ir.NewInt(int64(i)), ir.NewInt(int64(r))))
+		i++
+	}
+
+	nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()), nil)
+	nn.List = l
+	return Expr(nn)
+}
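+
+// For example (editor's note), for n = ([]rune)("ab") this builds the
+// literal []rune{0: 'a', 1: 'b'} and typechecks it.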
+
+var mapqueue []*ir.MapType
+
+func CheckMapKeys() {
+	for _, n := range mapqueue {
+		k := n.Type().MapType().Key
+		if !k.Broke() && !types.IsComparable(k) {
+			base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
+		}
+	}
+	mapqueue = nil
+}
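+
+// For example (editor's note), a type such as map[func()]int is queued
+// above and reported as "invalid map key type func()", since function
+// types are not comparable.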
+
+// TypeGen tracks the number of function-scoped defined types that
+// have been declared. It's used to generate unique linker symbols for
+// their runtime type descriptors.
+var TypeGen int32
+
+func typecheckdeftype(n *ir.Name) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheckdeftype", n)(nil)
+	}
+
+	t := types.NewNamed(n)
+	if n.Curfn != nil {
+		TypeGen++
+		t.Vargen = TypeGen
+	}
+
+	if n.Pragma()&ir.NotInHeap != 0 {
+		t.SetNotInHeap(true)
+	}
+
+	n.SetType(t)
+	n.SetTypecheck(1)
+	n.SetWalkdef(1)
+
+	types.DeferCheckSize()
+	errorsBefore := base.Errors()
+	n.Ntype = typecheckNtype(n.Ntype)
+	if underlying := n.Ntype.Type(); underlying != nil {
+		t.SetUnderlying(underlying)
+	} else {
+		n.SetDiag(true)
+		n.SetType(nil)
+	}
+	if t.Kind() == types.TFORW && base.Errors() > errorsBefore {
+		// Something went wrong during type-checking,
+		// but it was reported. Silence future errors.
+		t.SetBroke(true)
+	}
+	types.ResumeCheckSize()
+}
+
+func typecheckdef(n *ir.Name) {
+	if base.EnableTrace && base.Flag.LowerT {
+		defer tracePrint("typecheckdef", n)(nil)
+	}
+
+	if n.Walkdef() == 1 {
+		return
+	}
+
+	if n.Type() != nil { // builtin
+		// Mark as Walkdef so that if n.SetType(nil) is called later, we
+		// won't try walking again.
+		if got := n.Walkdef(); got != 0 {
+			base.Fatalf("unexpected walkdef: %v", got)
+		}
+		n.SetWalkdef(1)
+		return
+	}
+
+	lno := ir.SetPos(n)
+	typecheckdefstack = append(typecheckdefstack, n)
+	if n.Walkdef() == 2 {
+		base.FlushErrors()
+		fmt.Printf("typecheckdef loop:")
+		for i := len(typecheckdefstack) - 1; i >= 0; i-- {
+			n := typecheckdefstack[i]
+			fmt.Printf(" %v", n.Sym())
+		}
+		fmt.Printf("\n")
+		base.Fatalf("typecheckdef loop")
+	}
+
+	n.SetWalkdef(2)
+
+	switch n.Op() {
+	default:
+		base.Fatalf("typecheckdef %v", n.Op())
+
+	case ir.OLITERAL:
+		if n.Ntype != nil {
+			n.Ntype = typecheckNtype(n.Ntype)
+			n.SetType(n.Ntype.Type())
+			n.Ntype = nil
+			if n.Type() == nil {
+				n.SetDiag(true)
+				goto ret
+			}
+		}
+
+		e := n.Defn
+		n.Defn = nil
+		if e == nil {
+			ir.Dump("typecheckdef nil defn", n)
+			base.ErrorfAt(n.Pos(), "xxx")
+		}
+
+		e = Expr(e)
+		if e.Type() == nil {
+			goto ret
+		}
+		if !ir.IsConstNode(e) {
+			if !e.Diag() {
+				if e.Op() == ir.ONIL {
+					base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
+				} else {
+					base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
+				}
+				e.SetDiag(true)
+			}
+			goto ret
+		}
+
+		t := n.Type()
+		if t != nil {
+			if !ir.OKForConst[t.Kind()] {
+				base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
+				goto ret
+			}
+
+			if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+				base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
+				goto ret
+			}
+
+			e = convlit(e, t)
+		}
+
+		n.SetType(e.Type())
+		if n.Type() != nil {
+			n.SetVal(e.Val())
+		}
+
+	case ir.ONAME:
+		if n.Ntype != nil {
+			n.Ntype = typecheckNtype(n.Ntype)
+			n.SetType(n.Ntype.Type())
+			if n.Type() == nil {
+				n.SetDiag(true)
+				goto ret
+			}
+		}
+
+		if n.Type() != nil {
+			break
+		}
+		if n.Defn == nil {
+			if n.BuiltinOp != 0 { // like OPRINTN
+				break
+			}
+			if base.Errors() > 0 {
+				// Can have undefined variables in x := foo
+				// that leave x with a nil n.Defn.
+				// If there are other errors anyway, don't
+				// bother adding to the noise.
+				break
+			}
+
+			base.Fatalf("var without type, init: %v", n.Sym())
+		}
+
+		if n.Defn.Op() == ir.ONAME {
+			n.Defn = Expr(n.Defn)
+			n.SetType(n.Defn.Type())
+			break
+		}
+
+		n.Defn = Stmt(n.Defn) // fills in n.Type
+
+	case ir.OTYPE:
+		if n.Alias() {
+			// Type alias declaration: Simply use the rhs type - no need
+			// to create a new type.
+			// If we have a syntax error, name.Ntype may be nil.
+			if n.Ntype != nil {
+				n.Ntype = typecheckNtype(n.Ntype)
+				n.SetType(n.Ntype.Type())
+				if n.Type() == nil {
+					n.SetDiag(true)
+					goto ret
+				}
+				// For package-level type aliases, set n.Sym.Def so we can identify
+				// it as a type alias during export. See also #31959.
+				if n.Curfn == nil {
+					n.Sym().Def = n.Ntype
+				}
+			}
+			break
+		}
+
+		// regular type declaration
+		typecheckdeftype(n)
+	}
+
+ret:
+	if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+		base.Fatalf("got %v for %v", n.Type(), n)
+	}
+	last := len(typecheckdefstack) - 1
+	if typecheckdefstack[last] != n {
+		base.Fatalf("typecheckdefstack mismatch")
+	}
+	typecheckdefstack[last] = nil
+	typecheckdefstack = typecheckdefstack[:last]
+
+	base.Pos = lno
+	n.SetWalkdef(1)
+}
+
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
+	n := *np
+	if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+		base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
+		return false
+	}
+
+	// Do range checks for constants before DefaultLit
+	// to avoid redundant "constant NNN overflows int" errors.
+	if n.Op() == ir.OLITERAL {
+		v := toint(n.Val())
+		if constant.Sign(v) < 0 {
+			base.Errorf("negative %s argument in make(%v)", arg, t)
+			return false
+		}
+		if ir.ConstOverflow(v, types.Types[types.TINT]) {
+			base.Errorf("%s argument too large in make(%v)", arg, t)
+			return false
+		}
+	}
+
+	// DefaultLit is necessary for non-constants too: n might be 1.1<<k.
+	// TODO(gri) The length argument requirements for (array/slice) make
+	// are the same as for index expressions. Factor the code better;
+	// for instance, indexlit might be called here and incorporate some
+	// of the bounds checks done for make.
+	n = DefaultLit(n, types.Types[types.TINT])
+	*np = n
+
+	return true
+}
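+
+// For example (editor's note), a constant argument is range-checked here:
+//
+//	_ = make([]int, -1) // negative len argument in make([]int)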
+
+// markBreak marks control statements containing break statements with SetHasBreak(true).
+func markBreak(fn *ir.Func) {
+	var labels map[*types.Sym]ir.Node
+	var implicit ir.Node
+
+	var mark func(ir.Node) bool
+	mark = func(n ir.Node) bool {
+		switch n.Op() {
+		default:
+			ir.DoChildren(n, mark)
+
+		case ir.OBREAK:
+			n := n.(*ir.BranchStmt)
+			if n.Label == nil {
+				setHasBreak(implicit)
+			} else {
+				setHasBreak(labels[n.Label])
+			}
+
+		case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE:
+			old := implicit
+			implicit = n
+			var sym *types.Sym
+			switch n := n.(type) {
+			case *ir.ForStmt:
+				sym = n.Label
+			case *ir.RangeStmt:
+				sym = n.Label
+			case *ir.SelectStmt:
+				sym = n.Label
+			case *ir.SwitchStmt:
+				sym = n.Label
+			}
+			if sym != nil {
+				if labels == nil {
+					// Map creation delayed until we need it - most functions don't.
+					labels = make(map[*types.Sym]ir.Node)
+				}
+				labels[sym] = n
+			}
+			ir.DoChildren(n, mark)
+			if sym != nil {
+				delete(labels, sym)
+			}
+			implicit = old
+		}
+		return false
+	}
+
+	mark(fn)
+}
+
+func controlLabel(n ir.Node) *types.Sym {
+	switch n := n.(type) {
+	default:
+		base.Fatalf("controlLabel %+v", n.Op())
+		return nil
+	case *ir.ForStmt:
+		return n.Label
+	case *ir.RangeStmt:
+		return n.Label
+	case *ir.SelectStmt:
+		return n.Label
+	case *ir.SwitchStmt:
+		return n.Label
+	}
+}
+
+func setHasBreak(n ir.Node) {
+	switch n := n.(type) {
+	default:
+		base.Fatalf("setHasBreak %+v", n.Op())
+	case nil:
+		// ignore
+	case *ir.ForStmt:
+		n.HasBreak = true
+	case *ir.RangeStmt:
+		n.HasBreak = true
+	case *ir.SelectStmt:
+		n.HasBreak = true
+	case *ir.SwitchStmt:
+		n.HasBreak = true
+	}
+}
+
+// isTermNodes reports whether the Nodes list ends with a terminating statement.
+func isTermNodes(l ir.Nodes) bool {
+	s := l
+	c := len(s)
+	if c == 0 {
+		return false
+	}
+	return isTermNode(s[c-1])
+}
+
+// isTermNode reports whether the node n, the last one in a
+// statement list, is a terminating statement.
+func isTermNode(n ir.Node) bool {
+	switch n.Op() {
+	// NOTE: OLABEL is treated as a separate statement,
+	// not a separate prefix, so skipping to the last statement
+	// in the block handles the labeled statement case by
+	// skipping over the label. No case OLABEL here.
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		return isTermNodes(n.List)
+
+	case ir.OGOTO, ir.ORETURN, ir.OTAILCALL, ir.OPANIC, ir.OFALL:
+		return true
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		if n.Cond != nil {
+			return false
+		}
+		if n.HasBreak {
+			return false
+		}
+		return true
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		return isTermNodes(n.Body) && isTermNodes(n.Else)
+
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		if n.HasBreak {
+			return false
+		}
+		def := false
+		for _, cas := range n.Cases {
+			if !isTermNodes(cas.Body) {
+				return false
+			}
+			if len(cas.List) == 0 { // default
+				def = true
+			}
+		}
+		return def
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		if n.HasBreak {
+			return false
+		}
+		for _, cas := range n.Cases {
+			if !isTermNodes(cas.Body) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
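+
+// Editor's sketch: CheckReturn below relies on isTermNodes, so a function
+// like
+//
+//	func f(x int) int {
+//		for {
+//			if x > 0 {
+//				return x
+//			}
+//		}
+//	}
+//
+// compiles (an unconditional for loop without a break terminates the
+// body), while ending f with only "if x > 0 { return x }" reports
+// "missing return at end of function".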
+
+// CheckUnused checks for any declared variables that weren't used.
+func CheckUnused(fn *ir.Func) {
+	// Only report unused variables if we haven't seen any type-checking
+	// errors yet.
+	if base.Errors() != 0 {
+		return
+	}
+
+	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+	for _, ln := range fn.Dcl {
+		if ln.Op() == ir.ONAME && ln.Class == ir.PAUTO && ln.Used() {
+			if guard, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
+				guard.Used = true
+			}
+		}
+	}
+
+	for _, ln := range fn.Dcl {
+		if ln.Op() != ir.ONAME || ln.Class != ir.PAUTO || ln.Used() {
+			continue
+		}
+		if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
+			if defn.Used {
+				continue
+			}
+			base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym())
+			defn.Used = true // suppress repeats
+		} else {
+			base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
+		}
+	}
+}
+
+// CheckReturn makes sure that fn terminates appropriately.
+func CheckReturn(fn *ir.Func) {
+	if fn.Type().NumResults() != 0 && len(fn.Body) != 0 {
+		markBreak(fn)
+		if !isTermNodes(fn.Body) {
+			base.ErrorfAt(fn.Endlineno, "missing return at end of function")
+		}
+	}
+}
+
+// getIotaValue returns the current value for "iota",
+// or -1 if not within a ConstSpec.
+func getIotaValue() int64 {
+	if i := len(typecheckdefstack); i > 0 {
+		if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
+			return x.Iota()
+		}
+	}
+
+	if ir.CurFunc != nil && ir.CurFunc.Iota >= 0 {
+		return ir.CurFunc.Iota
+	}
+
+	return -1
+}
+
+// curpkg returns the current package, based on Curfn.
+func curpkg() *types.Pkg {
+	fn := ir.CurFunc
+	if fn == nil {
+		// Initialization expressions for package-scope variables.
+		return types.LocalPkg
+	}
+	return fnpkg(fn.Nname)
+}
+
+func Conv(n ir.Node, t *types.Type) ir.Node {
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+	n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+	n.SetType(t)
+	n = Expr(n)
+	return n
+}
+
+// ConvNop converts node n to type t using the OCONVNOP op
+// and typechecks the result with ctxExpr.
+func ConvNop(n ir.Node, t *types.Type) ir.Node {
+	if types.Identical(n.Type(), t) {
+		return n
+	}
+	n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+	n.SetType(t)
+	n = Expr(n)
+	return n
+}
diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go
new file mode 100644
index 0000000..402b8de
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -0,0 +1,362 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+var (
+	okfor [ir.OEND][]bool
+	iscmp [ir.OEND]bool
+)
+
+var (
+	okforeq    [types.NTYPE]bool
+	okforadd   [types.NTYPE]bool
+	okforand   [types.NTYPE]bool
+	okfornone  [types.NTYPE]bool
+	okforbool  [types.NTYPE]bool
+	okforcap   [types.NTYPE]bool
+	okforlen   [types.NTYPE]bool
+	okforarith [types.NTYPE]bool
+)
+
+var basicTypes = [...]struct {
+	name  string
+	etype types.Kind
+}{
+	{"int8", types.TINT8},
+	{"int16", types.TINT16},
+	{"int32", types.TINT32},
+	{"int64", types.TINT64},
+	{"uint8", types.TUINT8},
+	{"uint16", types.TUINT16},
+	{"uint32", types.TUINT32},
+	{"uint64", types.TUINT64},
+	{"float32", types.TFLOAT32},
+	{"float64", types.TFLOAT64},
+	{"complex64", types.TCOMPLEX64},
+	{"complex128", types.TCOMPLEX128},
+	{"bool", types.TBOOL},
+	{"string", types.TSTRING},
+}
+
+var typedefs = [...]struct {
+	name     string
+	etype    types.Kind
+	sameas32 types.Kind
+	sameas64 types.Kind
+}{
+	{"int", types.TINT, types.TINT32, types.TINT64},
+	{"uint", types.TUINT, types.TUINT32, types.TUINT64},
+	{"uintptr", types.TUINTPTR, types.TUINT32, types.TUINT64},
+}
+
+var builtinFuncs = [...]struct {
+	name string
+	op   ir.Op
+}{
+	{"append", ir.OAPPEND},
+	{"cap", ir.OCAP},
+	{"close", ir.OCLOSE},
+	{"complex", ir.OCOMPLEX},
+	{"copy", ir.OCOPY},
+	{"delete", ir.ODELETE},
+	{"imag", ir.OIMAG},
+	{"len", ir.OLEN},
+	{"make", ir.OMAKE},
+	{"new", ir.ONEW},
+	{"panic", ir.OPANIC},
+	{"print", ir.OPRINT},
+	{"println", ir.OPRINTN},
+	{"real", ir.OREAL},
+	{"recover", ir.ORECOVER},
+}
+
+var unsafeFuncs = [...]struct {
+	name string
+	op   ir.Op
+}{
+	{"Alignof", ir.OALIGNOF},
+	{"Offsetof", ir.OOFFSETOF},
+	{"Sizeof", ir.OSIZEOF},
+}
+
+// InitUniverse initializes the universe block.
+func InitUniverse() {
+	if types.PtrSize == 0 {
+		base.Fatalf("typeinit before betypeinit")
+	}
+
+	types.SlicePtrOffset = 0
+	types.SliceLenOffset = types.Rnd(types.SlicePtrOffset+int64(types.PtrSize), int64(types.PtrSize))
+	types.SliceCapOffset = types.Rnd(types.SliceLenOffset+int64(types.PtrSize), int64(types.PtrSize))
+	types.SliceSize = types.Rnd(types.SliceCapOffset+int64(types.PtrSize), int64(types.PtrSize))
+
+	// string is same as slice without the cap
+	types.StringSize = types.Rnd(types.SliceLenOffset+int64(types.PtrSize), int64(types.PtrSize))
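+	// On a 64-bit target (PtrSize == 8), this yields slice field
+	// offsets 0/8/16 with SliceSize == 24 and StringSize == 16.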
+
+	for et := types.Kind(0); et < types.NTYPE; et++ {
+		types.SimType[et] = et
+	}
+
+	types.Types[types.TANY] = types.New(types.TANY)
+	types.Types[types.TINTER] = types.NewInterface(types.LocalPkg, nil)
+
+	defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type {
+		sym := pkg.Lookup(name)
+		n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym)
+		t := types.NewBasic(kind, n)
+		n.SetType(t)
+		sym.Def = n
+		if kind != types.TANY {
+			types.CalcSize(t)
+		}
+		return t
+	}
+
+	for _, s := range &basicTypes {
+		types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name)
+	}
+
+	for _, s := range &typedefs {
+		sameas := s.sameas32
+		if types.PtrSize == 8 {
+			sameas = s.sameas64
+		}
+		types.SimType[s.etype] = sameas
+
+		types.Types[s.etype] = defBasic(s.etype, types.BuiltinPkg, s.name)
+	}
+
+	// We create separate byte and rune types for better error messages
+	// rather than just creating type alias *types.Sym's for the uint8 and
+	// int32 types. Hence, (ByteType|RuneType).Sym.isAlias() is false.
+	// TODO(gri) Should we get rid of this special case (at the cost
+	// of less informative error messages involving bytes and runes)?
+	// (Alternatively, we could introduce an OTALIAS node representing
+	// type aliases, albeit at the cost of having to deal with it everywhere).
+	types.ByteType = defBasic(types.TUINT8, types.BuiltinPkg, "byte")
+	types.RuneType = defBasic(types.TINT32, types.BuiltinPkg, "rune")
+
+	// error type
+	s := types.BuiltinPkg.Lookup("error")
+	n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, s)
+	types.ErrorType = types.NewNamed(n)
+	types.ErrorType.SetUnderlying(makeErrorInterface())
+	n.SetType(types.ErrorType)
+	s.Def = n
+	types.CalcSize(types.ErrorType)
+
+	types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, ir.Pkgs.Unsafe, "Pointer")
+
+	// simple aliases
+	types.SimType[types.TMAP] = types.TPTR
+	types.SimType[types.TCHAN] = types.TPTR
+	types.SimType[types.TFUNC] = types.TPTR
+	types.SimType[types.TUNSAFEPTR] = types.TPTR
+
+	for _, s := range &builtinFuncs {
+		s2 := types.BuiltinPkg.Lookup(s.name)
+		def := NewName(s2)
+		def.BuiltinOp = s.op
+		s2.Def = def
+	}
+
+	for _, s := range &unsafeFuncs {
+		s2 := ir.Pkgs.Unsafe.Lookup(s.name)
+		def := NewName(s2)
+		def.BuiltinOp = s.op
+		s2.Def = def
+	}
+
+	s = types.BuiltinPkg.Lookup("true")
+	s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(true))
+
+	s = types.BuiltinPkg.Lookup("false")
+	s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false))
+
+	s = Lookup("_")
+	types.BlankSym = s
+	s.Block = -100
+	s.Def = NewName(s)
+	types.Types[types.TBLANK] = types.New(types.TBLANK)
+	ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+	ir.BlankNode = ir.AsNode(s.Def)
+	ir.BlankNode.SetTypecheck(1)
+
+	s = types.BuiltinPkg.Lookup("_")
+	s.Block = -100
+	s.Def = NewName(s)
+	types.Types[types.TBLANK] = types.New(types.TBLANK)
+	ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+
+	types.Types[types.TNIL] = types.New(types.TNIL)
+	s = types.BuiltinPkg.Lookup("nil")
+	nnil := NodNil()
+	nnil.(*ir.NilExpr).SetSym(s)
+	s.Def = nnil
+
+	s = types.BuiltinPkg.Lookup("iota")
+	s.Def = ir.NewIota(base.Pos, s)
+
+	for et := types.TINT8; et <= types.TUINT64; et++ {
+		types.IsInt[et] = true
+	}
+	types.IsInt[types.TINT] = true
+	types.IsInt[types.TUINT] = true
+	types.IsInt[types.TUINTPTR] = true
+
+	types.IsFloat[types.TFLOAT32] = true
+	types.IsFloat[types.TFLOAT64] = true
+
+	types.IsComplex[types.TCOMPLEX64] = true
+	types.IsComplex[types.TCOMPLEX128] = true
+
+	// initialize okfor
+	for et := types.Kind(0); et < types.NTYPE; et++ {
+		if types.IsInt[et] || et == types.TIDEAL {
+			okforeq[et] = true
+			types.IsOrdered[et] = true
+			okforarith[et] = true
+			okforadd[et] = true
+			okforand[et] = true
+			ir.OKForConst[et] = true
+			types.IsSimple[et] = true
+		}
+
+		if types.IsFloat[et] {
+			okforeq[et] = true
+			types.IsOrdered[et] = true
+			okforadd[et] = true
+			okforarith[et] = true
+			ir.OKForConst[et] = true
+			types.IsSimple[et] = true
+		}
+
+		if types.IsComplex[et] {
+			okforeq[et] = true
+			okforadd[et] = true
+			okforarith[et] = true
+			ir.OKForConst[et] = true
+			types.IsSimple[et] = true
+		}
+	}
+
+	types.IsSimple[types.TBOOL] = true
+
+	okforadd[types.TSTRING] = true
+
+	okforbool[types.TBOOL] = true
+
+	okforcap[types.TARRAY] = true
+	okforcap[types.TCHAN] = true
+	okforcap[types.TSLICE] = true
+
+	ir.OKForConst[types.TBOOL] = true
+	ir.OKForConst[types.TSTRING] = true
+
+	okforlen[types.TARRAY] = true
+	okforlen[types.TCHAN] = true
+	okforlen[types.TMAP] = true
+	okforlen[types.TSLICE] = true
+	okforlen[types.TSTRING] = true
+
+	okforeq[types.TPTR] = true
+	okforeq[types.TUNSAFEPTR] = true
+	okforeq[types.TINTER] = true
+	okforeq[types.TCHAN] = true
+	okforeq[types.TSTRING] = true
+	okforeq[types.TBOOL] = true
+	okforeq[types.TMAP] = true    // nil only; refined in typecheck
+	okforeq[types.TFUNC] = true   // nil only; refined in typecheck
+	okforeq[types.TSLICE] = true  // nil only; refined in typecheck
+	okforeq[types.TARRAY] = true  // only if element type is comparable; refined in typecheck
+	okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
+
+	types.IsOrdered[types.TSTRING] = true
+
+	for i := range okfor {
+		okfor[i] = okfornone[:]
+	}
+
+	// binary
+	okfor[ir.OADD] = okforadd[:]
+	okfor[ir.OAND] = okforand[:]
+	okfor[ir.OANDAND] = okforbool[:]
+	okfor[ir.OANDNOT] = okforand[:]
+	okfor[ir.ODIV] = okforarith[:]
+	okfor[ir.OEQ] = okforeq[:]
+	okfor[ir.OGE] = types.IsOrdered[:]
+	okfor[ir.OGT] = types.IsOrdered[:]
+	okfor[ir.OLE] = types.IsOrdered[:]
+	okfor[ir.OLT] = types.IsOrdered[:]
+	okfor[ir.OMOD] = okforand[:]
+	okfor[ir.OMUL] = okforarith[:]
+	okfor[ir.ONE] = okforeq[:]
+	okfor[ir.OOR] = okforand[:]
+	okfor[ir.OOROR] = okforbool[:]
+	okfor[ir.OSUB] = okforarith[:]
+	okfor[ir.OXOR] = okforand[:]
+	okfor[ir.OLSH] = okforand[:]
+	okfor[ir.ORSH] = okforand[:]
+
+	// unary
+	okfor[ir.OBITNOT] = okforand[:]
+	okfor[ir.ONEG] = okforarith[:]
+	okfor[ir.ONOT] = okforbool[:]
+	okfor[ir.OPLUS] = okforarith[:]
+
+	// special
+	okfor[ir.OCAP] = okforcap[:]
+	okfor[ir.OLEN] = okforlen[:]
+
+	// comparison
+	iscmp[ir.OLT] = true
+	iscmp[ir.OGT] = true
+	iscmp[ir.OGE] = true
+	iscmp[ir.OLE] = true
+	iscmp[ir.OEQ] = true
+	iscmp[ir.ONE] = true
+}
+
+func makeErrorInterface() *types.Type {
+	sig := types.NewSignature(types.NoPkg, fakeRecvField(), nil, []*types.Field{
+		types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]),
+	})
+	method := types.NewField(src.NoXPos, Lookup("Error"), sig)
+	return types.NewInterface(types.NoPkg, []*types.Field{method})
+}
+
+// DeclareUniverse makes the universe block visible within the current package.
+func DeclareUniverse() {
+	// Operationally, this is similar to a dot import of BuiltinPkg, except
+	// that we silently skip symbols that are already declared in the
+	// package block rather than emitting a redeclared symbol error.
+
+	for _, s := range types.BuiltinPkg.Syms {
+		if s.Def == nil {
+			continue
+		}
+		s1 := Lookup(s.Name)
+		if s1.Def != nil {
+			continue
+		}
+
+		s1.Def = s.Def
+		s1.Block = s.Block
+	}
+
+	ir.RegFP = NewName(Lookup(".fp"))
+	ir.RegFP.SetType(types.Types[types.TINT32])
+	ir.RegFP.Class = ir.PPARAM
+	ir.RegFP.SetUsed(true)
+}
diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
new file mode 100644
index 0000000..2c2700f
--- /dev/null
+++ b/src/cmd/compile/internal/types/alg.go
@@ -0,0 +1,173 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "cmd/compile/internal/base"
+
+// AlgKind describes the kind of algorithms used for comparing and
+// hashing a Type.
+type AlgKind int
+
+//go:generate stringer -type AlgKind -trimprefix A alg.go
+
+const (
+	// These values are known by runtime.
+	ANOEQ AlgKind = iota
+	AMEM0
+	AMEM8
+	AMEM16
+	AMEM32
+	AMEM64
+	AMEM128
+	ASTRING
+	AINTER
+	ANILINTER
+	AFLOAT32
+	AFLOAT64
+	ACPLX64
+	ACPLX128
+
+	// Type can be compared/hashed as regular memory.
+	AMEM AlgKind = 100
+
+	// Type needs special comparison/hashing functions.
+	ASPECIAL AlgKind = -1
+)
+
+// AlgType returns the AlgKind used for comparing and hashing Type t.
+// If it returns ANOEQ, it also returns the component type of t that
+// makes it incomparable.
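+// For example, AlgType for [4]uint32 is (AMEM, nil), while for
+// [4]string it is (ASPECIAL, nil), since string elements need their
+// own compare/hash functions.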
+func AlgType(t *Type) (AlgKind, *Type) {
+	if t.Broke() {
+		return AMEM, nil
+	}
+	if t.Noalg() {
+		return ANOEQ, t
+	}
+
+	switch t.Kind() {
+	case TANY, TFORW:
+		// will be defined later.
+		return ANOEQ, t
+
+	case TINT8, TUINT8, TINT16, TUINT16,
+		TINT32, TUINT32, TINT64, TUINT64,
+		TINT, TUINT, TUINTPTR,
+		TBOOL, TPTR,
+		TCHAN, TUNSAFEPTR:
+		return AMEM, nil
+
+	case TFUNC, TMAP:
+		return ANOEQ, t
+
+	case TFLOAT32:
+		return AFLOAT32, nil
+
+	case TFLOAT64:
+		return AFLOAT64, nil
+
+	case TCOMPLEX64:
+		return ACPLX64, nil
+
+	case TCOMPLEX128:
+		return ACPLX128, nil
+
+	case TSTRING:
+		return ASTRING, nil
+
+	case TINTER:
+		if t.IsEmptyInterface() {
+			return ANILINTER, nil
+		}
+		return AINTER, nil
+
+	case TSLICE:
+		return ANOEQ, t
+
+	case TARRAY:
+		a, bad := AlgType(t.Elem())
+		switch a {
+		case AMEM:
+			return AMEM, nil
+		case ANOEQ:
+			return ANOEQ, bad
+		}
+
+		switch t.NumElem() {
+		case 0:
+			// We checked above that the element type is comparable.
+			return AMEM, nil
+		case 1:
+			// Single-element array is same as its lone element.
+			return a, nil
+		}
+
+		return ASPECIAL, nil
+
+	case TSTRUCT:
+		fields := t.FieldSlice()
+
+		// One-field struct is same as that one field alone.
+		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
+			return AlgType(fields[0].Type)
+		}
+
+		ret := AMEM
+		for i, f := range fields {
+			// All fields must be comparable.
+			a, bad := AlgType(f.Type)
+			if a == ANOEQ {
+				return ANOEQ, bad
+			}
+
+			// Blank fields, padded fields, fields with non-memory
+			// equality need special compare.
+			if a != AMEM || f.Sym.IsBlank() || IsPaddedField(t, i) {
+				ret = ASPECIAL
+			}
+		}
+
+		return ret, nil
+	}
+
+	base.Fatalf("AlgType: unexpected type %v", t)
+	return 0, nil
+}
+
+// TypeHasNoAlg reports whether t does not have any associated hash/eq
+// algorithms because t, or some component of t, is marked Noalg.
+func TypeHasNoAlg(t *Type) bool {
+	a, bad := AlgType(t)
+	return a == ANOEQ && bad.Noalg()
+}
+
+// IsComparable reports whether t is a comparable type.
+func IsComparable(t *Type) bool {
+	a, _ := AlgType(t)
+	return a != ANOEQ
+}
+
+// IncomparableField returns an incomparable Field of struct Type t, if any.
+func IncomparableField(t *Type) *Field {
+	for _, f := range t.FieldSlice() {
+		if !IsComparable(f.Type) {
+			return f
+		}
+	}
+	return nil
+}
+
+// IsPaddedField reports whether the i'th field of struct type t is followed
+// by padding.
+func IsPaddedField(t *Type, i int) bool {
+	if !t.IsStruct() {
+		base.Fatalf("IsPaddedField called non-struct %v", t)
+	}
+	end := t.Width
+	if i+1 < t.NumFields() {
+		end = t.Field(i + 1).Offset
+	}
+	return t.Field(i).End() != end
+}
diff --git a/src/cmd/compile/internal/gc/algkind_string.go b/src/cmd/compile/internal/types/algkind_string.go
similarity index 91%
rename from src/cmd/compile/internal/gc/algkind_string.go
rename to src/cmd/compile/internal/types/algkind_string.go
index 52b5399..a1b518e 100644
--- a/src/cmd/compile/internal/gc/algkind_string.go
+++ b/src/cmd/compile/internal/types/algkind_string.go
@@ -1,6 +1,6 @@
-// Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT.
+// Code generated by "stringer -type AlgKind -trimprefix A alg.go"; DO NOT EDIT.
 
-package gc
+package types
 
 import "strconv"
 
diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/etype_string.go
deleted file mode 100644
index 14fd5b7..0000000
--- a/src/cmd/compile/internal/types/etype_string.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Code generated by "stringer -type EType -trimprefix T"; DO NOT EDIT.
-
-package types
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[Txxx-0]
-	_ = x[TINT8-1]
-	_ = x[TUINT8-2]
-	_ = x[TINT16-3]
-	_ = x[TUINT16-4]
-	_ = x[TINT32-5]
-	_ = x[TUINT32-6]
-	_ = x[TINT64-7]
-	_ = x[TUINT64-8]
-	_ = x[TINT-9]
-	_ = x[TUINT-10]
-	_ = x[TUINTPTR-11]
-	_ = x[TCOMPLEX64-12]
-	_ = x[TCOMPLEX128-13]
-	_ = x[TFLOAT32-14]
-	_ = x[TFLOAT64-15]
-	_ = x[TBOOL-16]
-	_ = x[TPTR-17]
-	_ = x[TFUNC-18]
-	_ = x[TSLICE-19]
-	_ = x[TARRAY-20]
-	_ = x[TSTRUCT-21]
-	_ = x[TCHAN-22]
-	_ = x[TMAP-23]
-	_ = x[TINTER-24]
-	_ = x[TFORW-25]
-	_ = x[TANY-26]
-	_ = x[TSTRING-27]
-	_ = x[TUNSAFEPTR-28]
-	_ = x[TIDEAL-29]
-	_ = x[TNIL-30]
-	_ = x[TBLANK-31]
-	_ = x[TFUNCARGS-32]
-	_ = x[TCHANARGS-33]
-	_ = x[TSSA-34]
-	_ = x[TTUPLE-35]
-	_ = x[TRESULTS-36]
-	_ = x[NTYPE-37]
-}
-
-const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
-
-var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202}
-
-func (i EType) String() string {
-	if i >= EType(len(_EType_index)-1) {
-		return "EType(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _EType_name[_EType_index[i]:_EType_index[i+1]]
-}
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
new file mode 100644
index 0000000..da224d4
--- /dev/null
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -0,0 +1,666 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"go/constant"
+	"strconv"
+	"strings"
+	"sync"
+
+	"cmd/compile/internal/base"
+)
+
+// BuiltinPkg is a fake package that declares the universe block.
+var BuiltinPkg *Pkg
+
+// LocalPkg is the package being compiled.
+var LocalPkg *Pkg
+
+// BlankSym is the blank (_) symbol.
+var BlankSym *Sym
+
+// OrigSym returns the original symbol written by the user.
+func OrigSym(s *Sym) *Sym {
+	if s == nil {
+		return nil
+	}
+
+	if len(s.Name) > 1 && s.Name[0] == '~' {
+		switch s.Name[1] {
+		case 'r': // originally an unnamed result
+			return nil
+		case 'b': // originally the blank identifier _
+			// TODO(mdempsky): Does s.Pkg matter here?
+			return BlankSym
+		}
+		return s
+	}
+
+	if strings.HasPrefix(s.Name, ".anon") {
+		// originally an unnamed or _ name (see subr.go: NewFuncParams)
+		return nil
+	}
+
+	return s
+}
+
+// NumImport tracks how often a package with a given name is imported.
+// It is used to provide a better error message (by using the package
+// path to disambiguate) when a package that appears multiple times
+// with the same name is mentioned in an error message.
+var NumImport = make(map[string]int)
+
+// fmtMode represents the kind of printing being done.
+// The default is regular Go syntax (fmtGo).
+// fmtDebug is like fmtGo but for debugging dumps and prints the type kind too.
+// fmtTypeID and fmtTypeIDName are for generating various unique representations
+// of types used in hashes and the linker.
+type fmtMode int
+
+const (
+	fmtGo fmtMode = iota
+	fmtDebug
+	fmtTypeID
+	fmtTypeIDName
+)
+
+// Sym
+
+// Format implements formatting for a Sym.
+// The valid formats are:
+//
+//	%v	Go syntax: Name for symbols in the local package, PkgName.Name for imported symbols.
+//	%+v	Debug syntax: always include PkgName. prefix even for local names.
+//	%S	Short syntax: Name only, no matter what.
+//
+func (s *Sym) Format(f fmt.State, verb rune) {
+	mode := fmtGo
+	switch verb {
+	case 'v', 'S':
+		if verb == 'v' && f.Flag('+') {
+			mode = fmtDebug
+		}
+		fmt.Fprint(f, sconv(s, verb, mode))
+
+	default:
+		fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
+	}
+}
+
+func (s *Sym) String() string {
+	return sconv(s, 0, fmtGo)
+}
+
+// Before changing the implementation of sconv, see #16897
+// for details about the performance implications.
+func sconv(s *Sym, verb rune, mode fmtMode) string {
+	if verb == 'L' {
+		panic("linksymfmt")
+	}
+
+	if s == nil {
+		return "<S>"
+	}
+
+	if s.Name == "_" {
+		return "_"
+	}
+	buf := fmtBufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer fmtBufferPool.Put(buf)
+
+	symfmt(buf, s, verb, mode)
+	return InternString(buf.Bytes())
+}
+
+func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+	if verb == 'L' {
+		panic("linksymfmt")
+	}
+	if s == nil {
+		b.WriteString("<S>")
+		return
+	}
+	if s.Name == "_" {
+		b.WriteString("_")
+		return
+	}
+
+	symfmt(b, s, verb, mode)
+}
+
+func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+	if verb != 'S' {
+		switch mode {
+		case fmtGo: // This is for the user
+			if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg {
+				b.WriteString(s.Name)
+				return
+			}
+
+			// If the name was used by multiple packages, display the full path,
+			if s.Pkg.Name != "" && NumImport[s.Pkg.Name] > 1 {
+				fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name)
+				return
+			}
+			b.WriteString(s.Pkg.Name)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+
+		case fmtDebug:
+			b.WriteString(s.Pkg.Name)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+
+		case fmtTypeIDName:
+			// dcommontype, typehash
+			b.WriteString(s.Pkg.Name)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+
+		case fmtTypeID:
+			// (methodsym), typesym, weaksym
+			b.WriteString(s.Pkg.Prefix)
+			b.WriteByte('.')
+			b.WriteString(s.Name)
+			return
+		}
+	}
+
+	b.WriteString(s.Name)
+}
+
+// Type
+
+var BasicTypeNames = []string{
+	TINT:        "int",
+	TUINT:       "uint",
+	TINT8:       "int8",
+	TUINT8:      "uint8",
+	TINT16:      "int16",
+	TUINT16:     "uint16",
+	TINT32:      "int32",
+	TUINT32:     "uint32",
+	TINT64:      "int64",
+	TUINT64:     "uint64",
+	TUINTPTR:    "uintptr",
+	TFLOAT32:    "float32",
+	TFLOAT64:    "float64",
+	TCOMPLEX64:  "complex64",
+	TCOMPLEX128: "complex128",
+	TBOOL:       "bool",
+	TANY:        "any",
+	TSTRING:     "string",
+	TNIL:        "nil",
+	TIDEAL:      "untyped number",
+	TBLANK:      "blank",
+}
+
+var fmtBufferPool = sync.Pool{
+	New: func() interface{} {
+		return new(bytes.Buffer)
+	},
+}
+
+// Format implements formatting for a Type.
+// The valid formats are:
+//
+//	%v	Go syntax
+//	%+v	Debug syntax: Go syntax with a KIND- prefix for all but builtins.
+//	%L	Go syntax for underlying type if t is named
+//	%S	short Go syntax: drop leading "func" in function type
+//	%-S	special case for method receiver symbol
+//
+func (t *Type) Format(s fmt.State, verb rune) {
+	mode := fmtGo
+	switch verb {
+	case 'v', 'S', 'L':
+		if verb == 'v' && s.Flag('+') { // %+v is debug format
+			mode = fmtDebug
+		}
+		if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format
+			mode = fmtTypeID
+		}
+		fmt.Fprint(s, tconv(t, verb, mode))
+	default:
+		fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
+	}
+}
+
+// String returns the Go syntax for the type t.
+func (t *Type) String() string {
+	return tconv(t, 0, fmtGo)
+}
+
+// ShortString generates a short description of t.
+// It is used in autogenerated method names, reflection,
+// and itab names.
+func (t *Type) ShortString() string {
+	return tconv(t, 0, fmtTypeID)
+}
+
+// LongString generates a complete description of t.
+// It is useful for reflection,
+// or when a unique fingerprint or hash of a type is required.
+func (t *Type) LongString() string {
+	return tconv(t, 0, fmtTypeIDName)
+}
+
+func tconv(t *Type, verb rune, mode fmtMode) string {
+	buf := fmtBufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer fmtBufferPool.Put(buf)
+
+	tconv2(buf, t, verb, mode, nil)
+	return InternString(buf.Bytes())
+}
+
+// tconv2 writes a string representation of t to b.
+// flag and mode control exactly what is printed.
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
+// See #16897 before changing the implementation of tconv.
+func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type]int) {
+	if off, ok := visited[t]; ok {
+		// We've seen this type before, so we're trying to print it recursively.
+		// Print a reference to it instead.
+		fmt.Fprintf(b, "@%d", off)
+		return
+	}
+	if t == nil {
+		b.WriteString("<T>")
+		return
+	}
+	if t.Kind() == TSSA {
+		b.WriteString(t.Extra.(string))
+		return
+	}
+	if t.Kind() == TTUPLE {
+		b.WriteString(t.FieldType(0).String())
+		b.WriteByte(',')
+		b.WriteString(t.FieldType(1).String())
+		return
+	}
+
+	if t.Kind() == TRESULTS {
+		tys := t.Extra.(*Results).Types
+		for i, et := range tys {
+			if i > 0 {
+				b.WriteByte(',')
+			}
+			b.WriteString(et.String())
+		}
+		return
+	}
+
+	if t == ByteType || t == RuneType {
+		// in %-T mode collapse rune and byte with their originals.
+		switch mode {
+		case fmtTypeIDName, fmtTypeID:
+			t = Types[t.Kind()]
+		default:
+			sconv2(b, t.Sym(), 'S', mode)
+			return
+		}
+	}
+	if t == ErrorType {
+		b.WriteString("error")
+		return
+	}
+
+	// Unless the 'L' flag was specified, if the type has a name, just print that name.
+	if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
+		switch mode {
+		case fmtTypeID, fmtTypeIDName:
+			if verb == 'S' {
+				if t.Vargen != 0 {
+					sconv2(b, t.Sym(), 'S', mode)
+					fmt.Fprintf(b, "·%d", t.Vargen)
+					return
+				}
+				sconv2(b, t.Sym(), 'S', mode)
+				return
+			}
+
+			if mode == fmtTypeIDName {
+				sconv2(b, t.Sym(), 'v', fmtTypeIDName)
+				return
+			}
+
+			if t.Sym().Pkg == LocalPkg && t.Vargen != 0 {
+				sconv2(b, t.Sym(), 'v', mode)
+				fmt.Fprintf(b, "·%d", t.Vargen)
+				return
+			}
+		}
+
+		sconv2(b, t.Sym(), 'v', mode)
+		return
+	}
+
+	if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" {
+		var name string
+		switch t {
+		case UntypedBool:
+			name = "untyped bool"
+		case UntypedString:
+			name = "untyped string"
+		case UntypedInt:
+			name = "untyped int"
+		case UntypedRune:
+			name = "untyped rune"
+		case UntypedFloat:
+			name = "untyped float"
+		case UntypedComplex:
+			name = "untyped complex"
+		default:
+			name = BasicTypeNames[t.Kind()]
+		}
+		b.WriteString(name)
+		return
+	}
+
+	if mode == fmtDebug {
+		b.WriteString(t.Kind().String())
+		b.WriteByte('-')
+		tconv2(b, t, 'v', fmtGo, visited)
+		return
+	}
+
+	// At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
+	// try to print it recursively.
+	// We record the offset in the result buffer where the type's text starts. This offset serves as a reference
+	// point for any later references to the same type.
+	// Note that we remove the type from the visited map as soon as the recursive call is done.
+	// This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
+	// but I'd like to use the @ notation only when strictly necessary.)
+	if visited == nil {
+		visited = map[*Type]int{}
+	}
+	visited[t] = b.Len()
+	defer delete(visited, t)
+
+	switch t.Kind() {
+	case TPTR:
+		b.WriteByte('*')
+		switch mode {
+		case fmtTypeID, fmtTypeIDName:
+			if verb == 'S' {
+				tconv2(b, t.Elem(), 'S', mode, visited)
+				return
+			}
+		}
+		tconv2(b, t.Elem(), 'v', mode, visited)
+
+	case TARRAY:
+		b.WriteByte('[')
+		b.WriteString(strconv.FormatInt(t.NumElem(), 10))
+		b.WriteByte(']')
+		tconv2(b, t.Elem(), 0, mode, visited)
+
+	case TSLICE:
+		b.WriteString("[]")
+		tconv2(b, t.Elem(), 0, mode, visited)
+
+	case TCHAN:
+		switch t.ChanDir() {
+		case Crecv:
+			b.WriteString("<-chan ")
+			tconv2(b, t.Elem(), 0, mode, visited)
+		case Csend:
+			b.WriteString("chan<- ")
+			tconv2(b, t.Elem(), 0, mode, visited)
+		default:
+			b.WriteString("chan ")
+			if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == Crecv {
+				b.WriteByte('(')
+				tconv2(b, t.Elem(), 0, mode, visited)
+				b.WriteByte(')')
+			} else {
+				tconv2(b, t.Elem(), 0, mode, visited)
+			}
+		}
+
+	case TMAP:
+		b.WriteString("map[")
+		tconv2(b, t.Key(), 0, mode, visited)
+		b.WriteByte(']')
+		tconv2(b, t.Elem(), 0, mode, visited)
+
+	case TINTER:
+		if t.IsEmptyInterface() {
+			b.WriteString("interface {}")
+			break
+		}
+		b.WriteString("interface {")
+		for i, f := range t.Fields().Slice() {
+			if i != 0 {
+				b.WriteByte(';')
+			}
+			b.WriteByte(' ')
+			switch {
+			case f.Sym == nil:
+				// Check first that a symbol is defined for this type.
+				// Wrong interface definitions may have types lacking a symbol.
+				break
+			case IsExported(f.Sym.Name):
+				sconv2(b, f.Sym, 'S', mode)
+			default:
+				if mode != fmtTypeIDName {
+					mode = fmtTypeID
+				}
+				sconv2(b, f.Sym, 'v', mode)
+			}
+			tconv2(b, f.Type, 'S', mode, visited)
+		}
+		if t.NumFields() != 0 {
+			b.WriteByte(' ')
+		}
+		b.WriteByte('}')
+
+	case TFUNC:
+		if verb == 'S' {
+			// no leading func
+		} else {
+			if t.Recv() != nil {
+				b.WriteString("method")
+				tconv2(b, t.Recvs(), 0, mode, visited)
+				b.WriteByte(' ')
+			}
+			b.WriteString("func")
+		}
+		tconv2(b, t.Params(), 0, mode, visited)
+
+		switch t.NumResults() {
+		case 0:
+			// nothing to do
+
+		case 1:
+			b.WriteByte(' ')
+			tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
+
+		default:
+			b.WriteByte(' ')
+			tconv2(b, t.Results(), 0, mode, visited)
+		}
+
+	case TSTRUCT:
+		if m := t.StructType().Map; m != nil {
+			mt := m.MapType()
+			// Format the bucket struct for map[x]y as map.bucket[x]y.
+			// This avoids a recursive print that generates very long names.
+			switch t {
+			case mt.Bucket:
+				b.WriteString("map.bucket[")
+			case mt.Hmap:
+				b.WriteString("map.hdr[")
+			case mt.Hiter:
+				b.WriteString("map.iter[")
+			default:
+				base.Fatalf("unknown internal map type")
+			}
+			tconv2(b, m.Key(), 0, mode, visited)
+			b.WriteByte(']')
+			tconv2(b, m.Elem(), 0, mode, visited)
+			break
+		}
+
+		if funarg := t.StructType().Funarg; funarg != FunargNone {
+			b.WriteByte('(')
+			fieldVerb := 'v'
+			switch mode {
+			case fmtTypeID, fmtTypeIDName, fmtGo:
+				// no argument names on function signature, and no "noescape"/"nosplit" tags
+				fieldVerb = 'S'
+			}
+			for i, f := range t.Fields().Slice() {
+				if i != 0 {
+					b.WriteString(", ")
+				}
+				fldconv(b, f, fieldVerb, mode, visited, funarg)
+			}
+			b.WriteByte(')')
+		} else {
+			b.WriteString("struct {")
+			for i, f := range t.Fields().Slice() {
+				if i != 0 {
+					b.WriteByte(';')
+				}
+				b.WriteByte(' ')
+				fldconv(b, f, 'L', mode, visited, funarg)
+			}
+			if t.NumFields() != 0 {
+				b.WriteByte(' ')
+			}
+			b.WriteByte('}')
+		}
+
+	case TFORW:
+		b.WriteString("undefined")
+		if t.Sym() != nil {
+			b.WriteByte(' ')
+			sconv2(b, t.Sym(), 'v', mode)
+		}
+
+	case TUNSAFEPTR:
+		b.WriteString("unsafe.Pointer")
+
+	case Txxx:
+		b.WriteString("Txxx")
+
+	default:
+		// Don't know how to handle - fall back to detailed prints
+		b.WriteString(t.Kind().String())
+		b.WriteString(" <")
+		sconv2(b, t.Sym(), 'v', mode)
+		b.WriteString(">")
+
+	}
+}
+
+func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, funarg Funarg) {
+	if f == nil {
+		b.WriteString("<T>")
+		return
+	}
+
+	var name string
+	if verb != 'S' {
+		s := f.Sym
+
+		// Take the name from the original.
+		if mode == fmtGo {
+			s = OrigSym(s)
+		}
+
+		if s != nil && f.Embedded == 0 {
+			if funarg != FunargNone {
+				name = fmt.Sprint(f.Nname)
+			} else if verb == 'L' {
+				name = s.Name
+				if name == ".F" {
+					name = "F" // Hack for toolstash -cmp.
+				}
+				if !IsExported(name) && mode != fmtTypeIDName {
+					name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg)
+				}
+			} else {
+				name = sconv(s, 0, mode)
+			}
+		}
+	}
+
+	if name != "" {
+		b.WriteString(name)
+		b.WriteString(" ")
+	}
+
+	if f.IsDDD() {
+		var et *Type
+		if f.Type != nil {
+			et = f.Type.Elem()
+		}
+		b.WriteString("...")
+		tconv2(b, et, 0, mode, visited)
+	} else {
+		tconv2(b, f.Type, 0, mode, visited)
+	}
+
+	if verb != 'S' && funarg == FunargNone && f.Note != "" {
+		b.WriteString(" ")
+		b.WriteString(strconv.Quote(f.Note))
+	}
+}
+
+// Val
+
+func FmtConst(v constant.Value, sharp bool) string {
+	if !sharp && v.Kind() == constant.Complex {
+		real, imag := constant.Real(v), constant.Imag(v)
+
+		var re string
+		sre := constant.Sign(real)
+		if sre != 0 {
+			re = real.String()
+		}
+
+		var im string
+		sim := constant.Sign(imag)
+		if sim != 0 {
+			im = imag.String()
+		}
+
+		switch {
+		case sre == 0 && sim == 0:
+			return "0"
+		case sre == 0:
+			return im + "i"
+		case sim == 0:
+			return re
+		case sim < 0:
+			return fmt.Sprintf("(%s%si)", re, im)
+		default:
+			return fmt.Sprintf("(%s+%si)", re, im)
+		}
+	}
+
+	return v.String()
+}
+
+// TypeHash computes a hash value for type t to use in type switch statements.
+func TypeHash(t *Type) uint32 {
+	p := t.LongString()
+
+	// Using MD5 is overkill, but reduces accidental collisions.
+	h := md5.Sum([]byte(p))
+	return binary.LittleEndian.Uint32(h[:4])
+}
diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
new file mode 100644
index 0000000..1a324aa
--- /dev/null
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -0,0 +1,94 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+	"fmt"
+	"internal/goversion"
+	"log"
+	"regexp"
+	"strconv"
+
+	"cmd/compile/internal/base"
+)
+
+// A lang is a language version broken into major and minor numbers.
+type lang struct {
+	major, minor int
+}
+
+// langWant is the desired language version set by the -lang flag.
+// If the -lang flag is not set, this is the zero value, meaning that
+// any language version is supported.
+var langWant lang
+
+// AllowsGoVersion reports whether a particular package
+// is allowed to use Go version major.minor.
+// We assume the imported packages have all been checked,
+// so we only have to check the local package against the -lang flag.
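+// For example, with -lang=go1.12, AllowsGoVersion(LocalPkg, 1, 13)
+// reports false, while AllowsGoVersion(LocalPkg, 1, 12) reports true.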
+func AllowsGoVersion(pkg *Pkg, major, minor int) bool {
+	if pkg == nil {
+		// TODO(mdempsky): Set Pkg for local types earlier.
+		pkg = LocalPkg
+	}
+	if pkg != LocalPkg {
+		// Assume imported packages passed type-checking.
+		return true
+	}
+	if langWant.major == 0 && langWant.minor == 0 {
+		return true
+	}
+	return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
+}
+
+// ParseLangFlag verifies that the -lang flag holds a valid value, and
+// exits if not. It initializes data used by AllowsGoVersion.
+func ParseLangFlag() {
+	if base.Flag.Lang == "" {
+		return
+	}
+
+	var err error
+	langWant, err = parseLang(base.Flag.Lang)
+	if err != nil {
+		log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
+	}
+
+	if def := currentLang(); base.Flag.Lang != def {
+		defVers, err := parseLang(def)
+		if err != nil {
+			log.Fatalf("internal error parsing default lang %q: %v", def, err)
+		}
+		if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
+			log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
+		}
+	}
+}
+
+// parseLang parses a -lang option such as "go1.12" into a lang.
+func parseLang(s string) (lang, error) {
+	matches := goVersionRE.FindStringSubmatch(s)
+	if matches == nil {
+		return lang{}, fmt.Errorf(`should be something like "go1.12"`)
+	}
+	major, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return lang{}, err
+	}
+	minor, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return lang{}, err
+	}
+	return lang{major: major, minor: minor}, nil
+}
+
+// currentLang returns the current language version.
+func currentLang() string {
+	return fmt.Sprintf("go1.%d", goversion.Version)
+}
+
+// goVersionRE is a regular expression that matches the valid
+// arguments to the -lang flag.
+var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
index a77f514..9bc636d 100644
--- a/src/cmd/compile/internal/types/identity.go
+++ b/src/cmd/compile/internal/types/identity.go
@@ -25,17 +25,17 @@
 	if t1 == t2 {
 		return true
 	}
-	if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
+	if t1 == nil || t2 == nil || t1.kind != t2.kind || t1.Broke() || t2.Broke() {
 		return false
 	}
-	if t1.Sym != nil || t2.Sym != nil {
+	if t1.sym != nil || t2.sym != nil {
 		// Special case: we keep byte/uint8 and rune/int32
 		// separate for error messages. Treat them as equal.
-		switch t1.Etype {
+		switch t1.kind {
 		case TUINT8:
-			return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype)
+			return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType)
 		case TINT32:
-			return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype)
+			return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType)
 		default:
 			return false
 		}
@@ -52,7 +52,7 @@
 	}
 	assumedEqual[typePair{t1, t2}] = struct{}{}
 
-	switch t1.Etype {
+	switch t1.kind {
 	case TIDEAL:
 		// Historically, cmd/compile used a single "untyped
 		// number" type, so all untyped number types were
diff --git a/src/cmd/compile/internal/types/kind_string.go b/src/cmd/compile/internal/types/kind_string.go
new file mode 100644
index 0000000..1e1e846
--- /dev/null
+++ b/src/cmd/compile/internal/types/kind_string.go
@@ -0,0 +1,60 @@
+// Code generated by "stringer -type Kind -trimprefix T type.go"; DO NOT EDIT.
+
+package types
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Txxx-0]
+	_ = x[TINT8-1]
+	_ = x[TUINT8-2]
+	_ = x[TINT16-3]
+	_ = x[TUINT16-4]
+	_ = x[TINT32-5]
+	_ = x[TUINT32-6]
+	_ = x[TINT64-7]
+	_ = x[TUINT64-8]
+	_ = x[TINT-9]
+	_ = x[TUINT-10]
+	_ = x[TUINTPTR-11]
+	_ = x[TCOMPLEX64-12]
+	_ = x[TCOMPLEX128-13]
+	_ = x[TFLOAT32-14]
+	_ = x[TFLOAT64-15]
+	_ = x[TBOOL-16]
+	_ = x[TPTR-17]
+	_ = x[TFUNC-18]
+	_ = x[TSLICE-19]
+	_ = x[TARRAY-20]
+	_ = x[TSTRUCT-21]
+	_ = x[TCHAN-22]
+	_ = x[TMAP-23]
+	_ = x[TINTER-24]
+	_ = x[TFORW-25]
+	_ = x[TANY-26]
+	_ = x[TSTRING-27]
+	_ = x[TUNSAFEPTR-28]
+	_ = x[TIDEAL-29]
+	_ = x[TNIL-30]
+	_ = x[TBLANK-31]
+	_ = x[TFUNCARGS-32]
+	_ = x[TCHANARGS-33]
+	_ = x[TSSA-34]
+	_ = x[TTUPLE-35]
+	_ = x[TRESULTS-36]
+	_ = x[NTYPE-37]
+}
+
+const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+
+var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202}
+
+func (i Kind) String() string {
+	if i >= Kind(len(_Kind_index)-1) {
+		return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index bcc6789..a6d2e20 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -31,8 +31,7 @@
 	// height of their imported packages.
 	Height int
 
-	Imported bool // export data of this package was parsed
-	Direct   bool // imported directly
+	Direct bool // imported directly
 }
 
 // NewPkg returns a new Pkg for the given package path and name.
@@ -84,9 +83,6 @@
 	return s
 }
 
-// List of .inittask entries in imported packages, in source code order.
-var InitSyms []*Sym
-
 // LookupOK looks up name in pkg and reports whether it previously existed.
 func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
 	// TODO(gri) remove this check in favor of specialized lookup
@@ -101,9 +97,6 @@
 		Name: name,
 		Pkg:  pkg,
 	}
-	if name == ".inittask" {
-		InitSyms = append(InitSyms, s)
-	}
 	pkg.Syms[name] = s
 	return s, false
 }
@@ -144,3 +137,7 @@
 	f()
 	pkgMap = saved
 }
+
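+// IsDotAlias reports whether sym is bound to a definition introduced
+// under a different symbol, as happens for names made visible by a
+// dot import.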
+func IsDotAlias(sym *Sym) bool {
+	return sym.Def != nil && sym.Def.Sym() != sym
+}
diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go
index 40d3d86..d7c454f 100644
--- a/src/cmd/compile/internal/types/scope.go
+++ b/src/cmd/compile/internal/types/scope.go
@@ -4,18 +4,21 @@
 
 package types
 
-import "cmd/internal/src"
+import (
+	"cmd/compile/internal/base"
+	"cmd/internal/src"
+)
 
 // Declaration stack & operations
 
 var blockgen int32 = 1 // max block number
-var Block int32        // current block number
+var Block int32 = 1    // current block number
 
 // A dsym stores a symbol's shadowed declaration so that it can be
 // restored once the block scope ends.
 type dsym struct {
 	sym        *Sym // sym == nil indicates stack mark
-	def        *Node
+	def        Object
 	block      int32
 	lastlineno src.XPos // last declaration for diagnostic
 }
@@ -56,7 +59,7 @@
 		d.sym = nil
 		d.def = nil
 	}
-	Fatalf("popdcl: no stack mark")
+	base.Fatalf("popdcl: no stack mark")
 }
 
 // Markdcl records the start of a new block scope for declarations.
@@ -69,7 +72,7 @@
 	Block = blockgen
 }
 
-func IsDclstackValid() bool {
+func isDclstackValid() bool {
 	for _, d := range dclstack {
 		if d.sym == nil {
 			return false
@@ -79,19 +82,20 @@
 }
 
 // PkgDef returns the definition associated with s at package scope.
-func (s *Sym) PkgDef() *Node {
+func (s *Sym) PkgDef() Object {
 	return *s.pkgDefPtr()
 }
 
 // SetPkgDef sets the definition associated with s at package scope.
-func (s *Sym) SetPkgDef(n *Node) {
+func (s *Sym) SetPkgDef(n Object) {
 	*s.pkgDefPtr() = n
 }
 
-func (s *Sym) pkgDefPtr() **Node {
+func (s *Sym) pkgDefPtr() *Object {
 	// Look for outermost saved declaration, which must be the
 	// package scope definition, if present.
-	for _, d := range dclstack {
+	for i := range dclstack {
+		d := &dclstack[i]
 		if s == d.sym {
 			return &d.def
 		}
@@ -101,3 +105,9 @@
 	// function scope.
 	return &s.Def
 }
+
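+// CheckDclstack throws a fatal compiler error if a scope mark was
+// left on the declaration stack.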
+func CheckDclstack() {
+	if !isDclstackValid() {
+		base.Fatalf("mark left on the dclstack")
+	}
+}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
new file mode 100644
index 0000000..98540ee
--- /dev/null
+++ b/src/cmd/compile/internal/types/size.go
@@ -0,0 +1,633 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/internal/src"
+)
+
+var PtrSize int
+
+var RegSize int
+
+// Slices in the runtime are represented by three components:
+//
+// type slice struct {
+// 	ptr unsafe.Pointer
+// 	len int
+// 	cap int
+// }
+//
+// Strings in the runtime are represented by two components:
+//
+// type string struct {
+// 	ptr unsafe.Pointer
+// 	len int
+// }
+//
+// These variables are the offsets of fields and sizes of these structs.
+var (
+	SlicePtrOffset int64
+	SliceLenOffset int64
+	SliceCapOffset int64
+
+	SliceSize  int64
+	StringSize int64
+)
+
+var SkipSizeForTracing bool
+
+// typePos returns the position associated with t.
+// This is where t was declared or where it appeared as a type expression.
+func typePos(t *Type) src.XPos {
+	if pos := t.Pos(); pos.IsKnown() {
+		return pos
+	}
+	base.Fatalf("bad type: %v", t)
+	panic("unreachable")
+}
+
+// MaxWidth is the maximum size of a value on the target architecture.
+var MaxWidth int64
+
+// CalcSizeDisabled indicates whether it is currently unsafe
+// to calculate Types' widths and alignments. See CalcSize.
+var CalcSizeDisabled bool
+
+// Machine size and rounding alignment are dictated by
+// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
+var defercalc int
+
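+// Rnd rounds o up to a multiple of r, which must be a power of two
+// in [1, 8]; for example, Rnd(13, 8) == 16.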
+func Rnd(o int64, r int64) int64 {
+	if r < 1 || r > 8 || r&(r-1) != 0 {
+		base.Fatalf("rnd %d", r)
+	}
+	return (o + r - 1) &^ (r - 1)
+}
+
+// expandiface computes the method set for interface type t by
+// expanding embedded interfaces.
+func expandiface(t *Type) {
+	seen := make(map[*Sym]*Field)
+	var methods []*Field
+
+	addMethod := func(m *Field, explicit bool) {
+		switch prev := seen[m.Sym]; {
+		case prev == nil:
+			seen[m.Sym] = m
+		case AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && Identical(m.Type, prev.Type):
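+			// Go 1.14 allows duplicate methods that arise from
+			// embedded interfaces, provided the signatures are
+			// identical (the overlapping interfaces proposal).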
+			return
+		default:
+			base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
+		}
+		methods = append(methods, m)
+	}
+
+	for _, m := range t.Methods().Slice() {
+		if m.Sym == nil {
+			continue
+		}
+
+		CheckSize(m.Type)
+		addMethod(m, true)
+	}
+
+	for _, m := range t.Methods().Slice() {
+		if m.Sym != nil {
+			continue
+		}
+
+		if !m.Type.IsInterface() {
+			base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
+			m.SetBroke(true)
+			t.SetBroke(true)
+			// Add to fields so that error messages
+			// include the broken embedded type when
+			// printing t.
+			// TODO(mdempsky): Revisit this.
+			methods = append(methods, m)
+			continue
+		}
+
+		// Embedded interface: duplicate all methods
+		// (including broken ones, if any) and add to t's
+		// method set.
+		for _, t1 := range m.Type.Fields().Slice() {
+			// Use m.Pos rather than t1.Pos to preserve embedding position.
+			f := NewField(m.Pos, t1.Sym, t1.Type)
+			addMethod(f, false)
+		}
+	}
+
+	sort.Sort(MethodsByName(methods))
+
+	if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
+		base.ErrorfAt(typePos(t), "interface too large")
+	}
+	for i, m := range methods {
+		m.Offset = int64(i) * int64(PtrSize)
+	}
+
+	// Access fields directly to avoid recursively calling CalcSize
+	// within Type.Fields().
+	t.Extra.(*Interface).Fields.Set(methods)
+}
+
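+// calcStructOffset assigns offsets to the fields of t, starting at
+// offset o, and returns the offset just past the last field. flag
+// seeds the minimum alignment; any nonzero flag rounds the final
+// width up to the maximum alignment, and flag == 1 (a real struct)
+// additionally pads after a trailing zero-sized field (issue 9401).
+// Size errors are reported against errtype.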
+func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
+	starto := o
+	maxalign := int32(flag)
+	if maxalign < 1 {
+		maxalign = 1
+	}
+	lastzero := int64(0)
+	for _, f := range t.Fields().Slice() {
+		if f.Type == nil {
+			// broken field, just skip it so that other valid fields
+			// get a width.
+			continue
+		}
+
+		CalcSize(f.Type)
+		if int32(f.Type.Align) > maxalign {
+			maxalign = int32(f.Type.Align)
+		}
+		if f.Type.Align > 0 {
+			o = Rnd(o, int64(f.Type.Align))
+		}
+		f.Offset = o
+		if f.Nname != nil {
+			// addrescapes has similar code to update these offsets.
+			// Usually addrescapes runs after calcStructOffset,
+			// in which case we could drop this,
+			// but function closure functions are the exception.
+			// NOTE(rsc): This comment may be stale.
+			// It's possible the ordering has changed and this is
+			// now the common case. I'm not sure.
+			f.Nname.(VarObject).RecordFrameOffset(o)
+		}
+
+		w := f.Type.Width
+		if w < 0 {
+			base.Fatalf("invalid width %d", f.Type.Width)
+		}
+		if w == 0 {
+			lastzero = o
+		}
+		o += w
+		maxwidth := MaxWidth
+		// On 32-bit systems, reflect tables impose an additional constraint
+		// that each field start offset must fit in 31 bits.
+		if maxwidth < 1<<32 {
+			maxwidth = 1<<31 - 1
+		}
+		if o >= maxwidth {
+			base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
+			o = 8 // small but nonzero
+		}
+	}
+
+	// For nonzero-sized structs which end in a zero-sized thing, we add
+	// an extra byte of padding to the type. This padding ensures that
+	// taking the address of the zero-sized thing can't manufacture a
+	// pointer to the next object in the heap. See issue 9401.
+	if flag == 1 && o > starto && o == lastzero {
+		o++
+	}
+
+	// final width is rounded
+	if flag != 0 {
+		o = Rnd(o, int64(maxalign))
+	}
+	t.Align = uint8(maxalign)
+
+	// type width only includes back to first field's offset
+	t.Width = o - starto
+
+	return o
+}
+
+// findTypeLoop searches for an invalid type declaration loop involving
+// type t and reports whether one is found. If so, path contains the
+// loop.
+//
+// path points to a slice used for tracking the sequence of types
+// visited. Using a pointer to a slice allows the slice capacity to
+// grow and limit reallocations.
+func findTypeLoop(t *Type, path *[]*Type) bool {
+	// We implement a simple DFS loop-finding algorithm. This
+	// could be faster, but type cycles are rare.
+
+	if t.Sym() != nil {
+		// Declared type. Check for loops and otherwise
+		// recurse on the type expression used in the type
+		// declaration.
+
+		// Type imported from package, so it can't be part of
+		// a type loop (otherwise that package should have
+		// failed to compile).
+		if t.Sym().Pkg != LocalPkg {
+			return false
+		}
+
+		for i, x := range *path {
+			if x == t {
+				*path = (*path)[i:]
+				return true
+			}
+		}
+
+		*path = append(*path, t)
+		if findTypeLoop(t.Obj().(TypeObject).TypeDefn(), path) {
+			return true
+		}
+		*path = (*path)[:len(*path)-1]
+	} else {
+		// Anonymous type. Recurse on contained types.
+
+		switch t.Kind() {
+		case TARRAY:
+			if findTypeLoop(t.Elem(), path) {
+				return true
+			}
+		case TSTRUCT:
+			for _, f := range t.Fields().Slice() {
+				if findTypeLoop(f.Type, path) {
+					return true
+				}
+			}
+		case TINTER:
+			for _, m := range t.Methods().Slice() {
+				if m.Type.IsInterface() { // embedded interface
+					if findTypeLoop(m.Type, path) {
+						return true
+					}
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+func reportTypeLoop(t *Type) {
+	if t.Broke() {
+		return
+	}
+
+	var l []*Type
+	if !findTypeLoop(t, &l) {
+		base.Fatalf("failed to find type loop for: %v", t)
+	}
+
+	// Rotate loop so that the earliest type declaration is first.
+	i := 0
+	for j, t := range l[1:] {
+		if typePos(t).Before(typePos(l[i])) {
+			i = j + 1
+		}
+	}
+	l = append(l[i:], l[:i]...)
+
+	var msg bytes.Buffer
+	fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
+	for _, t := range l {
+		fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
+		t.SetBroke(true)
+	}
+	fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
+	base.ErrorfAt(typePos(l[0]), msg.String())
+}
+
+// CalcSize calculates and stores the size and alignment for t.
+// If CalcSizeDisabled is set, and the size/alignment
+// have not already been calculated, it calls Fatal.
+// This is used to prevent data races in the back end.
+func CalcSize(t *Type) {
+	// Calling CalcSize when typecheck tracing enabled is not safe.
+	// See issue #33658.
+	if base.EnableTrace && SkipSizeForTracing {
+		return
+	}
+	if PtrSize == 0 {
+		// Assume this is a test.
+		return
+	}
+
+	if t == nil {
+		return
+	}
+
+	if t.Width == -2 {
+		reportTypeLoop(t)
+		t.Width = 0
+		t.Align = 1
+		return
+	}
+
+	if t.WidthCalculated() {
+		return
+	}
+
+	if CalcSizeDisabled {
+		if t.Broke() {
+			// break infinite recursion from Fatal call below
+			return
+		}
+		t.SetBroke(true)
+		base.Fatalf("width not calculated: %v", t)
+	}
+
+	// break infinite recursion if the broken recursive type
+	// is referenced again
+	if t.Broke() && t.Width == 0 {
+		return
+	}
+
+	// defer CheckSize calls until after we're done
+	DeferCheckSize()
+
+	lno := base.Pos
+	if pos := t.Pos(); pos.IsKnown() {
+		base.Pos = pos
+	}
+
+	t.Width = -2
+	t.Align = 0 // 0 means use t.Width, below
+
+	et := t.Kind()
+	switch et {
+	case TFUNC, TCHAN, TMAP, TSTRING:
+		break
+
+	// SimType == 0 during bootstrap
+	default:
+		if SimType[t.Kind()] != 0 {
+			et = SimType[t.Kind()]
+		}
+	}
+
+	var w int64
+	switch et {
+	default:
+		base.Fatalf("CalcSize: unknown type: %v", t)
+
+	// compiler-specific stuff
+	case TINT8, TUINT8, TBOOL:
+		// bool is int8
+		w = 1
+
+	case TINT16, TUINT16:
+		w = 2
+
+	case TINT32, TUINT32, TFLOAT32:
+		w = 4
+
+	case TINT64, TUINT64, TFLOAT64:
+		w = 8
+		t.Align = uint8(RegSize)
+
+	case TCOMPLEX64:
+		w = 8
+		t.Align = 4
+
+	case TCOMPLEX128:
+		w = 16
+		t.Align = uint8(RegSize)
+
+	case TPTR:
+		w = int64(PtrSize)
+		CheckSize(t.Elem())
+
+	case TUNSAFEPTR:
+		w = int64(PtrSize)
+
+	case TINTER: // implemented as 2 pointers
+		w = 2 * int64(PtrSize)
+		t.Align = uint8(PtrSize)
+		expandiface(t)
+
+	case TCHAN: // implemented as pointer
+		w = int64(PtrSize)
+
+		CheckSize(t.Elem())
+
+		// make fake type to check later to
+		// trigger channel argument check.
+		t1 := NewChanArgs(t)
+		CheckSize(t1)
+
+	case TCHANARGS:
+		t1 := t.ChanArgs()
+		CalcSize(t1) // just in case
+		if t1.Elem().Width >= 1<<16 {
+			base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
+		}
+		w = 1 // anything will do
+
+	case TMAP: // implemented as pointer
+		w = int64(PtrSize)
+		CheckSize(t.Elem())
+		CheckSize(t.Key())
+
+	case TFORW: // should have been filled in
+		reportTypeLoop(t)
+		w = 1 // anything will do
+
+	case TANY:
+		// not a real type; should be replaced before use.
+		base.Fatalf("CalcSize any")
+
+	case TSTRING:
+		if StringSize == 0 {
+			base.Fatalf("early CalcSize string")
+		}
+		w = StringSize
+		t.Align = uint8(PtrSize)
+
+	case TARRAY:
+		if t.Elem() == nil {
+			break
+		}
+
+		CalcSize(t.Elem())
+		if t.Elem().Width != 0 {
+			cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().Width)
+			if uint64(t.NumElem()) > cap {
+				base.ErrorfAt(typePos(t), "type %L larger than address space", t)
+			}
+		}
+		w = t.NumElem() * t.Elem().Width
+		t.Align = t.Elem().Align
+
+	case TSLICE:
+		if t.Elem() == nil {
+			break
+		}
+		w = SliceSize
+		CheckSize(t.Elem())
+		t.Align = uint8(PtrSize)
+
+	case TSTRUCT:
+		if t.IsFuncArgStruct() {
+			base.Fatalf("CalcSize fn struct %v", t)
+		}
+		w = calcStructOffset(t, t, 0, 1)
+
+	// make fake type to check later to
+	// trigger function argument computation.
+	case TFUNC:
+		t1 := NewFuncArgs(t)
+		CheckSize(t1)
+		w = int64(PtrSize) // width of func type is pointer
+
+	// a function is 3 concatenated structures;
+	// compute their widths as side-effect.
+	case TFUNCARGS:
+		t1 := t.FuncArgs()
+		w = calcStructOffset(t1, t1.Recvs(), 0, 0)
+		w = calcStructOffset(t1, t1.Params(), w, RegSize)
+		w = calcStructOffset(t1, t1.Results(), w, RegSize)
+		t1.Extra.(*Func).Argwid = w
+		if w%int64(RegSize) != 0 {
+			base.Warn("bad type %v %d\n", t1, w)
+		}
+		t.Align = 1
+	}
+
+	if PtrSize == 4 && w != int64(int32(w)) {
+		base.ErrorfAt(typePos(t), "type %v too large", t)
+	}
+
+	t.Width = w
+	if t.Align == 0 {
+		if w == 0 || w > 8 || w&(w-1) != 0 {
+			base.Fatalf("invalid alignment for %v", t)
+		}
+		t.Align = uint8(w)
+	}
+
+	base.Pos = lno
+
+	ResumeCheckSize()
+}
+
+// CalcStructSize calculates the size of s,
+// filling in s.Width and s.Align,
+// even if size calculation is otherwise disabled.
+func CalcStructSize(s *Type) {
+	s.Width = calcStructOffset(s, s, 0, 1) // sets align
+}
+
+// When a type's width should be known, we call CheckSize
+// to compute it. During a declaration like
+//
+//	type T *struct { next T }
+//
+// it is necessary to defer the calculation of the struct width
+// until after T has been initialized to be a pointer to that struct.
+// Similarly, during import processing structs may be used
+// before their definition. In those situations, calling
+// DeferCheckSize() stops width calculations until
+// ResumeCheckSize() is called, at which point all the
+// CalcSizes that were deferred are executed.
+// CalcSize should only be called when the type's size
+// is needed immediately. CheckSize makes sure the
+// size is evaluated eventually.
+
+var deferredTypeStack []*Type
+
+func CheckSize(t *Type) {
+	if t == nil {
+		return
+	}
+
+	// function arg structs should not be checked
+	// outside of the enclosing function.
+	if t.IsFuncArgStruct() {
+		base.Fatalf("CheckSize %v", t)
+	}
+
+	if defercalc == 0 {
+		CalcSize(t)
+		return
+	}
+
+	// if type has not yet been pushed on deferredTypeStack yet, do it now
+	if !t.Deferwidth() {
+		t.SetDeferwidth(true)
+		deferredTypeStack = append(deferredTypeStack, t)
+	}
+}
+
+func DeferCheckSize() {
+	defercalc++
+}
+
+func ResumeCheckSize() {
+	if defercalc == 1 {
+		for len(deferredTypeStack) > 0 {
+			t := deferredTypeStack[len(deferredTypeStack)-1]
+			deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
+			t.SetDeferwidth(false)
+			CalcSize(t)
+		}
+	}
+
+	defercalc--
+}
+
+// PtrDataSize returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
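+// For example, on a 64-bit target it is 8 for strings and slices
+// (only the leading data pointer) and 16 for interfaces.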
+func PtrDataSize(t *Type) int64 {
+	if !t.HasPointers() {
+		return 0
+	}
+
+	switch t.Kind() {
+	case TPTR,
+		TUNSAFEPTR,
+		TFUNC,
+		TCHAN,
+		TMAP:
+		return int64(PtrSize)
+
+	case TSTRING:
+		// struct { byte *str; intgo len; }
+		return int64(PtrSize)
+
+	case TINTER:
+		// struct { Itab *tab;	void *data; } or
+		// struct { Type *type; void *data; }
+		// Note: see comment in typebits.Set
+		return 2 * int64(PtrSize)
+
+	case TSLICE:
+		// struct { byte *array; uintgo len; uintgo cap; }
+		return int64(PtrSize)
+
+	case TARRAY:
+		// HasPointers already eliminated t.NumElem() == 0.
+		return (t.NumElem()-1)*t.Elem().Width + PtrDataSize(t.Elem())
+
+	case TSTRUCT:
+		// Find the last field that has pointers.
+		var lastPtrField *Field
+		for _, t1 := range t.Fields().Slice() {
+			if t1.Type.HasPointers() {
+				lastPtrField = t1
+			}
+		}
+		return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
+
+	default:
+		base.Fatalf("PtrDataSize: unexpected type, %v", t)
+		return 0
+	}
+}
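+
+// For example (illustrative): for struct{ x *int; y int64 }, PtrDataSize
+// is PtrSize: the pointer prefix ends after x, and y is scalar data.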
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index ea947d8..675739f 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -20,11 +20,11 @@
 		_32bit uintptr     // size on 32bit platforms
 		_64bit uintptr     // size on 64bit platforms
 	}{
-		{Sym{}, 52, 88},
-		{Type{}, 52, 88},
+		{Sym{}, 44, 72},
+		{Type{}, 56, 96},
 		{Map{}, 20, 40},
 		{Forward{}, 20, 32},
-		{Func{}, 32, 56},
+		{Func{}, 24, 40},
 		{Struct{}, 16, 32},
 		{Interface{}, 8, 16},
 		{Chan{}, 8, 16},
diff --git a/src/cmd/compile/internal/types/sort.go b/src/cmd/compile/internal/types/sort.go
new file mode 100644
index 0000000..dc59b06
--- /dev/null
+++ b/src/cmd/compile/internal/types/sort.go
@@ -0,0 +1,14 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// MethodsByName sorts methods by symbol.
+type MethodsByName []*Field
+
+func (x MethodsByName) Len() int { return len(x) }
+
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
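+
+// A typical (assumed) use is sorting a method slice in place:
+//
+//	sort.Sort(MethodsByName(t.Methods().Slice()))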
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index 07bce4d..0e66ed3 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -5,6 +5,7 @@
 package types
 
 import (
+	"cmd/compile/internal/base"
 	"cmd/internal/obj"
 	"cmd/internal/src"
 	"unicode"
@@ -26,20 +27,17 @@
 // NOTE: In practice, things can be messier than the description above
 // for various reasons (historical, convenience).
 type Sym struct {
-	Importdef *Pkg   // where imported definition was found
-	Linkname  string // link name
+	Linkname string // link name
 
 	Pkg  *Pkg
 	Name string // object name
 
 	// saved and restored by dcopy
-	Def        *Node    // definition: ONAME OTYPE OPACK or OLITERAL
+	Def        Object   // definition: ONAME OTYPE OPACK or OLITERAL
 	Block      int32    // blocknumber to catch redeclaration
 	Lastlineno src.XPos // last declaration for diagnostic
 
-	flags   bitset8
-	Label   *Node // corresponding label (ephemeral)
-	Origpkg *Pkg  // original package for . import
+	flags bitset8
 }
 
 const (
@@ -66,32 +64,30 @@
 	return sym != nil && sym.Name == "_"
 }
 
-func (sym *Sym) LinksymName() string {
-	if sym.IsBlank() {
-		return "_"
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, reflectdata.TypeLinksym(t) instead
+// of reflectdata.TypeSym(t).Linksym().
+func (sym *Sym) Linksym() *obj.LSym {
+	abi := obj.ABI0
+	if sym.Func() {
+		abi = obj.ABIInternal
 	}
-	if sym.Linkname != "" {
-		return sym.Linkname
-	}
-	return sym.Pkg.Prefix + "." + sym.Name
+	return sym.LinksymABI(abi)
 }
 
-func (sym *Sym) Linksym() *obj.LSym {
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, (*ir.Name).LinksymABI(abi) instead
+// of (*ir.Name).Sym().LinksymABI(abi).
+func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym {
 	if sym == nil {
-		return nil
+		base.Fatalf("nil symbol")
 	}
-	initPkg := func(r *obj.LSym) {
-		if sym.Linkname != "" {
-			r.Pkg = "_"
-		} else {
-			r.Pkg = sym.Pkg.Prefix
-		}
+	if sym.Linkname != "" {
+		return base.Linkname(sym.Linkname, abi)
 	}
-	if sym.Func() {
-		// This is a function symbol. Mark it as "internal ABI".
-		return Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg)
-	}
-	return Ctxt.LookupInit(sym.LinksymName(), initPkg)
+	return base.PkgLinksym(sym.Pkg.Prefix, sym.Name, abi)
 }
 
 // Less reports whether symbol a is ordered before symbol b.
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 023ab9a..0dfbef8 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -5,23 +5,40 @@
 package types
 
 import (
-	"cmd/internal/obj"
+	"cmd/compile/internal/base"
 	"cmd/internal/src"
 	"fmt"
+	"sync"
 )
 
-// Dummy Node so we can refer to *Node without actually
-// having a gc.Node. Necessary to break import cycles.
-// TODO(gri) try to eliminate soon
-type Node struct{ _ int }
+// Object represents an ir.Node, but without needing to import cmd/compile/internal/ir,
+// which would cause an import cycle. The uses in other packages must type assert
+// values of type Object to ir.Node or a more specific type.
+type Object interface {
+	Pos() src.XPos
+	Sym() *Sym
+	Type() *Type
+}
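+
+// For example (illustrative), a caller in a package that imports ir can
+// recover the node with a type assertion:
+//
+//	name := obj.(*ir.Name)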
 
-//go:generate stringer -type EType -trimprefix T
+// A TypeObject is an Object representing a named type.
+type TypeObject interface {
+	Object
+	TypeDefn() *Type // for "type T Defn", returns Defn
+}
 
-// EType describes a kind of type.
-type EType uint8
+// A VarObject is an Object representing a function argument, variable, or struct field.
+type VarObject interface {
+	Object
+	RecordFrameOffset(int64) // save frame offset
+}
+
+//go:generate stringer -type Kind -trimprefix T type.go
+
+// Kind describes a kind of type.
+type Kind uint8
 
 const (
-	Txxx EType = iota
+	Txxx Kind = iota
 
 	TINT8
 	TUINT8
@@ -90,7 +107,7 @@
 // Types stores pointers to predeclared named types.
 //
 // It also stores pointers to several special types:
-//   - Types[TANY] is the placeholder "any" type recognized by substArgTypes.
+//   - Types[TANY] is the placeholder "any" type recognized by SubstArgTypes.
 //   - Types[TBLANK] represents the blank variable's type.
 //   - Types[TNIL] represents the predeclared "nil" value's type.
 //   - Types[TUNSAFEPTR] is package unsafe's Pointer type.
@@ -98,15 +115,15 @@
 
 var (
 	// Predeclared alias types. Kept separate for better error messages.
-	Bytetype *Type
-	Runetype *Type
+	ByteType *Type
+	RuneType *Type
 
 	// Predeclared error interface type.
-	Errortype *Type
+	ErrorType *Type
 
 	// Types to represent untyped string and boolean constants.
-	UntypedString *Type
-	UntypedBool   *Type
+	UntypedString = New(TSTRING)
+	UntypedBool   = New(TBOOL)
 
 	// Types to represent untyped numeric constants.
 	UntypedInt     = New(TIDEAL)
@@ -141,24 +158,26 @@
 	methods    Fields
 	allMethods Fields
 
-	Nod  *Node // canonical OTYPE node
-	Orig *Type // original type (type literal or predefined type)
+	nod        Object // canonical OTYPE node
+	underlying *Type  // original type (type literal or predefined type)
 
 	// Cache of composite types, with this type being the element type.
-	Cache struct {
+	cache struct {
 		ptr   *Type // *T, or nil
 		slice *Type // []T, or nil
 	}
 
-	Sym    *Sym  // symbol containing name, for named types
+	sym    *Sym  // symbol containing name, for named types
 	Vargen int32 // unique name for OTYPE/ONAME
 
-	Etype EType // kind of type
+	kind  Kind  // kind of type
 	Align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
 
 	flags bitset8
 }
 
+func (*Type) CanBeAnSSAAux() {}
+
 const (
 	typeNotInHeap  = 1 << iota // type cannot be heap allocated
 	typeBroke                  // broken type definition
@@ -179,6 +198,38 @@
 func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
 func (t *Type) SetRecur(b bool)      { t.flags.set(typeRecur, b) }
 
+// Kind returns the kind of type t.
+func (t *Type) Kind() Kind { return t.kind }
+
+// Sym returns the name of type t.
+func (t *Type) Sym() *Sym { return t.sym }
+
+// Underlying returns the underlying type of type t.
+func (t *Type) Underlying() *Type { return t.underlying }
+
+// SetNod associates t with syntax node n.
+func (t *Type) SetNod(n Object) {
+	// t.nod can be non-nil already
+	// in the case of shared *Types, like []byte or interface{}.
+	if t.nod == nil {
+		t.nod = n
+	}
+}
+
+// Pos returns a position associated with t, if any.
+// This should only be used for diagnostics.
+func (t *Type) Pos() src.XPos {
+	if t.nod != nil {
+		return t.nod.Pos()
+	}
+	return src.NoXPos
+}
+
+// NoPkg is a nil *Pkg value for clarity.
+// It's intended for use when constructing types that aren't exported
+// and thus don't need to be associated with any package.
+var NoPkg *Pkg = nil
+
 // Pkg returns the package that t appeared in.
 //
 // Pkg is only defined for function, struct, and interface types
@@ -186,7 +237,7 @@
 // cmd/compile itself, but we need to track it because it's exposed by
 // the go/types API.
 func (t *Type) Pkg() *Pkg {
-	switch t.Etype {
+	switch t.kind {
 	case TFUNC:
 		return t.Extra.(*Func).pkg
 	case TSTRUCT:
@@ -194,25 +245,11 @@
 	case TINTER:
 		return t.Extra.(*Interface).pkg
 	default:
-		Fatalf("Pkg: unexpected kind: %v", t)
+		base.Fatalf("Pkg: unexpected kind: %v", t)
 		return nil
 	}
 }
 
-// SetPkg sets the package that t appeared in.
-func (t *Type) SetPkg(pkg *Pkg) {
-	switch t.Etype {
-	case TFUNC:
-		t.Extra.(*Func).pkg = pkg
-	case TSTRUCT:
-		t.Extra.(*Struct).pkg = pkg
-	case TINTER:
-		t.Extra.(*Interface).pkg = pkg
-	default:
-		Fatalf("Pkg: unexpected kind: %v", t)
-	}
-}
-
 // Map contains Type fields specific to maps.
 type Map struct {
 	Key  *Type // Key type
@@ -247,15 +284,12 @@
 	Results  *Type // function results
 	Params   *Type // function params
 
-	Nname *Node
-	pkg   *Pkg
+	pkg *Pkg
 
 	// Argwid is the total width of the function receiver, params, and results.
 	// It gets calculated via a temporary TFUNCARGS type.
 	// Note that TFUNC's Width is Widthptr.
 	Argwid int64
-
-	Outnamed bool
 }
 
 // FuncType returns t's extra func-specific fields.
@@ -361,7 +395,7 @@
 
 	// For fields that represent function parameters, Nname points
 	// to the associated ONAME Node.
-	Nname *Node
+	Nname Object
 
 	// Offset in bytes of this field or method within its enclosing struct
 	// or interface Type.
@@ -389,7 +423,7 @@
 
 // IsMethod reports whether f represents a method rather than a struct field.
 func (f *Field) IsMethod() bool {
-	return f.Type.Etype == TFUNC && f.Type.Recv() != nil
+	return f.Type.kind == TFUNC && f.Type.Recv() != nil
 }
 
 // Fields is a pointer to a slice of *Field.
@@ -444,14 +478,14 @@
 }
 
 // New returns a new Type of the specified kind.
-func New(et EType) *Type {
+func New(et Kind) *Type {
 	t := &Type{
-		Etype: et,
+		kind:  et,
 		Width: BADWIDTH,
 	}
-	t.Orig = t
+	t.underlying = t
 	// TODO(josharian): lazily initialize some of these?
-	switch t.Etype {
+	switch t.kind {
 	case TMAP:
 		t.Extra = new(Map)
 	case TFORW:
@@ -481,7 +515,7 @@
 // NewArray returns a new fixed-length array Type.
 func NewArray(elem *Type, bound int64) *Type {
 	if bound < 0 {
-		Fatalf("NewArray: invalid bound %v", bound)
+		base.Fatalf("NewArray: invalid bound %v", bound)
 	}
 	t := New(TARRAY)
 	t.Extra = &Array{Elem: elem, Bound: bound}
@@ -491,16 +525,16 @@
 
 // NewSlice returns the slice Type with element type elem.
 func NewSlice(elem *Type) *Type {
-	if t := elem.Cache.slice; t != nil {
+	if t := elem.cache.slice; t != nil {
 		if t.Elem() != elem {
-			Fatalf("elem mismatch")
+			base.Fatalf("elem mismatch")
 		}
 		return t
 	}
 
 	t := New(TSLICE)
 	t.Extra = Slice{Elem: elem}
-	elem.Cache.slice = t
+	elem.cache.slice = t
 	return t
 }
 
@@ -549,22 +583,22 @@
 // NewPtr returns the pointer type pointing to t.
 func NewPtr(elem *Type) *Type {
 	if elem == nil {
-		Fatalf("NewPtr: pointer to elem Type is nil")
+		base.Fatalf("NewPtr: pointer to elem Type is nil")
 	}
 
-	if t := elem.Cache.ptr; t != nil {
+	if t := elem.cache.ptr; t != nil {
 		if t.Elem() != elem {
-			Fatalf("NewPtr: elem mismatch")
+			base.Fatalf("NewPtr: elem mismatch")
 		}
 		return t
 	}
 
 	t := New(TPTR)
 	t.Extra = Ptr{Elem: elem}
-	t.Width = int64(Widthptr)
-	t.Align = uint8(Widthptr)
+	t.Width = int64(PtrSize)
+	t.Align = uint8(PtrSize)
 	if NewPtrCacheEnabled {
-		elem.Cache.ptr = t
+		elem.cache.ptr = t
 	}
 	return t
 }
@@ -583,10 +617,17 @@
 	return t
 }
 
-func NewField() *Field {
-	return &Field{
+func NewField(pos src.XPos, sym *Sym, typ *Type) *Field {
+	f := &Field{
+		Pos:    pos,
+		Sym:    sym,
+		Type:   typ,
 		Offset: BADWIDTH,
 	}
+	if typ == nil {
+		f.SetBroke(true)
+	}
+	return f
 }
 
 // SubstAny walks t, replacing instances of "any" with successive
@@ -596,13 +637,13 @@
 		return nil
 	}
 
-	switch t.Etype {
+	switch t.kind {
 	default:
 		// Leave the type unchanged.
 
 	case TANY:
 		if len(*types) == 0 {
-			Fatalf("substArgTypes: not enough argument types")
+			base.Fatalf("SubstArgTypes: not enough argument types")
 		}
 		t = (*types)[0]
 		*types = (*types)[1:]
@@ -680,7 +721,7 @@
 	}
 	nt := *t
 	// copy any *T Extra fields, to avoid aliasing
-	switch t.Etype {
+	switch t.kind {
 	case TMAP:
 		x := *t.Extra.(*Map)
 		nt.Extra = &x
@@ -703,11 +744,11 @@
 		x := *t.Extra.(*Array)
 		nt.Extra = &x
 	case TTUPLE, TSSA, TRESULTS:
-		Fatalf("ssa types cannot be copied")
+		base.Fatalf("ssa types cannot be copied")
 	}
 	// TODO(mdempsky): Find out why this is necessary and explain.
-	if t.Orig == t {
-		nt.Orig = &nt
+	if t.underlying == t {
+		nt.underlying = &nt
 	}
 	return &nt
 }
@@ -717,9 +758,9 @@
 	return &nf
 }
 
-func (t *Type) wantEtype(et EType) {
-	if t.Etype != et {
-		Fatalf("want %v, but have %v", et, t)
+func (t *Type) wantEtype(et Kind) {
+	if t.kind != et {
+		base.Fatalf("want %v, but have %v", et, t)
 	}
 }
 
@@ -772,7 +813,7 @@
 // Elem returns the type of elements of t.
 // Usable with pointers, channels, arrays, slices, and maps.
 func (t *Type) Elem() *Type {
-	switch t.Etype {
+	switch t.kind {
 	case TPTR:
 		return t.Extra.(Ptr).Elem
 	case TARRAY:
@@ -784,7 +825,7 @@
 	case TMAP:
 		return t.Extra.(*Map).Elem
 	}
-	Fatalf("Type.Elem %s", t.Etype)
+	base.Fatalf("Type.Elem %s", t.kind)
 	return nil
 }
 
@@ -800,29 +841,9 @@
 	return t.Extra.(FuncArgs).T
 }
 
-// Nname returns the associated function's nname.
-func (t *Type) Nname() *Node {
-	switch t.Etype {
-	case TFUNC:
-		return t.Extra.(*Func).Nname
-	}
-	Fatalf("Type.Nname %v %v", t.Etype, t)
-	return nil
-}
-
-// Nname sets the associated function's nname.
-func (t *Type) SetNname(n *Node) {
-	switch t.Etype {
-	case TFUNC:
-		t.Extra.(*Func).Nname = n
-	default:
-		Fatalf("Type.SetNname %v %v", t.Etype, t)
-	}
-}
-
 // IsFuncArgStruct reports whether t is a struct representing function parameters.
 func (t *Type) IsFuncArgStruct() bool {
-	return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
+	return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
 }
 
 func (t *Type) Methods() *Fields {
@@ -836,14 +857,14 @@
 }
 
 func (t *Type) Fields() *Fields {
-	switch t.Etype {
+	switch t.kind {
 	case TSTRUCT:
 		return &t.Extra.(*Struct).fields
 	case TINTER:
-		Dowidth(t)
+		CalcSize(t)
 		return &t.Extra.(*Interface).Fields
 	}
-	Fatalf("Fields: type %v does not have fields", t)
+	base.Fatalf("Fields: type %v does not have fields", t)
 	return nil
 }
 
@@ -867,7 +888,7 @@
 	// enforce that SetFields cannot be called once
 	// t's width has been calculated.
 	if t.WidthCalculated() {
-		Fatalf("SetFields of %v: width previously calculated", t)
+		base.Fatalf("SetFields of %v: width previously calculated", t)
 	}
 	t.wantEtype(TSTRUCT)
 	for _, f := range fields {
@@ -901,23 +922,23 @@
 }
 
 func (t *Type) Size() int64 {
-	if t.Etype == TSSA {
+	if t.kind == TSSA {
 		if t == TypeInt128 {
 			return 16
 		}
 		return 0
 	}
-	Dowidth(t)
+	CalcSize(t)
 	return t.Width
 }
 
 func (t *Type) Alignment() int64 {
-	Dowidth(t)
+	CalcSize(t)
 	return int64(t.Align)
 }
 
 func (t *Type) SimpleString() string {
-	return t.Etype.String()
+	return t.kind.String()
 }
 
 // Cmp is a comparison between values a and b.
@@ -1001,31 +1022,31 @@
 		return CMPgt
 	}
 
-	if t.Etype != x.Etype {
-		return cmpForNe(t.Etype < x.Etype)
+	if t.kind != x.kind {
+		return cmpForNe(t.kind < x.kind)
 	}
 
-	if t.Sym != nil || x.Sym != nil {
+	if t.sym != nil || x.sym != nil {
 		// Special case: we keep byte and uint8 separate
 		// for error messages. Treat them as equal.
-		switch t.Etype {
+		switch t.kind {
 		case TUINT8:
-			if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) {
+			if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) {
 				return CMPeq
 			}
 
 		case TINT32:
-			if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) {
+			if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) {
 				return CMPeq
 			}
 		}
 	}
 
-	if c := t.Sym.cmpsym(x.Sym); c != CMPeq {
+	if c := t.sym.cmpsym(x.sym); c != CMPeq {
 		return c
 	}
 
-	if x.Sym != nil {
+	if x.sym != nil {
 		// Syms non-nil, if vargens match then equal.
 		if t.Vargen != x.Vargen {
 			return cmpForNe(t.Vargen < x.Vargen)
@@ -1034,7 +1055,7 @@
 	}
 	// both syms nil, look at structure below.
 
-	switch t.Etype {
+	switch t.kind {
 	case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
 		TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
 		return CMPeq
@@ -1191,15 +1212,15 @@
 }
 
 // IsKind reports whether t is a Type of the specified kind.
-func (t *Type) IsKind(et EType) bool {
-	return t != nil && t.Etype == et
+func (t *Type) IsKind(et Kind) bool {
+	return t != nil && t.kind == et
 }
 
 func (t *Type) IsBoolean() bool {
-	return t.Etype == TBOOL
+	return t.kind == TBOOL
 }
 
-var unsignedEType = [...]EType{
+var unsignedEType = [...]Kind{
 	TINT8:    TUINT8,
 	TUINT8:   TUINT8,
 	TINT16:   TUINT16,
@@ -1216,54 +1237,62 @@
 // ToUnsigned returns the unsigned equivalent of integer type t.
 func (t *Type) ToUnsigned() *Type {
 	if !t.IsInteger() {
-		Fatalf("unsignedType(%v)", t)
+		base.Fatalf("unsignedType(%v)", t)
 	}
-	return Types[unsignedEType[t.Etype]]
+	return Types[unsignedEType[t.kind]]
 }
 
 func (t *Type) IsInteger() bool {
-	switch t.Etype {
+	switch t.kind {
 	case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
 		return true
 	}
-	return false
+	return t == UntypedInt || t == UntypedRune
 }
 
 func (t *Type) IsSigned() bool {
-	switch t.Etype {
+	switch t.kind {
 	case TINT8, TINT16, TINT32, TINT64, TINT:
 		return true
 	}
 	return false
 }
 
+func (t *Type) IsUnsigned() bool {
+	switch t.kind {
+	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR:
+		return true
+	}
+	return false
+}
+
 func (t *Type) IsFloat() bool {
-	return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
+	return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat
 }
 
 func (t *Type) IsComplex() bool {
-	return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128
+	return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex
 }
 
 // IsPtr reports whether t is a regular Go pointer type.
 // This does not include unsafe.Pointer.
 func (t *Type) IsPtr() bool {
-	return t.Etype == TPTR
+	return t.kind == TPTR
 }
 
 // IsPtrElem reports whether t is the element of a pointer (to t).
 func (t *Type) IsPtrElem() bool {
-	return t.Cache.ptr != nil
+	return t.cache.ptr != nil
 }
 
 // IsUnsafePtr reports whether t is an unsafe pointer.
 func (t *Type) IsUnsafePtr() bool {
-	return t.Etype == TUNSAFEPTR
+	return t.kind == TUNSAFEPTR
 }
 
 // IsUintptr reports whether t is an uintptr.
 func (t *Type) IsUintptr() bool {
-	return t.Etype == TUINTPTR
+	return t.kind == TUINTPTR
 }
 
 // IsPtrShaped reports whether t is represented by a single machine pointer.
@@ -1272,45 +1301,45 @@
 // that consist of a single pointer shaped type.
 // TODO(mdempsky): Should it? See golang.org/issue/15028.
 func (t *Type) IsPtrShaped() bool {
-	return t.Etype == TPTR || t.Etype == TUNSAFEPTR ||
-		t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
+	return t.kind == TPTR || t.kind == TUNSAFEPTR ||
+		t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC
 }
 
 // HasNil reports whether the set of values determined by t includes nil.
 func (t *Type) HasNil() bool {
-	switch t.Etype {
-	case TCHAN, TFUNC, TINTER, TMAP, TPTR, TSLICE, TUNSAFEPTR:
+	switch t.kind {
+	case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR:
 		return true
 	}
 	return false
 }
 
 func (t *Type) IsString() bool {
-	return t.Etype == TSTRING
+	return t.kind == TSTRING
 }
 
 func (t *Type) IsMap() bool {
-	return t.Etype == TMAP
+	return t.kind == TMAP
 }
 
 func (t *Type) IsChan() bool {
-	return t.Etype == TCHAN
+	return t.kind == TCHAN
 }
 
 func (t *Type) IsSlice() bool {
-	return t.Etype == TSLICE
+	return t.kind == TSLICE
 }
 
 func (t *Type) IsArray() bool {
-	return t.Etype == TARRAY
+	return t.kind == TARRAY
 }
 
 func (t *Type) IsStruct() bool {
-	return t.Etype == TSTRUCT
+	return t.kind == TSTRUCT
 }
 
 func (t *Type) IsInterface() bool {
-	return t.Etype == TINTER
+	return t.kind == TINTER
 }
 
 // IsEmptyInterface reports whether t is an empty interface type.
@@ -1318,6 +1347,20 @@
 	return t.IsInterface() && t.NumFields() == 0
 }
 
+// IsScalar reports whether 't' is a scalar Go type, e.g.
+// bool/int/float/complex. Note that struct and array types consisting
+// of a single scalar element are not considered scalar; likewise,
+// pointer types are not considered scalar.
+func (t *Type) IsScalar() bool {
+	switch t.kind {
+	case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+		TUINT32, TINT64, TUINT64, TINT, TUINT,
+		TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+		return true
+	}
+	return false
+}
+
 func (t *Type) PtrTo() *Type {
 	return NewPtr(t)
 }
@@ -1326,7 +1369,7 @@
 	return t.Fields().Len()
 }
 func (t *Type) FieldType(i int) *Type {
-	if t.Etype == TTUPLE {
+	if t.kind == TTUPLE {
 		switch i {
 		case 0:
 			return t.Extra.(*Tuple).first
@@ -1336,7 +1379,7 @@
 			panic("bad tuple index")
 		}
 	}
-	if t.Etype == TRESULTS {
+	if t.kind == TRESULTS {
 		return t.Extra.(*Results).Types[i]
 	}
 	return t.Field(i).Type
@@ -1367,10 +1410,10 @@
 // (and their comprised elements) are excluded from the count.
 // struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty.
 func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
-	switch t.Etype {
+	switch t.kind {
 	case TSTRUCT:
 		if t.IsFuncArgStruct() {
-			Fatalf("NumComponents func arg struct")
+			base.Fatalf("NumComponents func arg struct")
 		}
 		var n int64
 		for _, f := range t.FieldSlice() {
@@ -1390,10 +1433,10 @@
 // if there is exactly one. Otherwise, it returns nil.
 // Components are counted as in NumComponents, including blank fields.
 func (t *Type) SoleComponent() *Type {
-	switch t.Etype {
+	switch t.kind {
 	case TSTRUCT:
 		if t.IsFuncArgStruct() {
-			Fatalf("SoleComponent func arg struct")
+			base.Fatalf("SoleComponent func arg struct")
 		}
 		if t.NumFields() != 1 {
 			return nil
@@ -1416,10 +1459,10 @@
 }
 
 func (t *Type) IsMemory() bool {
-	if t == TypeMem || t.Etype == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
+	if t == TypeMem || t.kind == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
 		return true
 	}
-	if t.Etype == TRESULTS {
+	if t.kind == TRESULTS {
 		if types := t.Extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem {
 			return true
 		}
@@ -1428,8 +1471,8 @@
 }
 func (t *Type) IsFlags() bool   { return t == TypeFlags }
 func (t *Type) IsVoid() bool    { return t == TypeVoid }
-func (t *Type) IsTuple() bool   { return t.Etype == TTUPLE }
-func (t *Type) IsResults() bool { return t.Etype == TRESULTS }
+func (t *Type) IsTuple() bool   { return t.kind == TTUPLE }
+func (t *Type) IsResults() bool { return t.kind == TRESULTS }
 
 // IsUntyped reports whether t is an untyped type.
 func (t *Type) IsUntyped() bool {
@@ -1439,7 +1482,7 @@
 	if t == UntypedString || t == UntypedBool {
 		return true
 	}
-	switch t.Etype {
+	switch t.kind {
 	case TNIL, TIDEAL:
 		return true
 	}
@@ -1449,7 +1492,7 @@
 // HasPointers reports whether t contains a heap pointer.
 // Note that this function ignores pointers to go:notinheap types.
 func (t *Type) HasPointers() bool {
-	switch t.Etype {
+	switch t.kind {
 	case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
 		TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA:
 		return false
@@ -1488,10 +1531,6 @@
 	return true
 }
 
-func (t *Type) Symbol() *obj.LSym {
-	return TypeLinkSym(t)
-}
-
 // Tie returns 'T' if t is a concrete type,
 // 'I' if t is an interface type, and 'E' if t is an empty interface type.
 // It is used to build calls to the conv* and assert* runtime routines.
@@ -1523,3 +1562,333 @@
 	TypeVoid    = newSSA("void")
 	TypeInt128  = newSSA("int128")
 )
+
+// NewNamed returns a new named type for the given type name.
+func NewNamed(obj Object) *Type {
+	t := New(TFORW)
+	t.sym = obj.Sym()
+	t.nod = obj
+	return t
+}
+
+// Obj returns the type name for the named type t.
+func (t *Type) Obj() Object {
+	if t.sym != nil {
+		return t.nod
+	}
+	return nil
+}
+
+// SetUnderlying sets the underlying type.
+func (t *Type) SetUnderlying(underlying *Type) {
+	if underlying.kind == TFORW {
+		// This type isn't computed yet; when it is, update t.
+		underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
+		return
+	}
+
+	ft := t.ForwardType()
+
+	// TODO(mdempsky): Fix Type rekinding.
+	t.kind = underlying.kind
+	t.Extra = underlying.Extra
+	t.Width = underlying.Width
+	t.Align = underlying.Align
+	t.underlying = underlying.underlying
+
+	if underlying.NotInHeap() {
+		t.SetNotInHeap(true)
+	}
+	if underlying.Broke() {
+		t.SetBroke(true)
+	}
+
+	// spec: "The declared type does not inherit any methods bound
+	// to the existing type, but the method set of an interface
+	// type [...] remains unchanged."
+	if t.IsInterface() {
+		t.methods = underlying.methods
+		t.allMethods = underlying.allMethods
+	}
+
+	// Update types waiting on this type.
+	for _, w := range ft.Copyto {
+		w.SetUnderlying(t)
+	}
+
+	// Double-check use of type as embedded type.
+	if ft.Embedlineno.IsKnown() {
+		if t.IsPtr() || t.IsUnsafePtr() {
+			base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer")
+		}
+	}
+}
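+
+// A minimal sketch of the intended sequence (illustrative; obj is any
+// Object naming the type):
+//
+//	t := NewNamed(obj)                     // t starts as TFORW
+//	t.SetUnderlying(NewSlice(Types[TINT])) // t becomes a defined []int type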
+
+// NewBasic returns a new basic type of the given kind.
+func NewBasic(kind Kind, obj Object) *Type {
+	t := New(kind)
+	t.sym = obj.Sym()
+	t.nod = obj
+	return t
+}
+
+// NewInterface returns a new interface for the given methods and
+// embedded types. Embedded types are specified as fields with no Sym.
+func NewInterface(pkg *Pkg, methods []*Field) *Type {
+	t := New(TINTER)
+	t.SetInterface(methods)
+	if anyBroke(methods) {
+		t.SetBroke(true)
+	}
+	t.Extra.(*Interface).pkg = pkg
+	return t
+}
+
+// NewSignature returns a new function type for the given receiver,
+// parameters, and results, any of which may be nil.
+func NewSignature(pkg *Pkg, recv *Field, params, results []*Field) *Type {
+	var recvs []*Field
+	if recv != nil {
+		recvs = []*Field{recv}
+	}
+
+	t := New(TFUNC)
+	ft := t.FuncType()
+
+	funargs := func(fields []*Field, funarg Funarg) *Type {
+		s := NewStruct(NoPkg, fields)
+		s.StructType().Funarg = funarg
+		if s.Broke() {
+			t.SetBroke(true)
+		}
+		return s
+	}
+
+	ft.Receiver = funargs(recvs, FunargRcvr)
+	ft.Params = funargs(params, FunargParams)
+	ft.Results = funargs(results, FunargResults)
+	ft.pkg = pkg
+
+	return t
+}
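+
+// For illustration (assumed usage; positions and symbols elided), a
+// signature like func(int) string could be built as:
+//
+//	NewSignature(NoPkg, nil,
+//		[]*Field{NewField(src.NoXPos, nil, Types[TINT])},
+//		[]*Field{NewField(src.NoXPos, nil, Types[TSTRING])})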
+
+// NewStruct returns a new struct with the given fields.
+func NewStruct(pkg *Pkg, fields []*Field) *Type {
+	t := New(TSTRUCT)
+	t.SetFields(fields)
+	if anyBroke(fields) {
+		t.SetBroke(true)
+	}
+	t.Extra.(*Struct).pkg = pkg
+	return t
+}
+
+func anyBroke(fields []*Field) bool {
+	for _, f := range fields {
+		if f.Broke() {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	IsInt     [NTYPE]bool
+	IsFloat   [NTYPE]bool
+	IsComplex [NTYPE]bool
+	IsSimple  [NTYPE]bool
+)
+
+var IsOrdered [NTYPE]bool
+
+// IsReflexive reports whether t has a reflexive equality operator.
+// That is, whether x == x holds for all x of type t.
+func IsReflexive(t *Type) bool {
+	switch t.Kind() {
+	case TBOOL,
+		TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TPTR,
+		TUNSAFEPTR,
+		TSTRING,
+		TCHAN:
+		return true
+
+	case TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TINTER:
+		return false
+
+	case TARRAY:
+		return IsReflexive(t.Elem())
+
+	case TSTRUCT:
+		for _, t1 := range t.Fields().Slice() {
+			if !IsReflexive(t1.Type) {
+				return false
+			}
+		}
+		return true
+
+	default:
+		base.Fatalf("bad type for map key: %v", t)
+		return false
+	}
+}
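+
+// For example, float64 is not reflexive because NaN != NaN; hence any
+// struct or array containing floats is not reflexive either.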
+
+// IsDirectIface reports whether t can be stored directly in an
+// interface word, i.e. whether its representation is a single pointer.
+func IsDirectIface(t *Type) bool {
+	if t.Broke() {
+		return false
+	}
+
+	switch t.Kind() {
+	case TPTR:
+		// Pointers to notinheap types must be stored indirectly. See issue 42076.
+		return !t.Elem().NotInHeap()
+	case TCHAN,
+		TMAP,
+		TFUNC,
+		TUNSAFEPTR:
+		return true
+
+	case TARRAY:
+		// Array of 1 direct iface type can be direct.
+		return t.NumElem() == 1 && IsDirectIface(t.Elem())
+
+	case TSTRUCT:
+		// Struct with 1 field of direct iface type can be direct.
+		return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type)
+	}
+
+	return false
+}
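+
+// For example, struct{ p *int } and [1]chan int are direct, while
+// struct{ p *int; n int } and [2]chan int are not.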
+
+// IsInterfaceMethod reports whether (field type) f is
+// an interface method. Such methods have the
+// special receiver type types.FakeRecvType().
+func IsInterfaceMethod(f *Type) bool {
+	return f.Recv().Type == FakeRecvType()
+}
+
+// IsMethodApplicable reports whether method m can be called on a
+// value of type t. This is necessary because we compute a single
+// method set for both T and *T, but some *T methods are not
+// applicable to T receivers.
+func IsMethodApplicable(t *Type, m *Field) bool {
+	return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2
+}
+
+// IsRuntimePkg reports whether p is package runtime.
+func IsRuntimePkg(p *Pkg) bool {
+	if base.Flag.CompilingRuntime && p == LocalPkg {
+		return true
+	}
+	return p.Path == "runtime"
+}
+
+// IsReflectPkg reports whether p is package reflect.
+func IsReflectPkg(p *Pkg) bool {
+	if p == LocalPkg {
+		return base.Ctxt.Pkgpath == "reflect"
+	}
+	return p.Path == "reflect"
+}
+
+// ReceiverBaseType returns the underlying type, if any,
+// that owns methods with receiver parameter t.
+// The result is either a named type or an anonymous struct.
+func ReceiverBaseType(t *Type) *Type {
+	if t == nil {
+		return nil
+	}
+
+	// Strip away pointer if it's there.
+	if t.IsPtr() {
+		if t.Sym() != nil {
+			return nil
+		}
+		t = t.Elem()
+		if t == nil {
+			return nil
+		}
+	}
+
+	// Must be a named type or anonymous struct.
+	if t.Sym() == nil && !t.IsStruct() {
+		return nil
+	}
+
+	// Check types.
+	if IsSimple[t.Kind()] {
+		return t
+	}
+	switch t.Kind() {
+	case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+		return t
+	}
+	return nil
+}
+
+func FloatForComplex(t *Type) *Type {
+	switch t.Kind() {
+	case TCOMPLEX64:
+		return Types[TFLOAT32]
+	case TCOMPLEX128:
+		return Types[TFLOAT64]
+	}
+	base.Fatalf("unexpected type: %v", t)
+	return nil
+}
+
+func ComplexForFloat(t *Type) *Type {
+	switch t.Kind() {
+	case TFLOAT32:
+		return Types[TCOMPLEX64]
+	case TFLOAT64:
+		return Types[TCOMPLEX128]
+	}
+	base.Fatalf("unexpected type: %v", t)
+	return nil
+}
+
+func TypeSym(t *Type) *Sym {
+	return TypeSymLookup(TypeSymName(t))
+}
+
+func TypeSymLookup(name string) *Sym {
+	typepkgmu.Lock()
+	s := typepkg.Lookup(name)
+	typepkgmu.Unlock()
+	return s
+}
+
+func TypeSymName(t *Type) string {
+	name := t.ShortString()
+	// Use a separate symbol name for Noalg types for #17752.
+	if TypeHasNoAlg(t) {
+		name = "noalg." + name
+	}
+	return name
+}
+
+// Fake package for runtime type info (headers).
+// Don't access directly; use TypeSymLookup above.
+var (
+	typepkgmu sync.Mutex // protects typepkg lookups
+	typepkg   = NewPkg("type", "type")
+)
+
+var SimType [NTYPE]Kind
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
index e8b1073..f9f629c 100644
--- a/src/cmd/compile/internal/types/utils.go
+++ b/src/cmd/compile/internal/types/utils.go
@@ -4,64 +4,8 @@
 
 package types
 
-import (
-	"cmd/internal/obj"
-	"fmt"
-)
-
 const BADWIDTH = -1000000000
 
-// The following variables must be initialized early by the frontend.
-// They are here to break import cycles.
-// TODO(gri) eliminate these dependencies.
-var (
-	Widthptr    int
-	Dowidth     func(*Type)
-	Fatalf      func(string, ...interface{})
-	Sconv       func(*Sym, int, int) string       // orig: func sconv(s *Sym, flag FmtFlag, mode fmtMode) string
-	Tconv       func(*Type, int, int) string      // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode) string
-	FormatSym   func(*Sym, fmt.State, rune, int)  // orig: func symFormat(sym *Sym, s fmt.State, verb rune, mode fmtMode)
-	FormatType  func(*Type, fmt.State, rune, int) // orig: func typeFormat(t *Type, s fmt.State, verb rune, mode fmtMode)
-	TypeLinkSym func(*Type) *obj.LSym
-	Ctxt        *obj.Link
-
-	FmtLeft     int
-	FmtUnsigned int
-	FErr        int
-)
-
-func (s *Sym) String() string {
-	return Sconv(s, 0, FErr)
-}
-
-func (sym *Sym) Format(s fmt.State, verb rune) {
-	FormatSym(sym, s, verb, FErr)
-}
-
-func (t *Type) String() string {
-	// The implementation of tconv (including typefmt and fldconv)
-	// must handle recursive types correctly.
-	return Tconv(t, 0, FErr)
-}
-
-// ShortString generates a short description of t.
-// It is used in autogenerated method names, reflection,
-// and itab names.
-func (t *Type) ShortString() string {
-	return Tconv(t, FmtLeft, FErr)
-}
-
-// LongString generates a complete description of t.
-// It is useful for reflection,
-// or when a unique fingerprint or hash of a type is required.
-func (t *Type) LongString() string {
-	return Tconv(t, FmtLeft|FmtUnsigned, FErr)
-}
-
-func (t *Type) Format(s fmt.State, verb rune) {
-	FormatType(t, s, verb, FErr)
-}
-
 type bitset8 uint8
 
 func (f *bitset8) set(mask uint8, b bool) {
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
new file mode 100644
index 0000000..230b544
--- /dev/null
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -0,0 +1,719 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node.
+func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
+	init.Append(ir.TakeInit(n)...)
+
+	var left, right ir.Node
+	switch n.Op() {
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		left, right = n.X, n.Y
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		left, right = n.X, n.Y
+	}
+
+	// Recognize m[k] = append(m[k], ...) so we can reuse
+	// the mapassign call.
+	var mapAppend *ir.CallExpr
+	if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
+		left := left.(*ir.IndexExpr)
+		mapAppend = right.(*ir.CallExpr)
+		if !ir.SameSafeExpr(left, mapAppend.Args[0]) {
+			base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0])
+		}
+	}
+
+	left = walkExpr(left, init)
+	left = safeExpr(left, init)
+	if mapAppend != nil {
+		mapAppend.Args[0] = left
+	}
+
+	if n.Op() == ir.OASOP {
+		// Rewrite x op= y into x = x op y.
+		n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right)))
+	} else {
+		n.(*ir.AssignStmt).X = left
+	}
+	as := n.(*ir.AssignStmt)
+
+	if oaslit(as, init) {
+		return ir.NewBlockStmt(as.Pos(), nil)
+	}
+
+	if as.Y == nil {
+		// TODO(austin): Check all "implicit zeroing"
+		return as
+	}
+
+	if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) {
+		return as
+	}
+
+	switch as.Y.Op() {
+	default:
+		as.Y = walkExpr(as.Y, init)
+
+	case ir.ORECV:
+		// x = <-c; as.X is x, recv.X is c.
+		// order.stmt made sure x is addressable.
+		recv := as.Y.(*ir.UnaryExpr)
+		recv.X = walkExpr(recv.X, init)
+
+		n1 := typecheck.NodAddr(as.X)
+		r := recv.X // the channel
+		return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
+
+	case ir.OAPPEND:
+		// x = append(...)
+		call := as.Y.(*ir.CallExpr)
+		if call.Type().Elem().NotInHeap() {
+			base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
+		}
+		var r ir.Node
+		switch {
+		case isAppendOfMake(call):
+			// x = append(y, make([]T, y)...)
+			r = extendSlice(call, init)
+		case call.IsDDD:
+			r = appendSlice(call, init) // also works for append(slice, string).
+		default:
+			r = walkAppend(call, init, as)
+		}
+		as.Y = r
+		if r.Op() == ir.OAPPEND {
+			// Left in place for back end.
+			// Do not add a new write barrier.
+			// Set up address of type for back end.
+			r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
+			return as
+		}
+		// Otherwise, lowered for race detector.
+		// Treat as ordinary assignment.
+	}
+
+	if as.X != nil && as.Y != nil {
+		return convas(as, init)
+	}
+	return as
+}
+
+// walkAssignDotType walks an OAS2DOTTYPE node.
+func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node {
+	walkExprListSafe(n.Lhs, init)
+	n.Rhs[0] = walkExpr(n.Rhs[0], init)
+	return n
+}
+
+// walkAssignFunc walks an OAS2FUNC node.
+func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(ir.TakeInit(n)...)
+
+	r := n.Rhs[0]
+	walkExprListSafe(n.Lhs, init)
+	r = walkExpr(r, init)
+
+	if ir.IsIntrinsicCall(r.(*ir.CallExpr)) {
+		n.Rhs = []ir.Node{r}
+		return n
+	}
+	init.Append(r)
+
+	ll := ascompatet(n.Lhs, r.Type())
+	return ir.NewBlockStmt(src.NoXPos, ll)
+}
+
+// walkAssignList walks an OAS2 node.
+func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(ir.TakeInit(n)...)
+	return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs))
+}
+
+// walkAssignMapRead walks an OAS2MAPR node.
+func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(ir.TakeInit(n)...)
+
+	r := n.Rhs[0].(*ir.IndexExpr)
+	walkExprListSafe(n.Lhs, init)
+	r.X = walkExpr(r.X, init)
+	r.Index = walkExpr(r.Index, init)
+	t := r.X.Type()
+
+	fast := mapfast(t)
+	var key ir.Node
+	if fast != mapslow {
+		// fast versions take key by value
+		key = r.Index
+	} else {
+		// standard version takes key by reference
+		// order.expr made sure key is addressable.
+		key = typecheck.NodAddr(r.Index)
+	}
+
+	// from:
+	//   a,b = m[i]
+	// to:
+	//   var,b = mapaccess2*(t, m, i)
+	//   a = *var
+	a := n.Lhs[0]
+
+	var call *ir.CallExpr
+	if w := t.Elem().Width; w <= zeroValSize {
+		fn := mapfn(mapaccess2[fast], t)
+		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
+	} else {
+		fn := mapfn("mapaccess2_fat", t)
+		z := reflectdata.ZeroAddr(w)
+		call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
+	}
+
+	// mapaccess2* returns a typed bool, but due to spec changes,
+	// the boolean result of i.(T) is now untyped so we make it the
+	// same type as the variable on the lhs.
+	if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+		call.Type().Field(1).Type = ok.Type()
+	}
+	n.Rhs = []ir.Node{call}
+	n.SetOp(ir.OAS2FUNC)
+
+	// don't generate a = *var if a is _
+	if ir.IsBlank(a) {
+		return walkExpr(typecheck.Stmt(n), init)
+	}
+
+	var_ := typecheck.Temp(types.NewPtr(t.Elem()))
+	var_.SetTypecheck(1)
+	var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+
+	n.Lhs[0] = var_
+	init.Append(walkExpr(n, init))
+
+	as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_))
+	return walkExpr(typecheck.Stmt(as), init)
+}
+
+// walkAssignRecv walks an OAS2RECV node.
+func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+	init.Append(ir.TakeInit(n)...)
+
+	r := n.Rhs[0].(*ir.UnaryExpr) // recv
+	walkExprListSafe(n.Lhs, init)
+	r.X = walkExpr(r.X, init)
+	var n1 ir.Node
+	if ir.IsBlank(n.Lhs[0]) {
+		n1 = typecheck.NodNil()
+	} else {
+		n1 = typecheck.NodAddr(n.Lhs[0])
+	}
+	fn := chanfn("chanrecv2", 2, r.X.Type())
+	ok := n.Lhs[1]
+	call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1)
+	return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call))
+}
+
+// walkReturn walks an ORETURN node.
+func walkReturn(n *ir.ReturnStmt) ir.Node {
+	fn := ir.CurFunc
+
+	fn.NumReturns++
+	if len(n.Results) == 0 {
+		return n
+	}
+
+	results := fn.Type().Results().FieldSlice()
+	dsts := make([]ir.Node, len(results))
+	for i, v := range results {
+		// TODO(mdempsky): typecheck should have already checked the result variables.
+		dsts[i] = typecheck.AssignExpr(v.Nname.(*ir.Name))
+	}
+
+	n.Results = ascompatee(n.Op(), dsts, n.Results)
+	return n
+}
+
+// ascompatet checks assignment of a result type list to
+// an expression list. It is called for
+//	expr-list = func()
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
+	if len(nl) != nr.NumFields() {
+		base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
+	}
+
+	var nn ir.Nodes
+	for i, l := range nl {
+		if ir.IsBlank(l) {
+			continue
+		}
+		r := nr.Field(i)
+
+		// Order should have created autotemps of the appropriate type for
+		// us to store results into.
+		if tmp, ok := l.(*ir.Name); !ok || !tmp.AutoTemp() || !types.Identical(tmp.Type(), r.Type) {
+			base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l)
+		}
+
+		res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
+		res.Offset = base.Ctxt.FixedFrameSize() + r.Offset
+		res.SetType(r.Type)
+		res.SetTypecheck(1)
+
+		nn.Append(ir.NewAssignStmt(base.Pos, l, res))
+	}
+	return nn
+}
+
+// ascompatee checks assignment of an expression list to
+// an expression list. It is called for
+//	expr-list = expr-list
+func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
+	// cannot happen: should have been rejected during type checking
+	if len(nl) != len(nr) {
+		base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr))
+	}
+
+	var assigned ir.NameSet
+	var memWrite, deferResultWrite bool
+
+	// affected reports whether expression n could be affected by
+	// the assignments applied so far.
+	affected := func(n ir.Node) bool {
+		if deferResultWrite {
+			return true
+		}
+		return ir.Any(n, func(n ir.Node) bool {
+			if n.Op() == ir.ONAME && assigned.Has(n.(*ir.Name)) {
+				return true
+			}
+			if memWrite && readsMemory(n) {
+				return true
+			}
+			return false
+		})
+	}
+
+	// If a needed expression may be affected by an
+	// earlier assignment, make an early copy of that
+	// expression and use the copy instead.
+	var early ir.Nodes
+	save := func(np *ir.Node) {
+		if n := *np; affected(n) {
+			*np = copyExpr(n, n.Type(), &early)
+		}
+	}
+
+	var late ir.Nodes
+	for i, lorig := range nl {
+		l, r := lorig, nr[i]
+
+		// Do not generate 'x = x' during return. See issue 4014.
+		if op == ir.ORETURN && ir.SameSafeExpr(l, r) {
+			continue
+		}
+
+		// Save subexpressions needed on left side.
+		// Drill through non-dereferences.
+		for {
+			switch ll := l.(type) {
+			case *ir.IndexExpr:
+				if ll.X.Type().IsArray() {
+					save(&ll.Index)
+					l = ll.X
+					continue
+				}
+			case *ir.ParenExpr:
+				l = ll.X
+				continue
+			case *ir.SelectorExpr:
+				if ll.Op() == ir.ODOT {
+					l = ll.X
+					continue
+				}
+			}
+			break
+		}
+
+		var name *ir.Name
+		switch l.Op() {
+		default:
+			base.Fatalf("unexpected lvalue %v", l.Op())
+		case ir.ONAME:
+			name = l.(*ir.Name)
+		case ir.OINDEX, ir.OINDEXMAP:
+			l := l.(*ir.IndexExpr)
+			save(&l.X)
+			save(&l.Index)
+		case ir.ODEREF:
+			l := l.(*ir.StarExpr)
+			save(&l.X)
+		case ir.ODOTPTR:
+			l := l.(*ir.SelectorExpr)
+			save(&l.X)
+		}
+
+		// Save expression on right side.
+		save(&r)
+
+		appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late))
+
+		// Check for reasons why we may need to compute later expressions
+		// before this assignment happens.
+
+		if name == nil {
+			// Not a direct assignment to a declared variable.
+			// Conservatively assume any memory access might alias.
+			memWrite = true
+			continue
+		}
+
+		if name.Class == ir.PPARAMOUT && ir.CurFunc.HasDefer() {
+			// Assignments to a result parameter in a function with defers
+			// become visible early if evaluation of any later expression
+			// panics (#43835).
+			deferResultWrite = true
+			continue
+		}
+
+		if sym := types.OrigSym(name.Sym()); sym == nil || sym.IsBlank() {
+			// We can ignore assignments to blank or anonymous result parameters.
+			// These can't appear in expressions anyway.
+			continue
+		}
+
+		if name.Addrtaken() || !name.OnStack() {
+			// Global variable, heap escaped, or just addrtaken.
+			// Conservatively assume any memory access might alias.
+			memWrite = true
+			continue
+		}
+
+		// Local, non-addrtaken variable.
+		// Assignments can only alias with direct uses of this variable.
+		assigned.Add(name)
+	}
+
+	early.Append(late.Take()...)
+	return early
+}
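+
+// For example (illustrative), in "x, y = y, x" the second assignment's
+// right-hand x is affected by the earlier assignment to x, so save()
+// copies it into a temporary (in early) before the late assignments run.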
+
+// readsMemory reports whether evaluating n directly reads from
+// memory that might be written to indirectly.
+func readsMemory(n ir.Node) bool {
+	switch n.Op() {
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PFUNC {
+			return false
+		}
+		return n.Addrtaken() || !n.OnStack()
+
+	case ir.OADD,
+		ir.OAND,
+		ir.OANDAND,
+		ir.OANDNOT,
+		ir.OBITNOT,
+		ir.OCONV,
+		ir.OCONVIFACE,
+		ir.OCONVNOP,
+		ir.ODIV,
+		ir.ODOT,
+		ir.ODOTTYPE,
+		ir.OLITERAL,
+		ir.OLSH,
+		ir.OMOD,
+		ir.OMUL,
+		ir.ONEG,
+		ir.ONIL,
+		ir.OOR,
+		ir.OOROR,
+		ir.OPAREN,
+		ir.OPLUS,
+		ir.ORSH,
+		ir.OSUB,
+		ir.OXOR:
+		return false
+	}
+
+	// Be conservative.
+	return true
+}
+
+// expand append(l1, l2...) to
+//   init {
+//     s := l1
+//     n := len(s) + len(l2)
+//     // Compare as uint so growslice can panic on overflow.
+//     if uint(n) > uint(cap(s)) {
+//       s = growslice(s, n)
+//     }
+//     s = s[:n]
+//     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+//   }
+//   s
+//
+// l2 is allowed to be a string.
+func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	walkAppendArgs(n, init)
+
+	l1 := n.Args[0]
+	l2 := n.Args[1]
+	l2 = cheapExpr(l2, init)
+	n.Args[1] = l2
+
+	var nodes ir.Nodes
+
+	// var s []T
+	s := typecheck.Temp(l1.Type())
+	nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
+
+	elemtype := s.Type().Elem()
+
+	// n := len(s) + len(l2)
+	nn := typecheck.Temp(types.Types[types.TINT])
+	nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))))
+
+	// if uint(n) > uint(cap(s))
+	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+	nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+	scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)
+
+	// instantiate growslice(typ *type, []any, int) []any
+	fn := typecheck.LookupRuntime("growslice")
+	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+
+	// s = growslice(T, s, n)
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
+	nodes.Append(nif)
+
+	// s = s[:n]
+	nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil)
+	nt.SetBounded(true)
+	nodes.Append(ir.NewAssignStmt(base.Pos, s, nt))
+
+	var ncopy ir.Node
+	if elemtype.HasPointers() {
+		// copy(s[len(l1):], l2)
+		slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
+		slice.SetType(s.Type())
+
+		ir.CurFunc.SetWBPos(n.Pos())
+
+		// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+		fn := typecheck.LookupRuntime("typedslicecopy")
+		fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+		ptr2, len2 := backingArrayPtrLen(l2)
+		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
+	} else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
+		// rely on runtime to instrument:
+		//  copy(s[len(l1):], l2)
+		// l2 can be a slice or string.
+		slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
+		slice.SetType(s.Type())
+
+		ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+		ptr2, len2 := backingArrayPtrLen(l2)
+
+		fn := typecheck.LookupRuntime("slicecopy")
+		fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+		ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Width))
+	} else {
+		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+		ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
+		ix.SetBounded(true)
+		addr := typecheck.NodAddr(ix)
+
+		sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2)
+
+		nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes)
+		nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Width))
+
+		// instantiate func memmove(to *any, frm *any, length uintptr)
+		fn := typecheck.LookupRuntime("memmove")
+		fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+		ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
+	}
+	ln := append(nodes, ncopy)
+
+	typecheck.Stmts(ln)
+	walkStmtList(ln)
+	init.Append(ln...)
+	return s
+}
+
+// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
+// isAppendOfMake assumes n has already been typechecked.
+func isAppendOfMake(n ir.Node) bool {
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return false
+	}
+
+	if n.Typecheck() == 0 {
+		base.Fatalf("missing typecheck: %+v", n)
+	}
+
+	if n.Op() != ir.OAPPEND {
+		return false
+	}
+	call := n.(*ir.CallExpr)
+	if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE {
+		return false
+	}
+
+	mk := call.Args[1].(*ir.MakeExpr)
+	if mk.Cap != nil {
+		return false
+	}
+
+	// y must either be an integer constant, or the largest possible
+	// positive value of the variable y must fit into a uint.
+
+	// typecheck made sure that constant arguments to make are not negative and fit into an int.
+
+	// Overflow of the len argument to make is handled by an explicit
+	// check of int(len) < 0 at runtime.
+	y := mk.Len
+	if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
+		return false
+	}
+
+	return true
+}
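+
+// For example, append(x, make([]T, y)...) matches, while
+// append(x, make([]T, y, z)...) (explicit cap) and
+// append(x, make([]T, y)) (no ... spread) do not.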
+
+// extendSlice rewrites append(l1, make([]T, l2)...) to
+//   init {
+//     if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
+//     } else {
+//       panicmakeslicelen()
+//     }
+//     s := l1
+//     n := len(s) + l2
+//     // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+//     // cap is a positive int and n can become negative when len(s) + l2
+//     // overflows int. Interpreting n when negative as uint makes it larger
+//     // than cap(s). growslice will check the int n arg and panic if n is
+//     // negative. This prevents the overflow from being undetected.
+//     if uint(n) > uint(cap(s)) {
+//       s = growslice(T, s, n)
+//     }
+//     s = s[:n]
+//     lptr := &l1[0]
+//     sptr := &s[0]
+//     if lptr == sptr || !T.HasPointers() {
+//       // growslice did not clear the whole underlying array (or did not get called)
+//       hp := &s[len(l1)]
+//       hn := l2 * sizeof(T)
+//       memclr(hp, hn)
+//     }
+//   }
+//   s
+func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
+	// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
+	// check of l2 < 0 at runtime which is generated below.
+	l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT])
+	l2 = typecheck.Expr(l2)
+	n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second().
+
+	walkAppendArgs(n, init)
+
+	l1 := n.Args[0]
+	l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs
+
+	var nodes []ir.Node
+
+	// if l2 >= 0 (likely happens), do nothing
+	nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil)
+	nifneg.Likely = true
+
+	// else panicmakeslicelen()
+	nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+	nodes = append(nodes, nifneg)
+
+	// s := l1
+	s := typecheck.Temp(l1.Type())
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
+
+	elemtype := s.Type().Elem()
+
+	// n := len(s) + l2
+	nn := typecheck.Temp(types.Types[types.TINT])
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
+
+	// if uint(n) > uint(cap(s))
+	nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+	capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+	nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil)
+
+	// instantiate growslice(typ *type, old []any, newcap int) []any
+	fn := typecheck.LookupRuntime("growslice")
+	fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+
+	// s = growslice(T, s, n)
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
+	nodes = append(nodes, nif)
+
+	// s = s[:n]
+	nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil)
+	nt.SetBounded(true)
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt))
+
+	// lptr := &l1[0]
+	l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo())
+	tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1)
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp))
+
+	// sptr := &s[0]
+	sptr := typecheck.Temp(elemtype.PtrTo())
+	tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+	nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp))
+
+	// hp := &s[len(l1)]
+	ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
+	ix.SetBounded(true)
+	hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+
+	// hn := l2 * sizeof(elem(s))
+	hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Width)), types.Types[types.TUINTPTR])
+
+	clrname := "memclrNoHeapPointers"
+	hasPointers := elemtype.HasPointers()
+	if hasPointers {
+		clrname = "memclrHasPointers"
+		ir.CurFunc.SetWBPos(n.Pos())
+	}
+
+	var clr ir.Nodes
+	clrfn := mkcall(clrname, nil, &clr, hp, hn)
+	clr.Append(clrfn)
+
+	if hasPointers {
+		// if l1ptr == sptr
+		nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil)
+		nifclr.Body = clr
+		nodes = append(nodes, nifclr)
+	} else {
+		nodes = append(nodes, clr...)
+	}
+
+	typecheck.Stmts(nodes)
+	walkStmtList(nodes)
+	init.Append(nodes...)
+	return s
+}
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
new file mode 100644
index 0000000..97f9de9
--- /dev/null
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -0,0 +1,687 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"fmt"
+	"go/constant"
+	"go/token"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/escape"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+)
+
+// walkAppend rewrites append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to cgen_append.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+//   init {
+//     s := src
+//     const argc = len(args) - 1
+//     if cap(s) - len(s) < argc {
+//	    s = growslice(s, len(s)+argc)
+//     }
+//     n := len(s)
+//     s = s[:n+argc]
+//     s[n] = a
+//     s[n+1] = b
+//     ...
+//   }
+//   s
+func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
+	if !ir.SameSafeExpr(dst, n.Args[0]) {
+		n.Args[0] = safeExpr(n.Args[0], init)
+		n.Args[0] = walkExpr(n.Args[0], init)
+	}
+	walkExprListSafe(n.Args[1:], init)
+
+	nsrc := n.Args[0]
+
+	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
+	// and n are a name or a literal, but those may index the slice we're
+	// modifying here. Fix explicitly.
+	// Using cheapExpr also makes sure that the evaluation
+	// of all arguments (and especially any panics) happen
+	// before we begin to modify the slice in a visible way.
+	ls := n.Args[1:]
+	for i, n := range ls {
+		n = cheapExpr(n, init)
+		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
+			n = walkExpr(n, init)
+		}
+		ls[i] = n
+	}
+
+	argc := len(n.Args) - 1
+	if argc < 1 {
+		return nsrc
+	}
+
+	// General case, with no function calls left as arguments.
+	// Leave for code generation, except that instrumentation requires the old form.
+	if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
+		return n
+	}
+
+	var l []ir.Node
+
+	ns := typecheck.Temp(nsrc.Type())
+	l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
+
+	na := ir.NewInt(int64(argc))                 // const argc
+	nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
+	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
+
+	fn := typecheck.LookupRuntime("growslice") //   growslice(<type>, old []T, mincap int) (ret []T)
+	fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
+
+	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
+		ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
+
+	l = append(l, nif)
+
+	nn := typecheck.Temp(types.Types[types.TINT])
+	l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)
+
+	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns, nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) // ...s[:n+argc]
+	slice.SetBounded(true)
+	l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
+
+	ls = n.Args[1:]
+	for i, n := range ls {
+		ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
+		ix.SetBounded(true)
+		l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
+		if i+1 < len(ls) {
+			l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
+		}
+	}
+
+	typecheck.Stmts(l)
+	walkStmtList(l)
+	init.Append(l...)
+	return ns
+}
+
+// walkClose walks an OCLOSE node.
+func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+	// cannot use chanfn - closechan takes any, not chan any
+	fn := typecheck.LookupRuntime("closechan")
+	fn = typecheck.SubstArgTypes(fn, n.X.Type())
+	return mkcall1(fn, nil, init, n.X)
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+//   n := len(a)
+//   if n > len(b) { n = len(b) }
+//   if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
+	if n.X.Type().Elem().HasPointers() {
+		ir.CurFunc.SetWBPos(n.Pos())
+		fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
+		n.X = cheapExpr(n.X, init)
+		ptrL, lenL := backingArrayPtrLen(n.X)
+		n.Y = cheapExpr(n.Y, init)
+		ptrR, lenR := backingArrayPtrLen(n.Y)
+		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
+	}
+
+	if runtimecall {
+		// rely on runtime to instrument:
+		//  copy(n.X, n.Y)
+		// n.Y can be a slice or string.
+
+		n.X = cheapExpr(n.X, init)
+		ptrL, lenL := backingArrayPtrLen(n.X)
+		n.Y = cheapExpr(n.Y, init)
+		ptrR, lenR := backingArrayPtrLen(n.Y)
+
+		fn := typecheck.LookupRuntime("slicecopy")
+		fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
+
+		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width))
+	}
+
+	n.X = walkExpr(n.X, init)
+	n.Y = walkExpr(n.Y, init)
+	nl := typecheck.Temp(n.X.Type())
+	nr := typecheck.Temp(n.Y.Type())
+	var l []ir.Node
+	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
+	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
+
+	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
+	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
+
+	nlen := typecheck.Temp(types.Types[types.TINT])
+
+	// n = len(to)
+	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
+
+	// if n > len(frm) { n = len(frm) }
+	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+
+	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
+	nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
+	l = append(l, nif)
+
+	// if to.ptr != frm.ptr { memmove( ... ) }
+	ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
+	ne.Likely = true
+	l = append(l, ne)
+
+	fn := typecheck.LookupRuntime("memmove")
+	fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
+	nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
+	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
+	ne.Body.Append(setwid)
+	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width))
+	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
+	ne.Body.Append(call)
+
+	typecheck.Stmts(l)
+	walkStmtList(l)
+	init.Append(l...)
+	return nlen
+}
+
+// walkDelete walks an ODELETE node.
+func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
+	init.Append(ir.TakeInit(n)...)
+	map_ := n.Args[0]
+	key := n.Args[1]
+	map_ = walkExpr(map_, init)
+	key = walkExpr(key, init)
+
+	t := map_.Type()
+	fast := mapfast(t)
+	if fast == mapslow {
+		// order.stmt made sure key is addressable.
+		key = typecheck.NodAddr(key)
+	}
+	return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
+}
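+
+// For illustration (a sketch, not the exact generated code):
+//
+//	delete(m, k)
+//
+// becomes roughly
+//
+//	runtime.mapdelete(maptype, m, &k)
+//
+// or one of the mapdelete_fast* variants when the key type permits,
+// in which case the key is passed by value.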
+
+// walkLenCap walks an OLEN or OCAP node.
+func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+	if isRuneCount(n) {
+		// Replace len([]rune(string)) with runtime.countrunes(string).
+		return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
+	}
+
+	n.X = walkExpr(n.X, init)
+
+	// replace len(*[10]int) with 10.
+	// delayed until now to preserve side effects.
+	t := n.X.Type()
+
+	if t.IsPtr() {
+		t = t.Elem()
+	}
+	if t.IsArray() {
+		safeExpr(n.X, init)
+		con := typecheck.OrigInt(n, t.NumElem())
+		con.SetTypecheck(1)
+		return con
+	}
+	return n
+}
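+
+// For illustration (a sketch, not the exact generated code):
+//
+//	len([]rune(s))    // becomes runtime.countrunes(s)
+//	len(p)            // for p of type *[10]int: becomes the constant 10,
+//	                  // with p still evaluated for its side effects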
+
+// walkMakeChan walks an OMAKECHAN node.
+func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+	// When size fits into an int, use makechan instead of
+	// makechan64, which is faster and shorter on 32-bit platforms.
+	size := n.Len
+	fnname := "makechan64"
+	argtype := types.Types[types.TINT64]
+
+	// Type checking guarantees that TIDEAL size is positive and fits in an int.
+	// The case of size overflow when converting TUINT or TUINTPTR to TINT
+	// will be handled by the negative range checks in makechan during runtime.
+	if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
+		fnname = "makechan"
+		argtype = types.Types[types.TINT]
+	}
+
+	return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
+}
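+
+// For illustration (a sketch, not the exact generated code):
+//
+//	make(chan T, n)    // becomes runtime.makechan(chantype, int(n)),
+//	                   // or makechan64 when n's type might not fit in an int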
+
+// walkMakeMap walks an OMAKEMAP node.
+func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+	t := n.Type()
+	hmapType := reflectdata.MapType(t)
+	hint := n.Len
+
+	// var h *hmap
+	var h ir.Node
+	if n.Esc() == ir.EscNone {
+		// Allocate hmap on stack.
+
+		// var hv hmap
+		// h = &hv
+		h = stackTempAddr(init, hmapType)
+
+		// Allocate one bucket pointed to by hmap.buckets on the stack if hint
+		// is not larger than BUCKETSIZE. In case hint is larger than
+		// BUCKETSIZE, runtime.makemap will allocate the buckets on the heap.
+		// Maximum key and elem size is 128 bytes, larger objects
+		// are stored with an indirection. So max bucket size is 2048+eps.
+		if !ir.IsConst(hint, constant.Int) ||
+			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+
+			// In case hint is larger than BUCKETSIZE runtime.makemap
+			// will allocate the buckets on the heap, see #20184
+			//
+			// if hint <= BUCKETSIZE {
+			//     var bv bmap
+			//     b = &bv
+			//     h.buckets = b
+			// }
+
+			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
+			nif.Likely = true
+
+			// var bv bmap
+			// b = &bv
+			b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))
+
+			// h.buckets = b
+			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
+			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
+			nif.Body.Append(na)
+			appendWalkStmt(init, nif)
+		}
+	}
+
+	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+		// Handling make(map[any]any) and
+		// make(map[any]any, hint) where hint <= BUCKETSIZE
+		// specially allows for faster map initialization and
+		// improves binary size by using calls with fewer arguments.
+		// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
+		// and no buckets will be allocated by makemap. Therefore,
+		// no buckets need to be allocated in this code path.
+		if n.Esc() == ir.EscNone {
+			// Only need to initialize h.hash0 since
+			// hmap h has been allocated on the stack already.
+			// h.hash0 = fastrand()
+			rand := mkcall("fastrand", types.Types[types.TUINT32], init)
+			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
+			return typecheck.ConvNop(h, t)
+		}
+		// Call runtime.makemap_small to allocate an
+		// hmap on the heap and initialize hmap's hash0 field.
+		fn := typecheck.LookupRuntime("makemap_small")
+		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+		return mkcall1(fn, n.Type(), init)
+	}
+
+	if n.Esc() != ir.EscNone {
+		h = typecheck.NodNil()
+	}
+	// Map initialization with a variable or large hint is
+	// more complicated. We therefore generate a call to
+	// runtime.makemap to initialize hmap and allocate the
+	// map buckets.
+
+	// When hint fits into an int, use makemap instead of
+	// makemap64, which is faster and shorter on 32-bit platforms.
+	fnname := "makemap64"
+	argtype := types.Types[types.TINT64]
+
+	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
+	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
+	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
+	// will be handled by the negative range checks in makemap during runtime.
+	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
+		fnname = "makemap"
+		argtype = types.Types[types.TINT]
+	}
+
+	fn := typecheck.LookupRuntime(fnname)
+	fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
+	return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
+}
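+
+// For illustration (a sketch, not the exact generated code), the cases
+// above lower roughly as follows:
+//
+//	make(map[K]V)        // non-escaping, small hint: hmap and one bucket
+//	                     // on the stack, h.hash0 = fastrand()
+//	make(map[K]V, 7)     // escaping, constant hint <= BUCKETSIZE:
+//	                     // runtime.makemap_small()
+//	make(map[K]V, n)     // variable or large hint:
+//	                     // runtime.makemap(maptype, n, h)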
+
+// walkMakeSlice walks an OMAKESLICE node.
+func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+	l := n.Len
+	r := n.Cap
+	if r == nil {
+		r = safeExpr(l, init)
+		l = r
+	}
+	t := n.Type()
+	if t.Elem().NotInHeap() {
+		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+	}
+	if n.Esc() == ir.EscNone {
+		if why := escape.HeapAllocReason(n); why != "" {
+			base.Fatalf("%v has EscNone, but %v", n, why)
+		}
+		// var arr [r]T
+		// n = arr[:l]
+		i := typecheck.IndexConst(r)
+		if i < 0 {
+			base.Fatalf("walkExpr: invalid index %v", r)
+		}
+
+		// cap is constrained to [0,2^31) or [0,2^63) depending on whether
+		// we're on a 32-bit or 64-bit system. So it's safe to do:
+		//
+		// if uint64(len) > cap {
+		//     if len < 0 { panicmakeslicelen() }
+		//     panicmakeslicecap()
+		// }
+		nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
+		niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
+		niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+		nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+		init.Append(typecheck.Stmt(nif))
+
+		t = types.NewArray(t.Elem(), i) // [r]T
+		var_ := typecheck.Temp(t)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))  // zero temp
+		r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
+		// The conv is necessary in case n.Type is named.
+		return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
+	}
+
+	// n escapes; set up a call to makeslice.
+	// When len and cap can fit into an int, use makeslice instead of
+	// makeslice64, which is faster and shorter on 32-bit platforms.
+
+	len, cap := l, r
+
+	fnname := "makeslice64"
+	argtype := types.Types[types.TINT64]
+
+	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+	// will be handled by the negative range checks in makeslice during runtime.
+	if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+		(cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
+		fnname = "makeslice"
+		argtype = types.Types[types.TINT]
+	}
+
+	m := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
+	m.SetType(t)
+
+	fn := typecheck.LookupRuntime(fnname)
+	m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
+	m.Ptr.MarkNonNil()
+	m.Len = typecheck.Conv(len, types.Types[types.TINT])
+	m.Cap = typecheck.Conv(cap, types.Types[types.TINT])
+	return walkExpr(typecheck.Expr(m), init)
+}
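+
+// For illustration (a sketch, not the exact generated code): when the
+// slice does not escape and the capacity is a small constant,
+//
+//	s := make([]T, l, 8)
+//
+// becomes roughly
+//
+//	var arr [8]T
+//	s := arr[:l]    // after checking l against 8
+//
+// while an escaping make([]T, l, c) becomes a call to
+// runtime.makeslice (or makeslice64 for wide length types).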
+
+// walkMakeSliceCopy walks an OMAKESLICECOPY node.
+func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+	if n.Esc() == ir.EscNone {
+		base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+	}
+
+	t := n.Type()
+	if t.Elem().NotInHeap() {
+		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+	}
+
+	length := typecheck.Conv(n.Len, types.Types[types.TINT])
+	copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
+	copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
+
+	if !t.Elem().HasPointers() && n.Bounded() {
+		// When len(to)==len(from) and elements have no pointers:
+		// replace make+copy with runtime.mallocgc+runtime.memmove.
+
+		// We do not check for overflow of len(to)*elem.Width here
+		// since len(from) is an existing checked slice capacity
+		// with the same elem.Width for the from slice.
+		size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR]))
+
+		// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+		fn := typecheck.LookupRuntime("mallocgc")
+		sh := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
+		sh.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
+		sh.Ptr.MarkNonNil()
+		sh.Len = length
+		sh.Cap = length
+		sh.SetType(t)
+
+		s := typecheck.Temp(t)
+		r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
+		r = walkExpr(r, init)
+		init.Append(r)
+
+		// instantiate memmove(to *any, frm *any, size uintptr)
+		fn = typecheck.LookupRuntime("memmove")
+		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+		ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
+		init.Append(walkExpr(typecheck.Stmt(ncopy), init))
+
+		return s
+	}
+	// Replace make+copy with runtime.makeslicecopy.
+	// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+	fn := typecheck.LookupRuntime("makeslicecopy")
+	s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
+	s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
+	s.Ptr.MarkNonNil()
+	s.Len = length
+	s.Cap = length
+	s.SetType(t)
+	return walkExpr(typecheck.Expr(s), init)
+}
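+
+// For illustration (a sketch, not the exact generated code): the
+// OMAKESLICECOPY node, produced by the order phase for patterns like
+//
+//	s := make([]T, len(from))
+//	copy(s, from)
+//
+// lowers either to mallocgc+memmove (when T has no pointers and the
+// lengths are known equal) or to a single runtime.makeslicecopy call.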
+
+// walkNew walks an ONEW node.
+func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+	t := n.Type().Elem()
+	if t.NotInHeap() {
+		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
+	}
+	if n.Esc() == ir.EscNone {
+		if t.Size() >= ir.MaxImplicitStackVarSize {
+			base.Fatalf("large ONEW with EscNone: %v", n)
+		}
+		return stackTempAddr(init, t)
+	}
+	types.CalcSize(t)
+	n.MarkNonNil()
+	return n
+}
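+
+// For illustration (a sketch, not the exact generated code): a
+// non-escaping new(T) becomes the address of a zeroed stack
+// temporary, while an escaping new(T) is left as ONEW for SSA
+// generation to lower to a runtime allocation.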
+
+// walkPrint walks an OPRINT or OPRINTN node and generates code for it.
+func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+	// Hoist all the argument evaluation up before the lock.
+	walkExprListCheap(nn.Args, init)
+
+	// For println, add " " between elements and "\n" at the end.
+	if nn.Op() == ir.OPRINTN {
+		s := nn.Args
+		t := make([]ir.Node, 0, len(s)*2)
+		for i, n := range s {
+			if i != 0 {
+				t = append(t, ir.NewString(" "))
+			}
+			t = append(t, n)
+		}
+		t = append(t, ir.NewString("\n"))
+		nn.Args = t
+	}
+
+	// Collapse runs of constant strings.
+	s := nn.Args
+	t := make([]ir.Node, 0, len(s))
+	for i := 0; i < len(s); {
+		var strs []string
+		for i < len(s) && ir.IsConst(s[i], constant.String) {
+			strs = append(strs, ir.StringVal(s[i]))
+			i++
+		}
+		if len(strs) > 0 {
+			t = append(t, ir.NewString(strings.Join(strs, "")))
+		}
+		if i < len(s) {
+			t = append(t, s[i])
+			i++
+		}
+	}
+	nn.Args = t
+
+	calls := []ir.Node{mkcall("printlock", nil, init)}
+	for i, n := range nn.Args {
+		if n.Op() == ir.OLITERAL {
+			if n.Type() == types.UntypedRune {
+				n = typecheck.DefaultLit(n, types.RuneType)
+			}
+
+			switch n.Val().Kind() {
+			case constant.Int:
+				n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+
+			case constant.Float:
+				n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
+			}
+		}
+
+		if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+			n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+		}
+		n = typecheck.DefaultLit(n, nil)
+		nn.Args[i] = n
+		if n.Type() == nil || n.Type().Kind() == types.TFORW {
+			continue
+		}
+
+		var on *ir.Name
+		switch n.Type().Kind() {
+		case types.TINTER:
+			if n.Type().IsEmptyInterface() {
+				on = typecheck.LookupRuntime("printeface")
+			} else {
+				on = typecheck.LookupRuntime("printiface")
+			}
+			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+		case types.TPTR:
+			if n.Type().Elem().NotInHeap() {
+				on = typecheck.LookupRuntime("printuintptr")
+				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+				n.SetType(types.Types[types.TUNSAFEPTR])
+				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+				n.SetType(types.Types[types.TUINTPTR])
+				break
+			}
+			fallthrough
+		case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
+			on = typecheck.LookupRuntime("printpointer")
+			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+		case types.TSLICE:
+			on = typecheck.LookupRuntime("printslice")
+			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+		case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+			if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
+				on = typecheck.LookupRuntime("printhex")
+			} else {
+				on = typecheck.LookupRuntime("printuint")
+			}
+		case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
+			on = typecheck.LookupRuntime("printint")
+		case types.TFLOAT32, types.TFLOAT64:
+			on = typecheck.LookupRuntime("printfloat")
+		case types.TCOMPLEX64, types.TCOMPLEX128:
+			on = typecheck.LookupRuntime("printcomplex")
+		case types.TBOOL:
+			on = typecheck.LookupRuntime("printbool")
+		case types.TSTRING:
+			cs := ""
+			if ir.IsConst(n, constant.String) {
+				cs = ir.StringVal(n)
+			}
+			switch cs {
+			case " ":
+				on = typecheck.LookupRuntime("printsp")
+			case "\n":
+				on = typecheck.LookupRuntime("printnl")
+			default:
+				on = typecheck.LookupRuntime("printstring")
+			}
+		default:
+			badtype(ir.OPRINT, n.Type(), nil)
+			continue
+		}
+
+		r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
+		if params := on.Type().Params().FieldSlice(); len(params) > 0 {
+			t := params[0].Type
+			if !types.Identical(t, n.Type()) {
+				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+				n.SetType(t)
+			}
+			r.Args.Append(n)
+		}
+		calls = append(calls, r)
+	}
+
+	calls = append(calls, mkcall("printunlock", nil, init))
+
+	typecheck.Stmts(calls)
+	walkExprList(calls, init)
+
+	r := ir.NewBlockStmt(base.Pos, nil)
+	r.List = calls
+	return walkStmt(typecheck.Stmt(r))
+}
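+
+// For illustration (a sketch, not the exact generated code):
+//
+//	println("x:", x)    // x of type int
+//
+// becomes roughly
+//
+//	printlock()
+//	printstring("x: ")
+//	printint(int64(x))
+//	printnl()
+//	printunlock()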
+
+func badtype(op ir.Op, tl, tr *types.Type) {
+	var s string
+	if tl != nil {
+		s += fmt.Sprintf("\n\t%v", tl)
+	}
+	if tr != nil {
+		s += fmt.Sprintf("\n\t%v", tr)
+	}
+
+	// common mistake: *struct and *interface.
+	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
+		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
+			s += "\n\t(*struct vs *interface)"
+		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
+			s += "\n\t(*interface vs *struct)"
+		}
+	}
+
+	base.Errorf("illegal types for operand: %v%s", op, s)
+}
+
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
+	fn := typecheck.LookupRuntime(name)
+	fn = typecheck.SubstArgTypes(fn, l, r)
+	return fn
+}
+
+// isRuneCount reports whether n is of the form len([]rune(string)).
+// These are optimized into a call to runtime.countrunes.
+func isRuneCount(n ir.Node) bool {
+	return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
+}
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
new file mode 100644
index 0000000..1d1cbc2
--- /dev/null
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -0,0 +1,199 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// directClosureCall rewrites a direct call of a function literal into
+// a normal function call with closure variables passed as arguments.
+// This avoids allocation of a closure object.
+//
+// For illustration, the following call:
+//
+//	func(a int) {
+//		println(byval)
+//		byref++
+//	}(42)
+//
+// becomes:
+//
+//	func(byval int, &byref *int, a int) {
+//		println(byval)
+//		(*&byref)++
+//	}(byval, &byref, 42)
+func directClosureCall(n *ir.CallExpr) {
+	clo := n.X.(*ir.ClosureExpr)
+	clofn := clo.Func
+
+	if ir.IsTrivialClosure(clo) {
+		return // leave for walkClosure to handle
+	}
+
+	// We are going to insert captured variables before input args.
+	var params []*types.Field
+	var decls []*ir.Name
+	for _, v := range clofn.ClosureVars {
+		if !v.Byval() {
+			// If v of type T is captured by reference,
+			// we introduce function param &v *T
+			// and v remains PAUTOHEAP with &v heapaddr
+			// (accesses will implicitly deref &v).
+
+			addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name))
+			addr.Curfn = clofn
+			addr.SetType(types.NewPtr(v.Type()))
+			v.Heapaddr = addr
+			v = addr
+		}
+
+		v.Class = ir.PPARAM
+		decls = append(decls, v)
+
+		fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
+		fld.Nname = v
+		params = append(params, fld)
+	}
+
+	// f is ONAME of the actual function.
+	f := clofn.Nname
+	typ := f.Type()
+
+	// Create new function type with parameters prepended, and
+	// then update type and declarations.
+	typ = types.NewSignature(typ.Pkg(), nil, append(params, typ.Params().FieldSlice()...), typ.Results().FieldSlice())
+	f.SetType(typ)
+	clofn.Dcl = append(decls, clofn.Dcl...)
+
+	// Rewrite call.
+	n.X = f
+	n.Args.Prepend(closureArgs(clo)...)
+
+	// Update the call expression's type. We need to do this
+	// because typecheck gave it the result type of the OCLOSURE
+	// node, but we only rewrote the ONAME node's type. Logically,
+	// they're the same, but the stack offsets probably changed.
+	if typ.NumResults() == 1 {
+		n.SetType(typ.Results().Field(0).Type)
+	} else {
+		n.SetType(typ.Results())
+	}
+
+	// Add to Closures for enqueueFunc. It's no longer a proper
+	// closure, but we may have already skipped over it in the
+	// functions list as a non-trivial closure, so this just
+	// ensures it's compiled.
+	ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn)
+}
+
+func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
+	clofn := clo.Func
+
+	// If no closure vars, don't bother wrapping.
+	if ir.IsTrivialClosure(clo) {
+		if base.Debug.Closure > 0 {
+			base.WarnfAt(clo.Pos(), "closure converted to global")
+		}
+		return clofn.Nname
+	}
+
+	// The closure is not trivial or directly called, so it's going to stay a closure.
+	ir.ClosureDebugRuntimeCheck(clo)
+	clofn.SetNeedctxt(true)
+	ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn)
+
+	typ := typecheck.ClosureType(clo)
+
+	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
+	clos.SetEsc(clo.Esc())
+	clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...)
+
+	addr := typecheck.NodAddr(clos)
+	addr.SetEsc(clo.Esc())
+
+	// Force type conversion from *struct to the func type.
+	cfn := typecheck.ConvNop(addr, clo.Type())
+
+	// non-escaping temp to use, if any.
+	if x := clo.Prealloc; x != nil {
+		if !types.Identical(typ, x.Type()) {
+			panic("closure type does not match order's assigned type")
+		}
+		addr.Prealloc = x
+		clo.Prealloc = nil
+	}
+
+	return walkExpr(cfn, init)
+}
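+
+// For illustration (a sketch, not the exact generated code): a closure
+// capturing x by reference and y by value is built roughly as
+//
+//	clos = &struct{F uintptr; x *int; y int}{fn·f, &x, y}
+//
+// which is then converted, with no change in representation, to the
+// func type, mirroring the method-value case in walkCallPart below.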
+
+// closureArgs returns a slice of expressions that can be used to
+// initialize the given closure's free variables. These correspond
+// one-to-one with the variables in clo.Func.ClosureVars, and will be
+// either an ONAME node (if the variable is captured by value) or an
+// OADDR-of-ONAME node (if not).
+func closureArgs(clo *ir.ClosureExpr) []ir.Node {
+	fn := clo.Func
+
+	args := make([]ir.Node, len(fn.ClosureVars))
+	for i, v := range fn.ClosureVars {
+		var outer ir.Node
+		outer = v.Outer
+		if !v.Byval() {
+			outer = typecheck.NodAddrAt(fn.Pos(), outer)
+		}
+		args[i] = typecheck.Expr(outer)
+	}
+	return args
+}
+
+func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+	// Create closure in the form of a composite literal.
+	// For x.M with receiver (x) type T, the generated code looks like:
+	//
+	//	clos = &struct{F uintptr; R T}{T.M·f, x}
+	//
+	// Like walkClosure above.
+
+	if n.X.Type().IsInterface() {
+		// Trigger panic for method on nil interface now.
+		// Otherwise it happens in the wrapper and is confusing.
+		n.X = cheapExpr(n.X, init)
+		n.X = walkExpr(n.X, nil)
+
+		tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
+
+		c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
+		c.SetTypecheck(1)
+		init.Append(c)
+	}
+
+	typ := typecheck.PartialCallType(n)
+
+	clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
+	clos.SetEsc(n.Esc())
+	clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, typecheck.MethodValueWrapper(n).Nname), n.X}
+
+	addr := typecheck.NodAddr(clos)
+	addr.SetEsc(n.Esc())
+
+	// Force type conversion from *struct to the func type.
+	cfn := typecheck.ConvNop(addr, n.Type())
+
+	// non-escaping temp to use, if any.
+	if x := n.Prealloc; x != nil {
+		if !types.Identical(typ, x.Type()) {
+			panic("partial call type does not match order's assigned type")
+		}
+		addr.Prealloc = x
+		n.Prealloc = nil
+	}
+
+	return walkExpr(cfn, init)
+}
diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go
new file mode 100644
index 0000000..7c385c0
--- /dev/null
+++ b/src/cmd/compile/internal/walk/compare.go
@@ -0,0 +1,507 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"encoding/binary"
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/sys"
+)
+
+// The result of walkCompare MUST be assigned back to n, e.g.
+// 	n.Left = walkCompare(n.Left, init)
+func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+	if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
+		return walkCompareInterface(n, init)
+	}
+
+	if n.X.Type().IsString() && n.Y.Type().IsString() {
+		return walkCompareString(n, init)
+	}
+
+	n.X = walkExpr(n.X, init)
+	n.Y = walkExpr(n.Y, init)
+
+	// Given mixed interface/concrete comparison,
+	// rewrite into types-equal && data-equal.
+	// This is efficient, avoids allocations, and avoids runtime calls.
+	if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
+		// Preserve side-effects in case of short-circuiting; see #32187.
+		l := cheapExpr(n.X, init)
+		r := cheapExpr(n.Y, init)
+		// Swap so that l is the interface value and r is the concrete value.
+		if n.Y.Type().IsInterface() {
+			l, r = r, l
+		}
+
+		// Handle both == and !=.
+		eq := n.Op()
+		andor := ir.OOROR
+		if eq == ir.OEQ {
+			andor = ir.OANDAND
+		}
+		// Check for types equal.
+		// For empty interface, this is:
+		//   l.tab == type(r)
+		// For non-empty interface, this is:
+		//   l.tab != nil && l.tab._type == type(r)
+		var eqtype ir.Node
+		tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
+		rtyp := reflectdata.TypePtr(r.Type())
+		if l.Type().IsEmptyInterface() {
+			tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
+			tab.SetTypecheck(1)
+			eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
+		} else {
+			nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
+			match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
+			eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
+		}
+		// Check for data equal.
+		eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
+		// Put it all together.
+		expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
+		return finishCompare(n, expr, init)
+	}
+
+	// Must be comparison of array or struct.
+	// Otherwise back end handles it.
+	// While we're here, decide whether to
+	// inline or call an eq alg.
+	t := n.X.Type()
+	var inline bool
+
+	maxcmpsize := int64(4)
+	unalignedLoad := canMergeLoads()
+	if unalignedLoad {
+		// Keep this low enough to generate less code than a function call.
+		maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
+	}
+
+	switch t.Kind() {
+	default:
+		if base.Debug.Libfuzzer != 0 && t.IsInteger() {
+			n.X = cheapExpr(n.X, init)
+			n.Y = cheapExpr(n.Y, init)
+
+			// If exactly one comparison operand is
+			// constant, invoke the constcmp functions
+			// instead, and arrange for the constant
+			// operand to be the first argument.
+			l, r := n.X, n.Y
+			if r.Op() == ir.OLITERAL {
+				l, r = r, l
+			}
+			constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
+
+			var fn string
+			var paramType *types.Type
+			switch t.Size() {
+			case 1:
+				fn = "libfuzzerTraceCmp1"
+				if constcmp {
+					fn = "libfuzzerTraceConstCmp1"
+				}
+				paramType = types.Types[types.TUINT8]
+			case 2:
+				fn = "libfuzzerTraceCmp2"
+				if constcmp {
+					fn = "libfuzzerTraceConstCmp2"
+				}
+				paramType = types.Types[types.TUINT16]
+			case 4:
+				fn = "libfuzzerTraceCmp4"
+				if constcmp {
+					fn = "libfuzzerTraceConstCmp4"
+				}
+				paramType = types.Types[types.TUINT32]
+			case 8:
+				fn = "libfuzzerTraceCmp8"
+				if constcmp {
+					fn = "libfuzzerTraceConstCmp8"
+				}
+				paramType = types.Types[types.TUINT64]
+			default:
+				base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
+			}
+			init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
+		}
+		return n
+	case types.TARRAY:
+		// We can compare several elements at once with 2/4/8 byte integer compares
+		inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
+	case types.TSTRUCT:
+		inline = t.NumComponents(types.IgnoreBlankFields) <= 4
+	}
+
+	cmpl := n.X
+	for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+		cmpl = cmpl.(*ir.ConvExpr).X
+	}
+	cmpr := n.Y
+	for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+		cmpr = cmpr.(*ir.ConvExpr).X
+	}
+
+	// We chose not to inline. Call the equality function directly.
+	if !inline {
+		// eq algs take pointers; cmpl and cmpr must be addressable
+		if !ir.IsAddressable(cmpl) || !ir.IsAddressable(cmpr) {
+			base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+		}
+
+		fn, needsize := eqFor(t)
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+		call.Args.Append(typecheck.NodAddr(cmpl))
+		call.Args.Append(typecheck.NodAddr(cmpr))
+		if needsize {
+			call.Args.Append(ir.NewInt(t.Width))
+		}
+		res := ir.Node(call)
+		if n.Op() != ir.OEQ {
+			res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
+		}
+		return finishCompare(n, res, init)
+	}
+
+	// inline: build boolean expression comparing element by element
+	andor := ir.OANDAND
+	if n.Op() == ir.ONE {
+		andor = ir.OOROR
+	}
+	var expr ir.Node
+	compare := func(el, er ir.Node) {
+		a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
+		if expr == nil {
+			expr = a
+		} else {
+			expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
+		}
+	}
+	cmpl = safeExpr(cmpl, init)
+	cmpr = safeExpr(cmpr, init)
+	if t.IsStruct() {
+		for _, f := range t.Fields().Slice() {
+			sym := f.Sym
+			if sym.IsBlank() {
+				continue
+			}
+			compare(
+				ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym),
+				ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym),
+			)
+		}
+	} else {
+		step := int64(1)
+		remains := t.NumElem() * t.Elem().Width
+		combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
+		combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
+		combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
+		for i := int64(0); remains > 0; {
+			var convType *types.Type
+			switch {
+			case remains >= 8 && combine64bit:
+				convType = types.Types[types.TINT64]
+				step = 8 / t.Elem().Width
+			case remains >= 4 && combine32bit:
+				convType = types.Types[types.TUINT32]
+				step = 4 / t.Elem().Width
+			case remains >= 2 && combine16bit:
+				convType = types.Types[types.TUINT16]
+				step = 2 / t.Elem().Width
+			default:
+				step = 1
+			}
+			if step == 1 {
+				compare(
+					ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)),
+					ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)),
+				)
+				i++
+				remains -= t.Elem().Width
+			} else {
+				elemType := t.Elem().ToUnsigned()
+				cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)))
+				cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
+				cmplw = typecheck.Conv(cmplw, convType) // widen
+				cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)))
+				cmprw = typecheck.Conv(cmprw, elemType)
+				cmprw = typecheck.Conv(cmprw, convType)
+				// For code like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+				// ssa will generate a single large load.
+				for offset := int64(1); offset < step; offset++ {
+					lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset)))
+					lb = typecheck.Conv(lb, elemType)
+					lb = typecheck.Conv(lb, convType)
+					lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Width*offset))
+					cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
+					rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset)))
+					rb = typecheck.Conv(rb, elemType)
+					rb = typecheck.Conv(rb, convType)
+					rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Width*offset))
+					cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
+				}
+				compare(cmplw, cmprw)
+				i += step
+				remains -= step * t.Elem().Width
+			}
+		}
+	}
+	if expr == nil {
+		expr = ir.NewBool(n.Op() == ir.OEQ)
+		// We still need to use cmpl and cmpr, in case they contain
+		// an expression which might panic. See issue 23837.
+		t := typecheck.Temp(cmpl.Type())
+		a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl))
+		a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr))
+		init.Append(a1, a2)
+	}
+	return finishCompare(n, expr, init)
+}
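+
+// For illustration (a sketch, not the exact generated code): on an
+// architecture that can merge loads, comparing two [4]byte arrays a
+// and b inlines to a single 32-bit compare, roughly
+//
+//	uint32(a[0])|uint32(a[1])<<8|uint32(a[2])<<16|uint32(a[3])<<24 ==
+//		uint32(b[0])|uint32(b[1])<<8|uint32(b[2])<<16|uint32(b[3])<<24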
+
+func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+	n.Y = cheapExpr(n.Y, init)
+	n.X = cheapExpr(n.X, init)
+	eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
+	var cmp ir.Node
+	if n.Op() == ir.OEQ {
+		cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
+	} else {
+		eqtab.SetOp(ir.ONE)
+		cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
+	}
+	return finishCompare(n, cmp, init)
+}
+
+func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+	// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
+	var cs, ncs ir.Node // const string, non-const string
+	switch {
+	case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
+		// ignore; will be constant evaluated
+	case ir.IsConst(n.X, constant.String):
+		cs = n.X
+		ncs = n.Y
+	case ir.IsConst(n.Y, constant.String):
+		cs = n.Y
+		ncs = n.X
+	}
+	if cs != nil {
+		cmp := n.Op()
+		// Our comparison below assumes that the non-constant string
+		// is on the left hand side, so rewrite "" cmp x to x cmp "".
+		// See issue 24817.
+		if ir.IsConst(n.X, constant.String) {
+			cmp = brrev(cmp)
+		}
+
+		// maxRewriteLen was chosen empirically.
+		// It is the value that minimizes cmd/go file size
+		// across most architectures.
+		// See the commit description for CL 26758 for details.
+		maxRewriteLen := 6
+		// Some architectures can load an unaligned byte sequence as one word.
+		// So we can cover longer strings with the same amount of code.
+		canCombineLoads := canMergeLoads()
+		combine64bit := false
+		if canCombineLoads {
+			// Keep this low enough to generate less code than a function call.
+			maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
+			combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
+		}
+
+		var and ir.Op
+		switch cmp {
+		case ir.OEQ:
+			and = ir.OANDAND
+		case ir.ONE:
+			and = ir.OOROR
+		default:
+			// Don't do byte-wise comparisons for <, <=, etc.
+			// They're fairly complicated.
+			// Length-only checks are ok, though.
+			maxRewriteLen = 0
+		}
+		if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
+			if len(s) > 0 {
+				ncs = safeExpr(ncs, init)
+			}
+			r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s)))))
+			remains := len(s)
+			for i := 0; remains > 0; {
+				if remains == 1 || !canCombineLoads {
+					cb := ir.NewInt(int64(s[i]))
+					ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i)))
+					r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
+					remains--
+					i++
+					continue
+				}
+				var step int
+				var convType *types.Type
+				switch {
+				case remains >= 8 && combine64bit:
+					convType = types.Types[types.TINT64]
+					step = 8
+				case remains >= 4:
+					convType = types.Types[types.TUINT32]
+					step = 4
+				case remains >= 2:
+					convType = types.Types[types.TUINT16]
+					step = 2
+				}
+				ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType)
+				csubstr := int64(s[i])
+				// Calculate a large constant from bytes as a sequence of shifts and ors.
+				// Like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+				// ssa will combine this into a single large load.
+				for offset := 1; offset < step; offset++ {
+					b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType)
+					b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
+					ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
+					csubstr |= int64(s[i+offset]) << uint8(8*offset)
+				}
+				csubstrPart := ir.NewInt(csubstr)
+				// Compare "step" bytes at once.
+				r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
+				remains -= step
+				i += step
+			}
+			return finishCompare(n, r, init)
+		}
+	}
+
+	var r ir.Node
+	if n.Op() == ir.OEQ || n.Op() == ir.ONE {
+		// prepare for rewrite below
+		n.X = cheapExpr(n.X, init)
+		n.Y = cheapExpr(n.Y, init)
+		eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
+		// quick check of len before full compare for == or !=.
+		// memequal then tests equality up to length len.
+		if n.Op() == ir.OEQ {
+			// len(left) == len(right) && memequal(left, right, len)
+			r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
+		} else {
+			// len(left) != len(right) || !memequal(left, right, len)
+			eqlen.SetOp(ir.ONE)
+			r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
+		}
+	} else {
+		// sys_cmpstring(s1, s2) :: 0
+		r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
+		r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
+	}
+
+	return finishCompare(n, r, init)
+}
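+
+// For illustration (a sketch, not the exact generated code):
+//
+//	s == "ab"
+//
+// becomes roughly
+//
+//	len(s) == 2 && s[0] == 'a' && s[1] == 'b'
+//
+// (or a single two-byte load and compare on architectures that merge
+// loads), while comparisons against longer constants fall back to
+// EqString's length check plus runtime.memequal.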
+
+// The result of finishCompare MUST be assigned back to n, e.g.
+// 	n.Left = finishCompare(n.Left, r, init)
+func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
+	r = typecheck.Expr(r)
+	r = typecheck.Conv(r, n.Type())
+	r = walkExpr(r, init)
+	return r
+}
+
+func eqFor(t *types.Type) (n ir.Node, needsize bool) {
+	// Should only arrive here with large memory or
+	// a struct/array containing a non-memory field/element.
+	// Small memory is handled inline, and single non-memory
+	// is handled by walkCompare.
+	switch a, _ := types.AlgType(t); a {
+	case types.AMEM:
+		n := typecheck.LookupRuntime("memequal")
+		n = typecheck.SubstArgTypes(n, t, t)
+		return n, true
+	case types.ASPECIAL:
+		sym := reflectdata.TypeSymPrefix(".eq", t)
+		n := typecheck.NewName(sym)
+		ir.MarkFunc(n)
+		n.SetType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+			types.NewField(base.Pos, nil, types.NewPtr(t)),
+			types.NewField(base.Pos, nil, types.NewPtr(t)),
+		}, []*types.Field{
+			types.NewField(base.Pos, nil, types.Types[types.TBOOL]),
+		}))
+		return n, false
+	}
+	base.Fatalf("eqFor %v", t)
+	return nil, false
+}
+
+// brcom returns !(op).
+// For example, brcom(==) is !=.
+func brcom(op ir.Op) ir.Op {
+	switch op {
+	case ir.OEQ:
+		return ir.ONE
+	case ir.ONE:
+		return ir.OEQ
+	case ir.OLT:
+		return ir.OGE
+	case ir.OGT:
+		return ir.OLE
+	case ir.OLE:
+		return ir.OGT
+	case ir.OGE:
+		return ir.OLT
+	}
+	base.Fatalf("brcom: no com for %v\n", op)
+	return op
+}
+
+// brrev returns reverse(op).
+// For example, brrev(<) is >.
+func brrev(op ir.Op) ir.Op {
+	switch op {
+	case ir.OEQ:
+		return ir.OEQ
+	case ir.ONE:
+		return ir.ONE
+	case ir.OLT:
+		return ir.OGT
+	case ir.OGT:
+		return ir.OLT
+	case ir.OLE:
+		return ir.OGE
+	case ir.OGE:
+		return ir.OLE
+	}
+	base.Fatalf("brrev: no rev for %v\n", op)
+	return op
+}
+
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+	// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
+	if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
+		n = copyExpr(n, n.Type(), init)
+	}
+
+	return typecheck.Conv(n, t)
+}
+
+// canMergeLoads reports whether the backend optimization passes for
+// the current architecture can combine adjacent loads into a single
+// larger, possibly unaligned, load. Note that currently the
+// optimizations must be able to handle little endian byte order.
+func canMergeLoads() bool {
+	switch ssagen.Arch.LinkArch.Family {
+	case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
+		return true
+	case sys.PPC64:
+		// Load combining only supported on ppc64le.
+		return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
new file mode 100644
index 0000000..73442dc
--- /dev/null
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -0,0 +1,663 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/staticinit"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+// walkCompLit walks a composite literal node:
+// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
+func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
+	if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
+		n := n.(*ir.CompLitExpr) // not OPTRLIT
+		// n can be directly represented in the read-only data section.
+		// Make direct reference to the static data. See issue 12841.
+		vstat := readonlystaticname(n.Type())
+		fixedlit(inInitFunction, initKindStatic, n, vstat, init)
+		return typecheck.Expr(vstat)
+	}
+	var_ := typecheck.Temp(n.Type())
+	anylit(n, var_, init)
+	return var_
+}
+
+// initContext is the context in which static data is populated.
+// It is either in an init function or in any other function.
+// Static data populated in an init function will be written either
+// zero times (as a readonly, static data symbol) or
+// one time (during init function execution).
+// Either way, there is no opportunity for races or further modification,
+// so the data can be written to a (possibly readonly) data symbol.
+// Static data populated in any other function needs to be local to
+// that function to allow multiple instances of that function
+// to execute concurrently without clobbering each others' data.
+type initContext uint8
+
+const (
+	inInitFunction initContext = iota
+	inNonInitFunction
+)
+
+func (c initContext) String() string {
+	if c == inInitFunction {
+		return "inInitFunction"
+	}
+	return "inNonInitFunction"
+}
+
+// readonlystaticname returns a name backed by a read-only static data symbol.
+func readonlystaticname(t *types.Type) *ir.Name {
+	n := staticinit.StaticName(t)
+	n.MarkReadonly()
+	n.Linksym().Set(obj.AttrContentAddressable, true)
+	return n
+}
+
+func isSimpleName(nn ir.Node) bool {
+	if nn.Op() != ir.ONAME || ir.IsBlank(nn) {
+		return false
+	}
+	n := nn.(*ir.Name)
+	return n.OnStack()
+}
+
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, r))
+}
+
+// initGenType is a bitmap indicating the types of generation that will occur for a static value.
+type initGenType uint8
+
+const (
+	initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
+	initConst                           // contains some constant values, which may be written into data symbols
+)
+
+// getdyn calculates the initGenType for n.
+// If top is false, getdyn is recursing.
+func getdyn(n ir.Node, top bool) initGenType {
+	switch n.Op() {
+	default:
+		if ir.IsConstNode(n) {
+			return initConst
+		}
+		return initDynamic
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		if !top {
+			return initDynamic
+		}
+		if n.Len/4 > int64(len(n.List)) {
+			// <25% of entries have explicit values.
+			// As a very rough estimate, it takes 4 bytes of instructions
+			// to initialize 1 byte of result. So don't use a static
+			// initializer if the dynamic initialization code would be
+			// smaller than the static value.
+			// See issue 23780.
+			return initDynamic
+		}
+
+	case ir.OARRAYLIT, ir.OSTRUCTLIT:
+	}
+	lit := n.(*ir.CompLitExpr)
+
+	var mode initGenType
+	for _, n1 := range lit.List {
+		switch n1.Op() {
+		case ir.OKEY:
+			n1 = n1.(*ir.KeyExpr).Value
+		case ir.OSTRUCTKEY:
+			n1 = n1.(*ir.StructKeyExpr).Value
+		}
+		mode |= getdyn(n1, false)
+		if mode == initDynamic|initConst {
+			break
+		}
+	}
+	return mode
+}
+
+// isStaticCompositeLiteral reports whether n is a compile-time constant.
+func isStaticCompositeLiteral(n ir.Node) bool {
+	switch n.Op() {
+	case ir.OSLICELIT:
+		return false
+	case ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, r := range n.List {
+			if r.Op() == ir.OKEY {
+				r = r.(*ir.KeyExpr).Value
+			}
+			if !isStaticCompositeLiteral(r) {
+				return false
+			}
+		}
+		return true
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, r := range n.List {
+			r := r.(*ir.StructKeyExpr)
+			if !isStaticCompositeLiteral(r.Value) {
+				return false
+			}
+		}
+		return true
+	case ir.OLITERAL, ir.ONIL:
+		return true
+	case ir.OCONVIFACE:
+		// See staticassign's OCONVIFACE case for comments.
+		n := n.(*ir.ConvExpr)
+		val := ir.Node(n)
+		for val.Op() == ir.OCONVIFACE {
+			val = val.(*ir.ConvExpr).X
+		}
+		if val.Type().IsInterface() {
+			return val.Op() == ir.ONIL
+		}
+		if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL {
+			return true
+		}
+		return isStaticCompositeLiteral(val)
+	}
+	return false
+}
+
+// initKind is a kind of static initialization: static, dynamic, or local.
+// Static initialization represents literals and
+// literal components of composite literals.
+// Dynamic initialization represents non-literals and
+// non-literal components of composite literals.
+// LocalCode initialization represents initialization
+// that occurs purely in generated code local to the function of use.
+// Initialization code is sometimes generated in passes,
+// first static then dynamic.
+type initKind uint8
+
+const (
+	initKindStatic initKind = iota + 1
+	initKindDynamic
+	initKindLocalCode
+)
+
+// fixedlit handles struct, array, and slice literals.
+// TODO: expand documentation.
+func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+	isBlank := var_ == ir.BlankNode
+	var splitnode func(ir.Node) (a ir.Node, value ir.Node)
+	switch n.Op() {
+	case ir.OARRAYLIT, ir.OSLICELIT:
+		var k int64
+		splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+			if r.Op() == ir.OKEY {
+				kv := r.(*ir.KeyExpr)
+				k = typecheck.IndexConst(kv.Key)
+				if k < 0 {
+					base.Fatalf("fixedlit: invalid index %v", kv.Key)
+				}
+				r = kv.Value
+			}
+			a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(k))
+			k++
+			if isBlank {
+				return ir.BlankNode, r
+			}
+			return a, r
+		}
+	case ir.OSTRUCTLIT:
+		splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
+			r := rn.(*ir.StructKeyExpr)
+			if r.Field.IsBlank() || isBlank {
+				return ir.BlankNode, r.Value
+			}
+			ir.SetPos(r)
+			return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
+		}
+	default:
+		base.Fatalf("fixedlit bad op: %v", n.Op())
+	}
+
+	for _, r := range n.List {
+		a, value := splitnode(r)
+		if a == ir.BlankNode && !staticinit.AnySideEffects(value) {
+			// Discard.
+			continue
+		}
+
+		switch value.Op() {
+		case ir.OSLICELIT:
+			value := value.(*ir.CompLitExpr)
+			if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
+				slicelit(ctxt, value, a, init)
+				continue
+			}
+
+		case ir.OARRAYLIT, ir.OSTRUCTLIT:
+			value := value.(*ir.CompLitExpr)
+			fixedlit(ctxt, kind, value, a, init)
+			continue
+		}
+
+		islit := ir.IsConstNode(value)
+		if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
+			continue
+		}
+
+		// build list of assignments: var[index] = expr
+		ir.SetPos(a)
+		as := ir.NewAssignStmt(base.Pos, a, value)
+		as = typecheck.Stmt(as).(*ir.AssignStmt)
+		switch kind {
+		case initKindStatic:
+			genAsStatic(as)
+		case initKindDynamic, initKindLocalCode:
+			a = orderStmtInPlace(as, map[string][]*ir.Name{})
+			a = walkStmt(a)
+			init.Append(a)
+		default:
+			base.Fatalf("fixedlit: bad kind %d", kind)
+		}
+
+	}
+}
+
+func isSmallSliceLit(n *ir.CompLitExpr) bool {
+	if n.Op() != ir.OSLICELIT {
+		return false
+	}
+
+	return n.Type().Elem().Width == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Width
+}
+
+func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+	// make an array type corresponding to the number of elements we have
+	t := types.NewArray(n.Type().Elem(), n.Len)
+	types.CalcSize(t)
+
+	if ctxt == inNonInitFunction {
+		// put everything into static array
+		vstat := staticinit.StaticName(t)
+
+		fixedlit(ctxt, initKindStatic, n, vstat, init)
+		fixedlit(ctxt, initKindDynamic, n, vstat, init)
+
+		// copy static to slice
+		var_ = typecheck.AssignExpr(var_)
+		name, offset, ok := staticinit.StaticLoc(var_)
+		if !ok || name.Class != ir.PEXTERN {
+			base.Fatalf("slicelit: %v", var_)
+		}
+		staticdata.InitSlice(name, offset, vstat.Linksym(), t.NumElem())
+		return
+	}
+
+	// recipe for var = []t{...}
+	// 1. make a static array
+	//	var vstat [...]t
+	// 2. assign (data statements) the constant part
+	//	vstat = constpart{}
+	// 3. make an auto pointer to array and allocate heap to it
+	//	var vauto *[...]t = new([...]t)
+	// 4. copy the static array to the auto array
+	//	*vauto = vstat
+	// 5. for each dynamic part assign to the array
+	//	vauto[i] = dynamic part
+	// 6. assign slice of allocated heap to var
+	//	var = vauto[:]
+	//
+	// an optimization is done if there is no constant part
+	//	3. var vauto *[...]t = new([...]t)
+	//	5. vauto[i] = dynamic part
+	//	6. var = vauto[:]
+
+	// if the literal contains constants,
+	// make static initialized array (1),(2)
+	var vstat ir.Node
+
+	mode := getdyn(n, true)
+	if mode&initConst != 0 && !isSmallSliceLit(n) {
+		if ctxt == inInitFunction {
+			vstat = readonlystaticname(t)
+		} else {
+			vstat = staticinit.StaticName(t)
+		}
+		fixedlit(ctxt, initKindStatic, n, vstat, init)
+	}
+
+	// make new auto *array (3 declare)
+	vauto := typecheck.Temp(types.NewPtr(t))
+
+	// set auto to point at new temp or heap (3 assign)
+	var a ir.Node
+	if x := n.Prealloc; x != nil {
+		// temp allocated during order.go for dddarg
+		if !types.Identical(t, x.Type()) {
+			panic("dotdotdot base type does not match order's assigned type")
+		}
+		a = initStackTemp(init, x, vstat)
+	} else if n.Esc() == ir.EscNone {
+		a = initStackTemp(init, typecheck.Temp(t), vstat)
+	} else {
+		a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
+	}
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
+
+	if vstat != nil && n.Prealloc == nil && n.Esc() != ir.EscNone {
+		// If we allocated on the heap with ONEW, copy the static to the
+		// heap (4). We skip this for stack temporaries, because
+		// initStackTemp already handled the copy.
+		a = ir.NewStarExpr(base.Pos, vauto)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
+	}
+
+	// put dynamics into array (5)
+	var index int64
+	for _, value := range n.List {
+		if value.Op() == ir.OKEY {
+			kv := value.(*ir.KeyExpr)
+			index = typecheck.IndexConst(kv.Key)
+			if index < 0 {
+				base.Fatalf("slicelit: invalid index %v", kv.Key)
+			}
+			value = kv.Value
+		}
+		a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(index))
+		a.SetBounded(true)
+		index++
+
+		// TODO need to check bounds?
+
+		switch value.Op() {
+		case ir.OSLICELIT:
+			break
+
+		case ir.OARRAYLIT, ir.OSTRUCTLIT:
+			value := value.(*ir.CompLitExpr)
+			k := initKindDynamic
+			if vstat == nil {
+				// Generate both static and dynamic initializations.
+				// See issue #31987.
+				k = initKindLocalCode
+			}
+			fixedlit(ctxt, k, value, a, init)
+			continue
+		}
+
+		if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value
+			continue
+		}
+
+		// build list of vauto[c] = expr
+		ir.SetPos(value)
+		as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value))
+		as = orderStmtInPlace(as, map[string][]*ir.Name{})
+		as = walkStmt(as)
+		init.Append(as)
+	}
+
+	// make slice out of heap (6)
+	a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto, nil, nil, nil))
+
+	a = typecheck.Stmt(a)
+	a = orderStmtInPlace(a, map[string][]*ir.Name{})
+	a = walkStmt(a)
+	init.Append(a)
+}
+
+func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
+	// make the map var
+	a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
+	a.SetEsc(n.Esc())
+	a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(int64(len(n.List)))}
+	litas(m, a, init)
+
+	entries := n.List
+
+	// The order pass already removed any dynamic (runtime-computed) entries.
+	// All remaining entries are static. Double-check that.
+	for _, r := range entries {
+		r := r.(*ir.KeyExpr)
+		if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+			base.Fatalf("maplit: entry is not a literal: %v", r)
+		}
+	}
+
+	if len(entries) > 25 {
+		// For a large number of entries, put them in an array and loop.
+
+		// build types [count]Tindex and [count]Tvalue
+		tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+		te := types.NewArray(n.Type().Elem(), int64(len(entries)))
+
+		tk.SetNoalg(true)
+		te.SetNoalg(true)
+
+		types.CalcSize(tk)
+		types.CalcSize(te)
+
+		// make and initialize static arrays
+		vstatk := readonlystaticname(tk)
+		vstate := readonlystaticname(te)
+
+		datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+		datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+		for _, r := range entries {
+			r := r.(*ir.KeyExpr)
+			datak.List.Append(r.Key)
+			datae.List.Append(r.Value)
+		}
+		fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
+		fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
+
+		// loop adding structure elements to map
+		// for i = 0; i < len(vstatk); i++ {
+		//	map[vstatk[i]] = vstate[i]
+		// }
+		i := typecheck.Temp(types.Types[types.TINT])
+		rhs := ir.NewIndexExpr(base.Pos, vstate, i)
+		rhs.SetBounded(true)
+
+		kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
+		kidx.SetBounded(true)
+		lhs := ir.NewIndexExpr(base.Pos, m, kidx)
+
+		zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
+		cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem()))
+		incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+		body := ir.NewAssignStmt(base.Pos, lhs, rhs)
+
+		loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil)
+		loop.Body = []ir.Node{body}
+		*loop.PtrInit() = []ir.Node{zero}
+
+		appendWalkStmt(init, loop)
+		return
+	}
+	// For a small number of entries, just add them directly.
+
+	// Build list of var[c] = expr.
+	// Use temporaries so that mapassign1 can have addressable key, elem.
+	// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
+	tmpkey := typecheck.Temp(m.Type().Key())
+	tmpelem := typecheck.Temp(m.Type().Elem())
+
+	for _, r := range entries {
+		r := r.(*ir.KeyExpr)
+		index, elem := r.Key, r.Value
+
+		ir.SetPos(index)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index))
+
+		ir.SetPos(elem)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
+
+		ir.SetPos(tmpelem)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem))
+	}
+
+	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey))
+	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpelem))
+}
+
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
+	t := n.Type()
+	switch n.Op() {
+	default:
+		base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
+
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
+
+	case ir.OMETHEXPR:
+		n := n.(*ir.SelectorExpr)
+		anylit(n.FuncName(), var_, init)
+
+	case ir.OPTRLIT:
+		n := n.(*ir.AddrExpr)
+		if !t.IsPtr() {
+			base.Fatalf("anylit: not ptr")
+		}
+
+		var r ir.Node
+		if n.Prealloc != nil {
+			// n.Prealloc is stack temporary used as backing store.
+			r = initStackTemp(init, n.Prealloc, nil)
+		} else {
+			r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
+			r.SetEsc(n.Esc())
+		}
+		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r))
+
+		var_ = ir.NewStarExpr(base.Pos, var_)
+		var_ = typecheck.AssignExpr(var_)
+		anylit(n.X, var_, init)
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		if !t.IsStruct() && !t.IsArray() {
+			base.Fatalf("anylit: not struct/array")
+		}
+
+		if isSimpleName(var_) && len(n.List) > 4 {
+			// lay out static data
+			vstat := readonlystaticname(t)
+
+			ctxt := inInitFunction
+			if n.Op() == ir.OARRAYLIT {
+				ctxt = inNonInitFunction
+			}
+			fixedlit(ctxt, initKindStatic, n, vstat, init)
+
+			// copy static to var
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat))
+
+			// add expressions to automatic
+			fixedlit(inInitFunction, initKindDynamic, n, var_, init)
+			break
+		}
+
+		var components int64
+		if n.Op() == ir.OARRAYLIT {
+			components = t.NumElem()
+		} else {
+			components = int64(t.NumFields())
+		}
+		// initialization of an array or struct with unspecified components (missing fields or array elements)
+		if isSimpleName(var_) || int64(len(n.List)) < components {
+			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))
+		}
+
+		fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		slicelit(inInitFunction, n, var_, init)
+
+	case ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		if !t.IsMap() {
+			base.Fatalf("anylit: not map")
+		}
+		maplit(n, var_, init)
+	}
+}
+
+// oaslit handles special composite literal assignments.
+// It returns true if n's effects have been added to init,
+// in which case n should be dropped from the program by the caller.
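+// E.g., for x = T{...} where x is a simple name and the literal does not
+// reference x, the literal is initialized directly into x.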
+func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
+	if n.X == nil || n.Y == nil {
+		// not a special composite literal assignment
+		return false
+	}
+	if n.X.Type() == nil || n.Y.Type() == nil {
+		// not a special composite literal assignment
+		return false
+	}
+	if !isSimpleName(n.X) {
+		// not a special composite literal assignment
+		return false
+	}
+	x := n.X.(*ir.Name)
+	if !types.Identical(n.X.Type(), n.Y.Type()) {
+		// not a special composite literal assignment
+		return false
+	}
+
+	switch n.Y.Op() {
+	default:
+		// not a special composite literal assignment
+		return false
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+		if ir.Any(n.Y, func(y ir.Node) bool { return ir.Uses(y, x) }) {
+			// not a special composite literal assignment
+			return false
+		}
+		anylit(n.Y, n.X, init)
+	}
+
+	return true
+}
+
+func genAsStatic(as *ir.AssignStmt) {
+	if as.X.Type() == nil {
+		base.Fatalf("genAsStatic as.Left not typechecked")
+	}
+
+	name, offset, ok := staticinit.StaticLoc(as.X)
+	if !ok || (name.Class != ir.PEXTERN && as.X != ir.BlankNode) {
+		base.Fatalf("genAsStatic: lhs %v", as.X)
+	}
+
+	switch r := as.Y; r.Op() {
+	case ir.OLITERAL:
+		staticdata.InitConst(name, offset, r, int(r.Type().Width))
+		return
+	case ir.OMETHEXPR:
+		r := r.(*ir.SelectorExpr)
+		staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r.FuncName()))
+		return
+	case ir.ONAME:
+		r := r.(*ir.Name)
+		if r.Offset_ != 0 {
+			base.Fatalf("genAsStatic %+v", as)
+		}
+		if r.Class == ir.PFUNC {
+			staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r))
+			return
+		}
+	}
+	base.Fatalf("genAsStatic: rhs %v", as.Y)
+}
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
new file mode 100644
index 0000000..fa8e2c0
--- /dev/null
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -0,0 +1,490 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"encoding/binary"
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/sys"
+)
+
+// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
+func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
+		return n.X
+	}
+	if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
+		if n.Type().IsPtr() && n.X.Type().IsUnsafePtr() { // unsafe.Pointer to *T
+			return walkCheckPtrAlignment(n, init, nil)
+		}
+		if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
+			return walkCheckPtrArithmetic(n, init)
+		}
+	}
+	param, result := rtconvfn(n.X.Type(), n.Type())
+	if param == types.Txxx {
+		return n
+	}
+	fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
+	return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
+}
+
+// walkConvInterface walks an OCONVIFACE node.
+func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	fromType := n.X.Type()
+	toType := n.Type()
+
+	if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _())
+		reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
+	}
+
+	// typeword generates the type word of the interface value.
+	typeword := func() ir.Node {
+		if toType.IsEmptyInterface() {
+			return reflectdata.TypePtr(fromType)
+		}
+		return reflectdata.ITabAddr(fromType, toType)
+	}
+
+	// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
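+	// E.g., converting a pointer *T to an interface needs no allocation:
+	// the interface value is just the pair {type/itab word, pointer}.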
+	if types.IsDirectIface(fromType) {
+		l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X)
+		l.SetType(toType)
+		l.SetTypecheck(n.Typecheck())
+		return l
+	}
+
+	// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
+	// by using an existing addressable value identical to n.Left
+	// or creating one on the stack.
+	var value ir.Node
+	switch {
+	case fromType.Size() == 0:
+		// n.Left is zero-sized. Use zerobase.
+		cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246.
+		value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR])
+	case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
+		// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
+		// and staticuint64s[n.Left * 8 + 7] on big-endian.
+		n.X = cheapExpr(n.X, init)
+		// byteindex widens n.Left so that the multiplication doesn't overflow.
+		index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3))
+		if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
+			index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
+		}
+		// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+		// individual bytes.
+		staticuint64s := ir.NewLinksymExpr(base.Pos, ir.Syms.Staticuint64s, types.NewArray(types.Types[types.TUINT8], 256*8))
+		xe := ir.NewIndexExpr(base.Pos, staticuint64s, index)
+		xe.SetBounded(true)
+		value = xe
+	case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PEXTERN && n.X.(*ir.Name).Readonly():
+		// n.Left is a readonly global; use it directly.
+		value = n.X
+	case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024:
+		// n.Left does not escape. Use a stack temporary initialized to n.Left.
+		value = typecheck.Temp(fromType)
+		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X)))
+	}
+
+	if value != nil {
+		// Value is identical to n.Left.
+		// Construct the interface directly: {type/itab, &value}.
+		l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value)))
+		l.SetType(toType)
+		l.SetTypecheck(n.Typecheck())
+		return l
+	}
+
+	// Implement interface to empty interface conversion.
+	// tmp = i.itab
+	// if tmp != nil {
+	//    tmp = tmp.type
+	// }
+	// e = iface{tmp, i.data}
+	if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
+		// Evaluate the input interface.
+		c := typecheck.Temp(fromType)
+		init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
+
+		// Get the itab out of the interface.
+		tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
+		init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c))))
+
+		// Get the type out of the itab.
+		nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil)
+		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))}
+		init.Append(nif)
+
+		// Build the result.
+		e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
+		e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
+		e.SetTypecheck(1)
+		return e
+	}
+
+	fnname, needsaddr := convFuncName(fromType, toType)
+
+	if !needsaddr && !fromType.IsInterface() {
+		// Use a specialized conversion routine that only returns a data pointer.
+		// ptr = convT2X(val)
+		// e = iface{typ/tab, ptr}
+		fn := typecheck.LookupRuntime(fnname)
+		types.CalcSize(fromType)
+		fn = typecheck.SubstArgTypes(fn, fromType)
+		types.CalcSize(fn.Type())
+		call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+		call.Args = []ir.Node{n.X}
+		e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeExpr(walkExpr(typecheck.Expr(call), init), init))
+		e.SetType(toType)
+		e.SetTypecheck(1)
+		return e
+	}
+
+	var tab ir.Node
+	if fromType.IsInterface() {
+		// convI2I
+		tab = reflectdata.TypePtr(toType)
+	} else {
+		// convT2x
+		tab = typeword()
+	}
+
+	v := n.X
+	if needsaddr {
+		// Types of large or unknown size are passed by reference.
+		// Orderexpr arranged for n.Left to be a temporary for all
+		// the conversions it could see. Comparison of an interface
+		// with a non-interface, especially in a switch on interface value
+		// with non-interface cases, is not visible to order.stmt, so we
+		// have to fall back on allocating a temp here.
+		if !ir.IsAddressable(v) {
+			v = copyExpr(v, v.Type(), init)
+		}
+		v = typecheck.NodAddr(v)
+	}
+
+	types.CalcSize(fromType)
+	fn := typecheck.LookupRuntime(fnname)
+	fn = typecheck.SubstArgTypes(fn, fromType, toType)
+	types.CalcSize(fn.Type())
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+	call.Args = []ir.Node{tab, v}
+	return walkExpr(typecheck.Expr(call), init)
+}
+
+// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
+func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for string on stack.
+		a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+	}
+	if n.Op() == ir.ORUNES2STR {
+		// slicerunetostring(*[32]byte, []rune) string
+		return mkcall("slicerunetostring", n.Type(), init, a, n.X)
+	}
+	// slicebytetostring(*[32]byte, ptr *byte, n int) string
+	n.X = cheapExpr(n.X, init)
+	ptr, len := backingArrayPtrLen(n.X)
+	return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
+}
+
+// walkBytesToStringTemp walks an OBYTES2STRTMP node.
+func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	if !base.Flag.Cfg.Instrumenting {
+		// Let the backend handle OBYTES2STRTMP directly
+		// to avoid a function call to slicebytetostringtmp.
+		return n
+	}
+	// slicebytetostringtmp(ptr *byte, n int) string
+	n.X = cheapExpr(n.X, init)
+	ptr, len := backingArrayPtrLen(n.X)
+	return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
+}
+
+// walkRuneToString walks an ORUNESTR node.
+func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		a = stackBufAddr(4, types.Types[types.TUINT8])
+	}
+	// intstring(*[4]byte, rune)
+	return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
+}
+
+// walkStringToBytes walks an OSTR2BYTES node.
+func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	s := n.X
+	if ir.IsConst(s, constant.String) {
+		sc := ir.StringVal(s)
+
+		// Allocate a [n]byte of the right size.
+		t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+		var a ir.Node
+		if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
+			a = stackBufAddr(t.NumElem(), t.Elem())
+		} else {
+			types.CalcSize(t)
+			a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil)
+			a.SetType(types.NewPtr(t))
+			a.SetTypecheck(1)
+			a.MarkNonNil()
+		}
+		p := typecheck.Temp(t.PtrTo()) // *[n]byte
+		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
+
+		// Copy from the static string data to the [n]byte.
+		if len(sc) > 0 {
+			as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo())))
+			appendWalkStmt(init, as)
+		}
+
+		// Slice the [n]byte to a []byte.
+		slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p, nil, nil, nil)
+		slice.SetType(n.Type())
+		slice.SetTypecheck(1)
+		return walkExpr(slice, init)
+	}
+
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for slice on stack.
+		a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+	}
+	// stringtoslicebyte(*[32]byte, string) []byte
+	return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
+}
+
+// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
+func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	// []byte(string) conversion that creates a slice
+	// referring to the actual string bytes.
+	// This conversion is handled later by the backend and
+	// is only for use by internal compiler optimizations
+	// that know that the slice won't be mutated.
+	// The only such case today is:
+	// for i, c := range []byte(string)
+	n.X = walkExpr(n.X, init)
+	return n
+}
+
+// walkStringToRunes walks an OSTR2RUNES node.
+func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	a := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		// Create temporary buffer for slice on stack.
+		a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32])
+	}
+	// stringtoslicerune(*[32]rune, string) []rune
+	return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
+}
+
+// convFuncName builds the runtime function name for interface conversion.
+// It also reports whether the function expects the data by address.
+// Not all names are possible. For example, we never generate convE2E or convE2I.
+func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
+	tkind := to.Tie()
+	switch from.Tie() {
+	case 'I':
+		if tkind == 'I' {
+			return "convI2I", false
+		}
+	case 'T':
+		switch {
+		case from.Size() == 2 && from.Align == 2:
+			return "convT16", false
+		case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
+			return "convT32", false
+		case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
+			return "convT64", false
+		}
+		if sc := from.SoleComponent(); sc != nil {
+			switch {
+			case sc.IsString():
+				return "convTstring", false
+			case sc.IsSlice():
+				return "convTslice", false
+			}
+		}
+
+		switch tkind {
+		case 'E':
+			if !from.HasPointers() {
+				return "convT2Enoptr", true
+			}
+			return "convT2E", true
+		case 'I':
+			if !from.HasPointers() {
+				return "convT2Inoptr", true
+			}
+			return "convT2I", true
+		}
+	}
+	base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
+	panic("unreachable")
+}
+
+// rtconvfn returns the parameter and result types that will be used by a
+// runtime function to convert from type src to type dst. The runtime function
+// name can be derived from the names of the returned types.
+//
+// If no such function is necessary, it returns (Txxx, Txxx).
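+//
+// For example, on ARM, converting float64 to uint64 returns
+// (TFLOAT64, TUINT64), which walkConv turns into a call to the
+// runtime helper float64touint64.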
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
+	if ssagen.Arch.SoftFloat {
+		return types.Txxx, types.Txxx
+	}
+
+	switch ssagen.Arch.LinkArch.Family {
+	case sys.ARM, sys.MIPS:
+		if src.IsFloat() {
+			switch dst.Kind() {
+			case types.TINT64, types.TUINT64:
+				return types.TFLOAT64, dst.Kind()
+			}
+		}
+		if dst.IsFloat() {
+			switch src.Kind() {
+			case types.TINT64, types.TUINT64:
+				return src.Kind(), types.TFLOAT64
+			}
+		}
+
+	case sys.I386:
+		if src.IsFloat() {
+			switch dst.Kind() {
+			case types.TINT64, types.TUINT64:
+				return types.TFLOAT64, dst.Kind()
+			case types.TUINT32, types.TUINT, types.TUINTPTR:
+				return types.TFLOAT64, types.TUINT32
+			}
+		}
+		if dst.IsFloat() {
+			switch src.Kind() {
+			case types.TINT64, types.TUINT64:
+				return src.Kind(), types.TFLOAT64
+			case types.TUINT32, types.TUINT, types.TUINTPTR:
+				return types.TUINT32, types.TFLOAT64
+			}
+		}
+	}
+	return types.Txxx, types.Txxx
+}
+
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
+// which is forbidden in user code.
+func byteindex(n ir.Node) ir.Node {
+	// We cannot convert from bool to int directly.
+	// While converting from int8 to int is possible, it would yield
+	// the wrong result for negative values.
+	// Reinterpreting the value as an unsigned byte solves both cases.
+	if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+		n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+		n.SetType(types.Types[types.TUINT8])
+		n.SetTypecheck(1)
+	}
+	n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+	n.SetType(types.Types[types.TINT])
+	n.SetTypecheck(1)
+	return n
+}
+
+func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Node {
+	if !n.Type().IsPtr() {
+		base.Fatalf("expected pointer type: %v", n.Type())
+	}
+	elem := n.Type().Elem()
+	if count != nil {
+		if !elem.IsArray() {
+			base.Fatalf("expected array type: %v", elem)
+		}
+		elem = elem.Elem()
+	}
+
+	size := elem.Size()
+	if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
+		return n
+	}
+
+	if count == nil {
+		count = ir.NewInt(1)
+	}
+
+	n.X = cheapExpr(n.X, init)
+	init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
+	return n
+}
+
+func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+	// Calling cheapExpr(n, init) below leads to a recursive call to
+	// walkExpr, which leads us back here again. Use n.CheckPtr to
+	// prevent infinite loops.
+	if n.CheckPtr() {
+		return n
+	}
+	n.SetCheckPtr(true)
+	defer n.SetCheckPtr(false)
+
+	// TODO(mdempsky): Make stricter. We only need to exempt
+	// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
+	switch n.X.Op() {
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+		return n
+	}
+
+	if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) {
+		return n
+	}
+
+	// Find original unsafe.Pointer operands involved in this
+	// arithmetic expression.
+	//
+	// "It is valid both to add and to subtract offsets from a
+	// pointer in this way. It is also valid to use &^ to round
+	// pointers, usually for alignment."
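+	// E.g., in unsafe.Pointer(uintptr(p) + off), p is recorded as an original pointer.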
+	var originals []ir.Node
+	var walk func(n ir.Node)
+	walk = func(n ir.Node) {
+		switch n.Op() {
+		case ir.OADD:
+			n := n.(*ir.BinaryExpr)
+			walk(n.X)
+			walk(n.Y)
+		case ir.OSUB, ir.OANDNOT:
+			n := n.(*ir.BinaryExpr)
+			walk(n.X)
+		case ir.OCONVNOP:
+			n := n.(*ir.ConvExpr)
+			if n.X.Type().IsUnsafePtr() {
+				n.X = cheapExpr(n.X, init)
+				originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]))
+			}
+		}
+	}
+	walk(n.X)
+
+	cheap := cheapExpr(n, init)
+
+	slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+	slice.SetEsc(ir.EscNone)
+
+	init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
+	// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
+	// the backing store for multiple calls to checkptrArithmetic.
+
+	return cheap
+}
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
new file mode 100644
index 0000000..7b65db5
--- /dev/null
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -0,0 +1,966 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"fmt"
+	"go/constant"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/staticdata"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+)
+
+// The result of walkExpr MUST be assigned back to n, e.g.
+// 	n.Left = walkExpr(n.Left, init)
+func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	if n == nil {
+		return n
+	}
+
+	if n, ok := n.(ir.InitNode); ok && init == n.PtrInit() {
+		// not okay to use n->ninit when walking n,
+		// because we might replace n with some other node
+		// and would lose the init list.
+		base.Fatalf("walkExpr init == &n->ninit")
+	}
+
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(ir.TakeInit(n)...)
+	}
+
+	lno := ir.SetPos(n)
+
+	if base.Flag.LowerW > 1 {
+		ir.Dump("before walk expr", n)
+	}
+
+	if n.Typecheck() != 1 {
+		base.Fatalf("missed typecheck: %+v", n)
+	}
+
+	if n.Type().IsUntyped() {
+		base.Fatalf("expression has untyped type: %+v", n)
+	}
+
+	n = walkExpr1(n, init)
+
+	// Eagerly compute sizes of all expressions for the back end.
+	if typ := n.Type(); typ != nil && typ.Kind() != types.TBLANK && !typ.IsFuncArgStruct() {
+		types.CheckSize(typ)
+	}
+	if n, ok := n.(*ir.Name); ok && n.Heapaddr != nil {
+		types.CheckSize(n.Heapaddr.Type())
+	}
+	if ir.IsConst(n, constant.String) {
+		// Emit string symbol now to avoid emitting
+		// any concurrently during the backend.
+		_ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val()))
+	}
+
+	if base.Flag.LowerW != 0 && n != nil {
+		ir.Dump("after walk expr", n)
+	}
+
+	base.Pos = lno
+	return n
+}
+
+func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
+	switch n.Op() {
+	default:
+		ir.Dump("walk", n)
+		base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op())
+		panic("unreachable")
+
+	case ir.ONONAME, ir.OGETG:
+		return n
+
+	case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
+		// TODO(mdempsky): Just return n; see discussion on CL 38655.
+		// Perhaps refactor to use Node.mayBeShared for these instead.
+		// If these return early, make sure to still call
+		// StringSym for constant strings.
+		return n
+
+	case ir.OMETHEXPR:
+		// TODO(mdempsky): Do this right after type checking.
+		n := n.(*ir.SelectorExpr)
+		return n.FuncName()
+
+	case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
+		n := n.(*ir.UnaryExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.ODOTMETH, ir.ODOTINTER:
+		n := n.(*ir.SelectorExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		n.X = walkExpr(n.X, init)
+		return n
+
+	case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		n.X = walkExpr(n.X, init)
+		n.Y = walkExpr(n.Y, init)
+		return n
+
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		return walkDot(n, init)
+
+	case ir.ODOTTYPE, ir.ODOTTYPE2:
+		n := n.(*ir.TypeAssertExpr)
+		return walkDotType(n, init)
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		return walkLenCap(n, init)
+
+	case ir.OCOMPLEX:
+		n := n.(*ir.BinaryExpr)
+		n.X = walkExpr(n.X, init)
+		n.Y = walkExpr(n.Y, init)
+		return n
+
+	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		return walkCompare(n, init)
+
+	case ir.OANDAND, ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		return walkLogical(n, init)
+
+	case ir.OPRINT, ir.OPRINTN:
+		return walkPrint(n.(*ir.CallExpr), init)
+
+	case ir.OPANIC:
+		n := n.(*ir.UnaryExpr)
+		return mkcall("gopanic", nil, init, n.X)
+
+	case ir.ORECOVER:
+		n := n.(*ir.CallExpr)
+		return mkcall("gorecover", n.Type(), init, typecheck.NodAddr(ir.RegFP))
+
+	case ir.OCFUNC:
+		return n
+
+	case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		return walkCall(n, init)
+
+	case ir.OAS, ir.OASOP:
+		return walkAssign(init, n)
+
+	case ir.OAS2:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignList(init, n)
+
+	// a,b,... = fn()
+	case ir.OAS2FUNC:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignFunc(init, n)
+
+	// x, y = <-c
+	// order.stmt made sure x is addressable or blank.
+	case ir.OAS2RECV:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignRecv(init, n)
+
+	// a,b = m[i]
+	case ir.OAS2MAPR:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignMapRead(init, n)
+
+	case ir.ODELETE:
+		n := n.(*ir.CallExpr)
+		return walkDelete(init, n)
+
+	case ir.OAS2DOTTYPE:
+		n := n.(*ir.AssignListStmt)
+		return walkAssignDotType(n, init)
+
+	case ir.OCONVIFACE:
+		n := n.(*ir.ConvExpr)
+		return walkConvInterface(n, init)
+
+	case ir.OCONV, ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		return walkConv(n, init)
+
+	case ir.ODIV, ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		return walkDivMod(n, init)
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		return walkIndex(n, init)
+
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		return walkIndexMap(n, init)
+
+	case ir.ORECV:
+		base.Fatalf("walkExpr ORECV") // should see inside OAS only
+		panic("unreachable")
+
+	case ir.OSLICEHEADER:
+		n := n.(*ir.SliceHeaderExpr)
+		return walkSliceHeader(n, init)
+
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+		return walkSlice(n, init)
+
+	case ir.ONEW:
+		n := n.(*ir.UnaryExpr)
+		return walkNew(n, init)
+
+	case ir.OADDSTR:
+		return walkAddString(n.(*ir.AddStringExpr), init)
+
+	case ir.OAPPEND:
+		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
+		base.Fatalf("append outside assignment")
+		panic("unreachable")
+
+	case ir.OCOPY:
+		return walkCopy(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime)
+
+	case ir.OCLOSE:
+		n := n.(*ir.UnaryExpr)
+		return walkClose(n, init)
+
+	case ir.OMAKECHAN:
+		n := n.(*ir.MakeExpr)
+		return walkMakeChan(n, init)
+
+	case ir.OMAKEMAP:
+		n := n.(*ir.MakeExpr)
+		return walkMakeMap(n, init)
+
+	case ir.OMAKESLICE:
+		n := n.(*ir.MakeExpr)
+		return walkMakeSlice(n, init)
+
+	case ir.OMAKESLICECOPY:
+		n := n.(*ir.MakeExpr)
+		return walkMakeSliceCopy(n, init)
+
+	case ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		return walkRuneToString(n, init)
+
+	case ir.OBYTES2STR, ir.ORUNES2STR:
+		n := n.(*ir.ConvExpr)
+		return walkBytesRunesToString(n, init)
+
+	case ir.OBYTES2STRTMP:
+		n := n.(*ir.ConvExpr)
+		return walkBytesToStringTemp(n, init)
+
+	case ir.OSTR2BYTES:
+		n := n.(*ir.ConvExpr)
+		return walkStringToBytes(n, init)
+
+	case ir.OSTR2BYTESTMP:
+		n := n.(*ir.ConvExpr)
+		return walkStringToBytesTemp(n, init)
+
+	case ir.OSTR2RUNES:
+		n := n.(*ir.ConvExpr)
+		return walkStringToRunes(n, init)
+
+	case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+		return walkCompLit(n, init)
+
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		return walkSend(n, init)
+
+	case ir.OCLOSURE:
+		return walkClosure(n.(*ir.ClosureExpr), init)
+
+	case ir.OCALLPART:
+		return walkCallPart(n.(*ir.SelectorExpr), init)
+	}
+
+	// No return! Each case must return (or panic),
+	// to avoid confusion about what gets returned
+	// in the presence of type assertions.
+}
+
+// walkExprList walks the whole tree of the body of an
+// expression or simple statement.
+// The types of expressions are calculated,
+// compile-time constants are evaluated,
+// and complex side effects (such as statements) are appended to init.
+func walkExprList(s []ir.Node, init *ir.Nodes) {
+	for i := range s {
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+func walkExprListCheap(s []ir.Node, init *ir.Nodes) {
+	for i, n := range s {
+		s[i] = cheapExpr(n, init)
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+func walkExprListSafe(s []ir.Node, init *ir.Nodes) {
+	for i, n := range s {
+		s[i] = safeExpr(n, init)
+		s[i] = walkExpr(s[i], init)
+	}
+}
+
+// return side-effect free and cheap n, appending side effects to init.
+// result may not be assignable.
+func cheapExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return n
+	}
+
+	return copyExpr(n, n.Type(), init)
+}
+
+// return side effect-free n, appending side effects to init.
+// result is assignable if n is.
+func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
+	if n == nil {
+		return nil
+	}
+
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(ir.TakeInit(n)...)
+	}
+
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
+		return n
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		l := safeExpr(n.X, init)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.UnaryExpr)
+		a.X = l
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		l := safeExpr(n.X, init)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.SelectorExpr)
+		a.X = l
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		l := safeExpr(n.X, init)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.StarExpr)
+		a.X = l
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.OINDEX, ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		l := safeExpr(n.X, init)
+		r := safeExpr(n.Index, init)
+		if l == n.X && r == n.Index {
+			return n
+		}
+		a := ir.Copy(n).(*ir.IndexExpr)
+		a.X = l
+		a.Index = r
+		return walkExpr(typecheck.Expr(a), init)
+
+	case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		if isStaticCompositeLiteral(n) {
+			return n
+		}
+	}
+
+	// make a copy; must not be used as an lvalue
+	if ir.IsAddressable(n) {
+		base.Fatalf("missing lvalue case in safeExpr: %v", n)
+	}
+	return cheapExpr(n, init)
+}
+
+func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+	l := typecheck.Temp(t)
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
+	return l
+}
+
+func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
+	c := len(n.List)
+
+	if c < 2 {
+		base.Fatalf("walkAddString count %d too small", c)
+	}
+
+	buf := typecheck.NodNil()
+	if n.Esc() == ir.EscNone {
+		sz := int64(0)
+		for _, n1 := range n.List {
+			if n1.Op() == ir.OLITERAL {
+				sz += int64(len(ir.StringVal(n1)))
+			}
+		}
+
+		// Don't allocate the buffer if the result won't fit.
+		if sz < tmpstringbufsize {
+			// Create temporary buffer for result string on stack.
+			buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+		}
+	}
+
+	// build list of string arguments
+	args := []ir.Node{buf}
+	for _, n2 := range n.List {
+		args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING]))
+	}
+
+	var fn string
+	if c <= 5 {
+		// small numbers of strings use direct runtime helpers.
+		// note: order.expr knows this cutoff too.
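+		// E.g., a+b+c compiles to concatstring3(buf, a, b, c).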
+		fn = fmt.Sprintf("concatstring%d", c)
+	} else {
+		// large numbers of strings are passed to the runtime as a slice.
+		fn = "concatstrings"
+
+		t := types.NewSlice(types.Types[types.TSTRING])
+		// args[1:] to skip buf arg
+		slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:])
+		slice.Prealloc = n.Prealloc
+		args = []ir.Node{buf, slice}
+		slice.SetEsc(ir.EscNone)
+	}
+
+	cat := typecheck.LookupRuntime(fn)
+	r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil)
+	r.Args = args
+	r1 := typecheck.Expr(r)
+	r1 = walkExpr(r1, init)
+	r1.SetType(n.Type())
+
+	return r1
+}
+
+// walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node.
+func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	if n.Op() == ir.OCALLINTER || n.Op() == ir.OCALLMETH {
+		// We expect both interface call reflect.Type.Method and concrete
+		// call reflect.(*rtype).Method.
+		usemethod(n)
+	}
+	if n.Op() == ir.OCALLINTER {
+		reflectdata.MarkUsedIfaceMethod(n)
+	}
+
+	if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
+		directClosureCall(n)
+	}
+
+	walkCall1(n, init)
+	return n
+}
+
+func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
+	if n.Walked() {
+		return // already walked
+	}
+	n.SetWalked(true)
+
+	// If this is a method call t.M(...),
+	// rewrite into a function call T.M(t, ...).
+	// TODO(mdempsky): Do this right after type checking.
+	if n.Op() == ir.OCALLMETH {
+		withRecv := make([]ir.Node, len(n.Args)+1)
+		dot := n.X.(*ir.SelectorExpr)
+		withRecv[0] = dot.X
+		copy(withRecv[1:], n.Args)
+		n.Args = withRecv
+
+		dot = ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym)
+
+		n.SetOp(ir.OCALLFUNC)
+		n.X = typecheck.Expr(dot)
+	}
+
+	args := n.Args
+	params := n.X.Type().Params()
+
+	n.X = walkExpr(n.X, init)
+	walkExprList(args, init)
+
+	for i, arg := range args {
+		// Validate argument and parameter types match.
+		param := params.Field(i)
+		if !types.Identical(arg.Type(), param.Type) {
+			base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type)
+		}
+
+		// For any argument whose evaluation might require a function call,
+		// store that argument into a temporary variable,
+		// to prevent those calls from clobbering arguments already on the stack.
+		if mayCall(arg) {
+			// assignment of arg to Temp
+			tmp := typecheck.Temp(param.Type)
+			init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init))
+			// replace arg with temp
+			args[i] = tmp
+		}
+	}
+
+	n.Args = args
+}
+
+// walkDivMod walks an ODIV or OMOD node.
+func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	n.Y = walkExpr(n.Y, init)
+
+	// rewrite complex div into function call.
+	et := n.X.Type().Kind()
+
+	if types.IsComplex[et] && n.Op() == ir.ODIV {
+		t := n.Type()
+		call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128]))
+		return typecheck.Conv(call, t)
+	}
+
+	// Nothing to do for float divisions.
+	if types.IsFloat[et] {
+		return n
+	}
+
+	// rewrite 64-bit div and mod on 32-bit architectures.
+	// TODO: Remove this code once we can introduce
+	// runtime calls late in SSA processing.
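+	// E.g., on a 32-bit target, x/y with int64 operands becomes int64div(x, y).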
+	if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) {
+		if n.Y.Op() == ir.OLITERAL {
+			// Leave div/mod by constant powers of 2 or small 16-bit constants.
+			// The SSA backend will handle those.
+			switch et {
+			case types.TINT64:
+				c := ir.Int64Val(n.Y)
+				if c < 0 {
+					c = -c
+				}
+				if c != 0 && c&(c-1) == 0 {
+					return n
+				}
+			case types.TUINT64:
+				c := ir.Uint64Val(n.Y)
+				if c < 1<<16 {
+					return n
+				}
+				if c != 0 && c&(c-1) == 0 {
+					return n
+				}
+			}
+		}
+		var fn string
+		if et == types.TINT64 {
+			fn = "int64"
+		} else {
+			fn = "uint64"
+		}
+		if n.Op() == ir.ODIV {
+			fn += "div"
+		} else {
+			fn += "mod"
+		}
+		return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et]))
+	}
+	return n
+}
+
+// walkDot walks an ODOT or ODOTPTR node.
+func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+	usefield(n)
+	n.X = walkExpr(n.X, init)
+	return n
+}
+
+// walkDotType walks an ODOTTYPE or ODOTTYPE2 node.
+func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+	// Set up interface type addresses for back end.
+	if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
+		n.Itab = reflectdata.ITabAddr(n.Type(), n.X.Type())
+	}
+	return n
+}
+
+// walkIndex walks an OINDEX node.
+func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	// Save the original node for bounds-check elision;
+	// if it is an ODIV/OMOD, walk might rewrite it.
+	r := n.Index
+
+	n.Index = walkExpr(n.Index, init)
+
+	// if range of type cannot exceed static array bound,
+	// disable bounds check.
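+	// E.g., a[i%8] on a [8]T array needs no bounds check when i is unsigned.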
+	if n.Bounded() {
+		return n
+	}
+	t := n.X.Type()
+	if t != nil && t.IsPtr() {
+		t = t.Elem()
+	}
+	if t.IsArray() {
+		n.SetBounded(bounded(r, t.NumElem()))
+		if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+			base.Warn("index bounds check elided")
+		}
+		if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+			base.Errorf("index out of bounds")
+		}
+	} else if ir.IsConst(n.X, constant.String) {
+		n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X)))))
+		if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+			base.Warn("index bounds check elided")
+		}
+		if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+			base.Errorf("index out of bounds")
+		}
+	}
+
+	if ir.IsConst(n.Index, constant.Int) {
+		if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) {
+			base.Errorf("index out of bounds")
+		}
+	}
+	return n
+}
+
+// walkIndexMap walks an OINDEXMAP node.
+func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+	// Replace m[k] with *map{access1,assign}(maptype, m, &k)
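+	// A read of m[k] becomes *mapaccess1(maptype, m, &k), while m[k] as an
+	// assignment target becomes *mapassign(maptype, m, &k).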
+	n.X = walkExpr(n.X, init)
+	n.Index = walkExpr(n.Index, init)
+	map_ := n.X
+	key := n.Index
+	t := map_.Type()
+	var call *ir.CallExpr
+	if n.Assigned {
+		// This m[k] expression is on the left-hand side of an assignment.
+		fast := mapfast(t)
+		if fast == mapslow {
+			// standard version takes key by reference.
+			// order.expr made sure key is addressable.
+			key = typecheck.NodAddr(key)
+		}
+		call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
+	} else {
+		// m[k] is not the target of an assignment.
+		fast := mapfast(t)
+		if fast == mapslow {
+			// standard version takes key by reference.
+			// order.expr made sure key is addressable.
+			key = typecheck.NodAddr(key)
+		}
+
+		if w := t.Elem().Width; w <= zeroValSize {
+			call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
+		} else {
+			z := reflectdata.ZeroAddr(w)
+			call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
+		}
+	}
+	call.SetType(types.NewPtr(t.Elem()))
+	call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+	star := ir.NewStarExpr(base.Pos, call)
+	star.SetType(t.Elem())
+	star.SetTypecheck(1)
+	return star
+}
+
+// walkLogical walks an OANDAND or OOROR node.
+func walkLogical(n *ir.LogicalExpr, init *ir.Nodes) ir.Node {
+	n.X = walkExpr(n.X, init)
+
+	// cannot put side effects from n.Right on init,
+	// because they cannot run before n.Left is checked.
+	// save elsewhere and store on the eventual n.Right.
+	var ll ir.Nodes
+
+	n.Y = walkExpr(n.Y, &ll)
+	n.Y = ir.InitExpr(ll, n.Y)
+	return n
+}
+
+// walkSend walks an OSEND node.
+func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node {
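+	// c <- v lowers to chansend1(c, &v); the runtime takes the element by address.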
+	n1 := n.Value
+	n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send")
+	n1 = walkExpr(n1, init)
+	n1 = typecheck.NodAddr(n1)
+	return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1)
+}
+
+// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node.
+func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
+
+	checkSlice := ir.ShouldCheckPtr(ir.CurFunc, 1) && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
+	if checkSlice {
+		conv := n.X.(*ir.ConvExpr)
+		conv.X = walkExpr(conv.X, init)
+	} else {
+		n.X = walkExpr(n.X, init)
+	}
+
+	n.Low = walkExpr(n.Low, init)
+	if n.Low != nil && ir.IsZero(n.Low) {
+		// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
+		n.Low = nil
+	}
+	n.High = walkExpr(n.High, init)
+	n.Max = walkExpr(n.Max, init)
+	if checkSlice {
+		n.X = walkCheckPtrAlignment(n.X.(*ir.ConvExpr), init, n.Max)
+	}
+
+	if n.Op().IsSlice3() {
+		if n.Max != nil && n.Max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, n.Max.(*ir.UnaryExpr).X) {
+			// Reduce x[i:j:cap(x)] to x[i:j].
+			if n.Op() == ir.OSLICE3 {
+				n.SetOp(ir.OSLICE)
+			} else {
+				n.SetOp(ir.OSLICEARR)
+			}
+			return reduceSlice(n)
+		}
+		return n
+	}
+	return reduceSlice(n)
+}
+
+// walkSliceHeader walks an OSLICEHEADER node.
+func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node {
+	n.Ptr = walkExpr(n.Ptr, init)
+	n.Len = walkExpr(n.Len, init)
+	n.Cap = walkExpr(n.Cap, init)
+	return n
+}
+
+// TODO(josharian): combine this with its caller and simplify
+func reduceSlice(n *ir.SliceExpr) ir.Node {
+	if n.High != nil && n.High.Op() == ir.OLEN && ir.SameSafeExpr(n.X, n.High.(*ir.UnaryExpr).X) {
+		// Reduce x[i:len(x)] to x[i:].
+		n.High = nil
+	}
+	if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && n.Low == nil && n.High == nil {
+		// Reduce x[:] to x.
+		if base.Debug.Slice > 0 {
+			base.Warn("slice: omit slice operation")
+		}
+		return n.X
+	}
+	return n
+}
+
+// bounded reports whether the integer value n must be in the range [0, max).
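+// For example, i%8 and i&7 for unsigned i are always in [0, 8).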
+func bounded(n ir.Node, max int64) bool {
+	if n.Type() == nil || !n.Type().IsInteger() {
+		return false
+	}
+
+	sign := n.Type().IsSigned()
+	bits := int32(8 * n.Type().Width)
+
+	if ir.IsSmallIntConst(n) {
+		v := ir.Int64Val(n)
+		return 0 <= v && v < max
+	}
+
+	switch n.Op() {
+	case ir.OAND, ir.OANDNOT:
+		n := n.(*ir.BinaryExpr)
+		v := int64(-1)
+		switch {
+		case ir.IsSmallIntConst(n.X):
+			v = ir.Int64Val(n.X)
+		case ir.IsSmallIntConst(n.Y):
+			v = ir.Int64Val(n.Y)
+			if n.Op() == ir.OANDNOT {
+				v = ^v
+				if !sign {
+					v &= 1<<uint(bits) - 1
+				}
+			}
+		}
+		if 0 <= v && v < max {
+			return true
+		}
+
+	case ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			if 0 <= v && v <= max {
+				return true
+			}
+		}
+
+	case ir.ODIV:
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			for bits > 0 && v >= 2 {
+				bits--
+				v >>= 1
+			}
+		}
+
+	case ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		if !sign && ir.IsSmallIntConst(n.Y) {
+			v := ir.Int64Val(n.Y)
+			if v > int64(bits) {
+				return true
+			}
+			bits -= int32(v)
+		}
+	}
+
+	if !sign && bits <= 62 && 1<<uint(bits) <= max {
+		return true
+	}
+
+	return false
+}
+
+// usemethod checks interface method calls for uses of reflect.Type.Method.
+func usemethod(n *ir.CallExpr) {
+	t := n.X.Type()
+
+	// Looking for either of:
+	//	Method(int) reflect.Method
+	//	MethodByName(string) (reflect.Method, bool)
+	//
+	// TODO(crawshaw): improve precision of match by working out
+	//                 how to check the method name.
+	if n := t.NumParams(); n != 1 {
+		return
+	}
+	if n := t.NumResults(); n != 1 && n != 2 {
+		return
+	}
+	p0 := t.Params().Field(0)
+	res0 := t.Results().Field(0)
+	var res1 *types.Field
+	if t.NumResults() == 2 {
+		res1 = t.Results().Field(1)
+	}
+
+	if res1 == nil {
+		if p0.Type.Kind() != types.TINT {
+			return
+		}
+	} else {
+		if !p0.Type.IsString() {
+			return
+		}
+		if !res1.Type.IsBoolean() {
+			return
+		}
+	}
+
+	// Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
+	// Those functions may be alive via the itab, which should not cause all
+	// methods to be considered alive. We only want to mark their callers.
+	if base.Ctxt.Pkgpath == "reflect" {
+		switch ir.CurFunc.Nname.Sym().Name { // TODO: is there a better way than hardcoding the names?
+		case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+			return
+		}
+	}
+
+	// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
+	//       (including global variables such as numImports - was issue #19028).
+	// Also need to check for reflect package itself (see Issue #38515).
+	if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
+		ir.CurFunc.SetReflectMethod(true)
+		// The LSym is initialized at this point. We need to set the attribute on the LSym.
+		ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
+	}
+}
+
+func usefield(n *ir.SelectorExpr) {
+	if objabi.Fieldtrack_enabled == 0 {
+		return
+	}
+
+	switch n.Op() {
+	default:
+		base.Fatalf("usefield %v", n.Op())
+
+	case ir.ODOT, ir.ODOTPTR:
+		break
+	}
+
+	field := n.Selection
+	if field == nil {
+		base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel)
+	}
+	if field.Sym != n.Sel {
+		base.Fatalf("field inconsistency: %v != %v", field.Sym, n.Sel)
+	}
+	if !strings.Contains(field.Note, "go:\"track\"") {
+		return
+	}
+
+	outer := n.X.Type()
+	if outer.IsPtr() {
+		outer = outer.Elem()
+	}
+	if outer.Sym() == nil {
+		base.Errorf("tracked field must be in named struct type")
+	}
+	if !types.IsExported(field.Sym.Name) {
+		base.Errorf("tracked field must be exported (upper case)")
+	}
+
+	sym := reflectdata.TrackSym(outer, field)
+	if ir.CurFunc.FieldTrack == nil {
+		ir.CurFunc.FieldTrack = make(map[*obj.LSym]struct{})
+	}
+	ir.CurFunc.FieldTrack[sym] = struct{}{}
+}
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
new file mode 100644
index 0000000..fe0b6a0
--- /dev/null
+++ b/src/cmd/compile/internal/walk/order.go
@@ -0,0 +1,1437 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"fmt"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/escape"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/staticinit"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation. Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
+
+// TODO(rsc): The temporary introduction during multiple assignments
+// should be moved into this file, so that the temporaries can be cleaned
+// and so that conversions implicit in the OAS2FUNC and OAS2RECV
+// nodes can be made explicit and then have their temporaries cleaned.
+
+// TODO(rsc): Goto and multilevel break/continue can jump over
+// inserted VARKILL annotations. Work out a way to handle these.
+// The current implementation is safe, in that it will execute correctly.
+// But it won't reuse temporaries as aggressively as it might, and
+// it can result in unnecessary zeroing of those variables in the function
+// prologue.
+
+// orderState holds state during the ordering process.
+type orderState struct {
+	out  []ir.Node             // list of generated statements
+	temp []*ir.Name            // stack of temporary variables
+	free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
+	edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
+}
+
+// order rewrites fn.Body to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *ir.Func) {
+	if base.Flag.W > 1 {
+		s := fmt.Sprintf("\nbefore order %v", fn.Sym())
+		ir.DumpList(s, fn.Body)
+	}
+
+	orderBlock(&fn.Body, map[string][]*ir.Name{})
+}
+
+// append typechecks stmt and appends it to out.
+func (o *orderState) append(stmt ir.Node) {
+	o.out = append(o.out, typecheck.Stmt(stmt))
+}
+
+// newTemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, newTemp emits code to zero the temporary.
+func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
+	var v *ir.Name
+	// Note: LongString is close to the type equality we want,
+	// but not exactly. We still need to double-check with types.Identical.
+	key := t.LongString()
+	a := o.free[key]
+	for i, n := range a {
+		if types.Identical(t, n.Type()) {
+			v = a[i]
+			a[i] = a[len(a)-1]
+			a = a[:len(a)-1]
+			o.free[key] = a
+			break
+		}
+	}
+	if v == nil {
+		v = typecheck.Temp(t)
+	}
+	if clear {
+		o.append(ir.NewAssignStmt(base.Pos, v, nil))
+	}
+
+	o.temp = append(o.temp, v)
+	return v
+}
+
+// copyExpr behaves like newTemp but also emits
+// code to initialize the temporary to the value n.
+func (o *orderState) copyExpr(n ir.Node) *ir.Name {
+	return o.copyExpr1(n, false)
+}
+
+// copyExprClear is like copyExpr but clears the temp before assignment.
+// It is provided for use when the evaluation of tmp = n turns into
+// a function call that is passed a pointer to the temporary as the output space.
+// If the call blocks before tmp has been written,
+// the garbage collector will still treat the temporary as live,
+// so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
+	return o.copyExpr1(n, true)
+}
+
+func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
+	t := n.Type()
+	v := o.newTemp(t, clear)
+	o.append(ir.NewAssignStmt(base.Pos, v, n))
+	return v
+}
+
+// cheapExpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, cheapExpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func (o *orderState) cheapExpr(n ir.Node) ir.Node {
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return n
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		l := o.cheapExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.SepCopy(n).(*ir.UnaryExpr)
+		a.X = l
+		return typecheck.Expr(a)
+	}
+
+	return o.copyExpr(n)
+}
+
+// safeExpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
+func (o *orderState) safeExpr(n ir.Node) ir.Node {
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return n
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		l := o.safeExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.SepCopy(n).(*ir.UnaryExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		l := o.safeExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.SepCopy(n).(*ir.SelectorExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		l := o.cheapExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.SepCopy(n).(*ir.SelectorExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		l := o.cheapExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.SepCopy(n).(*ir.StarExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.OINDEX, ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		var l ir.Node
+		if n.X.Type().IsArray() {
+			l = o.safeExpr(n.X)
+		} else {
+			l = o.cheapExpr(n.X)
+		}
+		r := o.cheapExpr(n.Index)
+		if l == n.X && r == n.Index {
+			return n
+		}
+		a := ir.SepCopy(n).(*ir.IndexExpr)
+		a.X = l
+		a.Index = r
+		return typecheck.Expr(a)
+
+	default:
+		base.Fatalf("order.safeExpr %v", n.Op())
+		return nil // not reached
+	}
+}
+
+// isaddrokay reports whether it is okay to pass n's address to runtime routines.
+// Taking the address of a variable makes the liveness and optimization analyses
+// lose track of where the variable's lifetime ends. To avoid hurting the analyses
+// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
+// because we emit explicit VARKILL instructions marking the end of those
+// temporaries' lifetimes.
+func isaddrokay(n ir.Node) bool {
+	return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
+}
+
+// addrTemp ensures that n is okay to pass by address to runtime routines.
+// If the original argument n is not okay, addrTemp creates a tmp, emits
+// tmp = n, and then returns tmp.
+// The result of addrTemp MUST be assigned back to n, e.g.
+// 	n.Left = o.addrTemp(n.Left)
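+//
+// Informally: addrTemp of a constant yields a readonly static; addrTemp
+// of a global or an existing autotmp returns it unchanged (see
+// isaddrokay); addrTemp of any other expression x emits tmp = x and
+// returns tmp.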
+func (o *orderState) addrTemp(n ir.Node) ir.Node {
+	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
+		// TODO: expand this to all static composite literal nodes?
+		n = typecheck.DefaultLit(n, nil)
+		types.CalcSize(n.Type())
+		vstat := readonlystaticname(n.Type())
+		var s staticinit.Schedule
+		s.StaticAssign(vstat, 0, n, n.Type())
+		if s.Out != nil {
+			base.Fatalf("staticassign of const generated code: %+v", n)
+		}
+		vstat = typecheck.Expr(vstat).(*ir.Name)
+		return vstat
+	}
+	if isaddrokay(n) {
+		return n
+	}
+	return o.copyExpr(n)
+}
+
+// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
+// It should only be used for map runtime calls which have *_fast* versions.
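+//
+// For example, a lookup in a map[int64]T uses the mapaccess*_fast64
+// runtime variants, which take the key by value, so no temp is needed;
+// a map keyed by a struct falls back to the generic mapaccess*, which
+// takes the key by pointer, so the key is first copied to an
+// addressable temp.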
+func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
+	// Most map calls need to take the address of the key.
+	// Exception: map*_fast* calls. See golang.org/issue/19015.
+	if mapfast(t) == mapslow {
+		return o.addrTemp(n)
+	}
+	return n
+}
+
+// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
+// in n to avoid string allocations for keys in map lookups.
+// It reports whether a replacement was made.
+//
+// For:
+//  x = m[string(k)]
+//  x = m[T1{... Tn{..., string(k), ...}}]
+// where k is []byte, T1 to Tn is a nesting of struct and array literals,
+// the allocation of backing bytes for the string can be avoided
+// by reusing the []byte backing array. These are special cases
+// for avoiding allocations when converting byte slices to strings.
+// It would be nice to handle these generally, but because
+// []byte keys are not allowed in maps, the use of string(k)
+// comes up in important cases in practice. See issue 3512.
+func mapKeyReplaceStrConv(n ir.Node) bool {
+	var replaced bool
+	switch n.Op() {
+	case ir.OBYTES2STR:
+		n := n.(*ir.ConvExpr)
+		n.SetOp(ir.OBYTES2STRTMP)
+		replaced = true
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, elem := range n.List {
+			elem := elem.(*ir.StructKeyExpr)
+			if mapKeyReplaceStrConv(elem.Value) {
+				replaced = true
+			}
+		}
+	case ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, elem := range n.List {
+			if elem.Op() == ir.OKEY {
+				elem = elem.(*ir.KeyExpr).Value
+			}
+			if mapKeyReplaceStrConv(elem) {
+				replaced = true
+			}
+		}
+	}
+	return replaced
+}
+
+type ordermarker int
+
+// markTemp returns the top of the temporary variable stack.
+func (o *orderState) markTemp() ordermarker {
+	return ordermarker(len(o.temp))
+}
+
+// popTemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by markTemp.
+func (o *orderState) popTemp(mark ordermarker) {
+	for _, n := range o.temp[mark:] {
+		key := n.Type().LongString()
+		o.free[key] = append(o.free[key], n)
+	}
+	o.temp = o.temp[:mark]
+}
+
+// cleanTempNoPop emits VARKILL instructions for each temporary
+// above the mark on the temporary stack and returns them as a
+// statement list. It does not pop the temporaries from the stack.
+func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
+	var out []ir.Node
+	for i := len(o.temp) - 1; i >= int(mark); i-- {
+		n := o.temp[i]
+		out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
+	}
+	return out
+}
+
+// cleanTemp emits VARKILL instructions for each temporary above the
+// mark on the temporary stack and removes them from the stack.
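+//
+// Typical usage in this file (a sketch):
+//	t := o.markTemp()
+//	// ... order sub-statements, allocating temps ...
+//	o.cleanTemp(t) // kill the temps and recycle them via o.free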
+func (o *orderState) cleanTemp(top ordermarker) {
+	o.out = append(o.out, o.cleanTempNoPop(top)...)
+	o.popTemp(top)
+}
+
+// stmtList orders each of the statements in the list.
+func (o *orderState) stmtList(l ir.Nodes) {
+	s := l
+	for i := range s {
+		orderMakeSliceCopy(s[i:])
+		o.stmt(s[i])
+	}
+}
+
+// orderMakeSliceCopy matches the pattern:
+//  m = OMAKESLICE([]T, x); OCOPY(m, s)
+// and rewrites it to:
+//  m = OMAKESLICECOPY([]T, x, s); nil
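+//
+// For example (source level), the pair
+//	m := make([]byte, len(s))
+//	copy(m, s)
+// can be fused into one OMAKESLICECOPY node, which walk lowers to a
+// single runtime makeslicecopy call. Per the checks below, the rewrite
+// applies only when the new slice escapes.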
+func orderMakeSliceCopy(s []ir.Node) {
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return
+	}
+	if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
+		return
+	}
+
+	as := s[0].(*ir.AssignStmt)
+	cp := s[1].(*ir.BinaryExpr)
+	if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
+		as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
+		as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
+		// The line above this one is correct with the differing equality operators:
+		// we want as.X and cp.X to be the same name,
+		// but we want the initial data to be coming from a different name.
+		return
+	}
+
+	mk := as.Y.(*ir.MakeExpr)
+	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
+		return
+	}
+	mk.SetOp(ir.OMAKESLICECOPY)
+	mk.Cap = cp.Y
+	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
+	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
+	as.Y = typecheck.Expr(mk)
+	s[1] = nil // remove separate copy call
+}
+
+// edge inserts coverage instrumentation for libfuzzer.
+func (o *orderState) edge() {
+	if base.Debug.Libfuzzer == 0 {
+		return
+	}
+
+	// Create a new uint8 counter to be allocated in section
+	// __libfuzzer_extra_counters.
+	counter := staticinit.StaticName(types.Types[types.TUINT8])
+	counter.SetLibfuzzerExtraCounter(true)
+
+	// counter += 1
+	incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
+	o.append(incr)
+}
+
+// orderBlock orders the block of statements in n into a new slice,
+// and then replaces the old slice in n with the new slice.
+// free is a map that can be used to obtain temporary variables by type.
+func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+	var order orderState
+	order.free = free
+	mark := order.markTemp()
+	order.edge()
+	order.stmtList(*n)
+	order.cleanTemp(mark)
+	*n = order.out
+}
+
+// exprInPlace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+// The result of exprInPlace MUST be assigned back to n, e.g.
+// 	n.Left = o.exprInPlace(n.Left)
+func (o *orderState) exprInPlace(n ir.Node) ir.Node {
+	var order orderState
+	order.free = o.free
+	n = order.expr(n, nil)
+	n = ir.InitExpr(order.out, n)
+
+	// insert new temporaries from order
+	// at head of outer list.
+	o.temp = append(o.temp, order.temp...)
+	return n
+}
+
+// orderStmtInPlace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+// The result of orderStmtInPlace MUST be assigned back to n, e.g.
+// 	n.Left = orderStmtInPlace(n.Left)
+// free is a map that can be used to obtain temporary variables by type.
+func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
+	var order orderState
+	order.free = free
+	mark := order.markTemp()
+	order.stmt(n)
+	order.cleanTemp(mark)
+	return ir.NewBlockStmt(src.NoXPos, order.out)
+}
+
+// init moves n's init list to o.out.
+func (o *orderState) init(n ir.Node) {
+	if ir.MayBeShared(n) {
+		// For concurrency safety, don't mutate potentially shared nodes.
+		// First, ensure that no work is required here.
+		if len(n.Init()) > 0 {
+			base.Fatalf("order.init shared node with ninit")
+		}
+		return
+	}
+	o.stmtList(ir.TakeInit(n))
+}
+
+// call orders the call expression nn.
+// nn.Op() is OCALLFUNC/OCALLMETH/OCALLINTER or a builtin like OCOPY.
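+//
+// For arguments tagged unsafe-uintptr by escape analysis, e.g. (a sketch)
+//	syscall.Syscall(trap, uintptr(unsafe.Pointer(p)), 0, 0)
+// call copies the pointer into a temp, marks the temp address-taken, and
+// records it in the call's KeepAlive list so the pointed-to object stays
+// live until the call returns.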
+func (o *orderState) call(nn ir.Node) {
+	if len(nn.Init()) > 0 {
+		// Caller should have already called o.init(nn).
+		base.Fatalf("%v with unexpected ninit", nn.Op())
+	}
+
+	// Builtin functions.
+	if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
+		switch n := nn.(type) {
+		default:
+			base.Fatalf("unexpected call: %+v", n)
+		case *ir.UnaryExpr:
+			n.X = o.expr(n.X, nil)
+		case *ir.ConvExpr:
+			n.X = o.expr(n.X, nil)
+		case *ir.BinaryExpr:
+			n.X = o.expr(n.X, nil)
+			n.Y = o.expr(n.Y, nil)
+		case *ir.MakeExpr:
+			n.Len = o.expr(n.Len, nil)
+			n.Cap = o.expr(n.Cap, nil)
+		case *ir.CallExpr:
+			o.exprList(n.Args)
+		}
+		return
+	}
+
+	n := nn.(*ir.CallExpr)
+	typecheck.FixVariadicCall(n)
+	n.X = o.expr(n.X, nil)
+	o.exprList(n.Args)
+
+	if n.Op() == ir.OCALLINTER {
+		return
+	}
+	keepAlive := func(arg ir.Node) {
+		// If the argument is really a pointer being converted to uintptr,
+		// arrange for the pointer to be kept alive until the call returns,
+		// by copying it into a temp and marking that temp
+		// still alive when we pop the temp stack.
+		if arg.Op() == ir.OCONVNOP {
+			arg := arg.(*ir.ConvExpr)
+			if arg.X.Type().IsUnsafePtr() {
+				x := o.copyExpr(arg.X)
+				arg.X = x
+				x.SetAddrtaken(true) // ensure SSA keeps the x variable
+				n.KeepAlive = append(n.KeepAlive, x)
+			}
+		}
+	}
+
+	// Check for "unsafe-uintptr" tag provided by escape analysis.
+	for i, param := range n.X.Type().Params().FieldSlice() {
+		if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
+			if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
+				arg := arg.(*ir.CompLitExpr)
+				for _, elt := range arg.List {
+					keepAlive(elt)
+				}
+			} else {
+				keepAlive(arg)
+			}
+		}
+	}
+}
+
+// mapAssign appends n to o.out.
+func (o *orderState) mapAssign(n ir.Node) {
+	switch n.Op() {
+	default:
+		base.Fatalf("order.mapAssign %v", n.Op())
+
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		if n.X.Op() == ir.OINDEXMAP {
+			n.Y = o.safeMapRHS(n.Y)
+		}
+		o.out = append(o.out, n)
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		if n.X.Op() == ir.OINDEXMAP {
+			n.Y = o.safeMapRHS(n.Y)
+		}
+		o.out = append(o.out, n)
+	}
+}
+
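+// safeMapRHS makes the right-hand side of a map assignment safe to
+// evaluate before the map insert begins. For example (a sketch), in
+//	m[k] = append(s, f())
+// the appended arguments are hoisted into temps, so a panic while
+// evaluating f() happens before m[k] is touched (see issue 22881).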
+func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
+	// Make sure we evaluate the RHS before starting the map insert.
+	// We need to make sure the RHS won't panic.  See issue 22881.
+	if r.Op() == ir.OAPPEND {
+		r := r.(*ir.CallExpr)
+		s := r.Args[1:]
+		for i, n := range s {
+			s[i] = o.cheapExpr(n)
+		}
+		return r
+	}
+	return o.cheapExpr(r)
+}
+
+// stmt orders the statement n, appending to o.out.
+// Temporaries created during the statement are cleaned
+// up using VARKILL instructions where possible.
+func (o *orderState) stmt(n ir.Node) {
+	if n == nil {
+		return
+	}
+
+	lno := ir.SetPos(n)
+	o.init(n)
+
+	switch n.Op() {
+	default:
+		base.Fatalf("order.stmt %v", n.Op())
+
+	case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
+		o.out = append(o.out, n)
+
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		t := o.markTemp()
+		n.X = o.expr(n.X, nil)
+		n.Y = o.expr(n.Y, n.X)
+		o.mapAssign(n)
+		o.cleanTemp(t)
+
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		t := o.markTemp()
+		n.X = o.expr(n.X, nil)
+		n.Y = o.expr(n.Y, nil)
+
+		if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
+			// Rewrite m[k] op= r into m[k] = m[k] op r so
+			// that we can ensure that if op panics
+			// because r is zero, the panic happens before
+			// the map assignment.
+			// DeepCopy is a big hammer here, but safeExpr
+			// makes sure there is nothing too deep being copied.
+			l1 := o.safeExpr(n.X)
+			l2 := ir.DeepCopy(src.NoXPos, l1)
+			if l2.Op() == ir.OINDEXMAP {
+				l2 := l2.(*ir.IndexExpr)
+				l2.Assigned = false
+			}
+			l2 = o.copyExpr(l2)
+			r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
+			as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
+			o.mapAssign(as)
+			o.cleanTemp(t)
+			return
+		}
+
+		o.mapAssign(n)
+		o.cleanTemp(t)
+
+	case ir.OAS2:
+		n := n.(*ir.AssignListStmt)
+		t := o.markTemp()
+		o.exprList(n.Lhs)
+		o.exprList(n.Rhs)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	// Special: avoid copy of func call n.Rhs[0]
+	case ir.OAS2FUNC:
+		n := n.(*ir.AssignListStmt)
+		t := o.markTemp()
+		o.exprList(n.Lhs)
+		o.init(n.Rhs[0])
+		o.call(n.Rhs[0])
+		o.as2func(n)
+		o.cleanTemp(t)
+
+	// Special: use temporary variables to hold result,
+	// so that runtime can take address of temporary.
+	// No temporary for blank assignment.
+	//
+	// OAS2MAPR: make sure key is addressable if needed,
+	//           and make sure OINDEXMAP is not copied out.
+	case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
+		n := n.(*ir.AssignListStmt)
+		t := o.markTemp()
+		o.exprList(n.Lhs)
+
+		switch r := n.Rhs[0]; r.Op() {
+		case ir.ODOTTYPE2:
+			r := r.(*ir.TypeAssertExpr)
+			r.X = o.expr(r.X, nil)
+		case ir.ORECV:
+			r := r.(*ir.UnaryExpr)
+			r.X = o.expr(r.X, nil)
+		case ir.OINDEXMAP:
+			r := r.(*ir.IndexExpr)
+			r.X = o.expr(r.X, nil)
+			r.Index = o.expr(r.Index, nil)
+			// See similar conversion for OINDEXMAP below.
+			_ = mapKeyReplaceStrConv(r.Index)
+			r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
+		default:
+			base.Fatalf("order.stmt: %v", r.Op())
+		}
+
+		o.as2ok(n)
+		o.cleanTemp(t)
+
+	// Special: does not save n onto out.
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		o.stmtList(n.List)
+
+	// Special: these statements have no expression operands; save as is.
+	case ir.OBREAK,
+		ir.OCONTINUE,
+		ir.ODCL,
+		ir.ODCLCONST,
+		ir.ODCLTYPE,
+		ir.OFALL,
+		ir.OGOTO,
+		ir.OLABEL,
+		ir.OTAILCALL:
+		o.out = append(o.out, n)
+
+	// Special: handle call arguments.
+	case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+		n := n.(*ir.CallExpr)
+		t := o.markTemp()
+		o.call(n)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	case ir.OCLOSE, ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		t := o.markTemp()
+		n.X = o.expr(n.X, nil)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	case ir.OCOPY:
+		n := n.(*ir.BinaryExpr)
+		t := o.markTemp()
+		n.X = o.expr(n.X, nil)
+		n.Y = o.expr(n.Y, nil)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+		n := n.(*ir.CallExpr)
+		t := o.markTemp()
+		o.exprList(n.Args)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	// Special: order arguments to inner call but not call itself.
+	case ir.ODEFER, ir.OGO:
+		n := n.(*ir.GoDeferStmt)
+		t := o.markTemp()
+		o.init(n.Call)
+		o.call(n.Call)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	case ir.ODELETE:
+		n := n.(*ir.CallExpr)
+		t := o.markTemp()
+		n.Args[0] = o.expr(n.Args[0], nil)
+		n.Args[1] = o.expr(n.Args[1], nil)
+		n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	// Clean temporaries from condition evaluation at
+	// beginning of loop body and after for statement.
+	case ir.OFOR:
+		n := n.(*ir.ForStmt)
+		t := o.markTemp()
+		n.Cond = o.exprInPlace(n.Cond)
+		n.Body.Prepend(o.cleanTempNoPop(t)...)
+		orderBlock(&n.Body, o.free)
+		n.Post = orderStmtInPlace(n.Post, o.free)
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	// Clean temporaries from condition at
+	// beginning of both branches.
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		t := o.markTemp()
+		n.Cond = o.exprInPlace(n.Cond)
+		n.Body.Prepend(o.cleanTempNoPop(t)...)
+		n.Else.Prepend(o.cleanTempNoPop(t)...)
+		o.popTemp(t)
+		orderBlock(&n.Body, o.free)
+		orderBlock(&n.Else, o.free)
+		o.out = append(o.out, n)
+
+	case ir.OPANIC:
+		n := n.(*ir.UnaryExpr)
+		t := o.markTemp()
+		n.X = o.expr(n.X, nil)
+		if !n.X.Type().IsEmptyInterface() {
+			base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X)
+		}
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	case ir.ORANGE:
+		// n.X is the expression being ranged over.
+		// order it, and then make a copy if we need one.
+		// We almost always do, to ensure that we don't
+		// see any value changes made during the loop.
+		// Usually the copy is cheap (e.g., array pointer,
+		// chan, slice, string are all tiny).
+		// The exception is ranging over an array value
+		// (not a slice, not a pointer to array),
+		// which must make a copy to avoid seeing updates made during
+		// the range body. Ranging over an array value is uncommon though.
+
+		// Mark []byte(str) range expression to reuse string backing storage.
+		// It is safe because the storage cannot be mutated.
+		n := n.(*ir.RangeStmt)
+		if n.X.Op() == ir.OSTR2BYTES {
+			n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
+		}
+
+		t := o.markTemp()
+		n.X = o.expr(n.X, nil)
+
+		orderBody := true
+		xt := typecheck.RangeExprType(n.X.Type())
+		switch xt.Kind() {
+		default:
+			base.Fatalf("order.stmt range %v", n.Type())
+
+		case types.TARRAY, types.TSLICE:
+			if n.Value == nil || ir.IsBlank(n.Value) {
+				// for i := range x will only use x once, to compute len(x).
+				// No need to copy it.
+				break
+			}
+			fallthrough
+
+		case types.TCHAN, types.TSTRING:
+			// chan, string, slice, array ranges use value multiple times.
+			// make copy.
+			r := n.X
+
+			if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
+				r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
+				r.SetType(types.Types[types.TSTRING])
+				r = typecheck.Expr(r)
+			}
+
+			n.X = o.copyExpr(r)
+
+		case types.TMAP:
+			if isMapClear(n) {
+				// Preserve the body of the map clear pattern so it can
+				// be detected during walk. The loop body will not be used
+				// when optimizing away the range loop to a runtime call.
+				orderBody = false
+				break
+			}
+
+			// copy the map value in case it is a map literal.
+			// TODO(rsc): Make tmp = literal expressions reuse tmp.
+			// For maps tmp is just one word so it hardly matters.
+			r := n.X
+			n.X = o.copyExpr(r)
+
+			// n.Prealloc is the temp for the iterator.
+			// MapIterType contains pointers and needs to be zeroed.
+			n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
+		}
+		n.Key = o.exprInPlace(n.Key)
+		n.Value = o.exprInPlace(n.Value)
+		if orderBody {
+			orderBlock(&n.Body, o.free)
+		}
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		o.exprList(n.Results)
+		o.out = append(o.out, n)
+
+	// Special: clean case temporaries in each block entry.
+	// Select must enter one of its blocks, so there is no
+	// need for a cleaning at the end.
+	// Doubly special: evaluation order for select is stricter
+	// than for ordinary expressions. Even something like p.c
+	// has to be hoisted into a temporary, so that it cannot be
+	// reordered after the channel evaluation for a different
+	// case (if p were nil, then the timing of the fault would
+	// give this away).
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		t := o.markTemp()
+		for _, ncas := range n.Cases {
+			r := ncas.Comm
+			ir.SetPos(ncas)
+
+			// Append any new body prologue to ninit.
+			// The next loop will insert ninit into nbody.
+			if len(ncas.Init()) != 0 {
+				base.Fatalf("order select ninit")
+			}
+			if r == nil {
+				continue
+			}
+			switch r.Op() {
+			default:
+				ir.Dump("select case", r)
+				base.Fatalf("unknown op in select %v", r.Op())
+
+			case ir.OSELRECV2:
+				// case x, ok = <-c
+				r := r.(*ir.AssignListStmt)
+				recv := r.Rhs[0].(*ir.UnaryExpr)
+				recv.X = o.expr(recv.X, nil)
+				if !ir.IsAutoTmp(recv.X) {
+					recv.X = o.copyExpr(recv.X)
+				}
+				init := ir.TakeInit(r)
+
+				colas := r.Def
+				do := func(i int, t *types.Type) {
+					n := r.Lhs[i]
+					if ir.IsBlank(n) {
+						return
+					}
+					// If this is case x := <-ch or case x, y := <-ch, the case has
+					// the ODCL nodes to declare x and y. We want to delay that
+					// declaration (and possible allocation) until inside the case body.
+					// Delete the ODCL nodes here and recreate them inside the body below.
+					if colas {
+						if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
+							init = init[1:]
+						}
+						dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+						ncas.PtrInit().Append(dcl)
+					}
+					tmp := o.newTemp(t, t.HasPointers())
+					as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
+					ncas.PtrInit().Append(as)
+					r.Lhs[i] = tmp
+				}
+				do(0, recv.X.Type().Elem())
+				do(1, types.Types[types.TBOOL])
+				if len(init) != 0 {
+					ir.DumpList("ninit", r.Init())
+					base.Fatalf("ninit on select recv")
+				}
+				orderBlock(ncas.PtrInit(), o.free)
+
+			case ir.OSEND:
+				r := r.(*ir.SendStmt)
+				if len(r.Init()) != 0 {
+					ir.DumpList("ninit", r.Init())
+					base.Fatalf("ninit on select send")
+				}
+
+				// case c <- x
+			// r.Chan is c, r.Value is x; both are always evaluated.
+				r.Chan = o.expr(r.Chan, nil)
+
+				if !ir.IsAutoTmp(r.Chan) {
+					r.Chan = o.copyExpr(r.Chan)
+				}
+				r.Value = o.expr(r.Value, nil)
+				if !ir.IsAutoTmp(r.Value) {
+					r.Value = o.copyExpr(r.Value)
+				}
+			}
+		}
+		// Now that we have accumulated all the temporaries, clean them.
+		// Also insert any ninit queued during the previous loop.
+		// (The temporary cleaning must follow that ninit work.)
+		for _, cas := range n.Cases {
+			orderBlock(&cas.Body, o.free)
+			cas.Body.Prepend(o.cleanTempNoPop(t)...)
+
+			// TODO(mdempsky): Is this actually necessary?
+			// walkSelect appears to walk Ninit.
+			cas.Body.Prepend(ir.TakeInit(cas)...)
+		}
+
+		o.out = append(o.out, n)
+		o.popTemp(t)
+
+	// Special: value being sent is passed as a pointer; make it addressable.
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		t := o.markTemp()
+		n.Chan = o.expr(n.Chan, nil)
+		n.Value = o.expr(n.Value, nil)
+		if base.Flag.Cfg.Instrumenting {
+			// Force copying to the stack so that (chan T)(nil) <- x
+			// is still instrumented as a read of x.
+			n.Value = o.copyExpr(n.Value)
+		} else {
+			n.Value = o.addrTemp(n.Value)
+		}
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+
+	// TODO(rsc): Clean temporaries more aggressively.
+	// Note that because walkSwitch will rewrite some of the
+	// switch into a binary search, this is not as easy as it looks.
+	// (If we ran that code here we could invoke order.stmt on
+	// the if-else chain instead.)
+	// For now just clean all the temporaries at the end.
+	// In practice that's fine.
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
+			// Add empty "default:" case for instrumentation.
+			n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
+		}
+
+		t := o.markTemp()
+		n.Tag = o.expr(n.Tag, nil)
+		for _, ncas := range n.Cases {
+			o.exprListInPlace(ncas.List)
+			orderBlock(&ncas.Body, o.free)
+		}
+
+		o.out = append(o.out, n)
+		o.cleanTemp(t)
+	}
+
+	base.Pos = lno
+}
+
+func hasDefaultCase(n *ir.SwitchStmt) bool {
+	for _, ncas := range n.Cases {
+		if len(ncas.List) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// exprList orders the expression list l into o.
+func (o *orderState) exprList(l ir.Nodes) {
+	s := l
+	for i := range s {
+		s[i] = o.expr(s[i], nil)
+	}
+}
+
+// exprListInPlace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func (o *orderState) exprListInPlace(l ir.Nodes) {
+	s := l
+	for i := range s {
+		s[i] = o.exprInPlace(s[i])
+	}
+}
+
+func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
+	return o.expr(n, nil)
+}
+
+// expr orders a single expression, appending side
+// effects to o.out as needed.
+// If this is part of an assignment lhs = n, lhs is given.
+// Otherwise lhs == nil. (When lhs != nil it may be possible
+// to avoid copying the result of the expression to a temporary.)
+// The result of expr MUST be assigned back to n, e.g.
+// 	n.Left = o.expr(n.Left, lhs)
+func (o *orderState) expr(n, lhs ir.Node) ir.Node {
+	if n == nil {
+		return n
+	}
+	lno := ir.SetPos(n)
+	n = o.expr1(n, lhs)
+	base.Pos = lno
+	return n
+}
+
+func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
+	o.init(n)
+
+	switch n.Op() {
+	default:
+		if o.edit == nil {
+			o.edit = o.exprNoLHS // create closure once
+		}
+		ir.EditChildren(n, o.edit)
+		return n
+
+	// Addition of strings turns into a function call.
+	// Allocate a temporary to hold the strings.
+	// Up to 5 strings use direct runtime helpers.
+	case ir.OADDSTR:
+		n := n.(*ir.AddStringExpr)
+		o.exprList(n.List)
+
+		if len(n.List) > 5 {
+			t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
+			n.Prealloc = o.newTemp(t, false)
+		}
+
+		// Mark string(byteSlice) arguments to reuse byteSlice backing
+		// buffer during conversion. String concatenation does not
+		// retain the strings for later use, so it is safe.
+		// However, we can do it only if there is at least one non-empty string literal.
+		// Otherwise if all other arguments are empty strings,
+		// concatstrings will return the reference to the temp string
+		// to the caller.
+		hasbyte := false
+
+		haslit := false
+		for _, n1 := range n.List {
+			hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
+			haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
+		}
+
+		if haslit && hasbyte {
+			for _, n2 := range n.List {
+				if n2.Op() == ir.OBYTES2STR {
+					n2 := n2.(*ir.ConvExpr)
+					n2.SetOp(ir.OBYTES2STRTMP)
+				}
+			}
+		}
+		return n
+
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		n.X = o.expr(n.X, nil)
+		n.Index = o.expr(n.Index, nil)
+		needCopy := false
+
+		if !n.Assigned {
+			// Ensure that any []byte slices we are not copying
+			// cannot be changed before the map index, by forcing
+			// the map index to happen immediately following the
+			// conversions. See copyExpr a few lines below.
+			needCopy = mapKeyReplaceStrConv(n.Index)
+
+			if base.Flag.Cfg.Instrumenting {
+				// Race detector needs the copy.
+				needCopy = true
+			}
+		}
+
+		// key must be addressable
+		n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
+		if needCopy {
+			return o.copyExpr(n)
+		}
+		return n
+
+	// concrete type (not interface) argument might need an addressable
+	// temporary to pass to the runtime conversion routine.
+	case ir.OCONVIFACE:
+		n := n.(*ir.ConvExpr)
+		n.X = o.expr(n.X, nil)
+		if n.X.Type().IsInterface() {
+			return n
+		}
+		if _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
+			// Need a temp if we need to pass the address to the conversion function.
+			// We also process static composite literal node here, making a named static global
+			// whose address we can put directly in an interface (see OCONVIFACE case in walk).
+			n.X = o.addrTemp(n.X)
+		}
+		return n
+
+	case ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
+			call := n.X.(*ir.CallExpr)
+			// When reordering unsafe.Pointer(f()) into a separate
+			// statement, the conversion and function call must stay
+			// together. See golang.org/issue/15329.
+			o.init(call)
+			o.call(call)
+			if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
+				return o.copyExpr(n)
+			}
+		} else {
+			n.X = o.expr(n.X, nil)
+		}
+		return n
+
+	case ir.OANDAND, ir.OOROR:
+		// ... = LHS && RHS
+		//
+		// var r bool
+		// r = LHS
+		// if r {       // or !r, for OROR
+		//     r = RHS
+		// }
+		// ... = r
+
+		n := n.(*ir.LogicalExpr)
+		r := o.newTemp(n.Type(), false)
+
+		// Evaluate left-hand side.
+		lhs := o.expr(n.X, nil)
+		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
+
+		// Evaluate right-hand side, save generated code.
+		saveout := o.out
+		o.out = nil
+		t := o.markTemp()
+		o.edge()
+		rhs := o.expr(n.Y, nil)
+		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
+		o.cleanTemp(t)
+		gen := o.out
+		o.out = saveout
+
+		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
+		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
+		if n.Op() == ir.OANDAND {
+			nif.Body = gen
+		} else {
+			nif.Else = gen
+		}
+		o.out = append(o.out, nif)
+		return r
+
+	case ir.OCALLFUNC,
+		ir.OCALLINTER,
+		ir.OCALLMETH,
+		ir.OCAP,
+		ir.OCOMPLEX,
+		ir.OCOPY,
+		ir.OIMAG,
+		ir.OLEN,
+		ir.OMAKECHAN,
+		ir.OMAKEMAP,
+		ir.OMAKESLICE,
+		ir.OMAKESLICECOPY,
+		ir.ONEW,
+		ir.OREAL,
+		ir.ORECOVER,
+		ir.OSTR2BYTES,
+		ir.OSTR2BYTESTMP,
+		ir.OSTR2RUNES:
+
+		if isRuneCount(n) {
+			// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
+			conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
+			conv.X = o.expr(conv.X, nil)
+		} else {
+			o.call(n)
+		}
+
+		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
+			return o.copyExpr(n)
+		}
+		return n
+
+	case ir.OAPPEND:
+		// Check for append(x, make([]T, y)...) .
+		n := n.(*ir.CallExpr)
+		if isAppendOfMake(n) {
+			n.Args[0] = o.expr(n.Args[0], nil) // order x
+			mk := n.Args[1].(*ir.MakeExpr)
+			mk.Len = o.expr(mk.Len, nil) // order y
+		} else {
+			o.exprList(n.Args)
+		}
+
+		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
+			return o.copyExpr(n)
+		}
+		return n
+
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+		n := n.(*ir.SliceExpr)
+		n.X = o.expr(n.X, nil)
+		n.Low = o.cheapExpr(o.expr(n.Low, nil))
+		n.High = o.cheapExpr(o.expr(n.High, nil))
+		n.Max = o.cheapExpr(o.expr(n.Max, nil))
+		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
+			return o.copyExpr(n)
+		}
+		return n
+
+	case ir.OCLOSURE:
+		n := n.(*ir.ClosureExpr)
+		if n.Transient() && len(n.Func.ClosureVars) > 0 {
+			n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
+		}
+		return n
+
+	case ir.OCALLPART:
+		n := n.(*ir.SelectorExpr)
+		n.X = o.expr(n.X, nil)
+		if n.Transient() {
+			t := typecheck.PartialCallType(n)
+			n.Prealloc = o.newTemp(t, false)
+		}
+		return n
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		o.exprList(n.List)
+		if n.Transient() {
+			t := types.NewArray(n.Type().Elem(), n.Len)
+			n.Prealloc = o.newTemp(t, false)
+		}
+		return n
+
+	case ir.ODOTTYPE, ir.ODOTTYPE2:
+		n := n.(*ir.TypeAssertExpr)
+		n.X = o.expr(n.X, nil)
+		if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
+			return o.copyExprClear(n)
+		}
+		return n
+
+	case ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		n.X = o.expr(n.X, nil)
+		return o.copyExprClear(n)
+
+	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		n.X = o.expr(n.X, nil)
+		n.Y = o.expr(n.Y, nil)
+
+		t := n.X.Type()
+		switch {
+		case t.IsString():
+			// Mark string(byteSlice) arguments to reuse byteSlice backing
+			// buffer during conversion. String comparison does not
+			// retain the strings for later use, so it is safe.
+			if n.X.Op() == ir.OBYTES2STR {
+				n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+			}
+			if n.Y.Op() == ir.OBYTES2STR {
+				n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+			}
+
+		case t.IsStruct() || t.IsArray():
+			// for complex comparisons, we need both args to be
+			// addressable so we can pass them to the runtime.
+			n.X = o.addrTemp(n.X)
+			n.Y = o.addrTemp(n.Y)
+		}
+		return n
+
+	case ir.OMAPLIT:
+		// Order map by converting:
+		//   map[int]int{
+		//     a(): b(),
+		//     c(): d(),
+		//     e(): f(),
+		//   }
+		// to
+		//   m := map[int]int{}
+		//   m[a()] = b()
+		//   m[c()] = d()
+		//   m[e()] = f()
+		// Then order the result.
+		// Without this special case, order would otherwise compute all
+		// the keys and values before storing any of them to the map.
+		// See issue 26552.
+		n := n.(*ir.CompLitExpr)
+		entries := n.List
+		statics := entries[:0]
+		var dynamics []*ir.KeyExpr
+		for _, r := range entries {
+			r := r.(*ir.KeyExpr)
+
+			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+				dynamics = append(dynamics, r)
+				continue
+			}
+
+			// Recursively ordering some static entries can change them to dynamic;
+			// e.g., OCONVIFACE nodes. See #31777.
+			r = o.expr(r, nil).(*ir.KeyExpr)
+			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+				dynamics = append(dynamics, r)
+				continue
+			}
+
+			statics = append(statics, r)
+		}
+		n.List = statics
+
+		if len(dynamics) == 0 {
+			return n
+		}
+
+		// Emit the creation of the map (with all its static entries).
+		m := o.newTemp(n.Type(), false)
+		as := ir.NewAssignStmt(base.Pos, m, n)
+		typecheck.Stmt(as)
+		o.stmt(as)
+
+		// Emit eval+insert of dynamic entries, one at a time.
+		for _, r := range dynamics {
+			as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
+			typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
+			o.stmt(as)
+		}
+		return m
+	}
+
+	// No return - type-assertions above. Each case must return for itself.
+}
+
+// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
+// The caller should order the right-hand side of the assignment before calling order.as2func.
+// It rewrites,
+//	a, b, a = ...
+// as
+//	tmp1, tmp2, tmp3 = ...
+//	a, b, a = tmp1, tmp2, tmp3
+// This is necessary to ensure left-to-right assignment order.
+func (o *orderState) as2func(n *ir.AssignListStmt) {
+	results := n.Rhs[0].Type()
+	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+	for i, nl := range n.Lhs {
+		if !ir.IsBlank(nl) {
+			typ := results.Field(i).Type
+			tmp := o.newTemp(typ, typ.HasPointers())
+			n.Lhs[i] = tmp
+			as.Lhs = append(as.Lhs, nl)
+			as.Rhs = append(as.Rhs, tmp)
+		}
+	}
+
+	o.out = append(o.out, n)
+	o.stmt(typecheck.Stmt(as))
+}
+
+// as2ok orders OAS2XXX with ok.
+// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
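+//
+// For example (a sketch), with a defined type B bool and var ok B,
+//	v, ok = <-ch
+// becomes roughly
+//	tmp1, tmp2 = <-ch // tmp2 has type bool
+//	v, ok = tmp1, B(tmp2)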
+func (o *orderState) as2ok(n *ir.AssignListStmt) {
+	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+
+	do := func(i int, typ *types.Type) {
+		if nl := n.Lhs[i]; !ir.IsBlank(nl) {
+			var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
+			n.Lhs[i] = tmp
+			as.Lhs = append(as.Lhs, nl)
+			if i == 1 {
+				// The "ok" result is an untyped boolean according to the Go
+				// spec. We need to explicitly convert it to the LHS type in
+				// case the latter is a defined boolean type (#8475).
+				tmp = typecheck.Conv(tmp, nl.Type())
+			}
+			as.Rhs = append(as.Rhs, tmp)
+		}
+	}
+
+	do(0, n.Rhs[0].Type())
+	do(1, types.Types[types.TBOOL])
+
+	o.out = append(o.out, n)
+	o.stmt(typecheck.Stmt(as))
+}
diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go
new file mode 100644
index 0000000..47cd2fd
--- /dev/null
+++ b/src/cmd/compile/internal/walk/race.go
@@ -0,0 +1,51 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"cmd/internal/sys"
+)
+
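+// instrument marks fn's body for race/msan instrumentation (unless fn
+// is marked norace or is an ABI wrapper) and, under -race, brackets the
+// function with racefuncenter/racefuncexit calls so the race detector
+// can track its stack frame.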
+func instrument(fn *ir.Func) {
+	if fn.Pragma&ir.Norace != 0 || (fn.Linksym() != nil && fn.Linksym().ABIWrapper()) {
+		return
+	}
+
+	if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) {
+		fn.SetInstrumentBody(true)
+	}
+
+	if base.Flag.Race {
+		lno := base.Pos
+		base.Pos = src.NoXPos
+		if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 {
+			fn.Enter.Prepend(mkcallstmt("racefuncenterfp"))
+			fn.Exit.Append(mkcallstmt("racefuncexit"))
+		} else {
+
+			// nodpc is the PC of the caller as extracted by
+			// getcallerpc. We use -types.PtrSize(FP) for amd64.
+			// This only works for amd64; it will not
+			// work on arm or others that might support
+			// race in the future.
+
+			nodpc := ir.NewNameAt(src.NoXPos, typecheck.Lookup(".fp"))
+			nodpc.Class = ir.PPARAM
+			nodpc.SetUsed(true)
+			nodpc.SetType(types.Types[types.TUINTPTR])
+			nodpc.SetFrameOffset(int64(-types.PtrSize))
+			fn.Dcl = append(fn.Dcl, nodpc)
+			fn.Enter.Prepend(mkcallstmt("racefuncenter", nodpc))
+			fn.Exit.Append(mkcallstmt("racefuncexit"))
+		}
+		base.Pos = lno
+	}
+}
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
new file mode 100644
index 0000000..5ab24b2
--- /dev/null
+++ b/src/cmd/compile/internal/walk/range.go
@@ -0,0 +1,486 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"unicode/utf8"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/sys"
+)
+
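+// cheapComputableIndex reports whether loading an element of the given
+// width via an index expression like ha[hv1] is cheap on the target
+// architecture. walkRange uses it to choose between the direct-index
+// loop form and the pointer-increment (OFORUNTIL) form below.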
+func cheapComputableIndex(width int64) bool {
+	switch ssagen.Arch.LinkArch.Family {
+	// MIPS does not have R+R addressing
+	// Arm64 may lack ability to generate this code in our assembler,
+	// but the architecture supports it.
+	case sys.PPC64, sys.S390X:
+		return width == 1
+	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
+		switch width {
+		case 1, 2, 4, 8:
+			return true
+		}
+	}
+	return false
+}
+
+// walkRange transforms various forms of ORANGE into
+// simpler forms.  The result must be assigned back to nrange.
+// Node nrange may also be modified in place, and may also be
+// the returned node.
+func walkRange(nrange *ir.RangeStmt) ir.Node {
+	if isMapClear(nrange) {
+		m := nrange.X
+		lno := ir.SetPos(m)
+		n := mapClear(m)
+		base.Pos = lno
+		return n
+	}
+
+	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
+	nfor.SetInit(nrange.Init())
+	nfor.Label = nrange.Label
+
+	// variable name conventions:
+	//	ohv1, hv1, hv2: hidden (old) val 1, 2
+	//	ha, hit: hidden aggregate, iterator
+	//	hn, hp: hidden len, pointer
+	//	hb: hidden bool
+	//	a, v1, v2: not hidden aggregate, val 1, 2
+
+	a := nrange.X
+	t := typecheck.RangeExprType(a.Type())
+	lno := ir.SetPos(a)
+
+	v1, v2 := nrange.Key, nrange.Value
+
+	if ir.IsBlank(v2) {
+		v2 = nil
+	}
+
+	if ir.IsBlank(v1) && v2 == nil {
+		v1 = nil
+	}
+
+	if v1 == nil && v2 != nil {
+		base.Fatalf("walkRange: v2 != nil while v1 == nil")
+	}
+
+	var ifGuard *ir.IfStmt
+
+	var body []ir.Node
+	var init []ir.Node
+	switch t.Kind() {
+	default:
+		base.Fatalf("walkRange")
+
+	case types.TARRAY, types.TSLICE:
+		if nn := arrayClear(nrange, v1, v2, a); nn != nil {
+			base.Pos = lno
+			return nn
+		}
+
+		// order.stmt arranged for a copy of the array/slice variable if needed.
+		ha := a
+
+		hv1 := typecheck.Temp(types.Types[types.TINT])
+		hn := typecheck.Temp(types.Types[types.TINT])
+
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
+
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))
+
+		// for range ha { body }
+		if v1 == nil {
+			break
+		}
+
+		// for v1 := range ha { body }
+		if v2 == nil {
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
+			break
+		}
+
+		// for v1, v2 := range ha { body }
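+		// With a cheaply computable index this becomes, roughly:
+		//	for hv1 := 0; hv1 < hn; hv1++ { v1, v2 = hv1, ha[hv1]; body }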
+		if cheapComputableIndex(t.Elem().Width) {
+			// v1, v2 = hv1, ha[hv1]
+			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
+			tmp.SetBounded(true)
+			// Use OAS2 to correctly handle assignments
+			// of the form "v1, a[v1] := range".
+			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+			a.Lhs = []ir.Node{v1, v2}
+			a.Rhs = []ir.Node{hv1, tmp}
+			body = []ir.Node{a}
+			break
+		}
+
+		// TODO(austin): OFORUNTIL is a strange beast, but is
+		// necessary for expressing the control flow we need
+		// while also making "break" and "continue" work. It
+		// would be nice to just lower ORANGE during SSA, but
+		// racewalk needs to see many of the operations
+		// involved in ORANGE's implementation. If racewalk
+		// moves into SSA, consider moving ORANGE into SSA and
+		// eliminating OFORUNTIL.
+
+		// TODO(austin): OFORUNTIL inhibits bounds-check
+		// elimination on the index variable (see #20711).
+		// Enhance the prove pass to understand this.
+		ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
+		ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+		nfor.SetOp(ir.OFORUNTIL)
+
+		hp := typecheck.Temp(types.NewPtr(t.Elem()))
+		tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))
+		tmp.SetBounded(true)
+		init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))
+
+		// Use OAS2 to correctly handle assignments
+		// of the form "v1, a[v1] := range".
+		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+		a.Lhs = []ir.Node{v1, v2}
+		a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}
+		body = append(body, a)
+
+		// Advance pointer as part of the late increment.
+		//
+		// This runs *after* the condition check, so we know
+		// advancing the pointer is safe and won't go past the
+		// end of the allocation.
+		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width))
+		nfor.Late = []ir.Node{typecheck.Stmt(as)}
+
+	case types.TMAP:
+		// order.stmt allocated the iterator for us.
+		// we only use a once, so no copy needed.
+		ha := a
+
+		hit := nrange.Prealloc
+		th := hit.Type()
+		keysym := th.Field(0).Sym  // depends on layout of iterator struct.  See reflect.go:MapIterType
+		elemsym := th.Field(1).Sym // ditto
+
+		fn := typecheck.LookupRuntime("mapiterinit")
+
+		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
+		init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
+
+		fn = typecheck.LookupRuntime("mapiternext")
+		fn = typecheck.SubstArgTypes(fn, th)
+		nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))
+
+		key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
+		if v1 == nil {
+			body = nil
+		} else if v2 == nil {
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
+		} else {
+			elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
+			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+			a.Lhs = []ir.Node{v1, v2}
+			a.Rhs = []ir.Node{key, elem}
+			body = []ir.Node{a}
+		}
+
+	case types.TCHAN:
+		// order.stmt arranged for a copy of the channel variable.
+		ha := a
+
+		hv1 := typecheck.Temp(t.Elem())
+		hv1.SetTypecheck(1)
+		if t.Elem().HasPointers() {
+			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+		}
+		hb := typecheck.Temp(types.Types[types.TBOOL])
+
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false))
+		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil)
+		a.SetTypecheck(1)
+		a.Lhs = []ir.Node{hv1, hb}
+		a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
+		nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)
+		if v1 == nil {
+			body = nil
+		} else {
+			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
+		}
+		// Zero hv1. This prevents hv1 from being the sole, inaccessible
+		// reference to an otherwise GC-able value during the next channel receive.
+		// See issue 15281.
+		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+	case types.TSTRING:
+		// Transform string range statements like "for v1, v2 = range a" into
+		//
+		// ha := a
+		// for hv1 := 0; hv1 < len(ha); {
+		//   hv1t := hv1
+		//   hv2 := rune(ha[hv1])
+		//   if hv2 < utf8.RuneSelf {
+		//      hv1++
+		//   } else {
+		//      hv2, hv1 = decoderune(ha, hv1)
+		//   }
+		//   v1, v2 = hv1t, hv2
+		//   // original body
+		// }
+
+		// order.stmt arranged for a copy of the string variable.
+		ha := a
+
+		hv1 := typecheck.Temp(types.Types[types.TINT])
+		hv1t := typecheck.Temp(types.Types[types.TINT])
+		hv2 := typecheck.Temp(types.RuneType)
+
+		// hv1 := 0
+		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+		// hv1 < len(ha)
+		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))
+
+		if v1 != nil {
+			// hv1t = hv1
+			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
+		}
+
+		// hv2 := rune(ha[hv1])
+		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
+		nind.SetBounded(true)
+		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))
+
+		// if hv2 < utf8.RuneSelf
+		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf))
+
+		// hv1++
+		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))}
+
+		// } else {
+		eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+
+		// hv2, hv1 = decoderune(ha, hv1)
+		eif.Lhs = []ir.Node{hv2, hv1}
+		fn := typecheck.LookupRuntime("decoderune")
+		var fnInit ir.Nodes
+		eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, ha, hv1)}
+		fnInit.Append(eif)
+		nif.Else = fnInit
+
+		body = append(body, nif)
+
+		if v1 != nil {
+			if v2 != nil {
+				// v1, v2 = hv1t, hv2
+				a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+				a.Lhs = []ir.Node{v1, v2}
+				a.Rhs = []ir.Node{hv1t, hv2}
+				body = append(body, a)
+			} else {
+				// v1 = hv1t
+				body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
+			}
+		}
+	}
+
+	typecheck.Stmts(init)
+
+	if ifGuard != nil {
+		ifGuard.PtrInit().Append(init...)
+		ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)
+	} else {
+		nfor.PtrInit().Append(init...)
+	}
+
+	typecheck.Stmts(nfor.Cond.Init())
+
+	nfor.Cond = typecheck.Expr(nfor.Cond)
+	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
+	nfor.Post = typecheck.Stmt(nfor.Post)
+	typecheck.Stmts(body)
+	nfor.Body.Append(body...)
+	nfor.Body.Append(nrange.Body...)
+
+	var n ir.Node = nfor
+	if ifGuard != nil {
+		ifGuard.Body = []ir.Node{n}
+		n = ifGuard
+	}
+
+	n = walkStmt(n)
+
+	base.Pos = lno
+	return n
+}
+
+// isMapClear checks if n is of the form:
+//
+// for k := range m {
+//   delete(m, k)
+// }
+//
+// where == for keys of map m is reflexive.
+func isMapClear(n *ir.RangeStmt) bool {
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return false
+	}
+
+	t := n.X.Type()
+	if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {
+		return false
+	}
+
+	k := n.Key
+	// Require k to be a new variable name.
+	if !ir.DeclaredBy(k, n) {
+		return false
+	}
+
+	if len(n.Body) != 1 {
+		return false
+	}
+
+	stmt := n.Body[0] // only stmt in body
+	if stmt == nil || stmt.Op() != ir.ODELETE {
+		return false
+	}
+
+	m := n.X
+	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
+		return false
+	}
+
+	// Keys whose equality is not reflexive (e.g. floating-point keys,
+	// which may be NaN) cannot be deleted from maps, so the loop is not
+	// equivalent to clearing the map for such key types.
+	if !types.IsReflexive(t.Key()) {
+		return false
+	}
+
+	return true
+}
+
+// mapClear constructs a call to runtime.mapclear for the map m.
+func mapClear(m ir.Node) ir.Node {
+	t := m.Type()
+
+	// instantiate mapclear(typ *type, hmap map[any]any)
+	fn := typecheck.LookupRuntime("mapclear")
+	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+	n := mkcallstmt1(fn, reflectdata.TypePtr(t), m)
+	return walkStmt(typecheck.Stmt(n))
+}
+
+// arrayClear lowers the loop into runtime·memclr{NoHeap,Has}Pointers
+// if possible, for fast zeroing of slices and arrays (issue 5373).
+// Look for instances of
+//
+// for i := range a {
+// 	a[i] = zero
+// }
+//
+// in which the evaluation of a is side-effect-free.
+//
+// Parameters are as in walkRange: "for v1, v2 = range a".
+func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return nil
+	}
+
+	if v1 == nil || v2 != nil {
+		return nil
+	}
+
+	if len(loop.Body) != 1 || loop.Body[0] == nil {
+		return nil
+	}
+
+	stmt1 := loop.Body[0] // only stmt in body
+	if stmt1.Op() != ir.OAS {
+		return nil
+	}
+	stmt := stmt1.(*ir.AssignStmt)
+	if stmt.X.Op() != ir.OINDEX {
+		return nil
+	}
+	lhs := stmt.X.(*ir.IndexExpr)
+
+	if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) {
+		return nil
+	}
+
+	elemsize := typecheck.RangeExprType(loop.X.Type()).Elem().Width
+	if elemsize <= 0 || !ir.IsZero(stmt.Y) {
+		return nil
+	}
+
+	// Convert to
+	// if len(a) != 0 {
+	// 	hp = &a[0]
+	// 	hn = len(a)*sizeof(elem(a))
+	// 	memclr{NoHeap,Has}Pointers(hp, hn)
+	// 	i = len(a) - 1
+	// }
+	n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+	n.Body = nil
+	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0))
+
+	// hp = &a[0]
+	hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])
+
+	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0))
+	ix.SetBounded(true)
+	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
+
+	// hn = len(a) * sizeof(elem(a))
+	hn := typecheck.Temp(types.Types[types.TUINTPTR])
+	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR])
+	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
+
+	var fn ir.Node
+	if a.Type().Elem().HasPointers() {
+		// memclrHasPointers(hp, hn)
+		ir.CurFunc.SetWBPos(stmt.Pos())
+		fn = mkcallstmt("memclrHasPointers", hp, hn)
+	} else {
+		// memclrNoHeapPointers(hp, hn)
+		fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
+	}
+
+	n.Body.Append(fn)
+
+	// i = len(a) - 1
+	v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1)))
+
+	n.Body.Append(v1)
+
+	n.Cond = typecheck.Expr(n.Cond)
+	n.Cond = typecheck.DefaultLit(n.Cond, nil)
+	typecheck.Stmts(n.Body)
+	return walkStmt(n)
+}
+
+// addptr returns (*T)(uintptr(p) + n).
+func addptr(p ir.Node, n int64) ir.Node {
+	t := p.Type()
+
+	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
+	p.SetType(types.Types[types.TUINTPTR])
+
+	p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n))
+
+	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
+	p.SetType(t)
+
+	return p
+}
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
new file mode 100644
index 0000000..873be289
--- /dev/null
+++ b/src/cmd/compile/internal/walk/select.go
@@ -0,0 +1,295 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+)
+
+func walkSelect(sel *ir.SelectStmt) {
+	lno := ir.SetPos(sel)
+	if sel.Walked() {
+		base.Fatalf("double walkSelect")
+	}
+	sel.SetWalked(true)
+
+	init := ir.TakeInit(sel)
+
+	init = append(init, walkSelectCases(sel.Cases)...)
+	sel.Cases = nil
+
+	sel.Compiled = init
+	walkStmtList(sel.Compiled)
+
+	base.Pos = lno
+}
+
+func walkSelectCases(cases []*ir.CommClause) []ir.Node {
+	ncas := len(cases)
+	sellineno := base.Pos
+
+	// optimization: zero-case select
+	if ncas == 0 {
+		return []ir.Node{mkcallstmt("block")}
+	}
+
+	// optimization: one-case select: single op.
+	if ncas == 1 {
+		cas := cases[0]
+		ir.SetPos(cas)
+		l := cas.Init()
+		if cas.Comm != nil { // not default:
+			n := cas.Comm
+			l = append(l, ir.TakeInit(n)...)
+			switch n.Op() {
+			default:
+				base.Fatalf("select %v", n.Op())
+
+			case ir.OSEND:
+				// already ok
+
+			case ir.OSELRECV2:
+				r := n.(*ir.AssignListStmt)
+				if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) {
+					n = r.Rhs[0]
+					break
+				}
+				r.SetOp(ir.OAS2RECV)
+			}
+
+			l = append(l, n)
+		}
+
+		l = append(l, cas.Body...)
+		l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+		return l
+	}
+
+	// convert case value arguments to addresses.
+	// this rewrite is used by both the general code and the next optimization.
+	var dflt *ir.CommClause
+	for _, cas := range cases {
+		ir.SetPos(cas)
+		n := cas.Comm
+		if n == nil {
+			dflt = cas
+			continue
+		}
+		switch n.Op() {
+		case ir.OSEND:
+			n := n.(*ir.SendStmt)
+			n.Value = typecheck.NodAddr(n.Value)
+			n.Value = typecheck.Expr(n.Value)
+
+		case ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			if !ir.IsBlank(n.Lhs[0]) {
+				n.Lhs[0] = typecheck.NodAddr(n.Lhs[0])
+				n.Lhs[0] = typecheck.Expr(n.Lhs[0])
+			}
+		}
+	}
+
+	// optimization: two-case select but one is default: single non-blocking op.
+	if ncas == 2 && dflt != nil {
+		cas := cases[0]
+		if cas == dflt {
+			cas = cases[1]
+		}
+
+		n := cas.Comm
+		ir.SetPos(n)
+		r := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		*r.PtrInit() = cas.Init()
+		var call ir.Node
+		switch n.Op() {
+		default:
+			base.Fatalf("select %v", n.Op())
+
+		case ir.OSEND:
+			// if selectnbsend(c, v) { body } else { default body }
+			n := n.(*ir.SendStmt)
+			ch := n.Chan
+			call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value)
+
+		case ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			recv := n.Rhs[0].(*ir.UnaryExpr)
+			ch := recv.X
+			elem := n.Lhs[0]
+			if ir.IsBlank(elem) {
+				elem = typecheck.NodNil()
+			}
+			if ir.IsBlank(n.Lhs[1]) {
+				// if selectnbrecv(&v, c) { body } else { default body }
+				call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
+			} else {
+				// TODO(cuonglm): make this use selectnbrecv()
+				// if selectnbrecv2(&v, &received, c) { body } else { default body }
+				receivedp := typecheck.Expr(typecheck.NodAddr(n.Lhs[1]))
+				call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
+			}
+		}
+
+		r.Cond = typecheck.Expr(call)
+		r.Body = cas.Body
+		r.Else = append(dflt.Init(), dflt.Body...)
+		return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
+	}
+
+	if dflt != nil {
+		ncas--
+	}
+	casorder := make([]*ir.CommClause, ncas)
+	nsends, nrecvs := 0, 0
+
+	var init []ir.Node
+
+	// generate sel-struct
+	base.Pos = sellineno
+	selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas)))
+	init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
+
+	// No initialization for order; runtime.selectgo is responsible for that.
+	order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
+
+	var pc0, pcs ir.Node
+	if base.Flag.Race {
+		pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+		pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0))))
+	} else {
+		pc0 = typecheck.NodNil()
+	}
+
+	// register cases
+	for _, cas := range cases {
+		ir.SetPos(cas)
+
+		init = append(init, ir.TakeInit(cas)...)
+
+		n := cas.Comm
+		if n == nil { // default:
+			continue
+		}
+
+		var i int
+		var c, elem ir.Node
+		switch n.Op() {
+		default:
+			base.Fatalf("select %v", n.Op())
+		case ir.OSEND:
+			n := n.(*ir.SendStmt)
+			i = nsends
+			nsends++
+			c = n.Chan
+			elem = n.Value
+		case ir.OSELRECV2:
+			n := n.(*ir.AssignListStmt)
+			nrecvs++
+			i = ncas - nrecvs
+			recv := n.Rhs[0].(*ir.UnaryExpr)
+			c = recv.X
+			elem = n.Lhs[0]
+		}
+
+		casorder[i] = cas
+
+		setField := func(f string, val ir.Node) {
+			r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), typecheck.Lookup(f)), val)
+			init = append(init, typecheck.Stmt(r))
+		}
+
+		c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR])
+		setField("c", c)
+		if !ir.IsBlank(elem) {
+			elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR])
+			setField("elem", elem)
+		}
+
+		// TODO(mdempsky): There should be a cleaner way to
+		// handle this.
+		if base.Flag.Race {
+			r := mkcallstmt("selectsetpc", typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i)))))
+			init = append(init, r)
+		}
+	}
+	if nsends+nrecvs != ncas {
+		base.Fatalf("walkSelectCases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+	}
+
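+	// For reference, the runtime entry point invoked below is (see
+	// src/runtime/select.go):
+	//	func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr,
+	//		nsends, nrecvs int, block bool) (int, bool)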
+	// run the select
+	base.Pos = sellineno
+	chosen := typecheck.Temp(types.Types[types.TINT])
+	recvOK := typecheck.Temp(types.Types[types.TBOOL])
+	r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+	r.Lhs = []ir.Node{chosen, recvOK}
+	fn := typecheck.LookupRuntime("selectgo")
+	var fnInit ir.Nodes
+	r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))}
+	init = append(init, fnInit...)
+	init = append(init, typecheck.Stmt(r))
+
+	// selv and order are no longer alive after selectgo.
+	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv))
+	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, order))
+	if base.Flag.Race {
+		init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, pcs))
+	}
+
+	// dispatch cases
+	dispatch := func(cond ir.Node, cas *ir.CommClause) {
+		cond = typecheck.Expr(cond)
+		cond = typecheck.DefaultLit(cond, nil)
+
+		r := ir.NewIfStmt(base.Pos, cond, nil, nil)
+
+		if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 {
+			n := n.(*ir.AssignListStmt)
+			if !ir.IsBlank(n.Lhs[1]) {
+				x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK)
+				r.Body.Append(typecheck.Stmt(x))
+			}
+		}
+
+		r.Body.Append(cas.Body.Take()...)
+		r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+		init = append(init, r)
+	}
+
+	if dflt != nil {
+		ir.SetPos(dflt)
+		dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(0)), dflt)
+	}
+	for i, cas := range casorder {
+		ir.SetPos(cas)
+		dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(int64(i))), cas)
+	}
+
+	return init
+}
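+
+// A sketch (not actual compiler output) of the lowering above: for
+//	select {
+//	case c1 <- v:  ...body0...
+//	case x = <-c2: ...body1...
+//	default:       ...bodyD...
+//	}
+// the generated code is roughly (order has already rewritten operands
+// into addressable temporaries):
+//	var selv [2]scase
+//	selv[0].c, selv[0].elem = c1, &v // sends fill slots 0..nsends-1
+//	selv[1].c, selv[1].elem = c2, &x // recvs fill slots from ncas-1 down
+//	chosen, recvOK := selectgo(&selv[0], &order[0], pc0, 1, 1, false)
+//	if chosen < 0 { ...bodyD...; break }
+//	if chosen == 0 { ...body0...; break }
+//	if chosen == 1 { ...body1...; break }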
+
+// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
+	s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i)))
+	t := types.NewPtr(types.Types[types.TUINT8])
+	return typecheck.ConvNop(s, t)
+}
+
+var scase *types.Type
+
+// Keep in sync with src/runtime/select.go.
+func scasetype() *types.Type {
+	if scase == nil {
+		scase = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]),
+			types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]),
+		})
+		scase.SetNoalg(true)
+	}
+	return scase
+}
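+
+// For reference, the runtime-side struct this must match (paraphrased
+// from src/runtime/select.go, which is authoritative):
+//	type scase struct {
+//		c    *hchan         // chan
+//		elem unsafe.Pointer // data element
+//	}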
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
new file mode 100644
index 0000000..46a621c
--- /dev/null
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -0,0 +1,296 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+)
+
+// The result of walkStmt MUST be assigned back to n, e.g.
+// 	n.Left = walkStmt(n.Left)
+func walkStmt(n ir.Node) ir.Node {
+	if n == nil {
+		return n
+	}
+
+	ir.SetPos(n)
+
+	walkStmtList(n.Init())
+
+	switch n.Op() {
+	default:
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			base.Errorf("%v is not a top level statement", n.Sym())
+		} else {
+			base.Errorf("%v is not a top level statement", n.Op())
+		}
+		ir.Dump("nottop", n)
+		return n
+
+	case ir.OAS,
+		ir.OASOP,
+		ir.OAS2,
+		ir.OAS2DOTTYPE,
+		ir.OAS2RECV,
+		ir.OAS2FUNC,
+		ir.OAS2MAPR,
+		ir.OCLOSE,
+		ir.OCOPY,
+		ir.OCALLMETH,
+		ir.OCALLINTER,
+		ir.OCALL,
+		ir.OCALLFUNC,
+		ir.ODELETE,
+		ir.OSEND,
+		ir.OPRINT,
+		ir.OPRINTN,
+		ir.OPANIC,
+		ir.ORECOVER,
+		ir.OGETG:
+		if n.Typecheck() == 0 {
+			base.Fatalf("missing typecheck: %+v", n)
+		}
+		init := ir.TakeInit(n)
+		n = walkExpr(n, &init)
+		if n.Op() == ir.ONAME {
+			// walkExpr rewrote a copy into a statement list and a
+			// temp for the length. Throw away the temp to avoid
+			// plain values as statements.
+			n = ir.NewBlockStmt(n.Pos(), init)
+			init = nil
+		}
+		if len(init) > 0 {
+			switch n.Op() {
+			case ir.OAS, ir.OAS2, ir.OBLOCK:
+				n.(ir.InitNode).PtrInit().Prepend(init...)
+
+			default:
+				init.Append(n)
+				n = ir.NewBlockStmt(n.Pos(), init)
+			}
+		}
+		return n
+
+	// special case for a receive where we throw away
+	// the value received.
+	case ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		return walkRecv(n)
+
+	case ir.OBREAK,
+		ir.OCONTINUE,
+		ir.OFALL,
+		ir.OGOTO,
+		ir.OLABEL,
+		ir.ODCL,
+		ir.ODCLCONST,
+		ir.ODCLTYPE,
+		ir.OCHECKNIL,
+		ir.OVARDEF,
+		ir.OVARKILL,
+		ir.OVARLIVE:
+		return n
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		walkStmtList(n.List)
+		return n
+
+	case ir.OCASE:
+		base.Errorf("case statement out of place")
+		panic("unreachable")
+
+	case ir.ODEFER:
+		n := n.(*ir.GoDeferStmt)
+		ir.CurFunc.SetHasDefer(true)
+		ir.CurFunc.NumDefers++
+		if ir.CurFunc.NumDefers > maxOpenDefers {
+			// Don't allow open-coded defers if there are more than
+			// 8 defers in the function, since we use a single
+			// byte to record active defers.
+			ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+		}
+		if n.Esc() != ir.EscNever {
+			// If n.Esc is not EscNever, then this defer occurs in a loop,
+			// so open-coded defers cannot be used in this function.
+			ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+		}
+		fallthrough
+	case ir.OGO:
+		n := n.(*ir.GoDeferStmt)
+		return walkGoDefer(n)
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		return walkFor(n)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		return walkIf(n)
+
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		return walkReturn(n)
+
+	case ir.OTAILCALL:
+		n := n.(*ir.TailCallStmt)
+		return n
+
+	case ir.OINLMARK:
+		n := n.(*ir.InlineMarkStmt)
+		return n
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		walkSelect(n)
+		return n
+
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+		walkSwitch(n)
+		return n
+
+	case ir.ORANGE:
+		n := n.(*ir.RangeStmt)
+		return walkRange(n)
+	}
+
+	// No return! Each case must return (or panic),
+	// to avoid confusion about what gets returned
+	// in the presence of type assertions.
+}
+
+func walkStmtList(s []ir.Node) {
+	for i := range s {
+		s[i] = walkStmt(s[i])
+	}
+}
+
+// walkFor walks an OFOR or OFORUNTIL node.
+func walkFor(n *ir.ForStmt) ir.Node {
+	if n.Cond != nil {
+		init := ir.TakeInit(n.Cond)
+		walkStmtList(init)
+		n.Cond = walkExpr(n.Cond, &init)
+		n.Cond = ir.InitExpr(init, n.Cond)
+	}
+
+	n.Post = walkStmt(n.Post)
+	if n.Op() == ir.OFORUNTIL {
+		walkStmtList(n.Late)
+	}
+	walkStmtList(n.Body)
+	return n
+}
+
+// walkGoDefer walks an OGO or ODEFER node.
+func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
+	var init ir.Nodes
+	switch call := n.Call; call.Op() {
+	case ir.OPRINT, ir.OPRINTN:
+		call := call.(*ir.CallExpr)
+		n.Call = wrapCall(call, &init)
+
+	case ir.ODELETE:
+		call := call.(*ir.CallExpr)
+		if mapfast(call.Args[0].Type()) == mapslow {
+			n.Call = wrapCall(call, &init)
+		} else {
+			n.Call = walkExpr(call, &init)
+		}
+
+	case ir.OCOPY:
+		call := call.(*ir.BinaryExpr)
+		n.Call = walkCopy(call, &init, true)
+
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+		call := call.(*ir.CallExpr)
+		if len(call.KeepAlive) > 0 {
+			n.Call = wrapCall(call, &init)
+		} else {
+			n.Call = walkExpr(call, &init)
+		}
+
+	default:
+		n.Call = walkExpr(call, &init)
+	}
+	if len(init) > 0 {
+		init.Append(n)
+		return ir.NewBlockStmt(n.Pos(), init)
+	}
+	return n
+}
+
+// walkIf walks an OIF node.
+func walkIf(n *ir.IfStmt) ir.Node {
+	n.Cond = walkExpr(n.Cond, n.PtrInit())
+	walkStmtList(n.Body)
+	walkStmtList(n.Else)
+	return n
+}
+
+// The result of wrapCall MUST be assigned back to n, e.g.
+// 	n.Left = wrapCall(n.Left, init)
+func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+	if len(n.Init()) != 0 {
+		walkStmtList(n.Init())
+		init.Append(ir.TakeInit(n)...)
+	}
+
+	isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
+
+	// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
+	if !isBuiltinCall && n.IsDDD {
+		last := len(n.Args) - 1
+		if va := n.Args[last]; va.Op() == ir.OSLICELIT {
+			va := va.(*ir.CompLitExpr)
+			n.Args = append(n.Args[:last], va.List...)
+			n.IsDDD = false
+		}
+	}
+
+	// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
+	origArgs := make([]ir.Node, len(n.Args))
+	var funcArgs []*ir.Field
+	for i, arg := range n.Args {
+		s := typecheck.LookupNum("a", i)
+		if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
+			origArgs[i] = arg
+			arg = arg.(*ir.ConvExpr).X
+			n.Args[i] = arg
+		}
+		funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
+	}
+	t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
+
+	wrapCall_prgen++
+	sym := typecheck.LookupNum("wrap·", wrapCall_prgen)
+	fn := typecheck.DeclFunc(sym, t)
+
+	args := ir.ParamNames(t.Type())
+	for i, origArg := range origArgs {
+		if origArg == nil {
+			continue
+		}
+		args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
+	}
+	call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args)
+	if !isBuiltinCall {
+		call.SetOp(ir.OCALL)
+		call.IsDDD = n.IsDDD
+	}
+	fn.Body = []ir.Node{call}
+
+	typecheck.FinishFuncBody()
+
+	typecheck.Func(fn)
+	typecheck.Stmts(fn.Body)
+	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+	call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, n.Args)
+	return walkExpr(typecheck.Stmt(call), init)
+}
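+
+// A sketch (with hypothetical names) of the wrapper wrapCall builds for
+//	go f(x, uintptr(unsafe.Pointer(p)))
+// roughly:
+//	func wrap·1(a0 T, a1 unsafe.Pointer) { f(a0, uintptr(a1)) }
+// and the call becomes wrap·1(x, unsafe.Pointer(p)), so the pointer
+// stays a pointer (and thus visible to the GC) until the wrapper runs.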
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
new file mode 100644
index 0000000..162de01
--- /dev/null
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -0,0 +1,568 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"go/constant"
+	"go/token"
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// walkSwitch walks a switch statement.
+func walkSwitch(sw *ir.SwitchStmt) {
+	// Guard against double walk, see #25776.
+	if sw.Walked() {
+		return // Was fatal, but eliminating every possible source of double-walking is hard
+	}
+	sw.SetWalked(true)
+
+	if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
+		walkSwitchType(sw)
+	} else {
+		walkSwitchExpr(sw)
+	}
+}
+
+// walkSwitchExpr generates an AST implementing sw.  sw is an
+// expression switch.
+func walkSwitchExpr(sw *ir.SwitchStmt) {
+	lno := ir.SetPos(sw)
+
+	cond := sw.Tag
+	sw.Tag = nil
+
+	// convert switch {...} to switch true {...}
+	if cond == nil {
+		cond = ir.NewBool(true)
+		cond = typecheck.Expr(cond)
+		cond = typecheck.DefaultLit(cond, nil)
+	}
+
+	// Given "switch string(byteslice)",
+	// with all cases being side-effect free,
+	// use a zero-cost alias of the byte slice.
+	// Do this before calling walkExpr on cond,
+	// because walkExpr will lower the string
+	// conversion into a runtime call.
+	// See issue 24937 for more discussion.
+	if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+		cond := cond.(*ir.ConvExpr)
+		cond.SetOp(ir.OBYTES2STRTMP)
+	}
+
+	cond = walkExpr(cond, sw.PtrInit())
+	if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+		cond = copyExpr(cond, cond.Type(), &sw.Compiled)
+	}
+
+	base.Pos = lno
+
+	s := exprSwitch{
+		exprname: cond,
+	}
+
+	var defaultGoto ir.Node
+	var body ir.Nodes
+	for _, ncase := range sw.Cases {
+		label := typecheck.AutoLabel(".s")
+		jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+		// Process case dispatch.
+		if len(ncase.List) == 0 {
+			if defaultGoto != nil {
+				base.Fatalf("duplicate default case not detected during typechecking")
+			}
+			defaultGoto = jmp
+		}
+
+		for _, n1 := range ncase.List {
+			s.Add(ncase.Pos(), n1, jmp)
+		}
+
+		// Process body.
+		body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+		body.Append(ncase.Body...)
+		if fall, pos := endsInFallthrough(ncase.Body); !fall {
+			br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+			br.SetPos(pos)
+			body.Append(br)
+		}
+	}
+	sw.Cases = nil
+
+	if defaultGoto == nil {
+		br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+		br.SetPos(br.Pos().WithNotStmt())
+		defaultGoto = br
+	}
+
+	s.Emit(&sw.Compiled)
+	sw.Compiled.Append(defaultGoto)
+	sw.Compiled.Append(body.Take()...)
+	walkStmtList(sw.Compiled)
+}
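+
+// A sketch (not actual compiler output) of the lowering above: for
+//	switch x { case 1: A; case 2, 3: B; default: C }
+// the compiled form is roughly:
+//	if x == 1 { goto s1 }
+//	if x >= 2 && x <= 3 { goto s2 } // consecutive cases merged by flush
+//	goto sd
+//	s1: A; break
+//	s2: B; break
+//	sd: C; break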
+
+// An exprSwitch walks an expression switch.
+type exprSwitch struct {
+	exprname ir.Node // value being switched on
+
+	done    ir.Nodes
+	clauses []exprClause
+}
+
+type exprClause struct {
+	pos    src.XPos
+	lo, hi ir.Node
+	jmp    ir.Node
+}
+
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
+	c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
+	if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
+		s.clauses = append(s.clauses, c)
+		return
+	}
+
+	s.flush()
+	s.clauses = append(s.clauses, c)
+	s.flush()
+}
+
+func (s *exprSwitch) Emit(out *ir.Nodes) {
+	s.flush()
+	out.Append(s.done.Take()...)
+}
+
+func (s *exprSwitch) flush() {
+	cc := s.clauses
+	s.clauses = nil
+	if len(cc) == 0 {
+		return
+	}
+
+	// Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
+	// The code below is structured to implicitly handle this case
+	// (e.g., sort.Slice doesn't need to invoke the less function
+	// when there's only a single slice element).
+
+	if s.exprname.Type().IsString() && len(cc) >= 2 {
+		// Sort strings by length and then by value. It is
+		// much cheaper to compare lengths than values, and
+		// all we need here is consistency. We respect this
+		// sorting below.
+		sort.Slice(cc, func(i, j int) bool {
+			si := ir.StringVal(cc[i].lo)
+			sj := ir.StringVal(cc[j].lo)
+			if len(si) != len(sj) {
+				return len(si) < len(sj)
+			}
+			return si < sj
+		})
+
+		// runLen returns the string length associated with a
+		// particular run of exprClauses.
+		runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
+
+		// Collapse runs of consecutive strings with the same length.
+		var runs [][]exprClause
+		start := 0
+		for i := 1; i < len(cc); i++ {
+			if runLen(cc[start:]) != runLen(cc[i:]) {
+				runs = append(runs, cc[start:i])
+				start = i
+			}
+		}
+		runs = append(runs, cc[start:])
+
+		// Perform two-level binary search.
+		binarySearch(len(runs), &s.done,
+			func(i int) ir.Node {
+				return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(runs[i-1])))
+			},
+			func(i int, nif *ir.IfStmt) {
+				run := runs[i]
+				nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(run)))
+				s.search(run, &nif.Body)
+			},
+		)
+		return
+	}
+
+	sort.Slice(cc, func(i, j int) bool {
+		return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
+	})
+
+	// Merge consecutive integer cases.
+	if s.exprname.Type().IsInteger() {
+		consecutive := func(last, next constant.Value) bool {
+			delta := constant.BinaryOp(next, token.SUB, last)
+			return constant.Compare(delta, token.EQL, constant.MakeInt64(1))
+		}
+
+		merged := cc[:1]
+		for _, c := range cc[1:] {
+			last := &merged[len(merged)-1]
+			if last.jmp == c.jmp && consecutive(last.hi.Val(), c.lo.Val()) {
+				last.hi = c.lo
+			} else {
+				merged = append(merged, c)
+			}
+		}
+		cc = merged
+	}
+
+	s.search(cc, &s.done)
+}
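+
+// For string switches, flush dispatches in two levels (a sketch): given
+//	switch x { case "go", "ok", "gopher": ... }
+// it binary-searches on len(x) over runs of equal-length cases, then
+// compares values within the matching run, roughly:
+//	if len(x) == 2 { if x == "go" {...}; if x == "ok" {...} }
+//	if len(x) == 6 { if x == "gopher" {...} }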
+
+func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
+	binarySearch(len(cc), out,
+		func(i int) ir.Node {
+			return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi)
+		},
+		func(i int, nif *ir.IfStmt) {
+			c := &cc[i]
+			nif.Cond = c.test(s.exprname)
+			nif.Body = []ir.Node{c.jmp}
+		},
+	)
+}
+
+func (c *exprClause) test(exprname ir.Node) ir.Node {
+	// Integer range.
+	if c.hi != c.lo {
+		low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo)
+		high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi)
+		return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high)
+	}
+
+	// Optimize "switch true { ...}" and "switch false { ... }".
+	if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
+		if ir.BoolVal(exprname) {
+			return c.lo
+		} else {
+			return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo)
+		}
+	}
+
+	return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
+}
+
+func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
+	// In theory, we could be more aggressive, allowing any
+	// side-effect-free expressions in cases, but it's a bit
+	// tricky because some of that information is unavailable due
+	// to the introduction of temporaries during order.
+	// Restricting to constants is simple and probably powerful
+	// enough.
+
+	for _, ncase := range sw.Cases {
+		for _, v := range ncase.List {
+			if v.Op() != ir.OLITERAL {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
+func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
+	// Search backwards for the index of the fallthrough
+	// statement. Do not assume it'll be in the last
+	// position, since in some cases (e.g. when the statement
+	// list contains autotmp_ variables), one or more OVARKILL
+	// nodes will be at the end of the list.
+
+	i := len(stmts) - 1
+	for i >= 0 && stmts[i].Op() == ir.OVARKILL {
+		i--
+	}
+	if i < 0 {
+		return false, src.NoXPos
+	}
+	return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
+}
+
+// walkSwitchType generates an AST that implements sw, where sw is a
+// type switch.
+func walkSwitchType(sw *ir.SwitchStmt) {
+	var s typeSwitch
+	s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
+	sw.Tag = nil
+
+	s.facename = walkExpr(s.facename, sw.PtrInit())
+	s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled)
+	s.okname = typecheck.Temp(types.Types[types.TBOOL])
+
+	// Get interface descriptor word.
+	// For empty interfaces this will be the type.
+	// For non-empty interfaces this will be the itab.
+	itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename)
+
+	// For empty interfaces, do:
+	//     if e._type == nil {
+	//         do nil case if it exists, otherwise default
+	//     }
+	//     h := e._type.hash
+	// Use a similar strategy for non-empty interfaces.
+	ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
+	ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil())
+	base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
+	ifNil.Cond = typecheck.Expr(ifNil.Cond)
+	ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
+	// ifNil.Body is assigned at the end.
+	sw.Compiled.Append(ifNil)
+
+	// Load hash from type or itab.
+	dotHash := typeHashFieldOf(base.Pos, itab)
+	s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+
+	br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+	var defaultGoto, nilGoto ir.Node
+	var body ir.Nodes
+	for _, ncase := range sw.Cases {
+		caseVar := ncase.Var
+
+		// For single-type cases with an interface type,
+		// we initialize the case variable as part of the type assertion.
+		// In other cases, we initialize it in the body.
+		var singleType *types.Type
+		if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE {
+			singleType = ncase.List[0].Type()
+		}
+		caseVarInitialized := false
+
+		label := typecheck.AutoLabel(".s")
+		jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+		if len(ncase.List) == 0 { // default:
+			if defaultGoto != nil {
+				base.Fatalf("duplicate default case not detected during typechecking")
+			}
+			defaultGoto = jmp
+		}
+
+		for _, n1 := range ncase.List {
+			if ir.IsNil(n1) { // case nil:
+				if nilGoto != nil {
+					base.Fatalf("duplicate nil case not detected during typechecking")
+				}
+				nilGoto = jmp
+				continue
+			}
+
+			if singleType != nil && singleType.IsInterface() {
+				s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
+				caseVarInitialized = true
+			} else {
+				s.Add(ncase.Pos(), n1.Type(), nil, jmp)
+			}
+		}
+
+		body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+		if caseVar != nil && !caseVarInitialized {
+			val := s.facename
+			if singleType != nil {
+				// We have a single concrete type. Extract the data.
+				if singleType.IsInterface() {
+					base.Fatalf("singleType interface should have been handled in Add")
+				}
+				val = ifaceData(ncase.Pos(), s.facename, singleType)
+			}
+			l := []ir.Node{
+				ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
+				ir.NewAssignStmt(ncase.Pos(), caseVar, val),
+			}
+			typecheck.Stmts(l)
+			body.Append(l...)
+		}
+		body.Append(ncase.Body...)
+		body.Append(br)
+	}
+	sw.Cases = nil
+
+	if defaultGoto == nil {
+		defaultGoto = br
+	}
+	if nilGoto == nil {
+		nilGoto = defaultGoto
+	}
+	ifNil.Body = []ir.Node{nilGoto}
+
+	s.Emit(&sw.Compiled)
+	sw.Compiled.Append(defaultGoto)
+	sw.Compiled.Append(body.Take()...)
+
+	walkStmtList(sw.Compiled)
+}
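+
+// A sketch (not actual compiler output) of the lowering above: for
+//	switch v := i.(type) { case nil: N; case int: A; default: D }
+// the compiled form is roughly (hashOf is illustrative, not a real
+// helper):
+//	if i.itab == nil { goto sn } // i._type for empty interfaces
+//	h := i.itab.hash
+//	if h == hashOf(int) {
+//		v, ok := i.(int)
+//		if ok { goto s1 }
+//	}
+//	goto sd
+//	sn: N; break
+//	s1: A; break
+//	sd: D; break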
+
+// typeHashFieldOf returns an expression to select the type hash field
+// from an interface's descriptor word (whether a *runtime._type or
+// *runtime.itab pointer).
+func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr {
+	if itab.Op() != ir.OITAB {
+		base.Fatalf("expected OITAB, got %v", itab.Op())
+	}
+	var hashField *types.Field
+	if itab.X.Type().IsEmptyInterface() {
+		// runtime._type's hash field
+		if rtypeHashField == nil {
+			rtypeHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+		}
+		hashField = rtypeHashField
+	} else {
+		// runtime.itab's hash field
+		if itabHashField == nil {
+			itabHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+		}
+		hashField = itabHashField
+	}
+	return boundedDotPtr(pos, itab, hashField)
+}
+
+var rtypeHashField, itabHashField *types.Field
+
+// A typeSwitch walks a type switch.
+type typeSwitch struct {
+	// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
+	facename ir.Node // value being type-switched on
+	hashname ir.Node // type hash of the value being type-switched on
+	okname   ir.Node // boolean used for comma-ok type assertions
+
+	done    ir.Nodes
+	clauses []typeClause
+}
+
+type typeClause struct {
+	hash uint32
+	body ir.Nodes
+}
+
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar *ir.Name, jmp ir.Node) {
+	var body ir.Nodes
+	if caseVar != nil {
+		l := []ir.Node{
+			ir.NewDecl(pos, ir.ODCL, caseVar),
+			ir.NewAssignStmt(pos, caseVar, nil),
+		}
+		typecheck.Stmts(l)
+		body.Append(l...)
+	} else {
+		caseVar = ir.BlankNode.(*ir.Name)
+	}
+
+	// cv, ok = iface.(type)
+	as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
+	as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
+	dot := ir.NewTypeAssertExpr(pos, s.facename, nil)
+	dot.SetType(typ) // iface.(type)
+	as.Rhs = []ir.Node{dot}
+	appendWalkStmt(&body, as)
+
+	// if ok { goto label }
+	nif := ir.NewIfStmt(pos, nil, nil, nil)
+	nif.Cond = s.okname
+	nif.Body = []ir.Node{jmp}
+	body.Append(nif)
+
+	if !typ.IsInterface() {
+		s.clauses = append(s.clauses, typeClause{
+			hash: types.TypeHash(typ),
+			body: body,
+		})
+		return
+	}
+
+	s.flush()
+	s.done.Append(body.Take()...)
+}
+
+func (s *typeSwitch) Emit(out *ir.Nodes) {
+	s.flush()
+	out.Append(s.done.Take()...)
+}
+
+func (s *typeSwitch) flush() {
+	cc := s.clauses
+	s.clauses = nil
+	if len(cc) == 0 {
+		return
+	}
+
+	sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
+
+	// Combine adjacent cases with the same hash.
+	merged := cc[:1]
+	for _, c := range cc[1:] {
+		last := &merged[len(merged)-1]
+		if last.hash == c.hash {
+			last.body.Append(c.body.Take()...)
+		} else {
+			merged = append(merged, c)
+		}
+	}
+	cc = merged
+
+	binarySearch(len(cc), &s.done,
+		func(i int) ir.Node {
+			return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(int64(cc[i-1].hash)))
+		},
+		func(i int, nif *ir.IfStmt) {
+			// TODO(mdempsky): Omit hash equality check if
+			// there's only one type.
+			c := cc[i]
+			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(int64(c.hash)))
+			nif.Body.Append(c.body.Take()...)
+		},
+	)
+}
+
+// binarySearch constructs a binary search tree for handling n cases,
+// and appends it to out. It's used for efficiently implementing
+// switch statements.
+//
+// less(i) should return a boolean expression. If it evaluates true,
+// then cases before i will be tested; otherwise, cases i and later.
+//
+// leaf(i, nif) should set up nif (an OIF node) to test case i. In
+// particular, it should set nif.Cond and nif.Body.
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
+	const binarySearchMin = 4 // minimum number of cases for binary search
+
+	var do func(lo, hi int, out *ir.Nodes)
+	do = func(lo, hi int, out *ir.Nodes) {
+		n := hi - lo
+		if n < binarySearchMin {
+			for i := lo; i < hi; i++ {
+				nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+				leaf(i, nif)
+				base.Pos = base.Pos.WithNotStmt()
+				nif.Cond = typecheck.Expr(nif.Cond)
+				nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+				out.Append(nif)
+				out = &nif.Else
+			}
+			return
+		}
+
+		half := lo + n/2
+		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+		nif.Cond = less(half)
+		base.Pos = base.Pos.WithNotStmt()
+		nif.Cond = typecheck.Expr(nif.Cond)
+		nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+		do(lo, half, &nif.Body)
+		do(half, hi, &nif.Else)
+		out.Append(nif)
+	}
+
+	do(0, n, out)
+}
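+
+// For example (a sketch), with n = 6 the tree emitted by do is:
+//	if less(3) {
+//		// leaf(0), leaf(1), leaf(2), chained through Else
+//	} else {
+//		// leaf(3), leaf(4), leaf(5), chained through Else
+//	}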
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
new file mode 100644
index 0000000..9879a6c
--- /dev/null
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+)
+
+// initStackTemp appends statements to init to initialize the given
+// temporary variable to val, and then returns the expression &tmp.
+func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr {
+	if val != nil && !types.Identical(tmp.Type(), val.Type()) {
+		base.Fatalf("bad initial value for %L: %L", tmp, val)
+	}
+	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, val))
+	return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
+
+// stackTempAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of the given type. Statements to
+// zero-initialize tmp are appended to init.
+func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
+	return initStackTemp(init, typecheck.Temp(typ), nil)
+}
+
+// stackBufAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of type [len]elem. This variable is
+// not zero-initialized, so elem must not contain pointers.
+func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
+	if elem.HasPointers() {
+		base.FatalfAt(base.Pos, "%v has pointers", elem)
+	}
+	tmp := typecheck.Temp(types.NewArray(elem, len))
+	return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
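+
+// Usage sketch (hypothetical call site): allocating a small scratch
+// buffer, e.g. for a byte-slice-to-string conversion during walk:
+//	buf := stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])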
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
new file mode 100644
index 0000000..b47d96d
--- /dev/null
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -0,0 +1,412 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+	"errors"
+	"fmt"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// This constant is known to the runtime.
+const tmpstringbufsize = 32
+const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
+
+func Walk(fn *ir.Func) {
+	ir.CurFunc = fn
+	errorsBefore := base.Errors()
+	order(fn)
+	if base.Errors() > errorsBefore {
+		return
+	}
+
+	if base.Flag.W != 0 {
+		s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
+		ir.DumpList(s, ir.CurFunc.Body)
+	}
+
+	lno := base.Pos
+
+	base.Pos = lno
+	if base.Errors() > errorsBefore {
+		return
+	}
+	walkStmtList(ir.CurFunc.Body)
+	if base.Flag.W != 0 {
+		s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
+		ir.DumpList(s, ir.CurFunc.Body)
+	}
+
+	if base.Flag.Cfg.Instrumenting {
+		instrument(fn)
+	}
+
+	// Eagerly compute sizes of all variables for SSA.
+	for _, n := range fn.Dcl {
+		types.CalcSize(n.Type())
+	}
+}
+
+// walkRecv walks an ORECV node.
+func walkRecv(n *ir.UnaryExpr) ir.Node {
+	if n.Typecheck() == 0 {
+		base.Fatalf("missing typecheck: %+v", n)
+	}
+	init := ir.TakeInit(n)
+
+	n.X = walkExpr(n.X, &init)
+	call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
+	return ir.InitExpr(init, call)
+}
+
+func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
+	if n.Op() != ir.OAS {
+		base.Fatalf("convas: not OAS %v", n.Op())
+	}
+	n.SetTypecheck(1)
+
+	if n.X == nil || n.Y == nil {
+		return n
+	}
+
+	lt := n.X.Type()
+	rt := n.Y.Type()
+	if lt == nil || rt == nil {
+		return n
+	}
+
+	if ir.IsBlank(n.X) {
+		n.Y = typecheck.DefaultLit(n.Y, nil)
+		return n
+	}
+
+	if !types.Identical(lt, rt) {
+		n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
+		n.Y = walkExpr(n.Y, init)
+	}
+	types.CalcSize(n.Y.Type())
+
+	return n
+}
+
+var stop = errors.New("stop")
+
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
+	if init == nil {
+		base.Fatalf("mkcall with nil init: %v", fn)
+	}
+	if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
+		base.Fatalf("mkcall %v %v", fn, fn.Type())
+	}
+
+	n := fn.Type().NumParams()
+	if n != len(va) {
+		base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+	}
+
+	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
+	typecheck.Call(call)
+	call.SetType(t)
+	return walkExpr(call, init).(*ir.CallExpr)
+}
+
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+	return vmkcall(typecheck.LookupRuntime(name), t, init, args)
+}
+
+func mkcallstmt(name string, args ...ir.Node) ir.Node {
+	return mkcallstmt1(typecheck.LookupRuntime(name), args...)
+}
+
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+	return vmkcall(fn, t, init, args)
+}
+
+func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
+	var init ir.Nodes
+	n := vmkcall(fn, nil, &init, args)
+	if len(init) == 0 {
+		return n
+	}
+	init.Append(n)
+	return ir.NewBlockStmt(n.Pos(), init)
+}
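+
+// Usage sketch: mkcall and mkcallstmt look up functions by name from
+// package runtime, e.g. (assuming runtime declares printlock):
+//	init.Append(mkcallstmt("printlock"))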
+
+func chanfn(name string, n int, t *types.Type) ir.Node {
+	if !t.IsChan() {
+		base.Fatalf("chanfn %v", t)
+	}
+	fn := typecheck.LookupRuntime(name)
+	switch n {
+	default:
+		base.Fatalf("chanfn %d", n)
+	case 1:
+		fn = typecheck.SubstArgTypes(fn, t.Elem())
+	case 2:
+		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+	}
+	return fn
+}
+
+func mapfn(name string, t *types.Type) ir.Node {
+	if !t.IsMap() {
+		base.Fatalf("mapfn %v", t)
+	}
+	fn := typecheck.LookupRuntime(name)
+	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
+	return fn
+}
+
+func mapfndel(name string, t *types.Type) ir.Node {
+	if !t.IsMap() {
+		base.Fatalf("mapfn %v", t)
+	}
+	fn := typecheck.LookupRuntime(name)
+	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
+	return fn
+}
+
+const (
+	mapslow = iota
+	mapfast32
+	mapfast32ptr
+	mapfast64
+	mapfast64ptr
+	mapfaststr
+	nmapfast
+)
+
+type mapnames [nmapfast]string
+
+func mkmapnames(base string, ptr string) mapnames {
+	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
+}
+
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
+
+func mapfast(t *types.Type) int {
+	// Check runtime/map.go:maxElemSize before changing.
+	if t.Elem().Width > 128 {
+		return mapslow
+	}
+	switch reflectdata.AlgType(t.Key()) {
+	case types.AMEM32:
+		if !t.Key().HasPointers() {
+			return mapfast32
+		}
+		if types.PtrSize == 4 {
+			return mapfast32ptr
+		}
+		base.Fatalf("small pointer %v", t.Key())
+	case types.AMEM64:
+		if !t.Key().HasPointers() {
+			return mapfast64
+		}
+		if types.PtrSize == 8 {
+			return mapfast64ptr
+		}
+		// Two-word object, at least one of which is a pointer.
+		// Use the slow path.
+	case types.ASTRING:
+		return mapfaststr
+	}
+	return mapslow
+}
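+
+// Illustrative examples: map[int32]T with a small element type selects
+// mapfast32 (runtime mapaccess1_fast32 and friends), map[string]T
+// selects mapfaststr, and any map whose element is larger than 128
+// bytes falls back to the generic mapslow entry points.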
+
+func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
+	walkExprListSafe(n.Args, init)
+
+	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here. Fix explicitly.
+	ls := n.Args
+	for i1, n1 := range ls {
+		ls[i1] = cheapExpr(n1, init)
+	}
+}
+
+// Rewrite
+//	go builtin(x, y, z)
+// into
+//	go func(a1, a2, a3) {
+//		builtin(a1, a2, a3)
+//	}(x, y, z)
+// for print, println, and delete.
+//
+// Rewrite
+//	go f(x, y, uintptr(unsafe.Pointer(z)))
+// into
+//	go func(a1, a2, a3) {
+//		builtin(a1, a2, uintptr(a3))
+//	}(x, y, unsafe.Pointer(z))
+// for functions that contain unsafe-uintptr arguments.
+
+var wrapCall_prgen int
+
+// appendWalkStmt typechecks and walks stmt and then appends it to init.
+func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
+	op := stmt.Op()
+	n := typecheck.Stmt(stmt)
+	if op == ir.OAS || op == ir.OAS2 {
+		// If the assignment has side effects, walkExpr will append them
+		// directly to init for us, while walkStmt will wrap it in an OBLOCK.
+		// We need to append them directly.
+		// TODO(rsc): Clean this up.
+		n = walkExpr(n, init)
+	} else {
+		n = walkStmt(n)
+	}
+	init.Append(n)
+}
+
+// The max number of defers in a function using open-coded defers. We enforce this
+// limit because the deferBits bitmask is currently a single byte (to minimize code size).
+const maxOpenDefers = 8
+
+// backingArrayPtrLen extracts the pointer and length from a slice or string.
+// This constructs two nodes referring to n, so n must be a cheapExpr.
+func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
+	var init ir.Nodes
+	c := cheapExpr(n, &init)
+	if c != n || len(init) != 0 {
+		base.Fatalf("backingArrayPtrLen not cheap: %v", n)
+	}
+	ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
+	if n.Type().IsString() {
+		ptr.SetType(types.Types[types.TUINT8].PtrTo())
+	} else {
+		ptr.SetType(n.Type().Elem().PtrTo())
+	}
+	length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
+	length.SetType(types.Types[types.TINT])
+	return ptr, length
+}
+
+// mayCall reports whether evaluating expression n may require
+// function calls, which could clobber function call arguments/results
+// currently on the stack.
+func mayCall(n ir.Node) bool {
+	// When instrumenting, any expression might require function calls.
+	if base.Flag.Cfg.Instrumenting {
+		return true
+	}
+
+	isSoftFloat := func(typ *types.Type) bool {
+		return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
+	}
+
+	return ir.Any(n, func(n ir.Node) bool {
+		// walk should have already moved any Init blocks off of
+		// expressions.
+		if len(n.Init()) != 0 {
+			base.FatalfAt(n.Pos(), "mayCall %+v", n)
+		}
+
+		switch n.Op() {
+		default:
+			base.FatalfAt(n.Pos(), "mayCall %+v", n)
+
+		case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+			return true
+
+		case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+			ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
+			// These ops might panic, make sure they are done
+			// before we start marshaling args for a call. See issue 16760.
+			return true
+
+		case ir.OANDAND, ir.OOROR:
+			n := n.(*ir.LogicalExpr)
+			// The RHS expression may have init statements that
+			// should only execute conditionally, and so cannot be
+			// pulled out to the top-level init list. We could try
+			// to be more precise here.
+			return len(n.Y.Init()) != 0
+
+		// When using soft-float, these ops might be rewritten to function calls
+		// so we ensure they are evaluated first.
+		case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
+			return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
+		case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+			n := n.(*ir.BinaryExpr)
+			return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
+		case ir.OCONV:
+			n := n.(*ir.ConvExpr)
+			return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))
+
+		case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
+			ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
+			ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
+			ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
+			ir.OCONVNOP, ir.ODOT,
+			ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
+			ir.OBYTES2STRTMP, ir.OGETG, ir.OSLICEHEADER:
+			// ok: operations that don't require function calls.
+			// Expand as needed.
+		}
+
+		return false
+	})
+}
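+
+// For example (illustrative), in f(g(), a/b) the division may panic, so
+// mayCall reports true for a/b and walk evaluates it into a temporary
+// before argument marshaling for the call to f begins (issue 16760).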
+
+// itabType loads the _type field from a runtime.itab struct.
+func itabType(itab ir.Node) ir.Node {
+	if itabTypeField == nil {
+		// runtime.itab's _type field
+		itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8]))
+	}
+	return boundedDotPtr(base.Pos, itab, itabTypeField)
+}
+
+var itabTypeField *types.Field
+
+// boundedDotPtr returns a selector expression representing ptr.field
+// and omits nil-pointer checks for ptr.
+func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr {
+	sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym)
+	sel.Selection = field
+	sel.SetType(field.Type)
+	sel.SetTypecheck(1)
+	sel.SetBounded(true) // guaranteed not to fault
+	return sel
+}
+
+func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
+	f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ)
+	f.Offset = offset
+	return f
+}
+
+// ifaceData loads the data field from an interface.
+// The concrete type must be known to have type t.
+// It follows the pointer if !IsDirectIface(t).
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
+	if t.IsInterface() {
+		base.Fatalf("ifaceData interface: %v", t)
+	}
+	ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
+	if types.IsDirectIface(t) {
+		ptr.SetType(t)
+		ptr.SetTypecheck(1)
+		return ptr
+	}
+	ptr.SetType(types.NewPtr(t))
+	ptr.SetTypecheck(1)
+	ind := ir.NewStarExpr(pos, ptr)
+	ind.SetType(t)
+	ind.SetTypecheck(1)
+	ind.SetBounded(true)
+	return ind
+}
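+
+// Shape sketch: for a pointer-shaped T (IsDirectIface), ifaceData yields
+// the interface's data word itself, typed as T; otherwise it yields
+// *(*T)(data), with the nil check elided via SetBounded because the
+// concrete type is already known to be T.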
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
index 9c9f6ed..e4ef9d7 100644
--- a/src/cmd/compile/internal/wasm/ssa.go
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -5,16 +5,19 @@
 package wasm
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/wasm"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &wasm.Linkwasm
 	arch.REGSP = wasm.REG_SP
 	arch.MAXWIDTH = 1 << 50
@@ -28,31 +31,31 @@
 	arch.SSAGenBlock = ssaGenBlock
 }
 
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if cnt%8 != 0 {
-		gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+		base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
 	}
 
 	for i := int64(0); i < cnt; i += 8 {
-		p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
-		p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
-		p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+		p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+		p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+		p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	return pp.Prog(wasm.ANop)
 }
 
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if next != b.Succs[0].Block() {
@@ -118,11 +121,11 @@
 	}
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
 		s.PrepareCall(v)
-		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == gc.Deferreturn {
+		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
 			// add a resume point before call to deferreturn so it can be called again via jmpdefer
 			s.Prog(wasm.ARESUMEPOINT)
 		}
@@ -147,26 +150,26 @@
 		getValue32(s, v.Args[1])
 		i32Const(s, int32(v.AuxInt))
 		p := s.Prog(wasm.ACall)
-		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmMove}
+		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmMove}
 
 	case ssa.OpWasmLoweredZero:
 		getValue32(s, v.Args[0])
 		i32Const(s, int32(v.AuxInt))
 		p := s.Prog(wasm.ACall)
-		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmZero}
+		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmZero}
 
 	case ssa.OpWasmLoweredNilCheck:
 		getValue64(s, v.Args[0])
 		s.Prog(wasm.AI64Eqz)
 		s.Prog(wasm.AIf)
 		p := s.Prog(wasm.ACALLNORESUME)
-		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.SigPanic}
+		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
 		s.Prog(wasm.AEnd)
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 
 	case ssa.OpWasmLoweredWB:
@@ -185,7 +188,7 @@
 		getReg(s, wasm.REG_SP)
 		getValue64(s, v.Args[0])
 		p := s.Prog(storeOp(v.Type))
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 
 	default:
 		if v.Type.IsMemory() {
@@ -205,7 +208,7 @@
 	}
 }
 
-func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
+func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
 	switch v.Op {
 	case ssa.OpWasmLoweredGetClosurePtr:
 		getReg(s, wasm.REG_CTXT)
@@ -240,10 +243,10 @@
 		p.From.Type = obj.TYPE_ADDR
 		switch v.Aux.(type) {
 		case *obj.LSym:
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			p.From.Reg = v.Args[0].Reg()
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		default:
 			panic("wasm: bad LoweredAddr")
 		}
@@ -312,7 +315,7 @@
 		if v.Type.Size() == 8 {
 			// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
 			p := s.Prog(wasm.ACall)
-			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmDiv}
+			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
 			break
 		}
 		s.Prog(wasm.AI64DivS)
@@ -326,7 +329,7 @@
 				s.Prog(wasm.AF64PromoteF32)
 			}
 			p := s.Prog(wasm.ACall)
-			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncS}
+			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
 		}
 
 	case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
@@ -338,7 +341,7 @@
 				s.Prog(wasm.AF64PromoteF32)
 			}
 			p := s.Prog(wasm.ACall)
-			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncU}
+			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
 		}
 
 	case ssa.OpWasmF32DemoteF64:
@@ -360,7 +363,7 @@
 
 	case ssa.OpLoadReg:
 		p := s.Prog(loadOp(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 
 	case ssa.OpCopy:
 		getValue64(s, v.Args[0])
@@ -382,7 +385,7 @@
 	}
 }
 
-func getValue32(s *gc.SSAGenState, v *ssa.Value) {
+func getValue32(s *ssagen.State, v *ssa.Value) {
 	if v.OnWasmStack {
 		s.OnWasmStackSkipped--
 		ssaGenValueOnStack(s, v, false)
@@ -399,7 +402,7 @@
 	}
 }
 
-func getValue64(s *gc.SSAGenState, v *ssa.Value) {
+func getValue64(s *ssagen.State, v *ssa.Value) {
 	if v.OnWasmStack {
 		s.OnWasmStackSkipped--
 		ssaGenValueOnStack(s, v, true)
@@ -413,32 +416,32 @@
 	}
 }
 
-func i32Const(s *gc.SSAGenState, val int32) {
+func i32Const(s *ssagen.State, val int32) {
 	p := s.Prog(wasm.AI32Const)
 	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
 }
 
-func i64Const(s *gc.SSAGenState, val int64) {
+func i64Const(s *ssagen.State, val int64) {
 	p := s.Prog(wasm.AI64Const)
 	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
 }
 
-func f32Const(s *gc.SSAGenState, val float64) {
+func f32Const(s *ssagen.State, val float64) {
 	p := s.Prog(wasm.AF32Const)
 	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
 }
 
-func f64Const(s *gc.SSAGenState, val float64) {
+func f64Const(s *ssagen.State, val float64) {
 	p := s.Prog(wasm.AF64Const)
 	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
 }
 
-func getReg(s *gc.SSAGenState, reg int16) {
+func getReg(s *ssagen.State, reg int16) {
 	p := s.Prog(wasm.AGet)
 	p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
 }
 
-func setReg(s *gc.SSAGenState, reg int16) {
+func setReg(s *ssagen.State, reg int16) {
 	p := s.Prog(wasm.ASet)
 	p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
 }
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index e137daa..fc806f9 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -5,14 +5,15 @@
 package x86
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/x86"
 	"cmd/internal/objabi"
 	"fmt"
 	"os"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &x86.Link386
 	arch.REGSP = x86.REGSP
 	arch.SSAGenValue = ssaGenValue
@@ -24,10 +25,10 @@
 		arch.SoftFloat = true
 	case "387":
 		fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
-		gc.Exit(1)
+		base.Exit(1)
 	default:
 		fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
-		gc.Exit(1)
+		base.Exit(1)
 
 	}
 
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
index a33ddc8..3ca4797 100644
--- a/src/cmd/compile/internal/x86/ggen.go
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -5,39 +5,41 @@
 package x86
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if *ax == 0 {
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 		*ax = 1
 	}
 
-	if cnt <= int64(4*gc.Widthreg) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
-			p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+	if cnt <= int64(4*types.RegSize) {
+		for i := int64(0); i < cnt; i += int64(types.RegSize) {
+			p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthreg) {
-		p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
-		p.To.Sym = gc.Duffzero
+	} else if cnt <= int64(128*types.RegSize) {
+		p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
+		p.To.Sym = ir.Syms.Duffzero
 	} else {
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
-		p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+		p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// See comment in ../amd64/ggen.go.
 	p := pp.Prog(x86.AXCHGL)
 	p.From.Type = obj.TYPE_REG
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index fbf76d0..00dfa07 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -8,16 +8,18 @@
 	"fmt"
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 )
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 	flive := b.FlagsLiveAtEnd
 	for _, c := range b.ControlValues() {
 		flive = c.Type.IsFlags() || flive
@@ -107,7 +109,7 @@
 //     dest := dest(To) op src(From)
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
 	p := s.Prog(op)
 	p.From.Type = obj.TYPE_REG
 	p.To.Type = obj.TYPE_REG
@@ -116,7 +118,7 @@
 	return p
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.Op386ADDL:
 		r := v.Reg()
@@ -404,14 +406,14 @@
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = r
 		p.From.Index = i
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.Op386LEAL:
 		p := s.Prog(x86.ALEAL)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
@@ -437,7 +439,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[1].Reg()
 	case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload:
@@ -445,7 +447,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.From, v, sc.Off())
+		ssagen.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.Op386MOVLconst:
@@ -480,9 +482,9 @@
 		p.From.Name = obj.NAME_EXTERN
 		f := math.Float64frombits(uint64(v.AuxInt))
 		if v.Op == ssa.Op386MOVSDconst1 {
-			p.From.Sym = gc.Ctxt.Float64Sym(f)
+			p.From.Sym = base.Ctxt.Float64Sym(f)
 		} else {
-			p.From.Sym = gc.Ctxt.Float32Sym(float32(f))
+			p.From.Sym = base.Ctxt.Float32Sym(float32(f))
 		}
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -497,7 +499,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
@@ -521,7 +523,7 @@
 		}
 		p.From.Reg = r
 		p.From.Index = i
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
@@ -531,7 +533,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.From.Index = v.Args[2].Reg()
 		p.From.Scale = 4
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
@@ -544,7 +546,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
@@ -557,7 +559,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.Op386ADDLconstmodify:
 		sc := v.AuxValAndOff()
 		val := sc.Val()
@@ -571,7 +573,7 @@
 			off := sc.Off()
 			p.To.Type = obj.TYPE_MEM
 			p.To.Reg = v.Args[0].Reg()
-			gc.AddAux2(&p.To, v, off)
+			ssagen.AddAux2(&p.To, v, off)
 			break
 		}
 		fallthrough
@@ -584,7 +586,7 @@
 		p.From.Offset = val
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, off)
+		ssagen.AddAux2(&p.To, v, off)
 	case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
 		ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
 		ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
@@ -610,7 +612,7 @@
 		}
 		p.To.Reg = r
 		p.To.Index = i
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_CONST
@@ -618,7 +620,7 @@
 		p.From.Offset = sc.Val()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
 	case ssa.Op386ADDLconstmodifyidx4:
 		sc := v.AuxValAndOff()
 		val := sc.Val()
@@ -634,7 +636,7 @@
 			p.To.Reg = v.Args[0].Reg()
 			p.To.Scale = 4
 			p.To.Index = v.Args[1].Reg()
-			gc.AddAux2(&p.To, v, off)
+			ssagen.AddAux2(&p.To, v, off)
 			break
 		}
 		fallthrough
@@ -661,7 +663,7 @@
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = r
 		p.To.Index = i
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
 	case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
 		ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
 		ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
@@ -670,12 +672,12 @@
 	case ssa.Op386DUFFZERO:
 		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.Op386DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 
 	case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
@@ -693,7 +695,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
@@ -705,15 +707,15 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.Op386LoweredGetClosurePtr:
 		// Closure pointer is DX.
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.Op386LoweredGetG:
 		r := v.Reg()
 		// See the comments in cmd/internal/obj/x86/obj6.go
 		// near CanUse1InsnTLS for a detailed explanation of these instructions.
-		if x86.CanUse1InsnTLS(gc.Ctxt) {
+		if x86.CanUse1InsnTLS(base.Ctxt) {
 			// MOVL (TLS), r
 			p := s.Prog(x86.AMOVL)
 			p.From.Type = obj.TYPE_MEM
@@ -749,7 +751,7 @@
 		// caller's SP is the address of the first arg
 		p := s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+		p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -764,14 +766,14 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(8) // space used in callee args area by assembly stubs
 
 	case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
 		s.UseArgs(12) // space used in callee args area by assembly stubs
 
 	case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
@@ -846,12 +848,12 @@
 		p.From.Reg = x86.REG_AX
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpClobber:
 		p := s.Prog(x86.AMOVL)
@@ -859,7 +861,7 @@
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	default:
 		v.Fatalf("genValue not implemented: %s", v.LongString())
 	}
@@ -884,22 +886,22 @@
 	ssa.Block386NAN: {x86.AJPS, x86.AJPC},
 }
 
-var eqfJumps = [2][2]gc.IndexJump{
+var eqfJumps = [2][2]ssagen.IndexJump{
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
 }
-var nefJumps = [2][2]gc.IndexJump{
+var nefJumps = [2][2]ssagen.IndexJump{
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockDefer:
 		// defer returns in rax:
@@ -912,11 +914,11 @@
 		p.To.Reg = x86.REG_AX
 		p = s.Prog(x86.AJNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
index 3aa64a5..cb2f4e8 100644
--- a/src/cmd/compile/main.go
+++ b/src/cmd/compile/main.go
@@ -8,12 +8,14 @@
 	"cmd/compile/internal/amd64"
 	"cmd/compile/internal/arm"
 	"cmd/compile/internal/arm64"
+	"cmd/compile/internal/base"
 	"cmd/compile/internal/gc"
 	"cmd/compile/internal/mips"
 	"cmd/compile/internal/mips64"
 	"cmd/compile/internal/ppc64"
 	"cmd/compile/internal/riscv64"
 	"cmd/compile/internal/s390x"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/wasm"
 	"cmd/compile/internal/x86"
 	"cmd/internal/objabi"
@@ -22,7 +24,7 @@
 	"os"
 )
 
-var archInits = map[string]func(*gc.Arch){
+var archInits = map[string]func(*ssagen.ArchInfo){
 	"386":      x86.Init,
 	"amd64":    amd64.Init,
 	"arm":      arm.Init,
@@ -50,5 +52,5 @@
 	}
 
 	gc.Main(archInit)
-	gc.Exit(0)
+	base.Exit(0)
 }
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index c8c3212..332f2fa 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -1765,6 +1765,8 @@
 		rval = true
 	case "syscall":
 		rval = true
+	case "crypto/x509/internal/macos": // libc function wrappers need to be ABIInternal
+		rval = true
 	default:
 		rval = strings.HasPrefix(pkgpath, "runtime/internal")
 	}
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index cf85f2a..7520b0e 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -23,80 +23,41 @@
 // compiled with a Go 1.4 toolchain to produce the bootstrapTargets.
 // All directories in this list are relative to and must be below $GOROOT/src.
 //
-// The list has have two kinds of entries: names beginning with cmd/ with
+// The list has two kinds of entries: names beginning with cmd/ with
 // no other slashes, which are commands, and other paths, which are packages
 // supporting the commands. Packages in the standard library can be listed
 // if a newer copy needs to be substituted for the Go 1.4 copy when used
-// by the command packages.
+// by the command packages. Paths ending with /... automatically
+// include all packages within subdirectories as well.
 // These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big.
 var bootstrapDirs = []string{
 	"cmd/asm",
-	"cmd/asm/internal/arch",
-	"cmd/asm/internal/asm",
-	"cmd/asm/internal/flags",
-	"cmd/asm/internal/lex",
+	"cmd/asm/internal/...",
 	"cmd/cgo",
 	"cmd/compile",
-	"cmd/compile/internal/amd64",
-	"cmd/compile/internal/arm",
-	"cmd/compile/internal/arm64",
-	"cmd/compile/internal/gc",
-	"cmd/compile/internal/logopt",
-	"cmd/compile/internal/mips",
-	"cmd/compile/internal/mips64",
-	"cmd/compile/internal/ppc64",
-	"cmd/compile/internal/riscv64",
-	"cmd/compile/internal/s390x",
-	"cmd/compile/internal/ssa",
-	"cmd/compile/internal/syntax",
-	"cmd/compile/internal/types",
-	"cmd/compile/internal/x86",
-	"cmd/compile/internal/wasm",
+	"cmd/compile/internal/...",
+	"cmd/internal/archive",
 	"cmd/internal/bio",
 	"cmd/internal/codesign",
-	"cmd/internal/gcprog",
 	"cmd/internal/dwarf",
 	"cmd/internal/edit",
+	"cmd/internal/gcprog",
 	"cmd/internal/goobj",
+	"cmd/internal/obj/...",
 	"cmd/internal/objabi",
-	"cmd/internal/obj",
-	"cmd/internal/obj/arm",
-	"cmd/internal/obj/arm64",
-	"cmd/internal/obj/mips",
-	"cmd/internal/obj/ppc64",
-	"cmd/internal/obj/riscv",
-	"cmd/internal/obj/s390x",
-	"cmd/internal/obj/x86",
-	"cmd/internal/obj/wasm",
 	"cmd/internal/pkgpath",
 	"cmd/internal/src",
 	"cmd/internal/sys",
 	"cmd/link",
-	"cmd/link/internal/amd64",
-	"cmd/link/internal/arm",
-	"cmd/link/internal/arm64",
-	"cmd/link/internal/benchmark",
-	"cmd/link/internal/ld",
-	"cmd/link/internal/loadelf",
-	"cmd/link/internal/loader",
-	"cmd/link/internal/loadmacho",
-	"cmd/link/internal/loadpe",
-	"cmd/link/internal/loadxcoff",
-	"cmd/link/internal/mips",
-	"cmd/link/internal/mips64",
-	"cmd/link/internal/ppc64",
-	"cmd/link/internal/riscv64",
-	"cmd/link/internal/s390x",
-	"cmd/link/internal/sym",
-	"cmd/link/internal/x86",
+	"cmd/link/internal/...",
 	"compress/flate",
 	"compress/zlib",
-	"cmd/link/internal/wasm",
 	"container/heap",
 	"debug/dwarf",
 	"debug/elf",
 	"debug/macho",
 	"debug/pe",
+	"go/constant",
 	"internal/goversion",
 	"internal/race",
 	"internal/unsafeheader",
@@ -104,6 +65,7 @@
 	"math/big",
 	"math/bits",
 	"sort",
+	"strconv",
 }
 
 // File prefixes that are ignored by go/build anyway, and cause
@@ -111,6 +73,7 @@
 var ignorePrefixes = []string{
 	".",
 	"_",
+	"#",
 }
 
 // File suffixes that use build tags introduced since Go 1.4.
@@ -124,6 +87,7 @@
 	"_wasm.s",
 	"_wasm.go",
 	"_test.s",
+	"_test.go",
 }
 
 func bootstrapBuildTools() {
@@ -149,31 +113,47 @@
 	// Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths.
 	writefile("module bootstrap\n", pathf("%s/%s", base, "go.mod"), 0)
 	for _, dir := range bootstrapDirs {
-		src := pathf("%s/src/%s", goroot, dir)
-		dst := pathf("%s/%s", base, dir)
-		xmkdirall(dst)
-		if dir == "cmd/cgo" {
-			// Write to src because we need the file both for bootstrap
-			// and for later in the main build.
-			mkzdefaultcc("", pathf("%s/zdefaultcc.go", src))
-		}
-	Dir:
-		for _, name := range xreaddirfiles(src) {
+		recurse := strings.HasSuffix(dir, "/...")
+		dir = strings.TrimSuffix(dir, "/...")
+		filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				fatalf("walking bootstrap dirs failed: %v: %v", path, err)
+			}
+
+			name := filepath.Base(path)
+			src := pathf("%s/src/%s", goroot, path)
+			dst := pathf("%s/%s", base, path)
+
+			if info.IsDir() {
+				if !recurse && path != dir || name == "testdata" {
+					return filepath.SkipDir
+				}
+
+				xmkdirall(dst)
+				if path == "cmd/cgo" {
+					// Write to src because we need the file both for bootstrap
+					// and for later in the main build.
+					mkzdefaultcc("", pathf("%s/zdefaultcc.go", src))
+					mkzdefaultcc("", pathf("%s/zdefaultcc.go", dst))
+				}
+				return nil
+			}
+
 			for _, pre := range ignorePrefixes {
 				if strings.HasPrefix(name, pre) {
-					continue Dir
+					return nil
 				}
 			}
 			for _, suf := range ignoreSuffixes {
 				if strings.HasSuffix(name, suf) {
-					continue Dir
+					return nil
 				}
 			}
-			srcFile := pathf("%s/%s", src, name)
-			dstFile := pathf("%s/%s", dst, name)
-			text := bootstrapRewriteFile(srcFile)
-			writefile(text, dstFile, 0)
-		}
+
+			text := bootstrapRewriteFile(src)
+			writefile(text, dst, 0)
+			return nil
+		})
 	}
 
 	// Set up environment for invoking Go 1.4 go command.
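Annotation: a minimal sketch of the new "/..." wildcard convention described in the bootstrapDirs comment above, assuming only the suffix semantics stated there (the helper name is illustrative):

	package main

	import (
		"fmt"
		"strings"
	)

	// splitWildcard mirrors the "/..." convention above: a trailing
	// "/..." means the directory is copied recursively.
	func splitWildcard(dir string) (clean string, recurse bool) {
		recurse = strings.HasSuffix(dir, "/...")
		return strings.TrimSuffix(dir, "/..."), recurse
	}

	func main() {
		clean, recurse := splitWildcard("cmd/compile/internal/...")
		fmt.Println(clean, recurse) // cmd/compile/internal true
	}
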
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index e7c63f0..db3f281 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -111,7 +111,7 @@
 // 	-p n
 // 		the number of programs, such as build commands or
 // 		test binaries, that can be run in parallel.
-// 		The default is the number of CPUs available.
+// 		The default is GOMAXPROCS, normally the number of CPUs available.
 // 	-race
 // 		enable data race detection.
 // 		Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64,
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 3ce3238..d14b232 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -811,6 +811,7 @@
 	// so that we can change files.
 	for _, copydir := range []string{
 		"src/runtime",
+		"src/internal/abi",
 		"src/internal/bytealg",
 		"src/internal/cpu",
 		"src/math/bits",
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
index c48904e..3222479 100644
--- a/src/cmd/go/internal/cfg/cfg.go
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -28,18 +28,18 @@
 	BuildA                 bool   // -a flag
 	BuildBuildmode         string // -buildmode flag
 	BuildContext           = defaultContext()
-	BuildMod               string             // -mod flag
-	BuildModExplicit       bool               // whether -mod was set explicitly
-	BuildModReason         string             // reason -mod was set, if set by default
-	BuildI                 bool               // -i flag
-	BuildLinkshared        bool               // -linkshared flag
-	BuildMSan              bool               // -msan flag
-	BuildN                 bool               // -n flag
-	BuildO                 string             // -o flag
-	BuildP                 = runtime.NumCPU() // -p flag
-	BuildPkgdir            string             // -pkgdir flag
-	BuildRace              bool               // -race flag
-	BuildToolexec          []string           // -toolexec flag
+	BuildMod               string                  // -mod flag
+	BuildModExplicit       bool                    // whether -mod was set explicitly
+	BuildModReason         string                  // reason -mod was set, if set by default
+	BuildI                 bool                    // -i flag
+	BuildLinkshared        bool                    // -linkshared flag
+	BuildMSan              bool                    // -msan flag
+	BuildN                 bool                    // -n flag
+	BuildO                 string                  // -o flag
+	BuildP                 = runtime.GOMAXPROCS(0) // -p flag
+	BuildPkgdir            string                  // -pkgdir flag
+	BuildRace              bool                    // -race flag
+	BuildToolexec          []string                // -toolexec flag
 	BuildToolchainName     string
 	BuildToolchainCompiler func() string
 	BuildToolchainLinker   func() string
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index 780d639..0e7af6d 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -71,7 +71,7 @@
 	-p n
 		the number of programs, such as build commands or
 		test binaries, that can be run in parallel.
-		The default is the number of CPUs available.
+		The default is GOMAXPROCS, normally the number of CPUs available.
 	-race
 		enable data race detection.
 		Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64,
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index cc4e2b2..2087855 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -129,7 +129,11 @@
 		}
 	}
 
-	args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), gcflags, gcargs, "-D", p.Internal.LocalPrefix}
+	args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), gcflags, gcargs}
+	if p.Internal.LocalPrefix != "" {
+		// Workaround #43883.
+		args = append(args, "-D", p.Internal.LocalPrefix)
+	}
 	if importcfg != nil {
 		if err := b.writeFile(objdir+"importcfg", importcfg); err != nil {
 			return "", nil, err
@@ -235,16 +239,19 @@
 	//   - it has no successor packages to compile (usually package main)
 	//   - all paths through the build graph pass through it
 	//   - critical path scheduling says it is high priority
-	// and in such a case, set c to runtime.NumCPU.
+	// and in such a case, set c to runtime.GOMAXPROCS(0).
+	// By default this is the same as runtime.NumCPU.
 	// We do this now when p==1.
+	// To limit parallelism, set GOMAXPROCS below numCPU; this may be useful
+	// on a low-memory builder, or if a deterministic build order is required.
+	c := runtime.GOMAXPROCS(0)
 	if cfg.BuildP == 1 {
-		// No process parallelism. Max out c.
-		return runtime.NumCPU()
+		// No process parallelism, do not cap compiler parallelism.
+		return c
 	}
-	// Some process parallelism. Set c to min(4, numcpu).
-	c := 4
-	if ncpu := runtime.NumCPU(); ncpu < c {
-		c = ncpu
+	// Some process parallelism. Set c to min(4, maxprocs).
+	if c > 4 {
+		c = 4
 	}
 	return c
 }
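Annotation: the net policy above is to use all of GOMAXPROCS when there is no process-level parallelism (-p 1), and otherwise cap backend parallelism at min(4, GOMAXPROCS). A standalone sketch of that decision (the function name is illustrative):

	package main

	import (
		"fmt"
		"runtime"
	)

	// backendConcurrency mirrors the logic above: with -p 1 the compiler
	// backend may use all of GOMAXPROCS; otherwise it is capped at 4.
	func backendConcurrency(buildP int) int {
		c := runtime.GOMAXPROCS(0)
		if buildP > 1 && c > 4 {
			c = 4
		}
		return c
	}

	func main() {
		fmt.Println(backendConcurrency(1), backendConcurrency(8))
	}
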
diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go
index c1661d7..e9b25fe 100644
--- a/src/cmd/internal/archive/archive.go
+++ b/src/cmd/internal/archive/archive.go
@@ -118,9 +118,9 @@
 
 func (r *objReader) init(f *os.File) {
 	r.a = &Archive{f, nil}
-	r.offset, _ = f.Seek(0, io.SeekCurrent)
-	r.limit, _ = f.Seek(0, io.SeekEnd)
-	f.Seek(r.offset, io.SeekStart)
+	r.offset, _ = f.Seek(0, os.SEEK_CUR)
+	r.limit, _ = f.Seek(0, os.SEEK_END)
+	f.Seek(r.offset, os.SEEK_SET)
 	r.b = bio.NewReader(f)
 }
 
@@ -221,7 +221,7 @@
 		r.readFull(r.tmp[:n])
 	} else {
 		// Seek, giving up buffered data.
-		r.b.MustSeek(r.offset+n, io.SeekStart)
+		r.b.MustSeek(r.offset+n, os.SEEK_SET)
 		r.offset += n
 	}
 }
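Annotation: cmd/internal/archive is added to bootstrapDirs in the src/cmd/dist/buildtool.go change above, so it must compile with the Go 1.4 bootstrap toolchain; presumably that is why this hunk moves back to the deprecated os.SEEK_* constants, since io.SeekStart, io.SeekCurrent, and io.SeekEnd only arrived in Go 1.7.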
@@ -426,7 +426,7 @@
 
 // AddEntry adds an entry to the end of a, with the content from r.
 func (a *Archive) AddEntry(typ EntryType, name string, mtime int64, uid, gid int, mode os.FileMode, size int64, r io.Reader) {
-	off, err := a.f.Seek(0, io.SeekEnd)
+	off, err := a.f.Seek(0, os.SEEK_END)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -464,3 +464,24 @@
 	s += sixteenSpaces[:16-len(s)]
 	return s
 }
+
+// architecture-independent object file output
+const HeaderSize = 60
+
+func ReadHeader(b *bufio.Reader, name string) int {
+	var buf [HeaderSize]byte
+	if _, err := io.ReadFull(b, buf[:]); err != nil {
+		return -1
+	}
+	aname := strings.Trim(string(buf[0:16]), " ")
+	if !strings.HasPrefix(aname, name) {
+		return -1
+	}
+	asize := strings.Trim(string(buf[48:58]), " ")
+	i, _ := strconv.Atoi(asize)
+	return i
+}
+
+func FormatHeader(arhdr []byte, name string, size int64) {
+	copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
+}
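Annotation: a round-trip sketch of the two new helpers. cmd/internal/archive is an internal package, so this only compiles inside the Go tree (under src/cmd); it is shown for illustration only:

	package main

	import (
		"bufio"
		"bytes"
		"fmt"

		"cmd/internal/archive"
	)

	func main() {
		// Format a 60-byte ar-style header for an entry named "go.o"
		// of size 1234, then parse the size back out of it.
		hdr := make([]byte, archive.HeaderSize)
		archive.FormatHeader(hdr, "go.o", 1234)
		size := archive.ReadHeader(bufio.NewReader(bytes.NewReader(hdr)), "go.o")
		fmt.Println(size) // 1234
	}
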
diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go
index 07c3406..22608e7 100644
--- a/src/cmd/internal/goobj/mkbuiltin.go
+++ b/src/cmd/internal/goobj/mkbuiltin.go
@@ -118,8 +118,8 @@
 
 // addBasicTypes returns the symbol names for basic types that are
 // defined in the runtime and referenced in other packages.
-// Needs to be kept in sync with reflect.go:dumpbasictypes() and
-// reflect.go:dtypesym() in the compiler.
+// Needs to be kept in sync with reflect.go:WriteBasicTypes() and
+// reflect.go:writeType() in the compiler.
 func enumerateBasicTypes() []extra {
 	names := [...]string{
 		"int8", "uint8", "int16", "uint16",
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 8c8ff58..8206902 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -39,6 +39,7 @@
 	"cmd/internal/sys"
 	"fmt"
 	"sync"
+	"sync/atomic"
 )
 
 // An Addr is an argument to an instruction.
@@ -250,6 +251,12 @@
 	a.Val = t
 }
 
+func (a *Addr) SetConst(v int64) {
+	a.Sym = nil
+	a.Type = TYPE_CONST
+	a.Offset = v
+}
+
 // Prog describes a single machine instruction.
 //
 // The general instruction form is:
@@ -629,6 +636,10 @@
 	// ContentAddressable indicates this is a content-addressable symbol.
 	AttrContentAddressable
 
+	// ABI wrapper is set for compiler-generated text symbols that
+	// convert between ABI0 and ABIInternal calling conventions.
+	AttrABIWrapper
+
 	// attrABIBase is the value at which the ABI is encoded in
 	// Attribute. This must be last; all bits after this are
 	// assumed to be an ABI value.
@@ -637,36 +648,52 @@
 	attrABIBase
 )
 
-func (a Attribute) DuplicateOK() bool        { return a&AttrDuplicateOK != 0 }
-func (a Attribute) MakeTypelink() bool       { return a&AttrMakeTypelink != 0 }
-func (a Attribute) CFunc() bool              { return a&AttrCFunc != 0 }
-func (a Attribute) NoSplit() bool            { return a&AttrNoSplit != 0 }
-func (a Attribute) Leaf() bool               { return a&AttrLeaf != 0 }
-func (a Attribute) OnList() bool             { return a&AttrOnList != 0 }
-func (a Attribute) ReflectMethod() bool      { return a&AttrReflectMethod != 0 }
-func (a Attribute) Local() bool              { return a&AttrLocal != 0 }
-func (a Attribute) Wrapper() bool            { return a&AttrWrapper != 0 }
-func (a Attribute) NeedCtxt() bool           { return a&AttrNeedCtxt != 0 }
-func (a Attribute) NoFrame() bool            { return a&AttrNoFrame != 0 }
-func (a Attribute) Static() bool             { return a&AttrStatic != 0 }
-func (a Attribute) WasInlined() bool         { return a&AttrWasInlined != 0 }
-func (a Attribute) TopFrame() bool           { return a&AttrTopFrame != 0 }
-func (a Attribute) Indexed() bool            { return a&AttrIndexed != 0 }
-func (a Attribute) UsedInIface() bool        { return a&AttrUsedInIface != 0 }
-func (a Attribute) ContentAddressable() bool { return a&AttrContentAddressable != 0 }
+func (a *Attribute) load() Attribute { return Attribute(atomic.LoadUint32((*uint32)(a))) }
+
+func (a *Attribute) DuplicateOK() bool        { return a.load()&AttrDuplicateOK != 0 }
+func (a *Attribute) MakeTypelink() bool       { return a.load()&AttrMakeTypelink != 0 }
+func (a *Attribute) CFunc() bool              { return a.load()&AttrCFunc != 0 }
+func (a *Attribute) NoSplit() bool            { return a.load()&AttrNoSplit != 0 }
+func (a *Attribute) Leaf() bool               { return a.load()&AttrLeaf != 0 }
+func (a *Attribute) OnList() bool             { return a.load()&AttrOnList != 0 }
+func (a *Attribute) ReflectMethod() bool      { return a.load()&AttrReflectMethod != 0 }
+func (a *Attribute) Local() bool              { return a.load()&AttrLocal != 0 }
+func (a *Attribute) Wrapper() bool            { return a.load()&AttrWrapper != 0 }
+func (a *Attribute) NeedCtxt() bool           { return a.load()&AttrNeedCtxt != 0 }
+func (a *Attribute) NoFrame() bool            { return a.load()&AttrNoFrame != 0 }
+func (a *Attribute) Static() bool             { return a.load()&AttrStatic != 0 }
+func (a *Attribute) WasInlined() bool         { return a.load()&AttrWasInlined != 0 }
+func (a *Attribute) TopFrame() bool           { return a.load()&AttrTopFrame != 0 }
+func (a *Attribute) Indexed() bool            { return a.load()&AttrIndexed != 0 }
+func (a *Attribute) UsedInIface() bool        { return a.load()&AttrUsedInIface != 0 }
+func (a *Attribute) ContentAddressable() bool { return a.load()&AttrContentAddressable != 0 }
+func (a *Attribute) ABIWrapper() bool         { return a.load()&AttrABIWrapper != 0 }
 
 func (a *Attribute) Set(flag Attribute, value bool) {
-	if value {
-		*a |= flag
-	} else {
-		*a &^= flag
+	for {
+		v0 := a.load()
+		v := v0
+		if value {
+			v |= flag
+		} else {
+			v &^= flag
+		}
+		if atomic.CompareAndSwapUint32((*uint32)(a), uint32(v0), uint32(v)) {
+			break
+		}
 	}
 }
 
-func (a Attribute) ABI() ABI { return ABI(a / attrABIBase) }
+func (a *Attribute) ABI() ABI { return ABI(a.load() / attrABIBase) }
 func (a *Attribute) SetABI(abi ABI) {
 	const mask = 1 // Only one ABI bit for now.
-	*a = (*a &^ (mask * attrABIBase)) | Attribute(abi)*attrABIBase
+	for {
+		v0 := a.load()
+		v := (v0 &^ (mask * attrABIBase)) | Attribute(abi)*attrABIBase
+		if atomic.CompareAndSwapUint32((*uint32)(a), uint32(v0), uint32(v)) {
+			break
+		}
+	}
 }
 
 var textAttrStrings = [...]struct {
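Annotation: since attribute reads now go through an atomic load and writes through a CAS loop, flag updates on a shared symbol are safe from concurrent goroutines (for example, the parallel backend). An in-tree illustrative sketch:

	package main

	import (
		"sync"

		"cmd/internal/obj" // internal package; illustration only
	)

	func main() {
		var attr obj.Attribute
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				attr.Set(obj.AttrUsedInIface, true) // CAS loop retries on contention
			}()
		}
		wg.Wait()
		_ = attr.UsedInIface() // true; the read goes through an atomic load
	}
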
@@ -689,6 +716,7 @@
 	{bit: AttrTopFrame, s: "TOPFRAME"},
 	{bit: AttrIndexed, s: ""},
 	{bit: AttrContentAddressable, s: ""},
+	{bit: AttrABIWrapper, s: "ABIWRAPPER"},
 }
 
 // TextAttrString formats a for printing in as part of a TEXT prog.
@@ -723,8 +751,8 @@
 }
 
 // The compiler needs *LSym to be assignable to cmd/compile/internal/ssa.Sym.
-func (s *LSym) CanBeAnSSASym() {
-}
+func (*LSym) CanBeAnSSASym() {}
+func (*LSym) CanBeAnSSAAux() {}
 
 type Pcln struct {
 	// Aux symbols for pcln
@@ -754,6 +782,17 @@
 	Gotype  *LSym
 }
 
+// RegArg provides spill/fill information for a register-resident argument
+// to a function.  These need spilling/filling in the safepoint/stackgrowth case.
+// At the time of fill/spill, the offset must be adjusted by the architecture-dependent
+// adjustment to hardware SP that occurs in a call instruction.  E.g., for AMD64,
+// at Offset+8 because the return address was pushed.
+type RegArg struct {
+	Addr           Addr
+	Reg            int16
+	Spill, Unspill As
+}
+
 // Link holds the context for writing object code from a compiler
 // to be linker input or for reading that input into the linker.
 type Link struct {
@@ -784,6 +823,7 @@
 	DebugInfo          func(fn *LSym, info *LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) // if non-nil, curfn is a *gc.Node
 	GenAbstractFunc    func(fn *LSym)
 	Errors             int
+	RegArgs            []RegArg
 
 	InParallel    bool // parallel backend phase in effect
 	UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges
@@ -832,6 +872,32 @@
 	ctxt.Bso.Flush()
 }
 
+func (ctxt *Link) SpillRegisterArgs(last *Prog, pa ProgAlloc) *Prog {
+	// Spill register args.
+	for _, ra := range ctxt.RegArgs {
+		spill := Appendp(last, pa)
+		spill.As = ra.Spill
+		spill.From.Type = TYPE_REG
+		spill.From.Reg = ra.Reg
+		spill.To = ra.Addr
+		last = spill
+	}
+	return last
+}
+
+func (ctxt *Link) UnspillRegisterArgs(last *Prog, pa ProgAlloc) *Prog {
+	// Unspill any spilled register args
+	for _, ra := range ctxt.RegArgs {
+		unspill := Appendp(last, pa)
+		unspill.As = ra.Unspill
+		unspill.From = ra.Addr
+		unspill.To.Type = TYPE_REG
+		unspill.To.Reg = ra.Reg
+		last = unspill
+	}
+	return last
+}
+
 // The smallest possible offset from the hardware stack pointer to a local
 // variable on the stack. Architectures that use a link register save its value
 // on the stack in the function prologue and so always have a pointer between
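Annotation: a sketch of how a backend might describe one register-resident argument so the stack-growth path can save and restore it. The register, opcode, and offset choices here are illustrative, not the compiler's actual assignments (in-tree imports):

	package main

	import (
		"cmd/internal/obj"
		"cmd/internal/obj/x86"
	)

	// addAXRegArg records AX as a register argument spilled to 8(SP).
	func addAXRegArg(ctxt *obj.Link) {
		var a obj.Addr
		a.Type = obj.TYPE_MEM
		a.Reg = x86.REG_SP
		a.Offset = 8 // past the return address pushed by CALL
		ctxt.RegArgs = append(ctxt.RegArgs, obj.RegArg{
			Addr: a, Reg: x86.REG_AX, Spill: x86.AMOVQ, Unspill: x86.AMOVQ,
		})
	}

	func main() {}

With that entry in place, SpillRegisterArgs emits MOVQ AX, 8(SP) and UnspillRegisterArgs emits MOVQ 8(SP), AX wherever the prologue needs them.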
diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go
index 2b09699..679ce7e 100644
--- a/src/cmd/internal/obj/plist.go
+++ b/src/cmd/internal/obj/plist.go
@@ -80,6 +80,11 @@
 		if !strings.HasPrefix(s.Name, "\"\".") {
 			continue
 		}
+		if s.ABIWrapper() {
+			// Don't create an args_stackmap symbol reference for an ABI
+			// wrapper function
+			continue
+		}
 		found := false
 		for p := s.Func().Text; p != nil; p = p.Link {
 			if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == objabi.FUNCDATA_ArgsPointerMaps {
@@ -134,6 +139,7 @@
 	s.Set(AttrNoSplit, flag&NOSPLIT != 0)
 	s.Set(AttrReflectMethod, flag&REFLECTMETHOD != 0)
 	s.Set(AttrWrapper, flag&WRAPPER != 0)
+	s.Set(AttrABIWrapper, flag&ABIWRAPPER != 0)
 	s.Set(AttrNeedCtxt, flag&NEEDCTXT != 0)
 	s.Set(AttrNoFrame, flag&NOFRAME != 0)
 	s.Set(AttrTopFrame, flag&TOPFRAME != 0)
diff --git a/src/cmd/internal/obj/s390x/condition_code.go b/src/cmd/internal/obj/s390x/condition_code.go
index 764fc5b..f498fd6 100644
--- a/src/cmd/internal/obj/s390x/condition_code.go
+++ b/src/cmd/internal/obj/s390x/condition_code.go
@@ -124,3 +124,5 @@
 	// invalid
 	return fmt.Sprintf("Invalid (%#x)", c)
 }
+
+func (CCMask) CanBeAnSSAAux() {}
diff --git a/src/cmd/internal/obj/s390x/rotate.go b/src/cmd/internal/obj/s390x/rotate.go
index 388bd40f4..5407c8d 100644
--- a/src/cmd/internal/obj/s390x/rotate.go
+++ b/src/cmd/internal/obj/s390x/rotate.go
@@ -113,3 +113,5 @@
 func (r RotateParams) InMerge(mask uint64) *RotateParams {
 	return r.OutMerge(bits.RotateLeft64(mask, int(r.Amount)))
 }
+
+func (RotateParams) CanBeAnSSAAux() {}
diff --git a/src/cmd/internal/obj/textflag.go b/src/cmd/internal/obj/textflag.go
index d2cec73..2f55793 100644
--- a/src/cmd/internal/obj/textflag.go
+++ b/src/cmd/internal/obj/textflag.go
@@ -33,7 +33,7 @@
 	// This function uses its incoming context register.
 	NEEDCTXT = 64
 
-	// When passed to ggloblsym, causes Local to be set to true on the LSym it creates.
+	// When passed to objw.Global, causes Local to be set to true on the LSym it creates.
 	LOCAL = 128
 
 	// Allocate a word of thread local storage and store the offset from the
@@ -51,4 +51,7 @@
 	// Function is the top of the call stack. Call stack unwinders should stop
 	// at this function.
 	TOPFRAME = 2048
+
+	// Function is an ABI wrapper.
+	ABIWRAPPER = 4096
 )
diff --git a/src/cmd/internal/obj/x86/a.out.go b/src/cmd/internal/obj/x86/a.out.go
index 30c1a6a..3be4b59 100644
--- a/src/cmd/internal/obj/x86/a.out.go
+++ b/src/cmd/internal/obj/x86/a.out.go
@@ -263,6 +263,7 @@
 	FREGRET  = REG_X0
 	REGSP    = REG_SP
 	REGCTXT  = REG_DX
+	REGG     = REG_R14     // g register in ABIInternal
 	REGEXT   = REG_R15     // compiler allocates external registers R15 down
 	FREGMIN  = REG_X0 + 5  // first register variable
 	FREGEXT  = REG_X0 + 15 // first external register
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 184fb43..84de58a 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -637,13 +637,19 @@
 		}
 	}
 
-	if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
-		p = obj.Appendp(p, newprog)
-		p = load_g_cx(ctxt, p, newprog) // load g into CX
+	var regg int16
+	if !p.From.Sym.NoSplit() || (p.From.Sym.Wrapper() && !p.From.Sym.ABIWrapper()) {
+		if ctxt.Arch.Family == sys.AMD64 && objabi.Regabi_enabled != 0 && cursym.ABI() == obj.ABIInternal {
+			regg = REGG // use the g register directly in ABIInternal
+		} else {
+			p = obj.Appendp(p, newprog)
+			p = load_g_cx(ctxt, p, newprog) // load g into CX
+			regg = REG_CX
+		}
 	}
 
 	if !cursym.Func().Text.From.Sym.NoSplit() {
-		p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg)) // emit split check
+		p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg), regg) // emit split check
 	}
 
 	// Delve debugger would like the next instruction to be noted as the end of the function prologue.
@@ -690,12 +696,12 @@
 		p.To.Reg = REG_BP
 	}
 
-	if cursym.Func().Text.From.Sym.Wrapper() {
+	if cursym.Func().Text.From.Sym.Wrapper() && !cursym.Func().Text.From.Sym.ABIWrapper() {
 		// if g._panic != nil && g._panic.argp == FP {
 		//   g._panic.argp = bottom-of-frame
 		// }
 		//
-		//	MOVQ g_panic(CX), BX
+		//	MOVQ g_panic(g), BX
 		//	TESTQ BX, BX
 		//	JNE checkargp
 		// end:
@@ -718,7 +724,7 @@
 		p = obj.Appendp(p, newprog)
 		p.As = AMOVQ
 		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_CX
+		p.From.Reg = regg
 		p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = REG_BX
@@ -969,9 +975,9 @@
 
 // Append code to p to check for stack split.
 // Appends to (does not overwrite) p.
-// Assumes g is in CX.
+// Assumes g is in rg.
 // Returns last new instruction.
-func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32) *obj.Prog {
+func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32, rg int16) *obj.Prog {
 	cmp := ACMPQ
 	lea := ALEAQ
 	mov := AMOVQ
@@ -993,7 +999,8 @@
 		p.As = cmp
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = REG_SP
-		indir_cx(ctxt, &p.To)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = rg
 		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
 		if cursym.CFunc() {
 			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
@@ -1021,7 +1028,8 @@
 		p.As = cmp
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = REG_AX
-		indir_cx(ctxt, &p.To)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = rg
 		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
 		if cursym.CFunc() {
 			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
@@ -1047,7 +1055,8 @@
 		p = obj.Appendp(p, newprog)
 
 		p.As = mov
-		indir_cx(ctxt, &p.From)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = rg
 		p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
 		if cursym.CFunc() {
 			p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
@@ -1114,7 +1123,8 @@
 	spfix.Spadj = -framesize
 
 	pcdata := ctxt.EmitEntryStackMap(cursym, spfix, newprog)
-	pcdata = ctxt.StartUnsafePoint(pcdata, newprog)
+	spill := ctxt.StartUnsafePoint(pcdata, newprog)
+	pcdata = ctxt.SpillRegisterArgs(spill, newprog)
 
 	call := obj.Appendp(pcdata, newprog)
 	call.Pos = cursym.Func().Text.Pos
@@ -1139,7 +1149,8 @@
 		progedit(ctxt, callend.Link, newprog)
 	}
 
-	pcdata = ctxt.EndUnsafePoint(callend, newprog, -1)
+	pcdata = ctxt.UnspillRegisterArgs(callend, newprog)
+	pcdata = ctxt.EndUnsafePoint(pcdata, newprog, -1)
 
 	jmp := obj.Appendp(pcdata, newprog)
 	jmp.As = obj.AJMP
@@ -1147,9 +1158,9 @@
 	jmp.To.SetTarget(cursym.Func().Text.Link)
 	jmp.Spadj = +framesize
 
-	jls.To.SetTarget(call)
+	jls.To.SetTarget(spill)
 	if q1 != nil {
-		q1.To.SetTarget(call)
+		q1.To.SetTarget(spill)
 	}
 
 	return end
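Annotation: note the retargeting at the end of stacksplit. The stack-growth branch (jls, and q1 for large frames) now jumps to the spill sequence rather than to the morestack call itself, so register arguments are saved before runtime.morestack runs and are restored again before execution re-enters the function body.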
diff --git a/src/cmd/internal/objabi/path.go b/src/cmd/internal/objabi/path.go
index fd1c998..1a0784c 100644
--- a/src/cmd/internal/objabi/path.go
+++ b/src/cmd/internal/objabi/path.go
@@ -56,6 +56,8 @@
 		rval = true
 	case "syscall":
 		rval = true
+	case "crypto/x509/internal/macos": // libc function wrappers need to be ABIInternal
+		rval = true
 	default:
 		rval = strings.HasPrefix(pkgpath, "runtime/internal")
 	}
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index 52035e9..92d38bb 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -55,6 +55,7 @@
 	switch pkg {
 	case "runtime",
 		"sync/atomic",      // runtime may call to sync/atomic, due to go:linkname
+		"internal/abi",     // used by reflectcall (and maybe more)
 		"internal/bytealg", // for IndexByte
 		"internal/cpu":     // for cpu features
 		return true
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 17d5040..3148968 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -489,10 +489,16 @@
 	case 0:
 		// nothing to do
 	case 1, 2:
-		flags = loader.FlagStrictDups
+		flags |= loader.FlagStrictDups
 	default:
 		log.Fatalf("invalid -strictdups flag value %d", *FlagStrictDups)
 	}
+	if !*flagAbiWrap || ctxt.linkShared {
+		// Use ABI aliases if ABI wrappers are not used.
+		// TODO: for now we still use ABI aliases in shared linkage, even if
+		// the wrapper is enabled.
+		flags |= loader.FlagUseABIAlias
+	}
 	elfsetstring1 := func(str string, off int) { elfsetstring(ctxt, 0, str, off) }
 	ctxt.loader = loader.NewLoader(flags, elfsetstring1, &ctxt.ErrorReporter.ErrorReporter)
 	ctxt.ErrorReporter.SymName = func(s loader.Sym) string {
@@ -2091,6 +2097,26 @@
 		Errorf(nil, "cannot read symbols from shared library: %s", libpath)
 		return
 	}
+
+	// collect text symbol ABI versions.
+	symabi := make(map[string]int) // map (unmangled) symbol name to version
+	if *flagAbiWrap {
+		for _, elfsym := range syms {
+			if elf.ST_TYPE(elfsym.Info) != elf.STT_FUNC {
+				continue
+			}
+			// Demangle the name. Keep in sync with symtab.go:putelfsym.
+			if strings.HasSuffix(elfsym.Name, ".abiinternal") {
+				// ABIInternal symbol has mangled name, so the primary symbol is ABI0.
+				symabi[strings.TrimSuffix(elfsym.Name, ".abiinternal")] = 0
+			}
+			if strings.HasSuffix(elfsym.Name, ".abi0") {
+				// ABI0 symbol has mangled name, so the primary symbol is ABIInternal.
+				symabi[strings.TrimSuffix(elfsym.Name, ".abi0")] = sym.SymVerABIInternal
+			}
+		}
+	}
+
 	for _, elfsym := range syms {
 		if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION {
 			continue
@@ -2099,12 +2125,23 @@
 		// Symbols whose names start with "type." are compiler
 		// generated, so make functions with that prefix internal.
 		ver := 0
+		symname := elfsym.Name // (unmangled) symbol name
 		if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && strings.HasPrefix(elfsym.Name, "type.") {
 			ver = sym.SymVerABIInternal
+		} else if *flagAbiWrap && elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC {
+			if strings.HasSuffix(elfsym.Name, ".abiinternal") {
+				ver = sym.SymVerABIInternal
+				symname = strings.TrimSuffix(elfsym.Name, ".abiinternal")
+			} else if strings.HasSuffix(elfsym.Name, ".abi0") {
+				ver = 0
+				symname = strings.TrimSuffix(elfsym.Name, ".abi0")
+			} else if abi, ok := symabi[elfsym.Name]; ok {
+				ver = abi
+			}
 		}
 
 		l := ctxt.loader
-		s := l.LookupOrCreateSym(elfsym.Name, ver)
+		s := l.LookupOrCreateSym(symname, ver)
 
 		// Because loadlib above loads all .a files before loading
 		// any shared libraries, any non-dynimport symbols we find
@@ -2129,6 +2166,10 @@
 			}
 		}
 
+		if symname != elfsym.Name {
+			l.SetSymExtname(s, elfsym.Name)
+		}
+
 		// For function symbols, we don't know what ABI is
 		// available, so alias it under both ABIs.
 		//
@@ -2137,7 +2178,12 @@
 		// mangle Go function names in the .so to include the
 		// ABI.
 		if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC && ver == 0 {
-			alias := ctxt.loader.LookupOrCreateSym(elfsym.Name, sym.SymVerABIInternal)
+			if *flagAbiWrap {
+				if _, ok := symabi[symname]; ok {
+					continue // only use alias for functions w/o ABI wrappers
+				}
+			}
+			alias := ctxt.loader.LookupOrCreateSym(symname, sym.SymVerABIInternal)
 			if l.SymType(alias) != 0 {
 				continue
 			}
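Annotation: a self-contained sketch of the name-demangling convention used above; the concrete numeric value of sym.SymVerABIInternal is a placeholder assumption here:

	package main

	import (
		"fmt"
		"strings"
	)

	const symVerABIInternal = 1 // placeholder for sym.SymVerABIInternal

	// demangle mirrors the suffix convention above: strip the mangling
	// suffix and recover the symbol's name plus its ABI version.
	func demangle(name string) (string, int) {
		switch {
		case strings.HasSuffix(name, ".abiinternal"):
			return strings.TrimSuffix(name, ".abiinternal"), symVerABIInternal
		case strings.HasSuffix(name, ".abi0"):
			return strings.TrimSuffix(name, ".abi0"), 0 // ABI0
		}
		return name, 0
	}

	func main() {
		fmt.Println(demangle("main.F.abiinternal")) // main.F 1
		fmt.Println(demangle("main.F.abi0"))        // main.F 0
	}
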
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index 5a096f1..cbd8118 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -92,11 +92,10 @@
 	FlagRound         = flag.Int("R", -1, "set address rounding `quantum`")
 	FlagTextAddr      = flag.Int64("T", -1, "set text segment `address`")
 	flagEntrySymbol   = flag.String("E", "", "set `entry` symbol name")
-
-	cpuprofile     = flag.String("cpuprofile", "", "write cpu profile to `file`")
-	memprofile     = flag.String("memprofile", "", "write memory profile to `file`")
-	memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-
+	cpuprofile        = flag.String("cpuprofile", "", "write cpu profile to `file`")
+	memprofile        = flag.String("memprofile", "", "write memory profile to `file`")
+	memprofilerate    = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
+	flagAbiWrap       = flag.Bool("abiwrap", objabi.Regabi_enabled != 0, "support ABI wrapper functions")
 	benchmarkFlag     = flag.String("benchmark", "", "set to 'mem' or 'cpu' to enable phase benchmarking")
 	benchmarkFileFlag = flag.String("benchmarkprofile", "", "emit phase profiles to `base`_phase.{cpu,mem}prof")
 )
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index c98e4de..85a8ff4 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -102,6 +102,41 @@
 		elfshnum = xosect.Elfsect.(*ElfShdr).shnum
 	}
 
+	sname := ldr.SymExtname(x)
+
+	// For functions with ABI wrappers, we have to make sure that we
+	// don't wind up with two elf symbol table entries with the same
+	// name (since this will generate an error from the external
+	// linker). In the CgoExportStatic case, we want the ABI0 symbol
+	// to have the primary symbol table entry (since it's going to be
+	// called from C), so we rename the ABIInternal symbol. In all
+	// other cases, we rename the ABI0 symbol, since we want
+	// cross-load-module calls to target ABIInternal.
+	//
+	// TODO: generalize this for non-ELF (put the rename code in the
+	// loader, and store the rename result in SymExtname).
+	//
+	// TODO: avoid the ldr.Lookup calls below by instead using an aux
+	// sym or marker relocation to associate the wrapper with the
+	// wrapped function.
+	//
+	if *flagAbiWrap {
+		if !ldr.IsExternal(x) && ldr.SymType(x) == sym.STEXT {
+			// First case
+			if ldr.SymVersion(x) == sym.SymVerABIInternal {
+				if s2 := ldr.Lookup(sname, sym.SymVerABI0); s2 != 0 && ldr.AttrCgoExportStatic(s2) && ldr.SymType(s2) == sym.STEXT {
+					sname = sname + ".abiinternal"
+				}
+			}
+			// Second case
+			if ldr.SymVersion(x) == sym.SymVerABI0 && !ldr.AttrCgoExportStatic(x) {
+				if s2 := ldr.Lookup(sname, sym.SymVerABIInternal); s2 != 0 && ldr.SymType(s2) == sym.STEXT {
+					sname = sname + ".abi0"
+				}
+			}
+		}
+	}
+
 	// One pass for each binding: elf.STB_LOCAL, elf.STB_GLOBAL,
 	// maybe one day elf.STB_WEAK.
 	bind := elf.STB_GLOBAL
@@ -140,8 +175,6 @@
 		other |= 3 << 5
 	}
 
-	sname := ldr.SymExtname(x)
-
 	// When dynamically linking, we create Symbols by reading the names from
 	// the symbol tables of the shared libraries and so the names need to
 	// match exactly. Tools like DTrace will have to wait for now.
diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go
index 971cc43..98c2131 100644
--- a/src/cmd/link/internal/loader/loader.go
+++ b/src/cmd/link/internal/loader/loader.go
@@ -322,6 +322,7 @@
 const (
 	// Loader.flags
 	FlagStrictDups = 1 << iota
+	FlagUseABIAlias
 )
 
 func NewLoader(flags uint32, elfsetstring elfsetstringFunc, reporter *ErrorReporter) *Loader {
@@ -2270,6 +2271,9 @@
 // symbol. If the sym in question is not an alias, the sym itself is
 // returned.
 func (l *Loader) ResolveABIAlias(s Sym) Sym {
+	if l.flags&FlagUseABIAlias == 0 {
+		return s
+	}
 	if s == 0 {
 		return 0
 	}
diff --git a/src/crypto/x509/internal/macos/corefoundation.go b/src/crypto/x509/internal/macos/corefoundation.go
index 9b776d4..0572c6c 100644
--- a/src/crypto/x509/internal/macos/corefoundation.go
+++ b/src/crypto/x509/internal/macos/corefoundation.go
@@ -39,7 +39,6 @@
 const kCFAllocatorDefault = 0
 const kCFStringEncodingUTF8 = 0x08000100
 
-//go:linkname x509_CFStringCreateWithBytes x509_CFStringCreateWithBytes
 //go:cgo_import_dynamic x509_CFStringCreateWithBytes CFStringCreateWithBytes "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 // StringToCFString returns a copy of the UTF-8 contents of s as a new CFString.
@@ -52,7 +51,6 @@
 }
 func x509_CFStringCreateWithBytes_trampoline()
 
-//go:linkname x509_CFDictionaryGetValueIfPresent x509_CFDictionaryGetValueIfPresent
 //go:cgo_import_dynamic x509_CFDictionaryGetValueIfPresent CFDictionaryGetValueIfPresent "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFDictionaryGetValueIfPresent(dict CFRef, key CFString) (value CFRef, ok bool) {
@@ -67,7 +65,6 @@
 
 const kCFNumberSInt32Type = 3
 
-//go:linkname x509_CFNumberGetValue x509_CFNumberGetValue
 //go:cgo_import_dynamic x509_CFNumberGetValue CFNumberGetValue "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFNumberGetValue(num CFRef) (int32, error) {
@@ -81,7 +78,6 @@
 }
 func x509_CFNumberGetValue_trampoline()
 
-//go:linkname x509_CFDataGetLength x509_CFDataGetLength
 //go:cgo_import_dynamic x509_CFDataGetLength CFDataGetLength "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFDataGetLength(data CFRef) int {
@@ -90,7 +86,6 @@
 }
 func x509_CFDataGetLength_trampoline()
 
-//go:linkname x509_CFDataGetBytePtr x509_CFDataGetBytePtr
 //go:cgo_import_dynamic x509_CFDataGetBytePtr CFDataGetBytePtr "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFDataGetBytePtr(data CFRef) uintptr {
@@ -99,7 +94,6 @@
 }
 func x509_CFDataGetBytePtr_trampoline()
 
-//go:linkname x509_CFArrayGetCount x509_CFArrayGetCount
 //go:cgo_import_dynamic x509_CFArrayGetCount CFArrayGetCount "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFArrayGetCount(array CFRef) int {
@@ -108,7 +102,6 @@
 }
 func x509_CFArrayGetCount_trampoline()
 
-//go:linkname x509_CFArrayGetValueAtIndex x509_CFArrayGetValueAtIndex
 //go:cgo_import_dynamic x509_CFArrayGetValueAtIndex CFArrayGetValueAtIndex "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFArrayGetValueAtIndex(array CFRef, index int) CFRef {
@@ -117,7 +110,6 @@
 }
 func x509_CFArrayGetValueAtIndex_trampoline()
 
-//go:linkname x509_CFEqual x509_CFEqual
 //go:cgo_import_dynamic x509_CFEqual CFEqual "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFEqual(a, b CFRef) bool {
@@ -126,7 +118,6 @@
 }
 func x509_CFEqual_trampoline()
 
-//go:linkname x509_CFRelease x509_CFRelease
 //go:cgo_import_dynamic x509_CFRelease CFRelease "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation"
 
 func CFRelease(ref CFRef) {
diff --git a/src/crypto/x509/internal/macos/corefoundation.s b/src/crypto/x509/internal/macos/corefoundation.s
index a4495d6..1ce39fa 100644
--- a/src/crypto/x509/internal/macos/corefoundation.s
+++ b/src/crypto/x509/internal/macos/corefoundation.s
@@ -6,21 +6,24 @@
 
 #include "textflag.h"
 
-TEXT ·x509_CFArrayGetCount_trampoline(SB),NOSPLIT,$0-0
+// The trampolines are ABIInternal as they are address-taken in
+// Go code.
+
+TEXT ·x509_CFArrayGetCount_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFArrayGetCount(SB)
-TEXT ·x509_CFArrayGetValueAtIndex_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFArrayGetValueAtIndex_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFArrayGetValueAtIndex(SB)
-TEXT ·x509_CFDataGetBytePtr_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFDataGetBytePtr_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFDataGetBytePtr(SB)
-TEXT ·x509_CFDataGetLength_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFDataGetLength_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFDataGetLength(SB)
-TEXT ·x509_CFStringCreateWithBytes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFStringCreateWithBytes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFStringCreateWithBytes(SB)
-TEXT ·x509_CFRelease_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFRelease_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFRelease(SB)
-TEXT ·x509_CFDictionaryGetValueIfPresent_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFDictionaryGetValueIfPresent_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFDictionaryGetValueIfPresent(SB)
-TEXT ·x509_CFNumberGetValue_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFNumberGetValue_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFNumberGetValue(SB)
-TEXT ·x509_CFEqual_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_CFEqual_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_CFEqual(SB)
diff --git a/src/crypto/x509/internal/macos/security.go b/src/crypto/x509/internal/macos/security.go
index 5e39e93..3163e3a 100644
--- a/src/crypto/x509/internal/macos/security.go
+++ b/src/crypto/x509/internal/macos/security.go
@@ -63,7 +63,6 @@
 
 const errSecNoTrustSettings = -25263
 
-//go:linkname x509_SecTrustSettingsCopyCertificates x509_SecTrustSettingsCopyCertificates
 //go:cgo_import_dynamic x509_SecTrustSettingsCopyCertificates SecTrustSettingsCopyCertificates "/System/Library/Frameworks/Security.framework/Versions/A/Security"
 
 func SecTrustSettingsCopyCertificates(domain SecTrustSettingsDomain) (certArray CFRef, err error) {
@@ -80,7 +79,6 @@
 
 const kSecFormatX509Cert int32 = 9
 
-//go:linkname x509_SecItemExport x509_SecItemExport
 //go:cgo_import_dynamic x509_SecItemExport SecItemExport "/System/Library/Frameworks/Security.framework/Versions/A/Security"
 
 func SecItemExport(cert CFRef) (data CFRef, err error) {
@@ -95,7 +93,6 @@
 
 const errSecItemNotFound = -25300
 
-//go:linkname x509_SecTrustSettingsCopyTrustSettings x509_SecTrustSettingsCopyTrustSettings
 //go:cgo_import_dynamic x509_SecTrustSettingsCopyTrustSettings SecTrustSettingsCopyTrustSettings "/System/Library/Frameworks/Security.framework/Versions/A/Security"
 
 func SecTrustSettingsCopyTrustSettings(cert CFRef, domain SecTrustSettingsDomain) (trustSettings CFRef, err error) {
@@ -110,7 +107,6 @@
 }
 func x509_SecTrustSettingsCopyTrustSettings_trampoline()
 
-//go:linkname x509_SecPolicyCopyProperties x509_SecPolicyCopyProperties
 //go:cgo_import_dynamic x509_SecPolicyCopyProperties SecPolicyCopyProperties "/System/Library/Frameworks/Security.framework/Versions/A/Security"
 
 func SecPolicyCopyProperties(policy CFRef) CFRef {
diff --git a/src/crypto/x509/internal/macos/security.s b/src/crypto/x509/internal/macos/security.s
index bd446db..bea265a 100644
--- a/src/crypto/x509/internal/macos/security.s
+++ b/src/crypto/x509/internal/macos/security.s
@@ -6,11 +6,14 @@
 
 #include "textflag.h"
 
-TEXT ·x509_SecTrustSettingsCopyCertificates_trampoline(SB),NOSPLIT,$0-0
+// The trampolines are ABIInternal as they are address-taken in
+// Go code.
+
+TEXT ·x509_SecTrustSettingsCopyCertificates_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecTrustSettingsCopyCertificates(SB)
-TEXT ·x509_SecItemExport_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_SecItemExport_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecItemExport(SB)
-TEXT ·x509_SecTrustSettingsCopyTrustSettings_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_SecTrustSettingsCopyTrustSettings_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecTrustSettingsCopyTrustSettings(SB)
-TEXT ·x509_SecPolicyCopyProperties_trampoline(SB),NOSPLIT,$0-0
+TEXT ·x509_SecPolicyCopyProperties_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	x509_SecPolicyCopyProperties(SB)
diff --git a/src/embed/embed.go b/src/embed/embed.go
index 98da870..7f2719d 100644
--- a/src/embed/embed.go
+++ b/src/embed/embed.go
@@ -143,7 +143,7 @@
 // See the package documentation for more details about initializing an FS.
 type FS struct {
 	// The compiler knows the layout of this struct.
-	// See cmd/compile/internal/gc's initEmbed.
+	// See cmd/compile/internal/staticdata's WriteEmbed.
 	//
 	// The files list is sorted by name but not by simple string comparison.
 	// Instead, each file's name takes the form "dir/elem" or "dir/elem/".
@@ -213,7 +213,7 @@
 // It implements fs.FileInfo and fs.DirEntry.
 type file struct {
 	// The compiler knows the layout of this struct.
-	// See cmd/compile/internal/gc's initEmbed.
+	// See cmd/compile/internal/staticdata's WriteEmbed.
 	name string
 	data string
 	hash [16]byte // truncated SHA256 hash
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index c97c668..e5c849e 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -76,8 +76,12 @@
 	  unicode/utf8, unicode/utf16, unicode,
 	  unsafe;
 
+	# These packages depend only on unsafe.
+	unsafe
+	< internal/abi;
+
 	# RUNTIME is the core runtime group of packages, all of them very light-weight.
-	internal/cpu, unsafe
+	internal/abi, internal/cpu, unsafe
 	< internal/bytealg
 	< internal/unsafeheader
 	< runtime/internal/sys
@@ -285,7 +289,7 @@
 	math/big, go/token
 	< go/constant;
 
-	container/heap, go/constant, go/parser
+	container/heap, go/constant, go/parser, regexp
 	< go/types;
 
 	FMT
diff --git a/src/go/constant/value.go b/src/go/constant/value.go
index 4641442..223c363 100644
--- a/src/go/constant/value.go
+++ b/src/go/constant/value.go
@@ -17,6 +17,7 @@
 	"go/token"
 	"math"
 	"math/big"
+	"math/bits"
 	"strconv"
 	"strings"
 	"sync"
@@ -607,7 +608,11 @@
 func BitLen(x Value) int {
 	switch x := x.(type) {
 	case int64Val:
-		return i64toi(x).val.BitLen()
+		u := uint64(x)
+		if x < 0 {
+			u = uint64(-x)
+		}
+		return 64 - bits.LeadingZeros64(u)
 	case intVal:
 		return x.val.BitLen()
 	case unknownVal:
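Annotation: the rewritten int64 case computes the bit length of |x| without allocating a big.Int (the old path boxed the value via i64toi). A standalone equivalent, including the math.MinInt64 edge case:

	package main

	import (
		"fmt"
		"math"
		"math/bits"
	)

	// bitLen64 mirrors the int64Val fast path above: the bit length of |x|.
	func bitLen64(x int64) int {
		u := uint64(x)
		if x < 0 {
			// Negation wraps for math.MinInt64, but uint64(-x) still
			// yields 1<<63, so the result (64) is correct.
			u = uint64(-x)
		}
		return 64 - bits.LeadingZeros64(u)
	}

	func main() {
		fmt.Println(bitLen64(-16), bitLen64(math.MinInt64)) // 5 64
	}
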
@@ -1015,52 +1020,55 @@
 	}
 	// ord(x) <= ord(y)
 
-	switch x := x.(type) {
+	// Prefer to return the original x and y arguments when possible,
+	// to avoid unnecessary heap allocations.
+
+	switch x1 := x.(type) {
 	case boolVal, *stringVal, complexVal:
 		return x, y
 
 	case int64Val:
-		switch y := y.(type) {
+		switch y.(type) {
 		case int64Val:
 			return x, y
 		case intVal:
-			return i64toi(x), y
+			return i64toi(x1), y
 		case ratVal:
-			return i64tor(x), y
+			return i64tor(x1), y
 		case floatVal:
-			return i64tof(x), y
+			return i64tof(x1), y
 		case complexVal:
-			return vtoc(x), y
+			return vtoc(x1), y
 		}
 
 	case intVal:
-		switch y := y.(type) {
+		switch y.(type) {
 		case intVal:
 			return x, y
 		case ratVal:
-			return itor(x), y
+			return itor(x1), y
 		case floatVal:
-			return itof(x), y
+			return itof(x1), y
 		case complexVal:
-			return vtoc(x), y
+			return vtoc(x1), y
 		}
 
 	case ratVal:
-		switch y := y.(type) {
+		switch y.(type) {
 		case ratVal:
 			return x, y
 		case floatVal:
-			return rtof(x), y
+			return rtof(x1), y
 		case complexVal:
-			return vtoc(x), y
+			return vtoc(x1), y
 		}
 
 	case floatVal:
-		switch y := y.(type) {
+		switch y.(type) {
 		case floatVal:
 			return x, y
 		case complexVal:
-			return vtoc(x), y
+			return vtoc(x1), y
 		}
 	}
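Annotation: the rebinding above is deliberate. The original "switch y := y.(type)" form produced typed values, and returning one of them re-boxes it into a new Value interface, which allocates for the non-pointer representations; binding only x1 and returning the untouched interface values x and y avoids that.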
 
diff --git a/src/go/constant/value_test.go b/src/go/constant/value_test.go
index 2866774..91ad0b0 100644
--- a/src/go/constant/value_test.go
+++ b/src/go/constant/value_test.go
@@ -706,3 +706,24 @@
 		})
 	}
 }
+
+var bitLenTests = []struct {
+	val  int64
+	want int
+}{
+	{0, 0},
+	{1, 1},
+	{-16, 5},
+	{1 << 61, 62},
+	{1 << 62, 63},
+	{-1 << 62, 63},
+	{-1 << 63, 64},
+}
+
+func TestBitLen(t *testing.T) {
+	for _, test := range bitLenTests {
+		if got := BitLen(MakeInt64(test.val)); got != test.want {
+			t.Errorf("%v: got %v, want %v", test.val, got, test.want)
+		}
+	}
+}
diff --git a/src/go/internal/gcimporter/iimport.go b/src/go/internal/gcimporter/iimport.go
index c59dd16..a3184e7 100644
--- a/src/go/internal/gcimporter/iimport.go
+++ b/src/go/internal/gcimporter/iimport.go
@@ -15,6 +15,7 @@
 	"go/token"
 	"go/types"
 	"io"
+	"math/big"
 	"sort"
 )
 
@@ -320,7 +321,9 @@
 		val = constant.MakeString(r.string())
 
 	case types.IsInteger:
-		val = r.mpint(b)
+		var x big.Int
+		r.mpint(&x, b)
+		val = constant.Make(&x)
 
 	case types.IsFloat:
 		val = r.mpfloat(b)
@@ -365,8 +368,8 @@
 	return
 }
 
-func (r *importReader) mpint(b *types.Basic) constant.Value {
-	signed, maxBytes := intSize(b)
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+	signed, maxBytes := intSize(typ)
 
 	maxSmall := 256 - maxBytes
 	if signed {
@@ -385,7 +388,8 @@
 				v = ^v
 			}
 		}
-		return constant.MakeInt64(v)
+		x.SetInt64(v)
+		return
 	}
 
 	v := -n
@@ -395,39 +399,23 @@
 	if v < 1 || uint(v) > maxBytes {
 		errorf("weird decoding: %v, %v => %v", n, signed, v)
 	}
-
-	buf := make([]byte, v)
-	io.ReadFull(&r.declReader, buf)
-
-	// convert to little endian
-	// TODO(gri) go/constant should have a more direct conversion function
-	//           (e.g., once it supports a big.Float based implementation)
-	for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
-		buf[i], buf[j] = buf[j], buf[i]
-	}
-
-	x := constant.MakeFromBytes(buf)
+	b := make([]byte, v)
+	io.ReadFull(&r.declReader, b)
+	x.SetBytes(b)
 	if signed && n&1 != 0 {
-		x = constant.UnaryOp(token.SUB, x, 0)
+		x.Neg(x)
 	}
-	return x
 }
 
-func (r *importReader) mpfloat(b *types.Basic) constant.Value {
-	x := r.mpint(b)
-	if constant.Sign(x) == 0 {
-		return x
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+	var mant big.Int
+	r.mpint(&mant, typ)
+	var f big.Float
+	f.SetInt(&mant)
+	if f.Sign() != 0 {
+		f.SetMantExp(&f, int(r.int64()))
 	}
-
-	exp := r.int64()
-	switch {
-	case exp > 0:
-		x = constant.Shift(x, token.SHL, uint(exp))
-	case exp < 0:
-		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
-		x = constant.BinaryOp(x, token.QUO, d)
-	}
-	return x
+	return constant.Make(&f)
 }
 
 func (r *importReader) ident() string {
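Annotation: float decoding now reconstructs mant × 2**exp directly with math/big instead of shifting go/constant values. A minimal illustration of SetMantExp:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// The export format stores a float as a mantissa and a base-2
		// exponent; SetMantExp rebuilds mant * 2**exp in one step.
		var mant big.Int
		mant.SetInt64(3)
		var f big.Float
		f.SetInt(&mant)
		f.SetMantExp(&f, 2) // f = 3 * 2**2
		fmt.Println(f.String()) // 12
	}
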
diff --git a/src/go/types/api.go b/src/go/types/api.go
index d625959..b5bbb2d 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -101,6 +101,13 @@
 // A Config specifies the configuration for type checking.
 // The zero value for Config is a ready-to-use default configuration.
 type Config struct {
+	// GoVersion describes the accepted Go language version. The string
+	// must follow the format "go%d.%d" (e.g. "go1.12") or it must be
+	// empty; an empty string indicates the latest language version.
+	// If the format is invalid, invoking the type checker will cause a
+	// panic.
+	GoVersion string
+
 	// If IgnoreFuncBodies is set, function bodies are not
 	// type-checked.
 	IgnoreFuncBodies bool
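Annotation: a minimal usage sketch of the new field, assuming a toolchain that already ships it. With GoVersion set to "go1.12", a go1.13 language feature such as digit separators in numeric literals is rejected:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	func main() {
		fset := token.NewFileSet()
		src := "package p\nconst c = 1_000 // digit separators are a go1.13 feature"
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		conf := types.Config{GoVersion: "go1.12"}
		if _, err := conf.Check("p", fset, []*ast.File{f}, nil); err != nil {
			fmt.Println(err) // underscores in numeric literals require go1.13 or later
		}
	}
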
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index 75cebc9..dde451e 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -42,7 +42,7 @@
 	return pkg.Name()
 }
 
-func mayTypecheck(t *testing.T, path, source string, info *Info) string {
+func mayTypecheck(t *testing.T, path, source string, info *Info) (string, error) {
 	fset := token.NewFileSet()
 	f, err := parser.ParseFile(fset, path, source, 0)
 	if f == nil { // ignore errors unless f is nil
@@ -52,8 +52,8 @@
 		Error:    func(err error) {},
 		Importer: importer.Default(),
 	}
-	pkg, _ := conf.Check(f.Name.Name, fset, []*ast.File{f}, info)
-	return pkg.Name()
+	pkg, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, info)
+	return pkg.Name(), err
 }
 
 func TestValuesInfo(t *testing.T) {
@@ -175,6 +175,9 @@
 }
 
 func TestTypesInfo(t *testing.T) {
+	// Test sources that are not expected to typecheck must start with the broken prefix.
+	const broken = "package broken_"
+
 	var tests = []struct {
 		src  string
 		expr string // expression
@@ -187,6 +190,39 @@
 		{`package b3; var x interface{} = 0i`, `0i`, `complex128`},
 		{`package b4; var x interface{} = "foo"`, `"foo"`, `string`},
 
+		// uses of nil
+		{`package n0; var _ *int = nil`, `nil`, `untyped nil`},
+		{`package n1; var _ func() = nil`, `nil`, `untyped nil`},
+		{`package n2; var _ []byte = nil`, `nil`, `untyped nil`},
+		{`package n3; var _ map[int]int = nil`, `nil`, `untyped nil`},
+		{`package n4; var _ chan int = nil`, `nil`, `untyped nil`},
+		{`package n5; var _ interface{} = nil`, `nil`, `untyped nil`},
+		{`package n6; import "unsafe"; var _ unsafe.Pointer = nil`, `nil`, `untyped nil`},
+
+		{`package n10; var (x *int; _ = x == nil)`, `nil`, `untyped nil`},
+		{`package n11; var (x func(); _ = x == nil)`, `nil`, `untyped nil`},
+		{`package n12; var (x []byte; _ = x == nil)`, `nil`, `untyped nil`},
+		{`package n13; var (x map[int]int; _ = x == nil)`, `nil`, `untyped nil`},
+		{`package n14; var (x chan int; _ = x == nil)`, `nil`, `untyped nil`},
+		{`package n15; var (x interface{}; _ = x == nil)`, `nil`, `untyped nil`},
+		{`package n15; import "unsafe"; var (x unsafe.Pointer; _ = x == nil)`, `nil`, `untyped nil`},
+
+		{`package n20; var _ = (*int)(nil)`, `nil`, `untyped nil`},
+		{`package n21; var _ = (func())(nil)`, `nil`, `untyped nil`},
+		{`package n22; var _ = ([]byte)(nil)`, `nil`, `untyped nil`},
+		{`package n23; var _ = (map[int]int)(nil)`, `nil`, `untyped nil`},
+		{`package n24; var _ = (chan int)(nil)`, `nil`, `untyped nil`},
+		{`package n25; var _ = (interface{})(nil)`, `nil`, `untyped nil`},
+		{`package n26; import "unsafe"; var _ = unsafe.Pointer(nil)`, `nil`, `untyped nil`},
+
+		{`package n30; func f(*int) { f(nil) }`, `nil`, `untyped nil`},
+		{`package n31; func f(func()) { f(nil) }`, `nil`, `untyped nil`},
+		{`package n32; func f([]byte) { f(nil) }`, `nil`, `untyped nil`},
+		{`package n33; func f(map[int]int) { f(nil) }`, `nil`, `untyped nil`},
+		{`package n34; func f(chan int) { f(nil) }`, `nil`, `untyped nil`},
+		{`package n35; func f(interface{}) { f(nil) }`, `nil`, `untyped nil`},
+		{`package n35; import "unsafe"; func f(unsafe.Pointer) { f(nil) }`, `nil`, `untyped nil`},
+
 		// comma-ok expressions
 		{`package p0; var x interface{}; var _, _ = x.(int)`,
 			`x.(int)`,
@@ -268,17 +304,27 @@
 		},
 
 		// tests for broken code that doesn't parse or type-check
-		{`package x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`},
-		{`package x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`},
-		{`package x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a; f: b;}}`, `b`, `string`},
-		{`package x3; var x = panic("");`, `panic`, `func(interface{})`},
+		{broken + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`},
+		{broken + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`},
+		{broken + `x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a; f: b;}}`, `b`, `string`},
+		{broken + `x3; var x = panic("");`, `panic`, `func(interface{})`},
 		{`package x4; func _() { panic("") }`, `panic`, `func(interface{})`},
-		{`package x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`},
+		{broken + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`},
 	}
 
 	for _, test := range tests {
 		info := Info{Types: make(map[ast.Expr]TypeAndValue)}
-		name := mayTypecheck(t, "TypesInfo", test.src, &info)
+		var name string
+		if strings.HasPrefix(test.src, broken) {
+			var err error
+			name, err = mayTypecheck(t, "TypesInfo", test.src, &info)
+			if err == nil {
+				t.Errorf("package %s: expected to fail but passed", name)
+				continue
+			}
+		} else {
+			name = mustTypecheck(t, "TypesInfo", test.src, &info)
+		}
 
 		// look for expression type
 		var typ Type
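
The n0-n35 cases above pin down that a nil operand is recorded with type untyped nil rather than with its target type. A minimal standalone sketch of that behavior through the public go/types API (not part of the patch; file and package names are invented):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"
	)

	func main() {
		const src = `package p; var _ = (*int)(nil)`
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		info := types.Info{Types: make(map[ast.Expr]types.TypeAndValue)}
		var conf types.Config
		if _, err := conf.Check("p", fset, []*ast.File{f}, &info); err != nil {
			panic(err)
		}
		for expr, tv := range info.Types {
			if id, ok := expr.(*ast.Ident); ok && id.Name == "nil" {
				fmt.Println(tv.Type) // with this change: untyped nil
			}
		}
	}
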
diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go
index 616564b..d6f18c9 100644
--- a/src/go/types/assignments.go
+++ b/src/go/types/assignments.go
@@ -120,6 +120,8 @@
 		if lhs.typ == nil {
 			lhs.typ = Typ[Invalid]
 		}
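+		// mark lhs as used to avoid follow-on "declared but not used" errors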
+		lhs.used = true
 		return nil
 	}
 
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index fd35f78..078ed44 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -353,8 +353,8 @@
 			return
 		}
 
-		if ok, code := x.assignableTo(check, m.key, nil); !ok {
-			check.invalidArg(x, code, "%s is not assignable to %s", x, m.key)
+		check.assignment(x, m.key, "argument to delete")
+		if x.mode == invalid {
 			return
 		}
 
diff --git a/src/go/types/check.go b/src/go/types/check.go
index 280792e..3bc8ee0 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -8,6 +8,7 @@
 
 import (
 	"errors"
+	"fmt"
 	"go/ast"
 	"go/constant"
 	"go/token"
@@ -69,6 +70,12 @@
 	path, dir string
 }
 
+// A dotImportKey describes a dot-imported object in the given scope.
+type dotImportKey struct {
+	scope *Scope
+	obj   Object
+}
+
 // A Checker maintains the state of the type checker.
 // It must be created with NewChecker.
 type Checker struct {
@@ -78,16 +85,18 @@
 	fset *token.FileSet
 	pkg  *Package
 	*Info
-	objMap map[Object]*declInfo       // maps package-level objects and (non-interface) methods to declaration info
-	impMap map[importKey]*Package     // maps (import path, source directory) to (complete or fake) package
-	posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions
-	pkgCnt map[string]int             // counts number of imported packages with a given name (for better error messages)
+	version version                    // accepted language version
+	objMap  map[Object]*declInfo       // maps package-level objects and (non-interface) methods to declaration info
+	impMap  map[importKey]*Package     // maps (import path, source directory) to (complete or fake) package
+	posMap  map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions
+	pkgCnt  map[string]int             // counts number of imported packages with a given name (for better error messages)
 
 	// information collected during type-checking of a set of package files
 	// (initialized by Files, valid only for the duration of check.Files;
 	// maps and lists are allocated on demand)
-	files            []*ast.File                             // package files
-	unusedDotImports map[*Scope]map[*Package]*ast.ImportSpec // unused dot-imported packages
+	files        []*ast.File               // package files
+	imports      []*PkgName                // list of imported packages
+	dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through
 
 	firstErr error                 // first error encountered
 	methods  map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods
@@ -104,22 +113,6 @@
 	indent int // indentation for tracing
 }
 
-// addUnusedImport adds the position of a dot-imported package
-// pkg to the map of dot imports for the given file scope.
-func (check *Checker) addUnusedDotImport(scope *Scope, pkg *Package, spec *ast.ImportSpec) {
-	mm := check.unusedDotImports
-	if mm == nil {
-		mm = make(map[*Scope]map[*Package]*ast.ImportSpec)
-		check.unusedDotImports = mm
-	}
-	m := mm[scope]
-	if m == nil {
-		m = make(map[*Package]*ast.ImportSpec)
-		mm[scope] = m
-	}
-	m[pkg] = spec
-}
-
 // addDeclDep adds the dependency edge (check.decl -> to) if check.decl exists
 func (check *Checker) addDeclDep(to Object) {
 	from := check.decl
@@ -185,15 +178,21 @@
 		info = new(Info)
 	}
 
+	version, err := parseGoVersion(conf.GoVersion)
+	if err != nil {
+		panic(fmt.Sprintf("invalid Go version %q (%v)", conf.GoVersion, err))
+	}
+
 	return &Checker{
-		conf:   conf,
-		fset:   fset,
-		pkg:    pkg,
-		Info:   info,
-		objMap: make(map[Object]*declInfo),
-		impMap: make(map[importKey]*Package),
-		posMap: make(map[*Interface][]token.Pos),
-		pkgCnt: make(map[string]int),
+		conf:    conf,
+		fset:    fset,
+		pkg:     pkg,
+		Info:    info,
+		version: version,
+		objMap:  make(map[Object]*declInfo),
+		impMap:  make(map[importKey]*Package),
+		posMap:  make(map[*Interface][]token.Pos),
+		pkgCnt:  make(map[string]int),
 	}
 }
 
@@ -202,7 +201,8 @@
 func (check *Checker) initFiles(files []*ast.File) {
 	// start with a clean slate (check.Files may be called multiple times)
 	check.files = nil
-	check.unusedDotImports = nil
+	check.imports = nil
+	check.dotImportMap = nil
 
 	check.firstErr = nil
 	check.methods = nil
@@ -272,10 +272,16 @@
 	if !check.conf.DisableUnusedImportCheck {
 		check.unusedImports()
 	}
+	// no longer needed - release memory
+	check.imports = nil
+	check.dotImportMap = nil
 
 	check.recordUntyped()
 
 	check.pkg.complete = true
+
+	// TODO(rFindley) There's more memory we should release at this point.
+
 	return
 }
 
diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go
index ce31dab..ca7d926 100644
--- a/src/go/types/check_test.go
+++ b/src/go/types/check_test.go
@@ -27,12 +27,14 @@
 
 import (
 	"flag"
+	"fmt"
 	"go/ast"
 	"go/importer"
 	"go/parser"
 	"go/scanner"
 	"go/token"
 	"internal/testenv"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -45,57 +47,10 @@
 var (
 	haltOnError = flag.Bool("halt", false, "halt on error")
 	listErrors  = flag.Bool("errlist", false, "list errors")
-	testFiles   = flag.String("files", "", "space-separated list of test files")
+	testFiles   = flag.String("files", "", "comma-separated list of test files")
+	goVersion   = flag.String("lang", "", "Go language version (e.g. \"go1.12\")")
 )
 
-// The test filenames do not end in .go so that they are invisible
-// to gofmt since they contain comments that must not change their
-// positions relative to surrounding tokens.
-
-// Each tests entry is list of files belonging to the same package.
-var tests = [][]string{
-	{"testdata/errors.src"},
-	{"testdata/importdecl0a.src", "testdata/importdecl0b.src"},
-	{"testdata/importdecl1a.src", "testdata/importdecl1b.src"},
-	{"testdata/importC.src"}, // special handling in checkFiles
-	{"testdata/cycles.src"},
-	{"testdata/cycles1.src"},
-	{"testdata/cycles2.src"},
-	{"testdata/cycles3.src"},
-	{"testdata/cycles4.src"},
-	{"testdata/cycles5.src"},
-	{"testdata/init0.src"},
-	{"testdata/init1.src"},
-	{"testdata/init2.src"},
-	{"testdata/decls0.src"},
-	{"testdata/decls1.src"},
-	{"testdata/decls2a.src", "testdata/decls2b.src"},
-	{"testdata/decls3.src"},
-	{"testdata/decls4.src"},
-	{"testdata/decls5.src"},
-	{"testdata/const0.src"},
-	{"testdata/const1.src"},
-	{"testdata/constdecl.src"},
-	{"testdata/vardecl.src"},
-	{"testdata/expr0.src"},
-	{"testdata/expr1.src"},
-	{"testdata/expr2.src"},
-	{"testdata/expr3.src"},
-	{"testdata/methodsets.src"},
-	{"testdata/shifts.src"},
-	{"testdata/builtins.src"},
-	{"testdata/conversions.src"},
-	{"testdata/conversions2.src"},
-	{"testdata/stmt0.src"},
-	{"testdata/stmt1.src"},
-	{"testdata/gotos.src"},
-	{"testdata/labels.src"},
-	{"testdata/literals.src"},
-	{"testdata/issues.src"},
-	{"testdata/blank.src"},
-	{"testdata/issue25008b.src", "testdata/issue25008a.src"}, // order (b before a) is crucial!
-}
-
 var fset = token.NewFileSet()
 
 // Positioned errors are of the form filename:line:column: message .
@@ -114,11 +69,11 @@
 	return
 }
 
-func parseFiles(t *testing.T, filenames []string) ([]*ast.File, []error) {
+func parseFiles(t *testing.T, filenames []string, srcs [][]byte) ([]*ast.File, []error) {
 	var files []*ast.File
 	var errlist []error
-	for _, filename := range filenames {
-		file, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)
+	for i, filename := range filenames {
+		file, err := parser.ParseFile(fset, filename, srcs[i], parser.AllErrors)
 		if file == nil {
 			t.Fatalf("%s: %s", filename, err)
 		}
@@ -147,19 +102,17 @@
 // errMap collects the regular expressions of ERROR comments found
 // in files and returns them as a map of error positions to error messages.
 //
-func errMap(t *testing.T, testname string, files []*ast.File) map[string][]string {
+// srcs must be a slice of the same length as files, containing the original
+// source for the parsed AST.
+func errMap(t *testing.T, files []*ast.File, srcs [][]byte) map[string][]string {
 	// map of position strings to lists of error message patterns
 	errmap := make(map[string][]string)
 
-	for _, file := range files {
-		filename := fset.Position(file.Package).Filename
-		src, err := os.ReadFile(filename)
-		if err != nil {
-			t.Fatalf("%s: could not read %s", testname, filename)
-		}
-
+	for i, file := range files {
+		tok := fset.File(file.Package)
+		src := srcs[i]
 		var s scanner.Scanner
-		s.Init(fset.AddFile(filename, -1, len(src)), src, nil, scanner.ScanComments)
+		s.Init(tok, src, nil, scanner.ScanComments)
 		var prev token.Pos // position of last non-comment, non-semicolon token
 		var here token.Pos // position immediately after the token at position prev
 
@@ -236,15 +189,38 @@
 	}
 }
 
-func checkFiles(t *testing.T, testfiles []string) {
+// goVersionRx matches a Go version string using '_', e.g. "go1_12".
+var goVersionRx = regexp.MustCompile(`^go[1-9][0-9]*_(0|[1-9][0-9]*)$`)
+
+// asGoVersion returns a regular Go language version string
+// if s is a Go version string using '_' rather than '.' to
+// separate the major and minor version numbers (e.g. "go1_12").
+// Otherwise it returns the empty string.
+func asGoVersion(s string) string {
+	if goVersionRx.MatchString(s) {
+		return strings.Replace(s, "_", ".", 1)
+	}
+	return ""
+}
+
+func checkFiles(t *testing.T, goVersion string, filenames []string, srcs [][]byte) {
+	if len(filenames) == 0 {
+		t.Fatal("no source files")
+	}
+
 	// parse files and collect parser errors
-	files, errlist := parseFiles(t, testfiles)
+	files, errlist := parseFiles(t, filenames, srcs)
 
 	pkgName := "<no package>"
 	if len(files) > 0 {
 		pkgName = files[0].Name.Name
 	}
 
+	// if no Go version is given, consider the package name
+	if goVersion == "" {
+		goVersion = asGoVersion(pkgName)
+	}
+
 	if *listErrors && len(errlist) > 0 {
 		t.Errorf("--- %s:", pkgName)
 		for _, err := range errlist {
@@ -254,10 +230,15 @@
 
 	// typecheck and collect typechecker errors
 	var conf Config
+	conf.GoVersion = goVersion
+
 	// special case for importC.src
-	if len(testfiles) == 1 && strings.HasSuffix(testfiles[0], "importC.src") {
-		conf.FakeImportC = true
+	if len(filenames) == 1 {
+		if strings.HasSuffix(filenames[0], "importC.src") {
+			conf.FakeImportC = true
+		}
 	}
+
 	conf.Importer = importer.Default()
 	conf.Error = func(err error) {
 		if *haltOnError {
@@ -292,7 +273,7 @@
 
 	// match and eliminate errors;
 	// we are expecting the following errors
-	errmap := errMap(t, pkgName, files)
+	errmap := errMap(t, files, srcs)
 	eliminate(t, errmap, errlist)
 
 	// there should be no expected errors left
@@ -306,44 +287,66 @@
 	}
 }
 
+// TestCheck is for manual testing of selected input files, provided with -files.
+// The accepted Go language version can be controlled with the -lang flag.
 func TestCheck(t *testing.T) {
-	testenv.MustHaveGoBuild(t)
-
-	// Declare builtins for testing.
-	DefPredeclaredTestFuncs()
-
-	// If explicit test files are specified, only check those.
-	if files := *testFiles; files != "" {
-		checkFiles(t, strings.Split(files, " "))
+	if *testFiles == "" {
 		return
 	}
-
-	// Otherwise, run all the tests.
-	for _, files := range tests {
-		checkFiles(t, files)
-	}
+	testenv.MustHaveGoBuild(t)
+	DefPredeclaredTestFuncs()
+	testPkg(t, strings.Split(*testFiles, ","), *goVersion)
 }
 
-func TestFixedBugs(t *testing.T) { testDir(t, "fixedbugs") }
+func TestLongConstants(t *testing.T) {
+	format := "package longconst\n\nconst _ = %s\nconst _ = %s // ERROR excessively long constant"
+	src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001))
+	checkFiles(t, "", []string{"longconst.go"}, [][]byte{[]byte(src)})
+}
+
+func TestTestdata(t *testing.T)  { DefPredeclaredTestFuncs(); testDir(t, "testdata") }
+func TestFixedbugs(t *testing.T) { testDir(t, "fixedbugs") }
 
 func testDir(t *testing.T, dir string) {
 	testenv.MustHaveGoBuild(t)
 
-	dirs, err := os.ReadDir(dir)
+	fis, err := os.ReadDir(dir)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
+		return
 	}
 
-	for _, d := range dirs {
-		testname := filepath.Base(d.Name())
-		testname = strings.TrimSuffix(testname, filepath.Ext(testname))
-		t.Run(testname, func(t *testing.T) {
-			filename := filepath.Join(dir, d.Name())
-			if d.IsDir() {
-				t.Errorf("skipped directory %q", filename)
-				return
+	for _, fi := range fis {
+		path := filepath.Join(dir, fi.Name())
+
+		// if fi is a directory, its files make up a single package
+		var filenames []string
+		if fi.IsDir() {
+			fis, err := ioutil.ReadDir(path)
+			if err != nil {
+				t.Error(err)
+				continue
 			}
-			checkFiles(t, []string{filename})
+			for _, fi := range fis {
+				filenames = append(filenames, filepath.Join(path, fi.Name()))
+			}
+		} else {
+			filenames = []string{path}
+		}
+		t.Run(filepath.Base(path), func(t *testing.T) {
+			testPkg(t, filenames, "")
 		})
 	}
 }
+
+func testPkg(t *testing.T, filenames []string, goVersion string) {
+	srcs := make([][]byte, len(filenames))
+	for i, filename := range filenames {
+		src, err := os.ReadFile(filename)
+		if err != nil {
+			t.Fatalf("could not read %s: %v", filename, err)
+		}
+		srcs[i] = src
+	}
+	checkFiles(t, goVersion, filenames, srcs)
+}
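
With the hard-wired test table gone, TestTestdata and TestFixedbugs discover packages on disk (a directory under testdata makes up one package), and TestCheck is now purely manual, e.g. go test go/types -run Check$ -files=testdata/a.src,testdata/b.src -lang=go1.12 (file names illustrative; -files is comma-separated and -lang selects the accepted language version, per the flag declarations above).
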
diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go
index 1cab1cc..c634d27 100644
--- a/src/go/types/conversions.go
+++ b/src/go/types/conversions.go
@@ -55,8 +55,8 @@
 		// - Keep untyped nil for untyped nil arguments.
 		// - For integer to string conversions, keep the argument type.
 		//   (See also the TODO below.)
-		if IsInterface(T) || constArg && !isConstType(T) {
-			final = Default(x.typ)
+		if IsInterface(T) || constArg && !isConstType(T) || x.isNil() {
+			final = Default(x.typ) // default type of untyped nil is untyped nil
 		} else if isInteger(x.typ) && isString(T) {
 			final = x.typ
 		}
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index 1f0bc35..b861cde 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -189,7 +189,7 @@
 		check.varDecl(obj, d.lhs, d.typ, d.init)
 	case *TypeName:
 		// invalid recursive types are detected via path
-		check.typeDecl(obj, d.typ, def, d.alias)
+		check.typeDecl(obj, d.typ, def, d.aliasPos)
 	case *Func:
 		// functions may be recursive - no need to track dependencies
 		check.funcDecl(obj, d)
@@ -234,7 +234,7 @@
 			// this information explicitly in the object.
 			var alias bool
 			if d := check.objMap[obj]; d != nil {
-				alias = d.alias // package-level object
+				alias = d.aliasPos.IsValid() // package-level object
 			} else {
 				alias = obj.IsAlias() // function local object
 			}
@@ -504,6 +504,20 @@
 func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init ast.Expr) {
 	assert(obj.typ == nil)
 
+	// If we have undefined variable types due to errors,
+	// mark variables as used to avoid follow-on errors.
+	// Matches compiler behavior.
+	defer func() {
+		if obj.typ == Typ[Invalid] {
+			obj.used = true
+		}
+		for _, lhs := range lhs {
+			if lhs.typ == Typ[Invalid] {
+				lhs.used = true
+			}
+		}
+	}()
+
 	// determine type, if any
 	if typ != nil {
 		obj.typ = check.typ(typ)
@@ -626,14 +640,17 @@
 	}
 }
 
-func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, alias bool) {
+func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, aliasPos token.Pos) {
 	assert(obj.typ == nil)
 
 	check.later(func() {
 		check.validType(obj.typ, nil)
 	})
 
-	if alias {
+	if aliasPos.IsValid() {
+		if !check.allowVersion(obj.pkg, 1, 9) {
+			check.errorf(atPos(aliasPos), _BadDecl, "type aliases requires go1.9 or later")
+		}
 
 		obj.typ = Typ[Invalid]
 		obj.typ = check.typ(typ)
@@ -664,9 +681,12 @@
 
 	}
 
+	// TODO(rFindley): move to the callsite, as this is only needed for top-level
+	//                 decls.
 	check.addMethodDecls(obj)
 }
 
+// TODO(rFindley): rename to collectMethods, to be consistent with types2.
 func (check *Checker) addMethodDecls(obj *TypeName) {
 	// get associated methods
 	// (Checker.collectObjects only collects methods with non-blank names;
@@ -677,7 +697,7 @@
 		return
 	}
 	delete(check.methods, obj)
-	assert(!check.objMap[obj].alias) // don't use TypeName.IsAlias (requires fully set up object)
+	assert(!check.objMap[obj].aliasPos.IsValid()) // don't use TypeName.IsAlias (requires fully set up object)
 
 	// use an objset to check for name conflicts
 	var mset objset
@@ -737,8 +757,12 @@
 	obj.typ = sig // guard against cycles
 	fdecl := decl.fdecl
 	check.funcType(sig, fdecl.Recv, fdecl.Type)
-	if sig.recv == nil && obj.name == "init" && (sig.params.Len() > 0 || sig.results.Len() > 0) {
-		check.errorf(fdecl, _InvalidInitSig, "func init must have no arguments and no return values")
+	if sig.recv == nil {
+		if obj.name == "init" && (sig.params.Len() > 0 || sig.results.Len() > 0) {
+			check.errorf(fdecl, _InvalidInitDecl, "func init must have no arguments and no return values")
+		} else if obj.name == "main" && check.pkg.name == "main" && (sig.params.Len() > 0 || sig.results.Len() > 0) {
+			check.errorf(fdecl, _InvalidMainDecl, "func main must have no arguments and no return values")
+		}
 		// ok to continue
 	}
 
@@ -846,7 +870,7 @@
 			check.declare(check.scope, d.spec.Name, obj, scopePos)
 			// mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl)
 			obj.setColor(grey + color(check.push(obj)))
-			check.typeDecl(obj, d.spec.Type, nil, d.spec.Assign.IsValid())
+			check.typeDecl(obj, d.spec.Type, nil, d.spec.Assign)
 			check.pop().setColor(black)
 		default:
 			check.invalidAST(d.node(), "unknown ast.Decl node %T", d.node())
diff --git a/src/go/types/errorcodes.go b/src/go/types/errorcodes.go
index c01a12c..ac28c3b 100644
--- a/src/go/types/errorcodes.go
+++ b/src/go/types/errorcodes.go
@@ -386,8 +386,8 @@
 	// _InvalidInitSig occurs when an init function declares parameters or
 	// results.
 	//
-	// Example:
-	//  func init() int { return 1 }
+	// Deprecated: no longer emitted by the type checker. _InvalidInitDecl is
+	// used instead.
 	_InvalidInitSig
 
 	// _InvalidInitDecl occurs when init is declared as anything other than a
@@ -395,6 +395,9 @@
 	//
 	// Example:
 	//  var init = 1
+	//
+	// Example:
+	//  func init() int { return 1 }
 	_InvalidInitDecl
 
 	// _InvalidMainDecl occurs when main is declared as anything other than a
@@ -1363,4 +1366,7 @@
 	//  	return i
 	//  }
 	_InvalidGo
+
+	// _BadDecl occurs when a declaration has invalid syntax.
+	_BadDecl
 )
diff --git a/src/go/types/eval_test.go b/src/go/types/eval_test.go
index d940bf0..3a97ac0 100644
--- a/src/go/types/eval_test.go
+++ b/src/go/types/eval_test.go
@@ -76,7 +76,7 @@
 		`false == false`,
 		`12345678 + 87654321 == 99999999`,
 		`10 * 20 == 200`,
-		`(1<<1000)*2 >> 100 == 2<<900`,
+		`(1<<500)*2 >> 100 == 2<<400`,
 		`"foo" + "bar" == "foobar"`,
 		`"abc" <= "bcd"`,
 		`len([10]struct{}{}) == 2*5`,
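
The 1<<1000 operand would now trip the new 512-bit cap on untyped integer constants (see the overflow check added to expr.go below), so the test value shrinks to an equivalent expression that stays in range. A standalone spot check with go/constant (sketch, not part of the patch):

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		x := constant.Shift(constant.MakeInt64(1), token.SHL, 500) // 1<<500
		x = constant.BinaryOp(x, token.MUL, constant.MakeInt64(2)) // *2
		x = constant.Shift(x, token.SHR, 100)                      // >>100
		fmt.Println(constant.BitLen(x)) // 402, comfortably below the 512-bit cap
	}
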
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index eb20561..aec3172 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -78,13 +78,77 @@
 	return true
 }
 
+// overflow checks that the constant x is representable by its type.
+// For untyped constants, it checks that the value doesn't become
+// arbitrarily large.
+func (check *Checker) overflow(x *operand, op token.Token, opPos token.Pos) {
+	assert(x.mode == constant_)
+
+	if x.val.Kind() == constant.Unknown {
+		// TODO(gri) We should report exactly what went wrong. At the
+		//           moment we don't have the (go/constant) API for that.
+		//           See also TODO in go/constant/value.go.
+		check.errorf(atPos(opPos), _InvalidConstVal, "constant result is not representable")
+		return
+	}
+
+	// Typed constants must be representable in
+	// their type after each constant operation.
+	if typ, ok := x.typ.Underlying().(*Basic); ok && isTyped(typ) {
+		check.representable(x, typ)
+		return
+	}
+
+	// Untyped integer values must not grow arbitrarily.
+	const prec = 512 // internal maximum precision for untyped integer values
+	if x.val.Kind() == constant.Int && constant.BitLen(x.val) > prec {
+		check.errorf(atPos(opPos), _InvalidConstVal, "constant %s overflow", opName(x.expr))
+		x.val = constant.MakeUnknown()
+	}
+}
+
+// opName returns the name of an operation, or the empty string.
+// For now, only operations that might overflow are handled.
+// TODO(gri) Expand this to a general mechanism giving names to
+//           nodes?
+func opName(e ast.Expr) string {
+	switch e := e.(type) {
+	case *ast.BinaryExpr:
+		if int(e.Op) < len(op2str2) {
+			return op2str2[e.Op]
+		}
+	case *ast.UnaryExpr:
+		if int(e.Op) < len(op2str1) {
+			return op2str1[e.Op]
+		}
+	}
+	return ""
+}
+
+var op2str1 = [...]string{
+	token.XOR: "bitwise complement",
+}
+
+// This is only used for operations that may cause overflow.
+var op2str2 = [...]string{
+	token.ADD: "addition",
+	token.SUB: "subtraction",
+	token.XOR: "bitwise XOR",
+	token.MUL: "multiplication",
+	token.SHL: "shift",
+}
+
-// The unary expression e may be nil. It's passed in for better error messages only.
+// unary type-checks the unary expression e.
-func (check *Checker) unary(x *operand, e *ast.UnaryExpr, op token.Token) {
-	switch op {
+func (check *Checker) unary(x *operand, e *ast.UnaryExpr) {
+	check.expr(x, e.X)
+	if x.mode == invalid {
+		return
+	}
+	switch e.Op {
 	case token.AND:
 		// spec: "As an exception to the addressability
 		// requirement x may also be a composite literal."
-		if _, ok := unparen(x.expr).(*ast.CompositeLit); !ok && x.mode != variable {
+		if _, ok := unparen(e.X).(*ast.CompositeLit); !ok && x.mode != variable {
 			check.invalidOp(x, _UnaddressableOperand, "cannot take address of %s", x)
 			x.mode = invalid
 			return
@@ -111,26 +175,23 @@
 		return
 	}
 
-	if !check.op(unaryOpPredicates, x, op) {
+	if !check.op(unaryOpPredicates, x, e.Op) {
 		x.mode = invalid
 		return
 	}
 
 	if x.mode == constant_ {
-		typ := x.typ.Underlying().(*Basic)
+		if x.val.Kind() == constant.Unknown {
+			// nothing to do (and don't cause an error below in the overflow check)
+			return
+		}
 		var prec uint
-		if isUnsigned(typ) {
-			prec = uint(check.conf.sizeof(typ) * 8)
+		if isUnsigned(x.typ) {
+			prec = uint(check.conf.sizeof(x.typ) * 8)
 		}
-		x.val = constant.UnaryOp(op, x.val, prec)
-		// Typed constants must be representable in
-		// their type after each constant operation.
-		if isTyped(typ) {
-			if e != nil {
-				x.expr = e // for better error message
-			}
-			check.representable(x, typ)
-		}
+		x.val = constant.UnaryOp(e.Op, x.val, prec)
+		x.expr = e
+		check.overflow(x, e.Op, x.Pos())
 		return
 	}
 
@@ -579,6 +640,8 @@
 			if !hasNil(target) {
 				return nil
 			}
+			// Preserve the type of nil as UntypedNil: see #13061.
+			return Typ[UntypedNil]
 		default:
 			return nil
 		}
@@ -665,15 +728,16 @@
 	x.typ = Typ[UntypedBool]
 }
 
-func (check *Checker) shift(x, y *operand, e *ast.BinaryExpr, op token.Token) {
-	untypedx := isUntyped(x.typ)
+// If e != nil, it must be the shift expression; it may be nil for non-constant shifts.
+func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) {
+	// TODO(gri) This function seems overly complex. Revisit.
 
 	var xval constant.Value
 	if x.mode == constant_ {
 		xval = constant.ToInt(x.val)
 	}
 
-	if isInteger(x.typ) || untypedx && xval != nil && xval.Kind() == constant.Int {
+	if isInteger(x.typ) || isUntyped(x.typ) && xval != nil && xval.Kind() == constant.Int {
 		// The lhs is of integer type or an untyped constant representable
 		// as an integer. Nothing to do.
 	} else {
@@ -685,19 +749,33 @@
 
 	// spec: "The right operand in a shift expression must have integer type
 	// or be an untyped constant representable by a value of type uint."
-	switch {
-	case isInteger(y.typ):
-		// nothing to do
-	case isUntyped(y.typ):
+
+	// Provide a good error message for negative shift counts.
+	if y.mode == constant_ {
+		yval := constant.ToInt(y.val) // consider -1, 1.0, but not -1.1
+		if yval.Kind() == constant.Int && constant.Sign(yval) < 0 {
+			check.invalidOp(y, _InvalidShiftCount, "negative shift count %s", y)
+			x.mode = invalid
+			return
+		}
+	}
+
+	// Caution: Check for isUntyped first because isInteger includes untyped
+	//          integers (was bug #43697).
+	if isUntyped(y.typ) {
 		check.convertUntyped(y, Typ[Uint])
 		if y.mode == invalid {
 			x.mode = invalid
 			return
 		}
-	default:
+	} else if !isInteger(y.typ) {
 		check.invalidOp(y, _InvalidShiftCount, "shift count %s must be integer", y)
 		x.mode = invalid
 		return
+	} else if !isUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) {
+		check.invalidOp(y, _InvalidShiftCount, "signed shift count %s requires go1.13 or later", y)
+		x.mode = invalid
+		return
 	}
 
 	var yval constant.Value
@@ -716,8 +794,17 @@
 
 	if x.mode == constant_ {
 		if y.mode == constant_ {
+			// if either x or y has an unknown value, the result is unknown
+			if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown {
+				x.val = constant.MakeUnknown()
+				// ensure the correct type - see comment below
+				if !isInteger(x.typ) {
+					x.typ = Typ[UntypedInt]
+				}
+				return
+			}
 			// rhs must be within reasonable bounds in constant shifts
-			const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64
+			const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 (see issue #44057)
 			s, ok := constant.Uint64Val(yval)
 			if !ok || s > shiftBound {
 				check.invalidOp(y, _InvalidShiftCount, "invalid shift count %s", y)
@@ -733,19 +820,17 @@
 			}
 			// x is a constant so xval != nil and it must be of Int kind.
 			x.val = constant.Shift(xval, op, uint(s))
-			// Typed constants must be representable in
-			// their type after each constant operation.
-			if isTyped(x.typ) {
-				if e != nil {
-					x.expr = e // for better error message
-				}
-				check.representable(x, x.typ.Underlying().(*Basic))
+			x.expr = e
+			opPos := x.Pos()
+			if b, _ := e.(*ast.BinaryExpr); b != nil {
+				opPos = b.OpPos
 			}
+			check.overflow(x, op, opPos)
 			return
 		}
 
 		// non-constant shift with constant lhs
-		if untypedx {
+		if isUntyped(x.typ) {
 			// spec: "If the left operand of a non-constant shift
 			// expression is an untyped constant, the type of the
 			// constant is what it would be if the shift expression
@@ -801,8 +886,9 @@
 	token.LOR:  isBoolean,
 }
 
-// The binary expression e may be nil. It's passed in for better error messages only.
-func (check *Checker) binary(x *operand, e *ast.BinaryExpr, lhs, rhs ast.Expr, op token.Token, opPos token.Pos) {
+// If e != nil, it must be the binary expression; it may be nil for non-constant expressions
+// (when invoked for an assignment operation where the binary expression is implicit).
+func (check *Checker) binary(x *operand, e ast.Expr, lhs, rhs ast.Expr, op token.Token, opPos token.Pos) {
 	var y operand
 
 	check.expr(x, lhs)
@@ -877,30 +963,19 @@
 	}
 
 	if x.mode == constant_ && y.mode == constant_ {
-		xval := x.val
-		yval := y.val
-		typ := x.typ.Underlying().(*Basic)
+		// if either x or y has an unknown value, the result is unknown
+		if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown {
+			x.val = constant.MakeUnknown()
+			// x.typ is unchanged
+			return
+		}
 		// force integer division of integer operands
-		if op == token.QUO && isInteger(typ) {
+		if op == token.QUO && isInteger(x.typ) {
 			op = token.QUO_ASSIGN
 		}
-		x.val = constant.BinaryOp(xval, op, yval)
-		// report error if valid operands lead to an invalid result
-		if xval.Kind() != constant.Unknown && yval.Kind() != constant.Unknown && x.val.Kind() == constant.Unknown {
-			// TODO(gri) We should report exactly what went wrong. At the
-			//           moment we don't have the (go/constant) API for that.
-			//           See also TODO in go/constant/value.go.
-			check.errorf(atPos(opPos), _InvalidConstVal, "constant result is not representable")
-			// TODO(gri) Should we mark operands with unknown values as invalid?
-		}
-		// Typed constants must be representable in
-		// their type after each constant operation.
-		if isTyped(typ) {
-			if e != nil {
-				x.expr = e // for better error message
-			}
-			check.representable(x, typ)
-		}
+		x.val = constant.BinaryOp(x.val, op, y.val)
+		x.expr = e
+		check.overflow(x, op, opPos)
 		return
 	}
 
@@ -1079,6 +1154,24 @@
 		goto Error
 
 	case *ast.BasicLit:
+		switch e.Kind {
+		case token.INT, token.FLOAT, token.IMAG:
+			check.langCompat(e)
+			// The max. mantissa precision for untyped numeric values
+			// is 512 bits, or 4048 bits for each of the two integer
+			// parts of a fraction for floating-point numbers that are
+			// represented accurately in the go/constant package.
+			// Constant literals that are longer than this many bits
+			// are not meaningful; and excessively long constants may
+			// consume a lot of space and time for a useless conversion.
+			// Cap constant length with a generous upper limit that also
+			// allows for separators between all digits.
+			const limit = 10000
+			if len(e.Value) > limit {
+				check.errorf(e, _InvalidConstVal, "excessively long constant: %s... (%d chars)", e.Value[:10], len(e.Value))
+				goto Error
+			}
+		}
 		x.setConst(e.Kind, e.Value)
 		if x.mode == invalid {
 			// The parser already establishes syntactic correctness.
@@ -1536,11 +1629,7 @@
 		}
 
 	case *ast.UnaryExpr:
-		check.expr(x, e.X)
-		if x.mode == invalid {
-			goto Error
-		}
-		check.unary(x, e, e.Op)
+		check.unary(x, e)
 		if x.mode == invalid {
 			goto Error
 		}
diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go
index 148edbf..954a7ca 100644
--- a/src/go/types/predicates.go
+++ b/src/go/types/predicates.go
@@ -6,8 +6,6 @@
 
 package types
 
-import "sort"
-
 func isNamed(typ Type) bool {
 	if _, ok := typ.(*Basic); ok {
 		return ok
@@ -273,8 +271,8 @@
 					p = p.prev
 				}
 				if debug {
-					assert(sort.IsSorted(byUniqueMethodName(a)))
-					assert(sort.IsSorted(byUniqueMethodName(b)))
+					assertSortedMethods(a)
+					assertSortedMethods(b)
 				}
 				for i, f := range a {
 					g := b[i]
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index b637f8b..e441159 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -23,7 +23,7 @@
 	init      ast.Expr      // init/orig expression, or nil
 	inherited bool          // if set, the init expression is inherited from a previous constant declaration
 	fdecl     *ast.FuncDecl // func declaration, or nil
-	alias     bool          // type alias declaration
+	aliasPos  token.Pos     // If valid, the decl is a type alias and aliasPos is the position of '='.
 
 	// The deps field tracks initialization expression dependencies.
 	deps map[Object]bool // lazily initialized
@@ -252,14 +252,6 @@
 					return
 				}
 
-				// add package to list of explicit imports
-				// (this functionality is provided as a convenience
-				// for clients; it is not needed for type-checking)
-				if !pkgImports[imp] {
-					pkgImports[imp] = true
-					pkg.imports = append(pkg.imports, imp)
-				}
-
 				// local name overrides imported package name
 				name := imp.name
 				if d.spec.Name != nil {
@@ -269,27 +261,41 @@
 						check.errorf(d.spec.Name, _ImportCRenamed, `cannot rename import "C"`)
 						return
 					}
-					if name == "init" {
-						check.errorf(d.spec.Name, _InvalidInitDecl, "cannot declare init - must be func")
-						return
-					}
 				}
 
-				obj := NewPkgName(d.spec.Pos(), pkg, name, imp)
+				if name == "init" {
+					check.errorf(d.spec.Name, _InvalidInitDecl, "cannot import package as init - init must be a func")
+					return
+				}
+
+				// add package to list of explicit imports
+				// (this functionality is provided as a convenience
+				// for clients; it is not needed for type-checking)
+				if !pkgImports[imp] {
+					pkgImports[imp] = true
+					pkg.imports = append(pkg.imports, imp)
+				}
+
+				pkgName := NewPkgName(d.spec.Pos(), pkg, name, imp)
 				if d.spec.Name != nil {
 					// in a dot-import, the dot represents the package
-					check.recordDef(d.spec.Name, obj)
+					check.recordDef(d.spec.Name, pkgName)
 				} else {
-					check.recordImplicit(d.spec, obj)
+					check.recordImplicit(d.spec, pkgName)
 				}
 
 				if path == "C" {
 					// match cmd/compile (not prescribed by spec)
-					obj.used = true
+					pkgName.used = true
 				}
 
 				// add import to file scope
+				check.imports = append(check.imports, pkgName)
 				if name == "." {
+					// dot-import
+					if check.dotImportMap == nil {
+						check.dotImportMap = make(map[dotImportKey]*PkgName)
+					}
 					// merge imported scope with file scope
 					for _, obj := range imp.scope.elems {
 						// A package scope may contain non-exported objects,
@@ -303,16 +309,15 @@
 							if alt := fileScope.Insert(obj); alt != nil {
 								check.errorf(d.spec.Name, _DuplicateDecl, "%s redeclared in this block", obj.Name())
 								check.reportAltDecl(alt)
+							} else {
+								check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName
 							}
 						}
 					}
-					// add position to set of dot-import positions for this file
-					// (this is only needed for "imported but not used" errors)
-					check.addUnusedDotImport(fileScope, imp, d.spec)
 				} else {
 					// declare imported package object in file scope
 					// (no need to provide s.Name since we called check.recordDef earlier)
-					check.declare(fileScope, nil, obj, token.NoPos)
+					check.declare(fileScope, nil, pkgName, token.NoPos)
 				}
 			case constDecl:
 				// declare all constants
@@ -361,7 +366,7 @@
 				}
 			case typeDecl:
 				obj := NewTypeName(d.spec.Name.Pos(), pkg, d.spec.Name.Name, nil)
-				check.declarePkgObj(d.spec.Name, obj, &declInfo{file: fileScope, typ: d.spec.Type, alias: d.spec.Assign.IsValid()})
+				check.declarePkgObj(d.spec.Name, obj, &declInfo{file: fileScope, typ: d.spec.Type, aliasPos: d.spec.Assign})
 			case funcDecl:
 				info := &declInfo{file: fileScope, fdecl: d.decl}
 				name := d.decl.Name.Name
@@ -488,7 +493,7 @@
 		// we're done if tdecl defined tname as a new type
 		// (rather than an alias)
 		tdecl := check.objMap[tname] // must exist for objects in package scope
-		if !tdecl.alias {
+		if !tdecl.aliasPos.IsValid() {
 			return ptr, tname
 		}
 
@@ -529,7 +534,7 @@
 	// phase 1
 	for _, obj := range objList {
 		// If we have a type alias, collect it for the 2nd phase.
-		if tname, _ := obj.(*TypeName); tname != nil && check.objMap[tname].alias {
+		if tname, _ := obj.(*TypeName); tname != nil && check.objMap[tname].aliasPos.IsValid() {
 			aliasList = append(aliasList, tname)
 			continue
 		}
@@ -566,39 +571,30 @@
 	// any of its exported identifiers. To import a package solely for its side-effects
 	// (initialization), use the blank identifier as explicit package name."
 
-	// check use of regular imported packages
-	for _, scope := range check.pkg.scope.children /* file scopes */ {
-		for _, obj := range scope.elems {
-			if obj, ok := obj.(*PkgName); ok {
-				// Unused "blank imports" are automatically ignored
-				// since _ identifiers are not entered into scopes.
-				if !obj.used {
-					path := obj.imported.path
-					base := pkgName(path)
-					if obj.name == base {
-						check.softErrorf(obj, _UnusedImport, "%q imported but not used", path)
-					} else {
-						check.softErrorf(obj, _UnusedImport, "%q imported but not used as %s", path, obj.name)
-					}
-				}
-			}
-		}
-	}
-
-	// check use of dot-imported packages
-	for _, unusedDotImports := range check.unusedDotImports {
-		for pkg, pos := range unusedDotImports {
-			check.softErrorf(pos, _UnusedImport, "%q imported but not used", pkg.path)
+	for _, obj := range check.imports {
+		if !obj.used && obj.name != "_" {
+			check.errorUnusedPkg(obj)
 		}
 	}
 }
 
-// pkgName returns the package name (last element) of an import path.
-func pkgName(path string) string {
-	if i := strings.LastIndex(path, "/"); i >= 0 {
-		path = path[i+1:]
+func (check *Checker) errorUnusedPkg(obj *PkgName) {
+	// If the package was imported with a name other than the final
+	// import path element, show it explicitly in the error message.
+	// Note that this handles both renamed imports and imports of
+	// packages containing unconventional package declarations.
+	// Note that this uses / always, even on Windows, because Go import
+	// paths always use forward slashes.
+	path := obj.imported.path
+	elem := path
+	if i := strings.LastIndex(elem, "/"); i >= 0 {
+		elem = elem[i+1:]
 	}
-	return path
+	if obj.name == "" || obj.name == "." || obj.name == elem {
+		check.softErrorf(obj, _UnusedImport, "%q imported but not used", path)
+	} else {
+		check.softErrorf(obj, _UnusedImport, "%q imported but not used as %s", path, obj.name)
+	}
 }
 
 // dir makes a good-faith attempt to return the directory
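
errorUnusedPkg shows the local name only when it differs from the last import path element, which also covers packages whose declared name is unconventional. A tiny extraction of that rule for illustration (helper name invented, not part of the patch):

	package main

	import (
		"fmt"
		"strings"
	)

	// needsAsClause mirrors errorUnusedPkg's test: mention the local name
	// only when it differs from the path's final element.
	func needsAsClause(name, path string) bool {
		elem := path
		if i := strings.LastIndex(elem, "/"); i >= 0 {
			elem = elem[i+1:]
		}
		return name != "" && name != "." && name != elem
	}

	func main() {
		fmt.Println(needsAsClause("fmt", "fmt"))               // false
		fmt.Println(needsAsClause("yaml", "gopkg.in/yaml.v2")) // true: "... not used as yaml"
	}
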
diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go
index 5ca4493..29f7113 100644
--- a/src/go/types/stdlib_test.go
+++ b/src/go/types/stdlib_test.go
@@ -106,6 +106,7 @@
 		// get per-file instructions
 		expectErrors := false
 		filename := filepath.Join(path, f.Name())
+		goVersion := ""
 		if comment := firstComment(filename); comment != "" {
 			fields := strings.Fields(comment)
 			switch fields[0] {
@@ -115,13 +116,17 @@
 				expectErrors = true
 				for _, arg := range fields[1:] {
 					if arg == "-0" || arg == "-+" || arg == "-std" {
-						// Marked explicitly as not expected errors (-0),
+						// Marked explicitly as not expecting errors (-0),
 						// or marked as compiling runtime/stdlib, which is only done
 						// to trigger runtime/stdlib-only error output.
 						// In both cases, the code should typecheck.
 						expectErrors = false
 						break
 					}
+					const prefix = "-lang="
+					if strings.HasPrefix(arg, prefix) {
+						goVersion = arg[len(prefix):]
+					}
 				}
 			}
 		}
@@ -129,7 +134,7 @@
 		// parse and type-check file
 		file, err := parser.ParseFile(fset, filename, nil, 0)
 		if err == nil {
-			conf := Config{Importer: stdLibImporter}
+			conf := Config{GoVersion: goVersion, Importer: stdLibImporter}
 			_, err = conf.Check(filename, fset, []*ast.File{file}, nil)
 		}
 
@@ -157,6 +162,7 @@
 		"directive.go",   // tests compiler rejection of bad directive placement - ignore
 		"embedfunc.go",   // tests //go:embed
 		"embedvers.go",   // tests //go:embed
+		"linkname2.go",   // go/types doesn't check validity of //go:xxx directives
 	)
 }
 
@@ -170,19 +176,15 @@
 	testTestDir(t, filepath.Join(runtime.GOROOT(), "test", "fixedbugs"),
 		"bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
 		"issue6889.go",   // gc-specific test
-		"issue7746.go",   // large constants - consumes too much memory
 		"issue11362.go",  // canonical import path check
 		"issue16369.go",  // go/types handles this correctly - not an issue
 		"issue18459.go",  // go/types doesn't check validity of //go:xxx directives
 		"issue18882.go",  // go/types doesn't check validity of //go:xxx directives
-		"issue20232.go",  // go/types handles larger constants than gc
 		"issue20529.go",  // go/types does not have constraints on stack size
 		"issue22200.go",  // go/types does not have constraints on stack size
 		"issue22200b.go", // go/types does not have constraints on stack size
 		"issue25507.go",  // go/types does not have constraints on stack size
 		"issue20780.go",  // go/types does not have constraints on stack size
-		"issue31747.go",  // go/types does not have constraints on language level (-lang=go1.12) (see #31793)
-		"issue34329.go",  // go/types does not have constraints on language level (-lang=go1.13) (see #31793)
 		"bug251.go",      // issue #34333 which was exposed with fix for #34151
 		"issue42058a.go", // go/types does not have constraints on channel element size
 		"issue42058b.go", // go/types does not have constraints on channel element size
diff --git a/src/go/types/testdata/builtins.src b/src/go/types/testdata/builtins.src
index 98830eb..6ee28f1 100644
--- a/src/go/types/testdata/builtins.src
+++ b/src/go/types/testdata/builtins.src
@@ -283,7 +283,7 @@
 	delete() // ERROR not enough arguments
 	delete(1) // ERROR not enough arguments
 	delete(1, 2, 3) // ERROR too many arguments
-	delete(m, 0 /* ERROR not assignable */)
+	delete(m, 0 /* ERROR cannot use */)
 	delete(m, s)
 	_ = delete /* ERROR used as value */ (m, s)
 
@@ -514,7 +514,7 @@
 	panic("foo")
 	panic(false)
 	panic(1<<10)
-	panic(1 /* ERROR overflows */ <<1000)
+	panic(1 << /* ERROR constant shift overflow */ 1000)
 	_ = panic /* ERROR used as value */ (0)
 
 	var s []byte
@@ -538,7 +538,7 @@
 	print(2.718281828)
 	print(false)
 	print(1<<10)
-	print(1 /* ERROR overflows */ <<1000)
+	print(1 << /* ERROR constant shift overflow */ 1000)
 	println(nil /* ERROR untyped nil */ )
 
 	var s []int
@@ -564,7 +564,7 @@
 	println(2.718281828)
 	println(false)
 	println(1<<10)
-	println(1 /* ERROR overflows */ <<1000)
+	println(1 << /* ERROR constant shift overflow */ 1000)
 	println(nil /* ERROR untyped nil */ )
 
 	var s []int
@@ -695,7 +695,7 @@
 	_ = unsafe.Alignof(42)
 	_ = unsafe.Alignof(new(struct{}))
 	_ = unsafe.Alignof(1<<10)
-	_ = unsafe.Alignof(1 /* ERROR overflows */ <<1000)
+	_ = unsafe.Alignof(1 << /* ERROR constant shift overflow */ 1000)
 	_ = unsafe.Alignof(nil /* ERROR "untyped nil */ )
 	unsafe /* ERROR not used */ .Alignof(x)
 
@@ -783,7 +783,7 @@
 	_ = unsafe.Sizeof(42)
 	_ = unsafe.Sizeof(new(complex128))
 	_ = unsafe.Sizeof(1<<10)
-	_ = unsafe.Sizeof(1 /* ERROR overflows */ <<1000)
+	_ = unsafe.Sizeof(1 << /* ERROR constant shift overflow */ 1000)
 	_ = unsafe.Sizeof(nil /* ERROR untyped nil */ )
 	unsafe /* ERROR not used */ .Sizeof(x)
 
diff --git a/src/go/types/testdata/const0.src b/src/go/types/testdata/const0.src
index adbbf28..5608b15 100644
--- a/src/go/types/testdata/const0.src
+++ b/src/go/types/testdata/const0.src
@@ -348,3 +348,16 @@
 	assert(one == 1)
 	assert(iota == 0)
 })
+
+// untyped constants must not get arbitrarily large
+const prec = 512 // internal maximum precision for integers
+const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1
+
+const _ = maxInt + /* ERROR constant addition overflow */ 1
+const _ = -maxInt - /* ERROR constant subtraction overflow */ 1
+const _ = maxInt ^ /* ERROR constant bitwise XOR overflow */ -1
+const _ = maxInt * /* ERROR constant multiplication overflow */ 2
+const _ = maxInt << /* ERROR constant shift overflow */ 2
+const _ = 1 << /* ERROR constant shift overflow */ prec
+
+const _ = ^ /* ERROR constant bitwise complement overflow */ maxInt
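
The maxInt constant reaches 1<<prec - 1 without any intermediate value exceeding the cap, via the identity (2^k - 1)(2^k + 1) = 2^(2k) - 1 with k = prec/2 = 256. A math/big spot check of that identity (sketch, not part of the patch):

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		one := big.NewInt(1)
		a := new(big.Int).Lsh(one, 256) // 1<<(prec/2)
		prod := new(big.Int).Mul(
			new(big.Int).Sub(a, one), // 1<<256 - 1
			new(big.Int).Add(a, one), // 1<<256 + 1
		)
		want := new(big.Int).Sub(new(big.Int).Lsh(one, 512), one) // 1<<512 - 1
		fmt.Println(prod.Cmp(want) == 0)                          // true
	}
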
diff --git a/src/go/types/testdata/const1.src b/src/go/types/testdata/const1.src
index d827704..56b6bd4 100644
--- a/src/go/types/testdata/const1.src
+++ b/src/go/types/testdata/const1.src
@@ -43,7 +43,12 @@
 
 const (
 	smallestFloat32 = 1.0 / (1<<(127 - 1 + 23))
-	smallestFloat64 = 1.0 / (1<<(1023 - 1 + 52))
+	// TODO(gri) The compiler limits integers to 512 bit and thus
+	//           we cannot compute the value (1<<(1023 - 1 + 52))
+	//           without overflow. For now we match the compiler.
+	//           See also issue #44057.
+	// smallestFloat64 = 1.0 / (1<<(1023 - 1 + 52))
+	smallestFloat64 = 4.940656458412465441765687928682213723651e-324
 )
 
 const (
@@ -53,7 +58,12 @@
 
 const (
 	maxFloat32 = 1<<127 * (1<<24 - 1) / (1.0<<23)
-	maxFloat64 = 1<<1023 * (1<<53 - 1) / (1.0<<52)
+	// TODO(gri) The compiler limits integers to 512 bit and thus
+	//           we cannot compute the value 1<<1023
+	//           without overflow. For now we match the compiler.
+	//           See also issue #44057.
+	// maxFloat64 = 1<<1023 * (1<<53 - 1) / (1.0<<52)
+	maxFloat64 = 1.797693134862315708145274237317043567981e+308
 )
 
 const (
@@ -271,7 +281,9 @@
 	_ = assert(float64(smallestFloat32) == smallestFloat32)
 	_ = assert(float64(smallestFloat32/2) == smallestFloat32/2)
 	_ = assert(float64(smallestFloat64) == smallestFloat64)
-	_ = assert(float64(smallestFloat64/2) == 0)
+	// TODO(gri) With the change to the declaration of smallestFloat64
+	//           this now fails to be true. See issue #44058.
+	// _ = assert(float64(smallestFloat64/2) == 0)
 )
 
 const (
diff --git a/src/go/types/testdata/decls2a.src b/src/go/types/testdata/decls2/decls2a.src
similarity index 100%
rename from src/go/types/testdata/decls2a.src
rename to src/go/types/testdata/decls2/decls2a.src
diff --git a/src/go/types/testdata/decls2b.src b/src/go/types/testdata/decls2/decls2b.src
similarity index 100%
rename from src/go/types/testdata/decls2b.src
rename to src/go/types/testdata/decls2/decls2b.src
diff --git a/src/go/types/testdata/go1_12.src b/src/go/types/testdata/go1_12.src
new file mode 100644
index 0000000..1e529f1
--- /dev/null
+++ b/src/go/types/testdata/go1_12.src
@@ -0,0 +1,35 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_12 // go1.12
+
+// numeric literals
+const (
+	_ = 1_000 // ERROR "underscores in numeric literals requires go1.13 or later"
+	_ = 0b111 // ERROR "binary literals requires go1.13 or later"
+	_ = 0o567 // ERROR "0o/0O-style octal literals requires go1.13 or later"
+	_ = 0xabc // ok
+	_ = 0x0p1 // ERROR "hexadecimal floating-point literals requires go1.13 or later"
+
+	_ = 0B111 // ERROR "binary"
+	_ = 0O567 // ERROR "octal"
+	_ = 0Xabc // ok
+	_ = 0X0P1 // ERROR "hexadecimal floating-point"
+
+	_ = 1_000i // ERROR "underscores"
+	_ = 0b111i // ERROR "binary"
+	_ = 0o567i // ERROR "octal"
+	_ = 0xabci // ERROR "hexadecimal floating-point"
+	_ = 0x0p1i // ERROR "hexadecimal floating-point"
+)
+
+// signed shift counts
+var (
+	s int
+	_ = 1 << s // ERROR "invalid operation: signed shift count s \(variable of type int\) requires go1.13 or later"
+	_ = 1 >> s // ERROR "signed shift count"
+)
+
diff --git a/src/go/types/testdata/go1_13.src b/src/go/types/testdata/go1_13.src
new file mode 100644
index 0000000..6aa1364
--- /dev/null
+++ b/src/go/types/testdata/go1_13.src
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_13 // go1.13
+
+// interface embedding
+
+type I interface { m() }
+
+type _ interface {
+	m()
+	I // ERROR "duplicate method m"
+}
+
+type _ interface {
+	I
+	I // ERROR "duplicate method m"
+}
+
diff --git a/src/go/types/testdata/go1_8.src b/src/go/types/testdata/go1_8.src
new file mode 100644
index 0000000..3ead1e9
--- /dev/null
+++ b/src/go/types/testdata/go1_8.src
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_8 // go1.8
+
+// type alias declarations
+type any = /* ERROR type aliases requires go1.9 or later */ interface{}
+
diff --git a/src/go/types/testdata/importdecl0a.src b/src/go/types/testdata/importdecl0/importdecl0a.src
similarity index 95%
rename from src/go/types/testdata/importdecl0a.src
rename to src/go/types/testdata/importdecl0/importdecl0a.src
index e96fca3..5ceb96e 100644
--- a/src/go/types/testdata/importdecl0a.src
+++ b/src/go/types/testdata/importdecl0/importdecl0a.src
@@ -10,7 +10,7 @@
 	// we can have multiple blank imports (was bug)
 	_ "math"
 	_ "net/rpc"
-	init /* ERROR "cannot declare init" */ "fmt"
+	init /* ERROR "cannot import package as init" */ "fmt"
 	// reflect defines a type "flag" which shows up in the gc export data
 	"reflect"
 	. /* ERROR "imported but not used" */ "reflect"
diff --git a/src/go/types/testdata/importdecl0b.src b/src/go/types/testdata/importdecl0/importdecl0b.src
similarity index 93%
rename from src/go/types/testdata/importdecl0b.src
rename to src/go/types/testdata/importdecl0/importdecl0b.src
index 6844e70..5569042 100644
--- a/src/go/types/testdata/importdecl0b.src
+++ b/src/go/types/testdata/importdecl0/importdecl0b.src
@@ -8,7 +8,7 @@
 import m "math"
 
 import . "testing" // declares T in file scope
-import . /* ERROR "imported but not used" */ "unsafe"
+import . /* ERROR .unsafe. imported but not used */ "unsafe"
 import . "fmt"     // declares Println in file scope
 
 import (
diff --git a/src/go/types/testdata/importdecl1a.src b/src/go/types/testdata/importdecl1/importdecl1a.src
similarity index 100%
rename from src/go/types/testdata/importdecl1a.src
rename to src/go/types/testdata/importdecl1/importdecl1a.src
diff --git a/src/go/types/testdata/importdecl1b.src b/src/go/types/testdata/importdecl1/importdecl1b.src
similarity index 77%
rename from src/go/types/testdata/importdecl1b.src
rename to src/go/types/testdata/importdecl1/importdecl1b.src
index ee70bbd..43a7bcd 100644
--- a/src/go/types/testdata/importdecl1b.src
+++ b/src/go/types/testdata/importdecl1/importdecl1b.src
@@ -4,7 +4,7 @@
 
 package importdecl1
 
-import . /* ERROR "imported but not used" */ "unsafe"
+import . /* ERROR .unsafe. imported but not used */ "unsafe"
 
 type B interface {
 	A
diff --git a/src/go/types/testdata/issue25008a.src b/src/go/types/testdata/issue25008/issue25008a.src
similarity index 100%
rename from src/go/types/testdata/issue25008a.src
rename to src/go/types/testdata/issue25008/issue25008a.src
diff --git a/src/go/types/testdata/issue25008b.src b/src/go/types/testdata/issue25008/issue25008b.src
similarity index 100%
rename from src/go/types/testdata/issue25008b.src
rename to src/go/types/testdata/issue25008/issue25008b.src
diff --git a/src/go/types/testdata/main.src b/src/go/types/testdata/main.src
new file mode 100644
index 0000000..f892938
--- /dev/null
+++ b/src/go/types/testdata/main.src
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main()
+func /* ERROR "no arguments and no return values" */ main /* ERROR redeclared */ (int)
+func /* ERROR "no arguments and no return values" */ main /* ERROR redeclared */ () int
diff --git a/src/go/types/testdata/shifts.src b/src/go/types/testdata/shifts.src
index c9a38ae..4d3c59a 100644
--- a/src/go/types/testdata/shifts.src
+++ b/src/go/types/testdata/shifts.src
@@ -20,7 +20,7 @@
 		// This depends on the exact spec wording which is not
 		// done yet.
 		// TODO(gri) revisit and adjust when spec change is done
-		_ = 1<<- /* ERROR "truncated to uint" */ 1.0
+		_ = 1<<- /* ERROR "negative shift count" */ 1.0
 		_ = 1<<1075 /* ERROR "invalid shift" */
 		_ = 2.0<<1
 		_ = 1<<1.0
@@ -60,11 +60,13 @@
 		_ uint = 1 << u
 		_ float32 = 1 /* ERROR "must be integer" */ << u
 
-		// for issue 14822
+		// issue #14822
+		_ = 1<<( /* ERROR "overflows uint" */ 1<<64)
 		_ = 1<<( /* ERROR "invalid shift count" */ 1<<64-1)
-		_ = 1<<( /* ERROR "invalid shift count" */ 1<<64)
-		_ = u<<(1<<63) // valid
-		_ = u<<(1<<64) // valid
+
+		// issue #43697
+		_ = u<<( /* ERROR "overflows uint" */ 1<<64)
+		_ = u<<(1<<64-1)
 	)
 }
 
diff --git a/src/go/types/testdata/stmt0.src b/src/go/types/testdata/stmt0.src
index 1377729..de8f936 100644
--- a/src/go/types/testdata/stmt0.src
+++ b/src/go/types/testdata/stmt0.src
@@ -90,7 +90,7 @@
 
 	// assignments to _
 	_ = nil /* ERROR "use of untyped nil" */
-	_ = 1 /* ERROR overflow */ <<1000
+	_ = 1 << /* ERROR constant shift overflow */ 1000
 	(_) = 0
 }
 
diff --git a/src/go/types/testdata/vardecl.src b/src/go/types/testdata/vardecl.src
index 54f5ef1..6e2d1b5 100644
--- a/src/go/types/testdata/vardecl.src
+++ b/src/go/types/testdata/vardecl.src
@@ -158,6 +158,18 @@
 	}
 }
 
+
+// Invalid variable declarations must not lead to "declared but not used" errors.
+func _() {
+	var a x        // ERROR undeclared name: x
+	var b = x      // ERROR undeclared name: x
+	var c int = x  // ERROR undeclared name: x
+	var d, e, f x  /* ERROR x */ /* ERROR x */ /* ERROR x */
+	var g, h, i = x /* ERROR x */, x /* ERROR x */, x /* ERROR x */
+	var j, k, l float32 = x /* ERROR x */, x /* ERROR x */, x /* ERROR x */
+	// but no "declared but not used" errors
+}
+
 // Invalid (unused) expressions must not lead to spurious "declared but not used errors"
 func _() {
 	var a, b, c int
@@ -203,4 +215,4 @@
 	_, _, _ = x, y, z
 }
 
-// TODO(gri) consolidate other var decl checks in this file
\ No newline at end of file
+// TODO(gri) consolidate other var decl checks in this file
diff --git a/src/go/types/type.go b/src/go/types/type.go
index 087cda4..66e194e 100644
--- a/src/go/types/type.go
+++ b/src/go/types/type.go
@@ -4,8 +4,6 @@
 
 package types
 
-import "sort"
-
 // A Type represents a type of Go.
 // All types implement the Type interface.
 type Type interface {
@@ -301,8 +299,8 @@
 	}
 
 	// sort for API stability
-	sort.Sort(byUniqueMethodName(methods))
-	sort.Stable(byUniqueTypeName(embeddeds))
+	sortMethods(methods)
+	sortTypes(embeddeds)
 
 	typ.methods = methods
 	typ.embeddeds = embeddeds
@@ -396,7 +394,7 @@
 	}
 
 	if methods != nil {
-		sort.Sort(byUniqueMethodName(methods))
+		sortMethods(methods)
 		t.allMethods = methods
 	}
 
diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go
index 2b39801..b924949 100644
--- a/src/go/types/typexpr.go
+++ b/src/go/types/typexpr.go
@@ -51,12 +51,12 @@
 	}
 	assert(typ != nil)
 
-	// The object may be dot-imported: If so, remove its package from
-	// the map of unused dot imports for the respective file scope.
+	// The object may have been dot-imported.
+	// If so, mark the respective package as used.
 	// (This code is only needed for dot-imports. Without them,
 	// we only have to mark variables, see *Var case below).
-	if pkg := obj.Pkg(); pkg != check.pkg && pkg != nil {
-		delete(check.unusedDotImports[scope], pkg)
+	if pkgName := check.dotImportMap[dotImportKey{scope, obj}]; pkgName != nil {
+		pkgName.used = true
 	}
 
 	switch obj := obj.(type) {
@@ -518,8 +518,8 @@
 	}
 
 	// sort for API stability
-	sort.Sort(byUniqueMethodName(ityp.methods))
-	sort.Stable(byUniqueTypeName(ityp.embeddeds))
+	sortMethods(ityp.methods)
+	sortTypes(ityp.embeddeds)
 
 	check.later(func() { check.completeInterface(ityp) })
 }
@@ -578,9 +578,13 @@
 			check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
 			check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
 		default:
-			// check method signatures after all types are computed (issue #33656)
+			// We have a duplicate method name in an embedded (not explicitly declared) method.
+			// Check method signatures after all types are computed (issue #33656).
+			// If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+			// error here as well (even though we could do it eagerly) because it's the same
+			// error message.
 			check.atEnd(func() {
-				if !check.identical(m.typ, other.Type()) {
+				if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
 					check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
 					check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
 				}
@@ -613,6 +617,10 @@
 	}
 }
 
+func sortTypes(list []Type) {
+	sort.Stable(byUniqueTypeName(list))
+}
+
 // byUniqueTypeName named type lists can be sorted by their unique type names.
 type byUniqueTypeName []Type
 
@@ -627,6 +635,19 @@
 	return ""
 }
 
+func sortMethods(list []*Func) {
+	sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+	if !debug {
+		panic("internal error: assertSortedMethods called outside debug mode")
+	}
+	if !sort.IsSorted(byUniqueMethodName(list)) {
+		panic("internal error: methods not sorted")
+	}
+}
+
 // byUniqueMethodName method lists can be sorted by their unique method names.
 type byUniqueMethodName []*Func
 
diff --git a/src/go/types/version.go b/src/go/types/version.go
new file mode 100644
index 0000000..1546941
--- /dev/null
+++ b/src/go/types/version.go
@@ -0,0 +1,82 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// langCompat reports an error if the representation of a numeric
+// literal is not compatible with the current language version.
+func (check *Checker) langCompat(lit *ast.BasicLit) {
+	s := lit.Value
+	if len(s) <= 2 || check.allowVersion(check.pkg, 1, 13) {
+		return
+	}
+	// len(s) > 2
+	if strings.Contains(s, "_") {
+		check.errorf(lit, _InvalidLit, "underscores in numeric literals requires go1.13 or later")
+		return
+	}
+	if s[0] != '0' {
+		return
+	}
+	radix := s[1]
+	if radix == 'b' || radix == 'B' {
+		check.errorf(lit, _InvalidLit, "binary literals requires go1.13 or later")
+		return
+	}
+	if radix == 'o' || radix == 'O' {
+		check.errorf(lit, _InvalidLit, "0o/0O-style octal literals requires go1.13 or later")
+		return
+	}
+	if lit.Kind != token.INT && (radix == 'x' || radix == 'X') {
+		check.errorf(lit, _InvalidLit, "hexadecimal floating-point literals requires go1.13 or later")
+	}
+}
+
+// allowVersion reports whether the given package
+// is allowed to use version major.minor.
+func (check *Checker) allowVersion(pkg *Package, major, minor int) bool {
+	// We assume that imported packages have all been checked,
+	// so we only have to check for the local package.
+	if pkg != check.pkg {
+		return true
+	}
+	ma, mi := check.version.major, check.version.minor
+	return ma == 0 && mi == 0 || ma > major || ma == major && mi >= minor
+}
+
+type version struct {
+	major, minor int
+}
+
+// parseGoVersion parses a Go version string (such as "go1.12")
+// and returns the version, or an error. If s is the empty
+// string, the version is 0.0.
+func parseGoVersion(s string) (v version, err error) {
+	if s == "" {
+		return
+	}
+	matches := goVersionRx.FindStringSubmatch(s)
+	if matches == nil {
+		err = fmt.Errorf(`should be something like "go1.12"`)
+		return
+	}
+	v.major, err = strconv.Atoi(matches[1])
+	if err != nil {
+		return
+	}
+	v.minor, err = strconv.Atoi(matches[2])
+	return
+}
+
+// goVersionRx matches a Go version string, e.g. "go1.12".
+var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
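A standalone sketch of how these helpers behave (illustration only, not part of the change; the results follow from the code above):

	v, err := parseGoVersion("go1.12") // v == version{1, 12}, err == nil
	_, err = parseGoVersion("1.12")    // err != nil: should be something like "go1.12"
	v, _ = parseGoVersion("")          // v == version{0, 0}, i.e. no language restriction

	// allowVersion treats 0.0 as "allow everything"; otherwise the
	// configured version must be at least the required one. With
	// check.version == version{1, 13}:
	//   allowVersion(check.pkg, 1, 13) == true
	//   allowVersion(check.pkg, 1, 14) == false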
diff --git a/src/internal/abi/abi.go b/src/internal/abi/abi.go
new file mode 100644
index 0000000..6700fac
--- /dev/null
+++ b/src/internal/abi/abi.go
@@ -0,0 +1,53 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import "unsafe"
+
+// RegArgs is a struct that has space for each argument
+// and return value register on the current architecture.
+//
+// Assembly code knows the layout of the first two fields
+// of RegArgs.
+//
+// RegArgs also contains additional space to hold pointers
+// when it may not otherwise be safe to keep them only in
+// the integer register space.
+type RegArgs struct {
+	Ints   [IntArgRegs]uintptr  // untyped integer registers
+	Floats [FloatArgRegs]uint64 // untyped float registers
+
+	// Fields above this point are known to assembly.
+
+	// Ptrs is a space that duplicates Ints but with pointer type,
+	// used to make pointers passed or returned in registers
+	// visible to the GC by making the type unsafe.Pointer.
+	Ptrs [IntArgRegs]unsafe.Pointer
+
+	// ReturnIsPtr is a bitmap that indicates which registers
+	// contain or will contain pointers on the return path from
+	// a reflectcall. The i'th bit indicates whether the i'th
+	// register contains or will contain a valid Go pointer.
+	ReturnIsPtr IntArgRegBitmap
+}
+
+// IntArgRegBitmap is a bitmap large enough to hold one bit per
+// integer argument/return register.
+type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8
+
+// Set sets the i'th bit of the bitmap to 1.
+func (b *IntArgRegBitmap) Set(i int) {
+	b[i/8] |= uint8(1) << (i % 8)
+}
+
+// Get returns whether the i'th bit of the bitmap is set.
+//
+// nosplit because it's called in extremely sensitive contexts, like
+// on the reflectcall return path.
+//
+//go:nosplit
+func (b *IntArgRegBitmap) Get(i int) bool {
+	return b[i/8]&(uint8(1)<<(i%8)) != 0
+}
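The bitmap packs one bit per integer register, eight per byte. A standalone sketch of the same Set/Get arithmetic over a fixed two-byte array (so it runs even while IntArgRegs is zero and IntArgRegBitmap has no bytes):

	type bitmap [2]uint8

	func (b *bitmap) set(i int) { b[i/8] |= uint8(1) << (i % 8) }

	func (b *bitmap) get(i int) bool { return b[i/8]&(uint8(1)<<(i%8)) != 0 }

	// var b bitmap
	// b.set(9)           // sets bit 1 of byte 1
	// b.get(9) == true
	// b.get(8) == false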
diff --git a/src/internal/abi/abi_amd64.go b/src/internal/abi/abi_amd64.go
new file mode 100644
index 0000000..70e2ed1
--- /dev/null
+++ b/src/internal/abi/abi_amd64.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build goexperiment.regabi
+
+package abi
+
+const (
+	// See abi_generic.go.
+
+	// Currently these values are zero because whatever uses
+	// them will expect the register ABI, which isn't ready
+	// yet.
+
+	// RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11.
+	IntArgRegs = 0 // 9
+
+	// X0 -> X14.
+	FloatArgRegs = 0 // 15
+
+	// We use SSE2 registers which support 64-bit float operations.
+	EffectiveFloatRegSize = 0 // 8
+)
diff --git a/src/internal/abi/abi_generic.go b/src/internal/abi/abi_generic.go
new file mode 100644
index 0000000..5ef9883
--- /dev/null
+++ b/src/internal/abi/abi_generic.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !goexperiment.regabi
+
+package abi
+
+const (
+	// ABI-related constants.
+	//
+	// In the generic case, these are all zero
+	// which lets them gracefully degrade to ABI0.
+
+	// IntArgRegs is the number of registers dedicated
+	// to passing integer argument values. Result registers are identical
+	// to argument registers, so this number is used for those too.
+	IntArgRegs = 0
+
+	// FloatArgRegs is the number of registers dedicated
+	// to passing floating-point argument values. Result registers are
+	// identical to argument registers, so this number is used for
+	// those too.
+	FloatArgRegs = 0
+
+	// EffectiveFloatRegSize describes the width of floating point
+	// registers on the current platform from the ABI's perspective.
+	//
+	// Since Go only supports 32-bit and 64-bit floating point primitives,
+	// this number should be either 0, 4, or 8. 0 indicates no floating
+	// point registers for the ABI or that floating point values will be
+	// passed via the softfloat ABI.
+	//
+	// For platforms that support larger floating point register widths,
+	// such as x87's 80-bit "registers" (not that we support x87 currently),
+	// use 8.
+	EffectiveFloatRegSize = 0
+)
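With all three constants zero, every register-assignment attempt fails and values degrade to stack-based (ABI0) passing. A hedged sketch of the kind of gate a consumer would apply (floatFitsInReg is a hypothetical helper, not part of this change):

	// floatFitsInReg reports whether a float value of the given size
	// can travel in a register under these constants. With the
	// generic values above it always reports false.
	func floatFitsInReg(size uintptr) bool {
		return FloatArgRegs > 0 && EffectiveFloatRegSize >= size
	}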
diff --git a/src/reflect/abi.go b/src/reflect/abi.go
new file mode 100644
index 0000000..88af212
--- /dev/null
+++ b/src/reflect/abi.go
@@ -0,0 +1,403 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+
+import (
+	"internal/abi"
+	"unsafe"
+)
+
+// abiStep represents an ABI "instruction." Each instruction
+// describes one part of how to translate between a Go value
+// in memory and a call frame.
+type abiStep struct {
+	kind abiStepKind
+
+	// offset and size together describe a part of a Go value
+	// in memory.
+	offset uintptr
+	size   uintptr // size in bytes of the part
+
+	// These fields describe the ABI side of the translation.
+	stkOff uintptr // stack offset, used if kind == abiStepStack
+	ireg   int     // integer register index, used if kind == abiStepIntReg or kind == abiStepPointer
+	freg   int     // FP register index, used if kind == abiStepFloatReg
+}
+
+// abiStepKind is the "op-code" for an abiStep instruction.
+type abiStepKind int
+
+const (
+	abiStepBad      abiStepKind = iota
+	abiStepStack                // copy to/from stack
+	abiStepIntReg               // copy to/from integer register
+	abiStepPointer              // copy pointer to/from integer register
+	abiStepFloatReg             // copy to/from FP register
+)
+
+// abiSeq represents a sequence of ABI instructions for copying
+// from a series of reflect.Values to a call frame (for call arguments)
+// or vice-versa (for call results).
+//
+// An abiSeq should be populated by calling its addArg method.
+type abiSeq struct {
+	// steps is the set of instructions.
+	//
+	// The instructions are grouped together by whole arguments,
+	// with the starting index for the instructions
+	// of the i'th Go value available in valueStart.
+	//
+	// For instance, if this abiSeq represents 3 arguments
+	// passed to a function, then the 2nd argument's steps
+	// begin at steps[valueStart[1]].
+	//
+	// Because reflect accepts Go arguments in distinct
+	// Values and each Value is stored separately, each abiStep
+	// that begins a new argument will have its offset
+	// field == 0.
+	steps      []abiStep
+	valueStart []int
+
+	stackBytes   uintptr // stack space used
+	iregs, fregs int     // registers used
+}
+
+func (a *abiSeq) dump() {
+	for i, p := range a.steps {
+		println("part", i, p.kind, p.offset, p.size, p.stkOff, p.ireg, p.freg)
+	}
+	print("values ")
+	for _, i := range a.valueStart {
+		print(i, " ")
+	}
+	println()
+	println("stack", a.stackBytes)
+	println("iregs", a.iregs)
+	println("fregs", a.fregs)
+}
+
+// stepsForValue returns the ABI instructions for translating
+// the i'th Go argument or return value represented by this
+// abiSeq to the Go ABI.
+func (a *abiSeq) stepsForValue(i int) []abiStep {
+	s := a.valueStart[i]
+	var e int
+	if i == len(a.valueStart)-1 {
+		e = len(a.steps)
+	} else {
+		e = a.valueStart[i+1]
+	}
+	return a.steps[s:e]
+}
+
+// addArg extends the abiSeq with a new Go value of type t.
+//
+// If the value was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+func (a *abiSeq) addArg(t *rtype) *abiStep {
+	pStart := len(a.steps)
+	a.valueStart = append(a.valueStart, pStart)
+	if !a.regAssign(t, 0) {
+		a.steps = a.steps[:pStart]
+		a.stackAssign(t.size, uintptr(t.align))
+		return &a.steps[len(a.steps)-1]
+	}
+	return nil
+}
+
+// addRcvr extends the abiSeq with a new method call
+// receiver according to the interface calling convention.
+//
+// If the receiver was stack-assigned, returns the single
+// abiStep describing that translation, and nil otherwise.
+// Returns true if the receiver is a pointer.
+func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
+	// The receiver is always one word.
+	a.valueStart = append(a.valueStart, len(a.steps))
+	var ok, ptr bool
+	if ifaceIndir(rcvr) || rcvr.pointers() {
+		ok = a.assignIntN(0, ptrSize, 1, 0b1)
+		ptr = true
+	} else {
+		// TODO(mknyszek): Is this case even possible?
+		// The interface data word never contains a non-pointer
+		// value. This case was copied over from older code
+		// in the reflect package which only conditionally added
+		// a pointer bit to the reflect.(Value).Call stack frame's
+		// GC bitmap.
+		ok = a.assignIntN(0, ptrSize, 1, 0b0)
+		ptr = false
+	}
+	if !ok {
+		a.stackAssign(ptrSize, ptrSize)
+		return &a.steps[len(a.steps)-1], ptr
+	}
+	return nil, ptr
+}
+
+// regAssign attempts to reserve argument registers for a value of
+// type t, stored at some offset.
+//
+// It returns whether or not the assignment succeeded, but
+// leaves any changes it made to a.steps behind, so the caller
+// must undo that work by adjusting a.steps if it fails.
+//
+// This method along with the assign* methods represent the
+// complete register-assignment algorithm for the Go ABI.
+func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
+	switch t.Kind() {
+	case UnsafePointer, Ptr, Chan, Map, Func:
+		return a.assignIntN(offset, t.size, 1, 0b1)
+	case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
+		return a.assignIntN(offset, t.size, 1, 0b0)
+	case Int64, Uint64:
+		switch ptrSize {
+		case 4:
+			return a.assignIntN(offset, 4, 2, 0b0)
+		case 8:
+			return a.assignIntN(offset, 8, 1, 0b0)
+		}
+	case Float32, Float64:
+		return a.assignFloatN(offset, t.size, 1)
+	case Complex64:
+		return a.assignFloatN(offset, 4, 2)
+	case Complex128:
+		return a.assignFloatN(offset, 8, 2)
+	case String:
+		return a.assignIntN(offset, ptrSize, 2, 0b01)
+	case Interface:
+		return a.assignIntN(offset, ptrSize, 2, 0b10)
+	case Slice:
+		return a.assignIntN(offset, ptrSize, 3, 0b001)
+	case Array:
+		tt := (*arrayType)(unsafe.Pointer(t))
+		switch tt.len {
+		case 0:
+			// There's nothing to assign, so don't modify
+			// a.steps but succeed so the caller doesn't
+			// try to stack-assign this value.
+			return true
+		case 1:
+			return a.regAssign(tt.elem, offset)
+		default:
+			return false
+		}
+	case Struct:
+		if t.size == 0 {
+			// There's nothing to assign, so don't modify
+			// a.steps but succeed so the caller doesn't
+			// try to stack-assign this value.
+			return true
+		}
+		st := (*structType)(unsafe.Pointer(t))
+		for i := range st.fields {
+			f := &st.fields[i]
+			if f.typ.Size() == 0 {
+				// Ignore zero-sized fields.
+				continue
+			}
+			if !a.regAssign(f.typ, offset+f.offset()) {
+				return false
+			}
+		}
+		return true
+	default:
+		print("t.Kind == ", t.Kind(), "\n")
+		panic("unknown type kind")
+	}
+	panic("unhandled register assignment path")
+}
+
+// assignIntN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. The value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// i'th of the next n integer registers.
+//
+// Bit i in ptrMap indicates whether the i'th value is a pointer.
+// n must be <= 8.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
+	if n > 8 || n < 0 {
+		panic("invalid n")
+	}
+	if ptrMap != 0 && size != ptrSize {
+		panic("non-empty pointer map passed for non-pointer-size values")
+	}
+	if a.iregs+n > abi.IntArgRegs {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		kind := abiStepIntReg
+		if ptrMap&(uint8(1)<<i) != 0 {
+			kind = abiStepPointer
+		}
+		a.steps = append(a.steps, abiStep{
+			kind:   kind,
+			offset: offset + uintptr(i)*size,
+			size:   size,
+			ireg:   a.iregs,
+		})
+		a.iregs++
+	}
+	return true
+}
+
+// assignFloatN assigns n values to registers, each "size" bytes large,
+// from the data at [offset, offset+n*size) in memory. The value at
+// [offset+i*size, offset+(i+1)*size) for i < n is assigned to the
+// i'th of the next n floating-point registers.
+//
+// Returns whether assignment succeeded.
+func (a *abiSeq) assignFloatN(offset, size uintptr, n int) bool {
+	if n < 0 {
+		panic("invalid n")
+	}
+	if a.fregs+n > abi.FloatArgRegs || abi.EffectiveFloatRegSize < size {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		a.steps = append(a.steps, abiStep{
+			kind:   abiStepFloatReg,
+			offset: offset + uintptr(i)*size,
+			size:   size,
+			freg:   a.fregs,
+		})
+		a.fregs++
+	}
+	return true
+}
+
+// stackAssign reserves space for one value that is "size" bytes
+// large with alignment "alignment" on the stack.
+//
+// Should not be called directly; use addArg instead.
+func (a *abiSeq) stackAssign(size, alignment uintptr) {
+	a.stackBytes = align(a.stackBytes, alignment)
+	a.steps = append(a.steps, abiStep{
+		kind:   abiStepStack,
+		offset: 0, // Only used for whole arguments, so the memory offset is 0.
+		size:   size,
+		stkOff: a.stackBytes,
+	})
+	a.stackBytes += size
+}
+
+// abiDesc describes the ABI for a function or method.
+type abiDesc struct {
+	// call and ret represent the translation steps for
+	// the call and return paths of a Go function.
+	call, ret abiSeq
+
+	// These fields describe the stack space allocated
+	// for the call. stackCallArgsSize is the amount of space
+	// reserved for arguments but not return values. retOffset
+	// is the offset at which return values begin, and
+	// spill is the size in bytes of additional space reserved
+	// to spill argument registers into in case of preemption in
+	// reflectcall's stack frame.
+	stackCallArgsSize, retOffset, spill uintptr
+
+	// stackPtrs is a bitmap that indicates whether
+	// each word in the ABI stack space (stack-assigned
+	// args + return values) is a pointer. Used
+	// as the heap pointer bitmap for stack space
+	// passed to reflectcall.
+	stackPtrs *bitVector
+
+	// outRegPtrs is a bitmap whose i'th bit indicates
+	// whether the i'th integer result register contains
+	// a pointer. Used by reflectcall to make result
+	// pointers visible to the GC.
+	outRegPtrs abi.IntArgRegBitmap
+}
+
+func (a *abiDesc) dump() {
+	println("ABI")
+	println("call")
+	a.call.dump()
+	println("ret")
+	a.ret.dump()
+	println("stackCallArgsSize", a.stackCallArgsSize)
+	println("retOffset", a.retOffset)
+	println("spill", a.spill)
+}
+
+func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
+	// We need to add spill space for register arguments
+	// to the frame so that reflectcall can spill them there.
+	//
+	// The size of this space is just the sum of the sizes
+	// of each register-allocated type.
+	//
+	// TODO(mknyszek): Remove this when we no longer have
+	// caller reserved spill space.
+	spillInt := uintptr(0)
+	spillFloat := uintptr(0)
+
+	// Compute gc program & stack bitmap for stack arguments
+	stackPtrs := new(bitVector)
+
+	// Compute abiSeq for input parameters.
+	var in abiSeq
+	if rcvr != nil {
+		stkStep, isPtr := in.addRcvr(rcvr)
+		if stkStep != nil {
+			if isPtr {
+				stackPtrs.append(1)
+			} else {
+				stackPtrs.append(0)
+			}
+		} else {
+			spillInt += ptrSize
+		}
+	}
+	for _, arg := range t.in() {
+		i, f := in.iregs, in.fregs
+		stkStep := in.addArg(arg)
+		if stkStep != nil {
+			addTypeBits(stackPtrs, stkStep.stkOff, arg)
+		} else {
+			i, f = in.iregs-i, in.fregs-f
+			spillInt += uintptr(i) * ptrSize
+			spillFloat += uintptr(f) * abi.EffectiveFloatRegSize
+		}
+	}
+	spill := align(spillInt+spillFloat, ptrSize)
+
+	// From the input parameters alone, we now know
+	// the stackCallArgsSize and retOffset.
+	stackCallArgsSize := in.stackBytes
+	retOffset := align(in.stackBytes, ptrSize)
+
+	// Compute the stack frame pointer bitmap and register
+	// pointer bitmap for return values.
+	outRegPtrs := abi.IntArgRegBitmap{}
+
+	// Compute abiSeq for output parameters.
+	var out abiSeq
+	// Stack-assigned return values do not share
+	// space with arguments like they do with registers,
+	// so we need to inject a stack offset here.
+	// Fake it by artificially extending stackBytes by
+	// the return offset.
+	out.stackBytes = retOffset
+	for i, res := range t.out() {
+		stkStep := out.addArg(res)
+		if stkStep != nil {
+			addTypeBits(stackPtrs, stkStep.stkOff, res)
+		} else {
+			for _, st := range out.stepsForValue(i) {
+				if st.kind == abiStepPointer {
+					outRegPtrs.Set(st.ireg)
+				}
+			}
+		}
+	}
+	// Undo the faking from earlier so that stackBytes
+	// is accurate.
+	out.stackBytes -= retOffset
+	return abiDesc{in, out, stackCallArgsSize, retOffset, spill, stackPtrs, outRegPtrs}
+}
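To make the assignment rules concrete, assume hypothetical register counts (IntArgRegs = 9, FloatArgRegs = 15, EffectiveFloatRegSize = 8; the comments in internal/abi anticipate these values, but the current constants are zero). For func(s string, f float64), addArg would record:

	// s: assignIntN(0, 8, 2, 0b01); f: assignFloatN(0, 8, 1)
	//
	//   steps[0] = {kind: abiStepPointer,  offset: 0, size: 8, ireg: 0} // s data pointer
	//   steps[1] = {kind: abiStepIntReg,   offset: 8, size: 8, ireg: 1} // s length
	//   steps[2] = {kind: abiStepFloatReg, offset: 0, size: 8, freg: 0} // f
	//
	//   valueStart = [0, 2] // s's steps begin at 0, f's at 2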
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index de426b5..ddcfca9 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -23,15 +23,17 @@
 
 func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte, gc []byte, ptrs bool) {
 	var ft *rtype
-	var s *bitVector
+	var abi abiDesc
 	if rcvr != nil {
-		ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
+		ft, _, abi = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype))
 	} else {
-		ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
+		ft, _, abi = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil)
 	}
+	argSize = abi.stackCallArgsSize
+	retOffset = abi.retOffset
 	frametype = ft
-	for i := uint32(0); i < s.n; i++ {
-		stack = append(stack, s.data[i/8]>>(i%8)&1)
+	for i := uint32(0); i < abi.stackPtrs.n; i++ {
+		stack = append(stack, abi.stackPtrs.data[i/8]>>(i%8)&1)
 	}
 	if ft.kind&kindGCProg != 0 {
 		panic("can't handle gc programs")
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index 67dc485..e17d4ea 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -60,9 +60,9 @@
 	code := **(**uintptr)(unsafe.Pointer(&dummy))
 
 	// makeFuncImpl contains a stack map for use by the runtime
-	_, argLen, _, stack, _ := funcLayout(ftyp, nil)
+	_, _, abi := funcLayout(ftyp, nil)
 
-	impl := &makeFuncImpl{code: code, stack: stack, argLen: argLen, ftyp: ftyp, fn: fn}
+	impl := &makeFuncImpl{code: code, stack: abi.stackPtrs, argLen: abi.stackCallArgsSize, ftyp: ftyp, fn: fn}
 
 	return Value{t, unsafe.Pointer(impl), flag(Func)}
 }
@@ -112,12 +112,12 @@
 	code := **(**uintptr)(unsafe.Pointer(&dummy))
 
 	// methodValue contains a stack map for use by the runtime
-	_, argLen, _, stack, _ := funcLayout(ftyp, nil)
+	_, _, abi := funcLayout(ftyp, nil)
 
 	fv := &methodValue{
 		fn:     code,
-		stack:  stack,
-		argLen: argLen,
+		stack:  abi.stackPtrs,
+		argLen: abi.stackCallArgsSize,
 		method: int(v.flag) >> flagMethodShift,
 		rcvr:   rcvr,
 	}
diff --git a/src/reflect/type.go b/src/reflect/type.go
index a1cdf45..d528166 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -1865,7 +1865,7 @@
 
 	// Make a map type.
 	// Note: flag values must match those used in the TMAP case
-	// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
+	// in ../cmd/compile/internal/gc/reflect.go:writeType.
 	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
 	mt := **(**mapType)(unsafe.Pointer(&imap))
 	mt.str = resolveReflectName(newName(s, "", false))
@@ -2984,21 +2984,20 @@
 
 type layoutType struct {
 	t         *rtype
-	argSize   uintptr // size of arguments
-	retOffset uintptr // offset of return values.
-	stack     *bitVector
 	framePool *sync.Pool
+	abi       abiDesc
 }
 
 var layoutCache sync.Map // map[layoutKey]layoutType
 
 // funcLayout computes a struct type representing the layout of the
-// function arguments and return values for the function type t.
+// stack-assigned function arguments and return values for the function
+// type t.
 // If rcvr != nil, rcvr specifies the type of the receiver.
 // The returned type exists only for GC, so we only fill out GC relevant info.
 // Currently, that's just size and the GC program. We also fill in
 // the name for possible debugging use.
-func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
+func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abi abiDesc) {
 	if t.Kind() != Func {
 		panic("reflect: funcLayout of non-func type " + t.String())
 	}
@@ -3008,46 +3007,24 @@
 	k := layoutKey{t, rcvr}
 	if lti, ok := layoutCache.Load(k); ok {
 		lt := lti.(layoutType)
-		return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
+		return lt.t, lt.framePool, lt.abi
 	}
 
-	// compute gc program & stack bitmap for arguments
-	ptrmap := new(bitVector)
-	var offset uintptr
-	if rcvr != nil {
-		// Reflect uses the "interface" calling convention for
-		// methods, where receivers take one word of argument
-		// space no matter how big they actually are.
-		if ifaceIndir(rcvr) || rcvr.pointers() {
-			ptrmap.append(1)
-		} else {
-			ptrmap.append(0)
-		}
-		offset += ptrSize
-	}
-	for _, arg := range t.in() {
-		offset += -offset & uintptr(arg.align-1)
-		addTypeBits(ptrmap, offset, arg)
-		offset += arg.size
-	}
-	argSize = offset
-	offset += -offset & (ptrSize - 1)
-	retOffset = offset
-	for _, res := range t.out() {
-		offset += -offset & uintptr(res.align-1)
-		addTypeBits(ptrmap, offset, res)
-		offset += res.size
-	}
-	offset += -offset & (ptrSize - 1)
+	// Compute the ABI layout.
+	abi = newAbiDesc(t, rcvr)
 
 	// build dummy rtype holding gc program
 	x := &rtype{
-		align:   ptrSize,
-		size:    offset,
-		ptrdata: uintptr(ptrmap.n) * ptrSize,
+		align: ptrSize,
+		// Don't add spill space here; it's only necessary in
+		// reflectcall's frame, not in the allocated frame.
+		// TODO(mknyszek): Remove this comment when register
+		// spill space in the frame is no longer required.
+		size:    align(abi.retOffset+abi.ret.stackBytes, ptrSize),
+		ptrdata: uintptr(abi.stackPtrs.n) * ptrSize,
 	}
-	if ptrmap.n > 0 {
-		x.gcdata = &ptrmap.data[0]
+	if abi.stackPtrs.n > 0 {
+		x.gcdata = &abi.stackPtrs.data[0]
 	}
 
 	var s string
@@ -3064,13 +3041,11 @@
 	}}
 	lti, _ := layoutCache.LoadOrStore(k, layoutType{
 		t:         x,
-		argSize:   argSize,
-		retOffset: retOffset,
-		stack:     ptrmap,
 		framePool: framePool,
+		abi:       abi,
 	})
 	lt := lti.(layoutType)
-	return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
+	return lt.t, lt.framePool, lt.abi
 }
 
 // ifaceIndir reports whether t is stored indirectly in an interface value.
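The lines deleted above align offsets with two's-complement arithmetic rather than the align helper; the two forms agree:

	// offset + (-offset & (ptrSize-1)) == align(offset, ptrSize)
	//
	// e.g. offset = 20, ptrSize = 8: -20 & 7 == 4, and 20 + 4 == 24 == align(20, 8).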
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 1f185b5..eae1b9b 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -5,6 +5,7 @@
 package reflect
 
 import (
+	"internal/abi"
 	"internal/unsafeheader"
 	"math"
 	"runtime"
@@ -352,6 +353,8 @@
 
 var callGC bool // for testing; see TestCallMethodJump
 
+const debugReflectCall = false
+
 func (v Value) call(op string, in []Value) []Value {
 	// Get function pointer, type.
 	t := (*funcType)(unsafe.Pointer(v.typ))
@@ -430,50 +433,112 @@
 	}
 	nout := t.NumOut()
 
-	// Compute frame type.
-	frametype, _, retOffset, _, framePool := funcLayout(t, rcvrtype)
+	// Register argument space.
+	var regArgs abi.RegArgs
 
-	// Allocate a chunk of memory for frame.
-	var args unsafe.Pointer
-	if nout == 0 {
-		args = framePool.Get().(unsafe.Pointer)
-	} else {
-		// Can't use pool if the function has return values.
-		// We will leak pointer to args in ret, so its lifetime is not scoped.
-		args = unsafe_New(frametype)
+	// Compute frame type.
+	frametype, framePool, abi := funcLayout(t, rcvrtype)
+
+	// Allocate a chunk of memory for frame if needed.
+	var stackArgs unsafe.Pointer
+	if frametype.size != 0 {
+		if nout == 0 {
+			stackArgs = framePool.Get().(unsafe.Pointer)
+		} else {
+			// Can't use pool if the function has return values.
+			// We will leak pointer to args in ret, so its lifetime is not scoped.
+			stackArgs = unsafe_New(frametype)
+		}
 	}
-	off := uintptr(0)
+	frameSize := frametype.size
+
+	if debugReflectCall {
+		println("reflect.call", t.String())
+		abi.dump()
+	}
 
 	// Copy inputs into args.
+
+	// Handle receiver.
+	inStart := 0
 	if rcvrtype != nil {
-		storeRcvr(rcvr, args)
-		off = ptrSize
+		// The receiver is guaranteed to be one word in size,
+		// so it takes up exactly one abiStep (either
+		// in a register or on the stack).
+		switch st := abi.call.steps[0]; st.kind {
+		case abiStepStack:
+			storeRcvr(rcvr, stackArgs)
+		case abiStepIntReg, abiStepPointer:
+			// Even pointers can go into the uintptr slot because
+			// they'll be kept alive by the Values referenced by
+			// this frame. Reflection forces these to be heap-allocated,
+			// so we don't need to worry about stack copying.
+			storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
+		case abiStepFloatReg:
+			storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
+		default:
+			panic("unknown ABI parameter kind")
+		}
+		inStart = 1
 	}
+
+	// Handle arguments.
 	for i, v := range in {
 		v.mustBeExported()
 		targ := t.In(i).(*rtype)
-		a := uintptr(targ.align)
-		off = (off + a - 1) &^ (a - 1)
-		n := targ.size
-		if n == 0 {
-			// Not safe to compute args+off pointing at 0 bytes,
-			// because that might point beyond the end of the frame,
-			// but we still need to call assignTo to check assignability.
-			v.assignTo("reflect.Value.Call", targ, nil)
-			continue
+		// TODO(mknyszek): Figure out if it's possible to get some
+		// scratch space for this assignment check. Previously, it
+		// was possible to use space in the argument frame.
+		v = v.assignTo("reflect.Value.Call", targ, nil)
+	stepsLoop:
+		for _, st := range abi.call.stepsForValue(i + inStart) {
+			switch st.kind {
+			case abiStepStack:
+				// Copy values to the "stack."
+				addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
+				if v.flag&flagIndir != 0 {
+					typedmemmove(targ, addr, v.ptr)
+				} else {
+					*(*unsafe.Pointer)(addr) = v.ptr
+				}
+				// There's only one step for a stack-allocated value.
+				break stepsLoop
+			case abiStepIntReg, abiStepPointer:
+				// Copy values to "integer registers."
+				if v.flag&flagIndir != 0 {
+					offset := add(v.ptr, st.offset, "precomputed value offset")
+					memmove(unsafe.Pointer(&regArgs.Ints[st.ireg]), offset, st.size)
+				} else {
+					if st.kind == abiStepPointer {
+						// Duplicate this pointer in the pointer area of the
+						// register space. Otherwise, there's the potential for
+						// this to be the last reference to v.ptr.
+						regArgs.Ptrs[st.ireg] = v.ptr
+					}
+					regArgs.Ints[st.ireg] = uintptr(v.ptr)
+				}
+			case abiStepFloatReg:
+				// Copy values to "float registers."
+				if v.flag&flagIndir == 0 {
+					panic("attempted to copy pointer to FP register")
+				}
+				offset := add(v.ptr, st.offset, "precomputed value offset")
+				memmove(unsafe.Pointer(&regArgs.Floats[st.freg]), offset, st.size)
+			default:
+				panic("unknown ABI part kind")
+			}
 		}
-		addr := add(args, off, "n > 0")
-		v = v.assignTo("reflect.Value.Call", targ, addr)
-		if v.flag&flagIndir != 0 {
-			typedmemmove(targ, addr, v.ptr)
-		} else {
-			*(*unsafe.Pointer)(addr) = v.ptr
-		}
-		off += n
 	}
+	// TODO(mknyszek): Remove this when we no longer have
+	// caller reserved spill space.
+	frameSize = align(frameSize, ptrSize)
+	frameSize += abi.spill
+
+	// Mark pointers in registers for the return path.
+	regArgs.ReturnIsPtr = abi.outRegPtrs
 
 	// Call.
-	call(frametype, fn, args, uint32(frametype.size), uint32(retOffset))
+	call(frametype, fn, stackArgs, uint32(frametype.size), uint32(abi.retOffset), uint32(frameSize), &regArgs)
 
 	// For testing; see TestCallMethodJump.
 	if callGC {
@@ -482,34 +547,82 @@
 
 	var ret []Value
 	if nout == 0 {
-		typedmemclr(frametype, args)
-		framePool.Put(args)
+		if stackArgs != nil {
+			typedmemclr(frametype, stackArgs)
+			framePool.Put(stackArgs)
+		}
 	} else {
-		// Zero the now unused input area of args,
-		// because the Values returned by this function contain pointers to the args object,
-		// and will thus keep the args object alive indefinitely.
-		typedmemclrpartial(frametype, args, 0, retOffset)
+		if stackArgs != nil {
+			// Zero the now unused input area of args,
+			// because the Values returned by this function contain pointers to the args object,
+			// and will thus keep the args object alive indefinitely.
+			typedmemclrpartial(frametype, stackArgs, 0, abi.retOffset)
+		}
 
 		// Wrap Values around return values in args.
 		ret = make([]Value, nout)
-		off = retOffset
 		for i := 0; i < nout; i++ {
 			tv := t.Out(i)
-			a := uintptr(tv.Align())
-			off = (off + a - 1) &^ (a - 1)
-			if tv.Size() != 0 {
+			if tv.Size() == 0 {
+				// For a zero-sized return value, any address we compute may point at the next object.
+				// In this case, return the zero value instead.
+				ret[i] = Zero(tv)
+				continue
+			}
+			steps := abi.ret.stepsForValue(i)
+			if st := steps[0]; st.kind == abiStepStack {
+				// This value is on the stack. If part of a value is
+				// stack-allocated, the entire value is too, according
+				// to the ABI. So just make an indirection into the
+				// allocated frame.
 				fl := flagIndir | flag(tv.Kind())
-				ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl}
+				ret[i] = Value{tv.common(), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
 				// Note: this does introduce false sharing between results -
 				// if any result is live, they are all live.
 				// (And the space for the args is live as well, but as we've
 				// cleared that space it isn't as big a deal.)
-			} else {
-				// For zero-sized return value, args+off may point to the next object.
-				// In this case, return the zero value instead.
-				ret[i] = Zero(tv)
+				continue
 			}
-			off += tv.Size()
+
+			// Handle pointers passed in registers.
+			if !ifaceIndir(tv.common()) {
+				// Pointer-valued data gets put directly
+				// into v.ptr.
+				if steps[0].kind != abiStepPointer {
+					print("kind=", steps[0].kind, ", type=", tv.String(), "\n")
+					panic("mismatch between ABI description and types")
+				}
+				ret[i] = Value{tv.common(), regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
+				continue
+			}
+
+			// All that's left is values passed in registers that we need to
+			// create space for and copy values back into.
+			//
+			// TODO(mknyszek): We make a new allocation for each register-allocated
+			// value, but previously we could always point into the heap-allocated
+			// stack frame. This is a regression that could be fixed by adding
+			// additional space to the allocated stack frame and storing the
+			// register-allocated return values into the allocated stack frame and
+			// referring there in the resulting Value.
+			s := unsafe_New(tv.common())
+			for _, st := range steps {
+				switch st.kind {
+				case abiStepIntReg:
+					offset := add(s, st.offset, "precomputed value offset")
+					memmove(offset, unsafe.Pointer(&regArgs.Ints[st.ireg]), st.size)
+				case abiStepPointer:
+					s := add(s, st.offset, "precomputed value offset")
+					*((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
+				case abiStepFloatReg:
+					offset := add(s, st.offset, "precomputed value offset")
+					memmove(offset, unsafe.Pointer(&regArgs.Floats[st.freg]), st.size)
+				case abiStepStack:
+					panic("register-based return value has stack component")
+				default:
+					panic("unknown ABI part kind")
+				}
+			}
+			ret[i] = Value{tv.common(), s, flagIndir | flag(tv.Kind())}
 		}
 	}
 
@@ -709,7 +822,8 @@
 func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
 	rcvr := ctxt.rcvr
 	rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method)
-	frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype)
+	frametype, framePool, abid := funcLayout(t, rcvrtype)
+	argSize, retOffset := abid.stackCallArgsSize, abid.retOffset
 
 	// Make a new frame that is one word bigger so we can store the receiver.
 	// This space is used for both arguments and return values.
@@ -727,10 +841,19 @@
 		typedmemmovepartial(frametype, add(scratch, argOffset, "argSize > argOffset"), frame, argOffset, argSize-argOffset)
 	}
 
+	frameSize := frametype.size
+	// TODO(mknyszek): Remove this when we no longer have
+	// caller reserved spill space.
+	frameSize = align(frameSize, ptrSize)
+	frameSize += abid.spill
+
 	// Call.
 	// Call copies the arguments from scratch to the stack, calls fn,
 	// and then copies the results back into scratch.
-	call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset))
+	//
+	// TODO(mknyszek): Have this actually support the register-based ABI.
+	var regs abi.RegArgs
+	call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset), uint32(frameSize), &regs)
 
 	// Copy return values.
 	// Ignore any changes to args and just copy return values.
@@ -2802,14 +2925,32 @@
 //go:noescape
 func maplen(m unsafe.Pointer) int
 
-// call calls fn with a copy of the n argument bytes pointed at by arg.
-// After fn returns, reflectcall copies n-retoffset result bytes
-// back into arg+retoffset before returning. If copying result bytes back,
-// the caller must pass the argument frame type as argtype, so that
-// call can execute appropriate write barriers during the copy.
+// call calls fn with "stackArgsSize" bytes of stack arguments laid out
+// at stackArgs and register arguments laid out in regArgs. frameSize is
+// the total amount of stack space that will be reserved by call, so this
+// should include enough space to spill register arguments to the stack in
+// case of preemption.
 //
+// After fn returns, call copies stackArgsSize-stackRetOffset result bytes
+// back into stackArgs+stackRetOffset before returning, for any return
+// values passed on the stack. Register-based return values will be found
+// in the same regArgs structure.
+//
+// regArgs must also be prepared with an appropriate ReturnIsPtr bitmap
+// indicating which registers will contain pointer-valued return values. The
+// purpose of this bitmap is to keep pointers visible to the GC between
+// returning from reflectcall and actually using them.
+//
+// If copying result bytes back from the stack, the caller must pass the
+// argument frame type as stackArgsType, so that call can execute appropriate
+// write barriers during the copy.
+//
+// Arguments passed through to call do not escape. The type is used only in a
+// very limited callee of call, the stackArgs are copied, and regArgs is only
+// used in the call frame.
+//go:noescape
 //go:linkname call runtime.reflectcall
-func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
 
 func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
 
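The frame sizing in call and callMethod above reduces to one line of arithmetic. A standalone sketch with assumed numbers (illustration only):

	// On a 64-bit platform (ptrSize == 8), with a 20-byte stack-args
	// frame and two integer registers to spill:
	//
	//   frameSize = align(20, 8) + 2*8 = 24 + 16 = 40
	//
	// reflectcall therefore reserves 40 bytes: the stack arguments
	// plus room to spill register arguments on preemption.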
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index 27d8df9..72c7449 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -11,3 +11,8 @@
 DATA runtime·no_pointers_stackmap+0x00(SB)/4, $2
 DATA runtime·no_pointers_stackmap+0x04(SB)/4, $0
 GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
+
+#ifndef GOARCH_amd64
+TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+	JMP	·sigpanic<ABIInternal>(SB)
+#endif
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index fa3b1be..471451d 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -273,25 +273,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT, $0-4
-	MOVL	buf+0(FP), AX		// gobuf
-	LEAL	buf+0(FP), BX		// caller's SP
-	MOVL	BX, gobuf_sp(AX)
-	MOVL	0(SP), BX		// caller's PC
-	MOVL	BX, gobuf_pc(AX)
-	MOVL	$0, gobuf_ret(AX)
-	// Assert ctxt is zero. See func save.
-	MOVL	gobuf_ctxt(AX), BX
-	TESTL	BX, BX
-	JZ	2(PC)
-	CALL	runtime·badctxt(SB)
-	get_tls(CX)
-	MOVL	g(CX), BX
-	MOVL	BX, gobuf_g(AX)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $8-4
@@ -477,7 +458,7 @@
 	JMP runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -489,8 +470,8 @@
 	JMP	AX
 // Note: can't just "JMP NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT, $0-20
-	MOVL	argsize+12(FP), CX
+TEXT ·reflectcall(SB), NOSPLIT, $0-28
+	MOVL	frameSize+20(FP), CX
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -522,11 +503,11 @@
 	JMP	AX
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-20;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-28;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVL	argptr+8(FP), SI;		\
-	MOVL	argsize+12(FP), CX;		\
+	MOVL	stackArgs+8(FP), SI;		\
+	MOVL	stackArgsSize+12(FP), CX;		\
 	MOVL	SP, DI;				\
 	REP;MOVSB;				\
 	/* call function */			\
@@ -535,10 +516,10 @@
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
 	CALL	AX;				\
 	/* copy return values back */		\
-	MOVL	argtype+0(FP), DX;		\
-	MOVL	argptr+8(FP), DI;		\
-	MOVL	argsize+12(FP), CX;		\
-	MOVL	retoffset+16(FP), BX;		\
+	MOVL	stackArgsType+0(FP), DX;		\
+	MOVL	stackArgs+8(FP), DI;		\
+	MOVL	stackArgsSize+12(FP), CX;		\
+	MOVL	stackRetOffset+16(FP), BX;		\
 	MOVL	SP, SI;				\
 	ADDL	BX, DI;				\
 	ADDL	BX, SI;				\
@@ -550,11 +531,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
 	MOVL	DX, 0(SP)
 	MOVL	DI, 4(SP)
 	MOVL	SI, 8(SP)
 	MOVL	CX, 12(SP)
+	MOVL	$0, 16(SP)
 	CALL	runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 4ac8708..05422c9 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -84,9 +84,7 @@
 DATA _rt0_amd64_lib_argv<>(SB)/8, $0
 GLOBL _rt0_amd64_lib_argv<>(SB),NOPTR, $8
 
-// Defined as ABIInternal since it does not use the stack-based Go ABI (and
-// in addition there are no calls to this entry point from Go code).
-TEXT runtime·rt0_go<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT,$0
 	// copy arguments forward on an even stack
 	MOVQ	DI, AX		// argc
 	MOVQ	SI, BX		// argv
@@ -256,26 +254,6 @@
  *  go-routine
  */
 
-// func gosave(buf *gobuf)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT, $0-8
-	MOVQ	buf+0(FP), AX		// gobuf
-	LEAQ	buf+0(FP), BX		// caller's SP
-	MOVQ	BX, gobuf_sp(AX)
-	MOVQ	0(SP), BX		// caller's PC
-	MOVQ	BX, gobuf_pc(AX)
-	MOVQ	$0, gobuf_ret(AX)
-	MOVQ	BP, gobuf_bp(AX)
-	// Assert ctxt is zero. See func save.
-	MOVQ	gobuf_ctxt(AX), BX
-	TESTQ	BX, BX
-	JZ	2(PC)
-	CALL	runtime·badctxt(SB)
-	get_tls(CX)
-	MOVQ	g(CX), BX
-	MOVQ	BX, gobuf_g(AX)
-	RET
-
 // func gogo(buf *gobuf)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $16-8
@@ -284,6 +262,7 @@
 	MOVQ	0(DX), CX		// make sure g != nil
 	get_tls(CX)
 	MOVQ	DX, g(CX)
+	MOVQ	DX, R14		// set the g register
 	MOVQ	gobuf_sp(BX), SP	// restore SP
 	MOVQ	gobuf_ret(BX), AX
 	MOVQ	gobuf_ctxt(BX), DX
@@ -320,6 +299,7 @@
 	MOVQ	$runtime·badmcall(SB), AX
 	JMP	AX
 	MOVQ	SI, g(CX)	// g = m->g0
+	MOVQ	SI, R14	// set the g register
 	MOVQ	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
 	PUSHQ	AX
 	MOVQ	DI, DX
@@ -366,6 +346,7 @@
 
 	// switch to g0
 	MOVQ	DX, g(CX)
+	MOVQ	DX, R14 // set the g register
 	MOVQ	(g_sched+gobuf_sp)(DX), BX
 	// make it look like mstart called systemstack on g0, to stop traceback
 	SUBQ	$8, BX
@@ -464,8 +445,77 @@
 	MOVL	$0, DX
 	JMP	runtime·morestack(SB)
 
+// REFLECTCALL_USE_REGABI is not defined. It must be defined in conjunction with the
+// register constants in the internal/abi package.
+
+#ifdef REFLECTCALL_USE_REGABI
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
+TEXT spillArgs<>(SB),NOSPLIT,$0-0
+	MOVQ AX, 0(R12)
+	MOVQ BX, 8(R12)
+	MOVQ CX, 16(R12)
+	MOVQ DI, 24(R12)
+	MOVQ SI, 32(R12)
+	MOVQ R8, 40(R12)
+	MOVQ R9, 48(R12)
+	MOVQ R10, 56(R12)
+	MOVQ R11, 64(R12)
+	MOVQ X0, 72(R12)
+	MOVQ X1, 80(R12)
+	MOVQ X2, 88(R12)
+	MOVQ X3, 96(R12)
+	MOVQ X4, 104(R12)
+	MOVQ X5, 112(R12)
+	MOVQ X6, 120(R12)
+	MOVQ X7, 128(R12)
+	MOVQ X8, 136(R12)
+	MOVQ X9, 144(R12)
+	MOVQ X10, 152(R12)
+	MOVQ X11, 160(R12)
+	MOVQ X12, 168(R12)
+	MOVQ X13, 176(R12)
+	MOVQ X14, 184(R12)
+	RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
+TEXT unspillArgs<>(SB),NOSPLIT,$0-0
+	MOVQ 0(R12), AX
+	MOVQ 8(R12), BX
+	MOVQ 16(R12), CX
+	MOVQ 24(R12), DI
+	MOVQ 32(R12), SI
+	MOVQ 40(R12), R8
+	MOVQ 48(R12), R9
+	MOVQ 56(R12), R10
+	MOVQ 64(R12), R11
+	MOVQ 72(R12), X0
+	MOVQ 80(R12), X1
+	MOVQ 88(R12), X2
+	MOVQ 96(R12), X3
+	MOVQ 104(R12), X4
+	MOVQ 112(R12), X5
+	MOVQ 120(R12), X6
+	MOVQ 128(R12), X7
+	MOVQ 136(R12), X8
+	MOVQ 144(R12), X9
+	MOVQ 152(R12), X10
+	MOVQ 160(R12), X11
+	MOVQ 168(R12), X12
+	MOVQ 176(R12), X13
+	MOVQ 184(R12), X14
+	RET
+#else
+// spillArgs stores return values from registers to a pointer in R12.
+TEXT spillArgs<>(SB),NOSPLIT,$0-0
+	RET
+
+// unspillArgs loads args into registers from a pointer in R12.
+TEXT unspillArgs<>(SB),NOSPLIT,$0-0
+	RET
+#endif
+
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -477,8 +527,8 @@
 	JMP	AX
 // Note: can't just "JMP NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-32
-	MOVLQZX argsize+24(FP), CX
+TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-48
+	MOVLQZX frameSize+32(FP), CX
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -510,23 +560,28 @@
 	JMP	AX
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-32;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVQ	argptr+16(FP), SI;		\
-	MOVLQZX argsize+24(FP), CX;		\
+	MOVQ	stackArgs+16(FP), SI;		\
+	MOVLQZX stackArgsSize+24(FP), CX;		\
 	MOVQ	SP, DI;				\
 	REP;MOVSB;				\
+	/* set up argument registers */		\
+	MOVQ    regArgs+40(FP), R12;		\
+	CALL    unspillArgs<>(SB);		\
 	/* call function */			\
 	MOVQ	f+8(FP), DX;			\
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
-	MOVQ	(DX), AX;			\
-	CALL	AX;				\
-	/* copy return values back */		\
-	MOVQ	argtype+0(FP), DX;		\
-	MOVQ	argptr+16(FP), DI;		\
-	MOVLQZX	argsize+24(FP), CX;		\
-	MOVLQZX	retoffset+28(FP), BX;		\
+	MOVQ	(DX), R12;			\
+	CALL	R12;				\
+	/* copy register return values back */		\
+	MOVQ    regArgs+40(FP), R12;		\
+	CALL    spillArgs<>(SB);		\
+	MOVLQZX	stackArgsSize+24(FP), CX;		\
+	MOVLQZX	stackRetOffset+28(FP), BX;		\
+	MOVQ	stackArgs+16(FP), DI;		\
+	MOVQ	stackArgsType+0(FP), DX;		\
 	MOVQ	SP, SI;				\
 	ADDQ	BX, DI;				\
 	ADDQ	BX, SI;				\
@@ -538,12 +593,13 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	NO_LOCAL_POINTERS
 	MOVQ	DX, 0(SP)
 	MOVQ	DI, 8(SP)
 	MOVQ	SI, 16(SP)
 	MOVQ	CX, 24(SP)
+	MOVQ	R12, 32(SP)
 	CALL	runtime·reflectcallmove(SB)
 	RET
 
@@ -604,18 +660,20 @@
 	MOVQ	0(DX), BX
 	JMP	BX	// but first run the deferred function
 
-// Save state of caller into g->sched. Smashes R8, R9.
+// Save state of caller into g->sched. Smashes R9.
 TEXT gosave<>(SB),NOSPLIT,$0
-	get_tls(R8)
-	MOVQ	g(R8), R8
+#ifndef GOEXPERIMENT_REGABI
+	get_tls(R14)
+	MOVQ	g(R14), R14
+#endif
 	MOVQ	0(SP), R9
-	MOVQ	R9, (g_sched+gobuf_pc)(R8)
+	MOVQ	R9, (g_sched+gobuf_pc)(R14)
 	LEAQ	8(SP), R9
-	MOVQ	R9, (g_sched+gobuf_sp)(R8)
-	MOVQ	$0, (g_sched+gobuf_ret)(R8)
-	MOVQ	BP, (g_sched+gobuf_bp)(R8)
+	MOVQ	R9, (g_sched+gobuf_sp)(R14)
+	MOVQ	$0, (g_sched+gobuf_ret)(R14)
+	MOVQ	BP, (g_sched+gobuf_bp)(R14)
 	// Assert ctxt is zero. See func save.
-	MOVQ	(g_sched+gobuf_ctxt)(R8), R9
+	MOVQ	(g_sched+gobuf_ctxt)(R14), R9
 	TESTQ	R9, R9
 	JZ	2(PC)
 	CALL	runtime·badctxt(SB)
@@ -846,6 +904,7 @@
 TEXT setg_gcc<>(SB),NOSPLIT,$0
 	get_tls(AX)
 	MOVQ	DI, g(AX)
+	MOVQ	DI, R14 // set the g register
 	RET
 
 TEXT runtime·abort(SB),NOSPLIT,$0-0
@@ -1382,6 +1441,18 @@
 	POPQ	R15
 	RET
 
+// Initialize special registers then jump to sigpanic.
+// This function is injected from the signal handler for panicking
+// signals. It is quite painful to set X15 in the signal context,
+// so we do it here.
+TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+#ifdef GOEXPERIMENT_REGABI
+	get_tls(R14)
+	MOVQ	g(R14), R14
+	XORPS	X15, X15
+#endif
+	JMP	·sigpanic<ABIInternal>(SB)
+
 // gcWriteBarrier performs a heap pointer write and informs the GC.
 //
 // gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
@@ -1390,24 +1461,28 @@
 // It clobbers FLAGS. It does not clobber any general-purpose registers,
 // but may clobber others (e.g., SSE registers).
 // Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$120
+TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$112
 	// Save the registers clobbered by the fast path. This is slightly
 	// faster than having the caller spill these.
-	MOVQ	R14, 104(SP)
-	MOVQ	R13, 112(SP)
+	MOVQ	R12, 96(SP)
+	MOVQ	R13, 104(SP)
 	// TODO: Consider passing g.m.p in as an argument so they can be shared
 	// across a sequence of write barriers.
+#ifdef GOEXPERIMENT_REGABI
+	MOVQ	g_m(R14), R13
+#else
 	get_tls(R13)
 	MOVQ	g(R13), R13
 	MOVQ	g_m(R13), R13
+#endif
 	MOVQ	m_p(R13), R13
-	MOVQ	(p_wbBuf+wbBuf_next)(R13), R14
+	MOVQ	(p_wbBuf+wbBuf_next)(R13), R12
 	// Increment wbBuf.next position.
-	LEAQ	16(R14), R14
-	MOVQ	R14, (p_wbBuf+wbBuf_next)(R13)
-	CMPQ	R14, (p_wbBuf+wbBuf_end)(R13)
+	LEAQ	16(R12), R12
+	MOVQ	R12, (p_wbBuf+wbBuf_next)(R13)
+	CMPQ	R12, (p_wbBuf+wbBuf_end)(R13)
 	// Record the write.
-	MOVQ	AX, -16(R14)	// Record value
+	MOVQ	AX, -16(R12)	// Record value
 	// Note: This turns bad pointer writes into bad
 	// pointer reads, which could be confusing. We could avoid
 	// reading from obviously bad pointers, which would
@@ -1415,12 +1490,12 @@
 	// patch this up in the signal handler, or use XCHG to
 	// combine the read and the write.
 	MOVQ	(DI), R13
-	MOVQ	R13, -8(R14)	// Record *slot
+	MOVQ	R13, -8(R12)	// Record *slot
 	// Is the buffer full? (flags set in CMPQ above)
 	JEQ	flush
 ret:
-	MOVQ	104(SP), R14
-	MOVQ	112(SP), R13
+	MOVQ	96(SP), R12
+	MOVQ	104(SP), R13
 	// Do the write.
 	MOVQ	AX, (DI)
 	RET
@@ -1450,10 +1525,10 @@
 	MOVQ	R9, 64(SP)
 	MOVQ	R10, 72(SP)
 	MOVQ	R11, 80(SP)
-	MOVQ	R12, 88(SP)
+	// R12 already saved
 	// R13 already saved
-	// R14 already saved
-	MOVQ	R15, 96(SP)
+	// R14 is g
+	MOVQ	R15, 88(SP)
 
 	// This takes arguments DI and AX
 	CALL	runtime·wbBufFlush(SB)
@@ -1469,8 +1544,7 @@
 	MOVQ	64(SP), R9
 	MOVQ	72(SP), R10
 	MOVQ	80(SP), R11
-	MOVQ	88(SP), R12
-	MOVQ	96(SP), R15
+	MOVQ	88(SP), R15
 	JMP	ret
 
 // gcWriteBarrierCX is gcWriteBarrier, but with args in DI and CX.
@@ -1732,67 +1806,67 @@
 TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	AX, x+0(FP)
 	MOVQ	CX, y+8(FP)
-	JMP	runtime·goPanicIndex(SB)
+	JMP	runtime·goPanicIndex<ABIInternal>(SB)
 TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	AX, x+0(FP)
 	MOVQ	CX, y+8(FP)
-	JMP	runtime·goPanicIndexU(SB)
+	JMP	runtime·goPanicIndexU<ABIInternal>(SB)
 TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	CX, x+0(FP)
 	MOVQ	DX, y+8(FP)
-	JMP	runtime·goPanicSliceAlen(SB)
+	JMP	runtime·goPanicSliceAlen<ABIInternal>(SB)
 TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	CX, x+0(FP)
 	MOVQ	DX, y+8(FP)
-	JMP	runtime·goPanicSliceAlenU(SB)
+	JMP	runtime·goPanicSliceAlenU<ABIInternal>(SB)
 TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	CX, x+0(FP)
 	MOVQ	DX, y+8(FP)
-	JMP	runtime·goPanicSliceAcap(SB)
+	JMP	runtime·goPanicSliceAcap<ABIInternal>(SB)
 TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	CX, x+0(FP)
 	MOVQ	DX, y+8(FP)
-	JMP	runtime·goPanicSliceAcapU(SB)
+	JMP	runtime·goPanicSliceAcapU<ABIInternal>(SB)
 TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	AX, x+0(FP)
 	MOVQ	CX, y+8(FP)
-	JMP	runtime·goPanicSliceB(SB)
+	JMP	runtime·goPanicSliceB<ABIInternal>(SB)
 TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	AX, x+0(FP)
 	MOVQ	CX, y+8(FP)
-	JMP	runtime·goPanicSliceBU(SB)
+	JMP	runtime·goPanicSliceBU<ABIInternal>(SB)
 TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	DX, x+0(FP)
 	MOVQ	BX, y+8(FP)
-	JMP	runtime·goPanicSlice3Alen(SB)
+	JMP	runtime·goPanicSlice3Alen<ABIInternal>(SB)
 TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	DX, x+0(FP)
 	MOVQ	BX, y+8(FP)
-	JMP	runtime·goPanicSlice3AlenU(SB)
+	JMP	runtime·goPanicSlice3AlenU<ABIInternal>(SB)
 TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	DX, x+0(FP)
 	MOVQ	BX, y+8(FP)
-	JMP	runtime·goPanicSlice3Acap(SB)
+	JMP	runtime·goPanicSlice3Acap<ABIInternal>(SB)
 TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	DX, x+0(FP)
 	MOVQ	BX, y+8(FP)
-	JMP	runtime·goPanicSlice3AcapU(SB)
+	JMP	runtime·goPanicSlice3AcapU<ABIInternal>(SB)
 TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	CX, x+0(FP)
 	MOVQ	DX, y+8(FP)
-	JMP	runtime·goPanicSlice3B(SB)
+	JMP	runtime·goPanicSlice3B<ABIInternal>(SB)
 TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	CX, x+0(FP)
 	MOVQ	DX, y+8(FP)
-	JMP	runtime·goPanicSlice3BU(SB)
+	JMP	runtime·goPanicSlice3BU<ABIInternal>(SB)
 TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	AX, x+0(FP)
 	MOVQ	CX, y+8(FP)
-	JMP	runtime·goPanicSlice3C(SB)
+	JMP	runtime·goPanicSlice3C<ABIInternal>(SB)
 TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
 	MOVQ	AX, x+0(FP)
 	MOVQ	CX, y+8(FP)
-	JMP	runtime·goPanicSlice3CU(SB)
+	JMP	runtime·goPanicSlice3CU<ABIInternal>(SB)
 
 #ifdef GOOS_android
 // Use the free TLS_SLOT_APP slot #2 on Android Q.
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index c54b4eb..23619b1 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -206,23 +206,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB),NOSPLIT|NOFRAME,$0-4
-	MOVW	buf+0(FP), R0
-	MOVW	R13, gobuf_sp(R0)
-	MOVW	LR, gobuf_pc(R0)
-	MOVW	g, gobuf_g(R0)
-	MOVW	$0, R11
-	MOVW	R11, gobuf_lr(R0)
-	MOVW	R11, gobuf_ret(R0)
-	// Assert ctxt is zero. See func save.
-	MOVW	gobuf_ctxt(R0), R0
-	CMP	R0, R11
-	B.EQ	2(PC)
-	CALL	runtime·badctxt(SB)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB),NOSPLIT,$8-4
@@ -421,7 +404,7 @@
 	B runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -432,8 +415,8 @@
 	MOVW	$NAME(SB), R1;		\
 	B	(R1)
 
-TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
-	MOVW	argsize+12(FP), R0
+TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28
+	MOVW	frameSize+20(FP), R0
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -465,11 +448,11 @@
 	B	(R1)
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-20;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-28;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVW	argptr+8(FP), R0;		\
-	MOVW	argsize+12(FP), R2;		\
+	MOVW	stackArgs+8(FP), R0;		\
+	MOVW	stackArgsSize+12(FP), R2;		\
 	ADD	$4, R13, R1;			\
 	CMP	$0, R2;				\
 	B.EQ	5(PC);				\
@@ -483,10 +466,10 @@
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
 	BL	(R0);				\
 	/* copy return values back */		\
-	MOVW	argtype+0(FP), R4;		\
-	MOVW	argptr+8(FP), R0;		\
-	MOVW	argsize+12(FP), R2;		\
-	MOVW	retoffset+16(FP), R3;		\
+	MOVW	stackArgsType+0(FP), R4;		\
+	MOVW	stackArgs+8(FP), R0;		\
+	MOVW	stackArgsSize+12(FP), R2;		\
+	MOVW	stackRetOffset+16(FP), R3;		\
 	ADD	$4, R13, R1;			\
 	ADD	R3, R1;				\
 	ADD	R3, R0;				\
@@ -498,11 +481,13 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
 	MOVW	R4, 4(R13)
 	MOVW	R0, 8(R13)
 	MOVW	R1, 12(R13)
 	MOVW	R2, 16(R13)
+	MOVW	$0, R7
+	MOVW	R7, 20(R13)
 	BL	runtime·reflectcallmove(SB)
 	RET
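On 32-bit ARM the reflectcall frame grows from $0-20 to $0-28 because two trailing parameters (frameSize and regArgs) were appended, and callRet reserves one extra word that it zeroes, passing a nil *abi.RegArgs to reflectcallmove for now. A sketch of the resulting +N(FP) offsets, assuming 4-byte pointers (derived from this diff, not from an ABI document):

	package main

	import "fmt"

	// Computes the frame offsets used above for the 32-bit ports,
	// assuming 4-byte pointers and 4-byte uint32 fields.
	func main() {
		ptr, u32 := 4, 4
		off := 0
		for _, a := range []struct {
			name string
			size int
		}{
			{"stackArgsType", ptr}, {"fn", ptr}, {"stackArgs", ptr},
			{"stackArgsSize", u32}, {"stackRetOffset", u32},
			{"frameSize", u32}, {"regArgs", ptr},
		} {
			fmt.Printf("%-14s +%d(FP)\n", a.name, off)
			off += a.size
		}
		fmt.Println("total:", off) // 28, matching $0-28 and frameSize+20(FP)
	}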
 
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index a09172f..0ab92be 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -113,23 +113,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
-	MOVD	buf+0(FP), R3
-	MOVD	RSP, R0
-	MOVD	R0, gobuf_sp(R3)
-	MOVD	R29, gobuf_bp(R3)
-	MOVD	LR, gobuf_pc(R3)
-	MOVD	g, gobuf_g(R3)
-	MOVD	ZR, gobuf_lr(R3)
-	MOVD	ZR, gobuf_ret(R3)
-	// Assert ctxt is zero. See func save.
-	MOVD	gobuf_ctxt(R3), R0
-	CBZ	R0, 2(PC)
-	CALL	runtime·badctxt(SB)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $24-8
@@ -329,7 +312,7 @@
 	B runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -342,8 +325,8 @@
 	B	(R27)
 // Note: can't just "B NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWU argsize+24(FP), R16
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWU	frameSize+32(FP), R16
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -375,11 +358,11 @@
 	B	(R0)
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVD	arg+16(FP), R3;			\
-	MOVWU	argsize+24(FP), R4;		\
+	MOVD	stackArgs+16(FP), R3;			\
+	MOVWU	stackArgsSize+24(FP), R4;		\
 	ADD	$8, RSP, R5;			\
 	BIC	$0xf, R4, R6;			\
 	CBZ	R6, 6(PC);			\
@@ -405,10 +388,10 @@
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
 	BL	(R0);				\
 	/* copy return values back */		\
-	MOVD	argtype+0(FP), R7;		\
-	MOVD	arg+16(FP), R3;			\
-	MOVWU	n+24(FP), R4;			\
-	MOVWU	retoffset+28(FP), R6;		\
+	MOVD	stackArgsType+0(FP), R7;		\
+	MOVD	stackArgs+16(FP), R3;			\
+	MOVWU	stackArgsSize+24(FP), R4;			\
+	MOVWU	stackRetOffset+28(FP), R6;		\
 	ADD	$8, RSP, R5;			\
 	ADD	R6, R5; 			\
 	ADD	R6, R3;				\
@@ -420,11 +403,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $40-0
+TEXT callRet<>(SB), NOSPLIT, $48-0
 	MOVD	R7, 8(RSP)
 	MOVD	R3, 16(RSP)
 	MOVD	R5, 24(RSP)
 	MOVD	R4, 32(RSP)
+	MOVD	$0, 40(RSP)
 	BL	runtime·reflectcallmove(SB)
 	RET
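The 64-bit ports (arm64 here, and mips64x/ppc64x/riscv64/s390x below) make the same change with $0-48 frames: the three uint32s follow the three pointers, and regArgs is padded up to an 8-byte boundary. The MOVD $0, 40(RSP) added to callRet fills that final slot with a nil *abi.RegArgs. A sketch of the layout under those assumptions:

	package main

	import "fmt"

	// Same offset computation for the 64-bit ports, assuming 8-byte
	// pointers; regArgs lands at +40 after padding, giving the $0-48
	// frames and the frameSize+32(FP) loads seen in the diff.
	func main() {
		align := func(off, n int) int { return (off + n - 1) &^ (n - 1) }
		off := 0
		add := func(name string, size int) {
			off = align(off, size)
			fmt.Printf("%-14s +%d(FP)\n", name, off)
			off += size
		}
		add("stackArgsType", 8)
		add("fn", 8)
		add("stackArgs", 8)
		add("stackArgsSize", 4)
		add("stackRetOffset", 4)
		add("frameSize", 4)
		add("regArgs", 8) // padded to +40
		fmt.Println("total:", align(off, 8)) // 48
	}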
 
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 19781f7..6949506 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -89,21 +89,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
-	MOVV	buf+0(FP), R1
-	MOVV	R29, gobuf_sp(R1)
-	MOVV	R31, gobuf_pc(R1)
-	MOVV	g, gobuf_g(R1)
-	MOVV	R0, gobuf_lr(R1)
-	MOVV	R0, gobuf_ret(R1)
-	// Assert ctxt is zero. See func save.
-	MOVV	gobuf_ctxt(R1), R1
-	BEQ	R1, 2(PC)
-	JAL	runtime·badctxt(SB)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $16-8
@@ -279,7 +264,7 @@
 	JMP	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -292,8 +277,8 @@
 	JMP	(R4)
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWU argsize+24(FP), R1
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWU	frameSize+32(FP), R1
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -325,11 +310,11 @@
 	JMP	(R4)
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVV	arg+16(FP), R1;			\
-	MOVWU	argsize+24(FP), R2;			\
+	MOVV	stackArgs+16(FP), R1;			\
+	MOVWU	stackArgsSize+24(FP), R2;			\
 	MOVV	R29, R3;				\
 	ADDV	$8, R3;			\
 	ADDV	R3, R2;				\
@@ -345,10 +330,10 @@
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
 	JAL	(R4);				\
 	/* copy return values back */		\
-	MOVV	argtype+0(FP), R5;		\
-	MOVV	arg+16(FP), R1;			\
-	MOVWU	n+24(FP), R2;			\
-	MOVWU	retoffset+28(FP), R4;		\
+	MOVV	stackArgsType+0(FP), R5;		\
+	MOVV	stackArgs+16(FP), R1;			\
+	MOVWU	stackArgsSize+24(FP), R2;			\
+	MOVWU	stackRetOffset+28(FP), R4;		\
 	ADDV	$8, R29, R3;				\
 	ADDV	R4, R3; 			\
 	ADDV	R4, R1;				\
@@ -360,11 +345,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	MOVV	R5, 8(R29)
 	MOVV	R1, 16(R29)
 	MOVV	R3, 24(R29)
 	MOVV	R2, 32(R29)
+	MOVV	$0, 40(R29)
 	JAL	runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index ee87d81..8e5753d 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -90,21 +90,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB),NOSPLIT|NOFRAME,$0-4
-	MOVW	buf+0(FP), R1
-	MOVW	R29, gobuf_sp(R1)
-	MOVW	R31, gobuf_pc(R1)
-	MOVW	g, gobuf_g(R1)
-	MOVW	R0, gobuf_lr(R1)
-	MOVW	R0, gobuf_ret(R1)
-	// Assert ctxt is zero. See func save.
-	MOVW	gobuf_ctxt(R1), R1
-	BEQ	R1, 2(PC)
-	JAL	runtime·badctxt(SB)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB),NOSPLIT,$8-4
@@ -280,7 +265,7 @@
 	JMP	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 
@@ -291,8 +276,8 @@
 	MOVW	$NAME(SB), R4;	\
 	JMP	(R4)
 
-TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
-	MOVW	argsize+12(FP), R1
+TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28
+	MOVW	frameSize+20(FP), R1
 
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
@@ -325,11 +310,11 @@
 	JMP	(R4)
 
 #define CALLFN(NAME,MAXSIZE)	\
-TEXT NAME(SB),WRAPPER,$MAXSIZE-20;	\
+TEXT NAME(SB),WRAPPER,$MAXSIZE-28;	\
 	NO_LOCAL_POINTERS;	\
 	/* copy arguments to stack */		\
-	MOVW	arg+8(FP), R1;	\
-	MOVW	argsize+12(FP), R2;	\
+	MOVW	stackArgs+8(FP), R1;	\
+	MOVW	stackArgsSize+12(FP), R2;	\
 	MOVW	R29, R3;	\
 	ADDU	$4, R3;	\
 	ADDU	R3, R2;	\
@@ -345,10 +330,10 @@
 	PCDATA	$PCDATA_StackMapIndex, $0;	\
 	JAL	(R4);	\
 	/* copy return values back */		\
-	MOVW	argtype+0(FP), R5;	\
-	MOVW	arg+8(FP), R1;	\
-	MOVW	n+12(FP), R2;	\
-	MOVW	retoffset+16(FP), R4;	\
+	MOVW	stackArgsType+0(FP), R5;	\
+	MOVW	stackArgs+8(FP), R1;	\
+	MOVW	stackArgsSize+12(FP), R2;	\
+	MOVW	stackRetOffset+16(FP), R4;	\
 	ADDU	$4, R29, R3;	\
 	ADDU	R4, R3;	\
 	ADDU	R4, R1;	\
@@ -360,11 +345,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
 	MOVW	R5, 4(R29)
 	MOVW	R1, 8(R29)
 	MOVW	R3, 12(R29)
 	MOVW	R2, 16(R29)
+	MOVW	$0, 20(R29)
 	JAL	runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index dc34c0e..834023c 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -128,23 +128,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
-	MOVD	buf+0(FP), R3
-	MOVD	R1, gobuf_sp(R3)
-	MOVD	LR, R31
-	MOVD	R31, gobuf_pc(R3)
-	MOVD	g, gobuf_g(R3)
-	MOVD	R0, gobuf_lr(R3)
-	MOVD	R0, gobuf_ret(R3)
-	// Assert ctxt is zero. See func save.
-	MOVD	gobuf_ctxt(R3), R3
-	CMP	R0, R3
-	BEQ	2(PC)
-	BL	runtime·badctxt(SB)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $16-8
@@ -356,7 +339,7 @@
 	BR	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -370,8 +353,8 @@
 	BR	(CTR)
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWZ argsize+24(FP), R3
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWZ	frameSize+32(FP), R3
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -404,11 +387,11 @@
 	BR	(CTR)
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVD	arg+16(FP), R3;			\
-	MOVWZ	argsize+24(FP), R4;			\
+	MOVD	stackArgs+16(FP), R3;			\
+	MOVWZ	stackArgsSize+24(FP), R4;			\
 	MOVD    R1, R5;				\
 	CMP	R4, $8;				\
 	BLT	tailsetup;			\
@@ -456,10 +439,10 @@
 	MOVD	24(R1), R2;			\
 #endif						\
 	/* copy return values back */		\
-	MOVD	argtype+0(FP), R7;		\
-	MOVD	arg+16(FP), R3;			\
-	MOVWZ	n+24(FP), R4;			\
-	MOVWZ	retoffset+28(FP), R6;		\
+	MOVD	stackArgsType+0(FP), R7;		\
+	MOVD	stackArgs+16(FP), R3;			\
+	MOVWZ	stackArgsSize+24(FP), R4;			\
+	MOVWZ	stackRetOffset+28(FP), R6;		\
 	ADD	$FIXED_FRAME, R1, R5;		\
 	ADD	R6, R5; 			\
 	ADD	R6, R3;				\
@@ -471,11 +454,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	MOVD	R7, FIXED_FRAME+0(R1)
 	MOVD	R3, FIXED_FRAME+8(R1)
 	MOVD	R5, FIXED_FRAME+16(R1)
 	MOVD	R4, FIXED_FRAME+24(R1)
+	MOVD	$0, FIXED_FRAME+32(R1)
 	BL	runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 01b42dc..31e324d 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -297,21 +297,6 @@
 	JALR	RA, T1
 	JMP	runtime·badmcall2(SB)
 
-// func gosave(buf *gobuf)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
-	MOV	buf+0(FP), T1
-	MOV	X2, gobuf_sp(T1)
-	MOV	RA, gobuf_pc(T1)
-	MOV	g, gobuf_g(T1)
-	MOV	ZERO, gobuf_lr(T1)
-	MOV	ZERO, gobuf_ret(T1)
-	// Assert ctxt is zero. See func save.
-	MOV	gobuf_ctxt(T1), T1
-	BEQ	T1, ZERO, 2(PC)
-	CALL	runtime·badctxt(SB)
-	RET
-
 // Save state of caller into g->sched. Smashes X31.
 TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
 	MOV	X1, (g_sched+gobuf_pc)(g)
@@ -374,7 +359,7 @@
 	RET
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -386,13 +371,13 @@
 	JALR	ZERO, T2
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
-// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+// func call(stackArgsType *rtype, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 TEXT reflect·call(SB), NOSPLIT, $0-0
 	JMP	·reflectcall(SB)
 
-// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
-	MOVWU argsize+24(FP), T0
+// func call(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+	MOVWU	frameSize+32(FP), T0
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -424,11 +409,11 @@
 	JALR	ZERO, T2
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOV	arg+16(FP), A1;			\
-	MOVWU	argsize+24(FP), A2;		\
+	MOV	stackArgs+16(FP), A1;			\
+	MOVWU	stackArgsSize+24(FP), A2;		\
 	MOV	X2, A3;				\
 	ADD	$8, A3;				\
 	ADD	A3, A2;				\
@@ -444,10 +429,10 @@
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
 	JALR	RA, A4;				\
 	/* copy return values back */		\
-	MOV	argtype+0(FP), A5;		\
-	MOV	arg+16(FP), A1;			\
-	MOVWU	n+24(FP), A2;			\
-	MOVWU	retoffset+28(FP), A4;		\
+	MOV	stackArgsType+0(FP), A5;		\
+	MOV	stackArgs+16(FP), A1;			\
+	MOVWU	stackArgsSize+24(FP), A2;			\
+	MOVWU	stackRetOffset+28(FP), A4;		\
 	ADD	$8, X2, A3;			\
 	ADD	A4, A3; 			\
 	ADD	A4, A1;				\
@@ -459,11 +444,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	MOV	A5, 8(X2)
 	MOV	A1, 16(X2)
 	MOV	A3, 24(X2)
 	MOV	A2, 32(X2)
+	MOV	$0, 40(X2)
 	CALL	runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 7baef37..fbd185c 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -174,21 +174,6 @@
  *  go-routine
  */
 
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT, $-8-8
-	MOVD	buf+0(FP), R3
-	MOVD	R15, gobuf_sp(R3)
-	MOVD	LR, gobuf_pc(R3)
-	MOVD	g, gobuf_g(R3)
-	MOVD	$0, gobuf_lr(R3)
-	MOVD	$0, gobuf_ret(R3)
-	// Assert ctxt is zero. See func save.
-	MOVD	gobuf_ctxt(R3), R3
-	CMPBEQ	R3, $0, 2(PC)
-	BL	runtime·badctxt(SB)
-	RET
-
 // void gogo(Gobuf*)
 // restore state from Gobuf; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $16-8
@@ -368,7 +353,7 @@
 	BR	runtime·morestack(SB)
 
 // reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
 // we don't have variable-sized frames, so we use a small number
 // of constant-sized-frame functions to encode a few bits of size in the pc.
 // Caution: ugly multiline assembly macros in your future!
@@ -381,8 +366,8 @@
 	BR	(R5)
 // Note: can't just "BR NAME(SB)" - bad inlining results.
 
-TEXT ·reflectcall(SB), NOSPLIT, $-8-32
-	MOVWZ argsize+24(FP), R3
+TEXT ·reflectcall(SB), NOSPLIT, $-8-48
+	MOVWZ	frameSize+32(FP), R3
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
 	DISPATCH(runtime·call64, 64)
@@ -414,11 +399,11 @@
 	BR	(R5)
 
 #define CALLFN(NAME,MAXSIZE)			\
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48;		\
 	NO_LOCAL_POINTERS;			\
 	/* copy arguments to stack */		\
-	MOVD	arg+16(FP), R4;			\
-	MOVWZ	argsize+24(FP), R5;		\
+	MOVD	stackArgs+16(FP), R4;			\
+	MOVWZ	stackArgsSize+24(FP), R5;		\
 	MOVD	$stack-MAXSIZE(SP), R6;		\
 loopArgs: /* copy 256 bytes at a time */	\
 	CMP	R5, $256;			\
@@ -439,11 +424,11 @@
 	PCDATA  $PCDATA_StackMapIndex, $0;	\
 	BL	(R8);				\
 	/* copy return values back */		\
-	MOVD	argtype+0(FP), R7;		\
-	MOVD	arg+16(FP), R6;			\
-	MOVWZ	n+24(FP), R5;			\
+	MOVD	stackArgsType+0(FP), R7;		\
+	MOVD	stackArgs+16(FP), R6;			\
+	MOVWZ	stackArgsSize+24(FP), R5;			\
 	MOVD	$stack-MAXSIZE(SP), R4;		\
-	MOVWZ	retoffset+28(FP), R1;		\
+	MOVWZ	stackRetOffset+28(FP), R1;		\
 	ADD	R1, R4;				\
 	ADD	R1, R6;				\
 	SUB	R1, R5;				\
@@ -454,11 +439,12 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	MOVD	R7, 8(R15)
 	MOVD	R6, 16(R15)
 	MOVD	R4, 24(R15)
 	MOVD	R5, 32(R15)
+	MOVD	$0, 40(R15)
 	BL	runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index fcb780f..cf3d961 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -296,14 +296,14 @@
 		JMP NAME(SB); \
 	End
 
-TEXT ·reflectcall(SB), NOSPLIT, $0-32
+TEXT ·reflectcall(SB), NOSPLIT, $0-48
 	I64Load fn+8(FP)
 	I64Eqz
 	If
 		CALLNORESUME runtime·sigpanic<ABIInternal>(SB)
 	End
 
-	MOVW argsize+24(FP), R0
+	MOVW frameSize+32(FP), R0
 
 	DISPATCH(runtime·call16, 16)
 	DISPATCH(runtime·call32, 32)
@@ -335,18 +335,18 @@
 	JMP runtime·badreflectcall(SB)
 
 #define CALLFN(NAME, MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
 	NO_LOCAL_POINTERS; \
-	MOVW argsize+24(FP), R0; \
+	MOVW stackArgsSize+24(FP), R0; \
 	\
 	Get R0; \
 	I64Eqz; \
 	Not; \
 	If; \
 		Get SP; \
-		I64Load argptr+16(FP); \
+		I64Load stackArgs+16(FP); \
 		I32WrapI64; \
-		I64Load argsize+24(FP); \
+		I64Load stackArgsSize+24(FP); \
 		I64Const $3; \
 		I64ShrU; \
 		I32WrapI64; \
@@ -359,12 +359,12 @@
 	I64Load $0; \
 	CALL; \
 	\
-	I64Load32U retoffset+28(FP); \
+	I64Load32U stackRetOffset+28(FP); \
 	Set R0; \
 	\
-	MOVD argtype+0(FP), RET0; \
+	MOVD stackArgsType+0(FP), RET0; \
 	\
-	I64Load argptr+16(FP); \
+	I64Load stackArgs+16(FP); \
 	Get R0; \
 	I64Add; \
 	Set RET1; \
@@ -375,7 +375,7 @@
 	I64Add; \
 	Set RET2; \
 	\
-	I64Load32U argsize+24(FP); \
+	I64Load32U stackArgsSize+24(FP); \
 	Get R0; \
 	I64Sub; \
 	Set RET3; \
@@ -387,12 +387,13 @@
 // separate function so it can allocate stack space for the arguments
 // to reflectcallmove. It does not follow the Go ABI; it expects its
 // arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
 	NO_LOCAL_POINTERS
 	MOVD RET0, 0(SP)
 	MOVD RET1, 8(SP)
 	MOVD RET2, 16(SP)
 	MOVD RET3, 24(SP)
+	MOVD $0, 32(SP)
 	CALL runtime·reflectcallmove(SB)
 	RET
 
diff --git a/src/runtime/cgo/gcc_amd64.S b/src/runtime/cgo/gcc_amd64.S
index 17d9d47..d75f864 100644
--- a/src/runtime/cgo/gcc_amd64.S
+++ b/src/runtime/cgo/gcc_amd64.S
@@ -30,9 +30,14 @@
 	pushq %r15
 
 #if defined(_WIN64)
+	movq %r8, %rdi	/* arg of setg_gcc */
+	call *%rdx	/* setg_gcc */
 	call *%rcx	/* fn */
 #else
-	call *%rdi	/* fn */
+	movq %rdi, %rbx
+	movq %rdx, %rdi	/* arg of setg_gcc */
+	call *%rsi	/* setg_gcc */
+	call *%rbx	/* fn */
 #endif
 
 	popq %r15
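crosscall_amd64 now receives setg_gcc and the g pointer and calls setg before invoking fn, so g is installed before the first Go instruction runs on a pthread-created thread; the per-OS threadentry functions below drop their own setg_gcc calls, and the Darwin and Windows variants stop writing the TLS slot by hand. A hedged way to exercise this path (any cgo build that grows OS threads will do):

	package main

	/*
	#include <unistd.h>
	*/
	import "C"

	import "sync"

	func main() {
		var wg sync.WaitGroup
		for i := 0; i < 8; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				C.usleep(10000) // block in C so the runtime grows OS threads
			}()
		}
		wg.Wait()
	}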
diff --git a/src/runtime/cgo/gcc_darwin_amd64.c b/src/runtime/cgo/gcc_darwin_amd64.c
index 51410d5..d5b7fd8 100644
--- a/src/runtime/cgo/gcc_darwin_amd64.c
+++ b/src/runtime/cgo/gcc_darwin_amd64.c
@@ -9,13 +9,16 @@
 #include "libcgo_unix.h"
 
 static void* threadentry(void*);
+static void (*setg_gcc)(void*);
 
 void
-x_cgo_init(G *g)
+x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
 {
 	pthread_attr_t attr;
 	size_t size;
 
+	setg_gcc = setg;
+
 	pthread_attr_init(&attr);
 	pthread_attr_getstacksize(&attr, &size);
 	g->stacklo = (uintptr)&attr - size + 4096;
@@ -57,10 +60,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	// Move the g pointer into the slot reserved in thread local storage.
-	// Constant must match the one in cmd/link/internal/ld/sym.go.
-	asm volatile("movq %0, %%gs:0x30" :: "r"(ts.g));
-
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_dragonfly_amd64.c b/src/runtime/cgo/gcc_dragonfly_amd64.c
index d25db91..0003414 100644
--- a/src/runtime/cgo/gcc_dragonfly_amd64.c
+++ b/src/runtime/cgo/gcc_dragonfly_amd64.c
@@ -61,11 +61,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_freebsd_amd64.c b/src/runtime/cgo/gcc_freebsd_amd64.c
index 514a2f8..6071ec3 100644
--- a/src/runtime/cgo/gcc_freebsd_amd64.c
+++ b/src/runtime/cgo/gcc_freebsd_amd64.c
@@ -69,11 +69,6 @@
 	free(v);
 	_cgo_tsan_release();
 
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_linux_amd64.c b/src/runtime/cgo/gcc_linux_amd64.c
index f2bf648..c25e7e7 100644
--- a/src/runtime/cgo/gcc_linux_amd64.c
+++ b/src/runtime/cgo/gcc_linux_amd64.c
@@ -89,11 +89,6 @@
 	free(v);
 	_cgo_tsan_release();
 
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_netbsd_amd64.c b/src/runtime/cgo/gcc_netbsd_amd64.c
index dc966fc..9f4b031 100644
--- a/src/runtime/cgo/gcc_netbsd_amd64.c
+++ b/src/runtime/cgo/gcc_netbsd_amd64.c
@@ -62,11 +62,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
 	// On NetBSD, a new thread inherits the signal stack of the
 	// creating thread. That confuses minit, so we remove that
 	// signal stack here before calling the regular mstart. It's
@@ -78,6 +73,6 @@
 	ss.ss_flags = SS_DISABLE;
 	sigaltstack(&ss, nil);
 
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_openbsd_amd64.c b/src/runtime/cgo/gcc_openbsd_amd64.c
index 34319fb..09d2750 100644
--- a/src/runtime/cgo/gcc_openbsd_amd64.c
+++ b/src/runtime/cgo/gcc_openbsd_amd64.c
@@ -60,11 +60,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_solaris_amd64.c b/src/runtime/cgo/gcc_solaris_amd64.c
index 079bd12..e89e844 100644
--- a/src/runtime/cgo/gcc_solaris_amd64.c
+++ b/src/runtime/cgo/gcc_solaris_amd64.c
@@ -72,11 +72,6 @@
 	ts = *(ThreadStart*)v;
 	free(v);
 
-	/*
-	 * Set specific keys.
-	 */
-	setg_gcc((void*)ts.g);
-
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 	return nil;
 }
diff --git a/src/runtime/cgo/gcc_windows_amd64.c b/src/runtime/cgo/gcc_windows_amd64.c
index 0f8c817..25cfd08 100644
--- a/src/runtime/cgo/gcc_windows_amd64.c
+++ b/src/runtime/cgo/gcc_windows_amd64.c
@@ -12,10 +12,12 @@
 #include "libcgo_windows.h"
 
 static void threadentry(void*);
+static void (*setg_gcc)(void*);
 
 void
-x_cgo_init(G *g)
+x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
 {
+	setg_gcc = setg;
 }
 
 
@@ -46,10 +48,8 @@
 	 */
 	asm volatile (
 	  "movq %0, %%gs:0x28\n"	// MOVL tls0, 0x28(GS)
-	  "movq %%gs:0x28, %%rax\n" // MOVQ 0x28(GS), tmp
-	  "movq %1, 0(%%rax)\n" // MOVQ g, 0(GS)
-	  :: "r"(ts.tls), "r"(ts.g) : "%rax"
+	  :: "r"(ts.tls)
 	);
 
-	crosscall_amd64(ts.fn);
+	crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
 }
diff --git a/src/runtime/cgo/libcgo.h b/src/runtime/cgo/libcgo.h
index aba500a..af4960e 100644
--- a/src/runtime/cgo/libcgo.h
+++ b/src/runtime/cgo/libcgo.h
@@ -66,7 +66,7 @@
 /*
  * Call fn in the 6c world.
  */
-void crosscall_amd64(void (*fn)(void));
+void crosscall_amd64(void (*fn)(void), void (*setg_gcc)(void*), void *g);
 
 /*
  * Call fn in the 8c world.
diff --git a/src/runtime/duff_amd64.s b/src/runtime/duff_amd64.s
index 2ff5bf6..df010f5 100644
--- a/src/runtime/duff_amd64.s
+++ b/src/runtime/duff_amd64.s
@@ -5,100 +5,100 @@
 #include "textflag.h"
 
 TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT, $0-0
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
-	MOVUPS	X0,(DI)
-	MOVUPS	X0,16(DI)
-	MOVUPS	X0,32(DI)
-	MOVUPS	X0,48(DI)
+	MOVUPS	X15,(DI)
+	MOVUPS	X15,16(DI)
+	MOVUPS	X15,32(DI)
+	MOVUPS	X15,48(DI)
 	LEAQ	64(DI),DI
 
 	RET
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 2b5affc..4994347 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -14,6 +14,7 @@
 package runtime
 
 import (
+	"internal/abi"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -223,11 +224,18 @@
 // stack map of reflectcall is wrong.
 //
 //go:nosplit
-func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) {
+func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
 	if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
 		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
 	}
 	memmove(dst, src, size)
+
+	// Move pointers returned in registers to a place where the GC can see them.
+	for i := range regs.Ints {
+		if regs.ReturnIsPtr.Get(i) {
+			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
+		}
+	}
 }
 
 //go:nosplit
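The callRet stubs above pass a nil *abi.RegArgs (the stored zero word), yet the new loop is still safe on ports without register arguments: per the spec, a range over an array of constant length with only an index variable never evaluates its operand, so regs is not dereferenced unless the Ints array is non-empty. A minimal sketch, assuming a zero-length Ints as on such ports (the type here only loosely mirrors internal/abi):

	package main

	// regArgs stands in for abi.RegArgs on a port with no integer
	// argument registers; the nil pointer below is never dereferenced
	// because the range expression is not evaluated when len is constant
	// and only the index variable is used.
	type regArgs struct {
		Ints [0]uintptr
	}

	func main() {
		var regs *regArgs // nil, like the zero word stored by callRet
		for i := range regs.Ints {
			_ = i // unreachable when the array length is zero
		}
		println("ok")
	}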
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index f4dbd77..7d0313b 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -7,6 +7,7 @@
 package runtime
 
 import (
+	"internal/abi"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -219,7 +220,11 @@
 					throw("bad kind in runfinq")
 				}
 				fingRunning = true
-				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
+				// Pass a dummy RegArgs for now.
+				//
+				// TODO(mknyszek): Pass arguments in registers.
+				var regs abi.RegArgs
+				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
 				fingRunning = false
 
 				// Drop finalizer queue heap references
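runfinq keeps the stack-only convention for now: framesz is passed for stackArgsSize, stackRetOffset, and frameSize alike, with an empty RegArgs, consistent with the TODO. Any finalizer exercises this reflectcall path:

	package main

	import "runtime"

	// Demonstration only: finalizers run through the reflectcall path
	// patched above. Finalizer timing is not guaranteed, hence the
	// channel handshake after forcing a collection.
	func main() {
		done := make(chan struct{})
		p := new(int)
		runtime.SetFinalizer(p, func(*int) { close(done) })
		p = nil
		runtime.GC()
		<-done
		println("finalizer ran")
	}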
diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go
index 94ae75f..ef297f0 100644
--- a/src/runtime/mkduff.go
+++ b/src/runtime/mkduff.go
@@ -62,15 +62,15 @@
 func notags(w io.Writer) { fmt.Fprintln(w) }
 
 func zeroAMD64(w io.Writer) {
-	// X0: zero
+	// X15: zero
 	// DI: ptr to memory to be zeroed
 	// DI is updated as a side effect.
-	fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT, $0-0")
+	fmt.Fprintln(w, "TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT, $0-0")
 	for i := 0; i < 16; i++ {
-		fmt.Fprintln(w, "\tMOVUPS\tX0,(DI)")
-		fmt.Fprintln(w, "\tMOVUPS\tX0,16(DI)")
-		fmt.Fprintln(w, "\tMOVUPS\tX0,32(DI)")
-		fmt.Fprintln(w, "\tMOVUPS\tX0,48(DI)")
+		fmt.Fprintln(w, "\tMOVUPS\tX15,(DI)")
+		fmt.Fprintln(w, "\tMOVUPS\tX15,16(DI)")
+		fmt.Fprintln(w, "\tMOVUPS\tX15,32(DI)")
+		fmt.Fprintln(w, "\tMOVUPS\tX15,48(DI)")
 		fmt.Fprintln(w, "\tLEAQ\t64(DI),DI") // We use lea instead of add, to avoid clobbering flags
 		fmt.Fprintln(w)
 	}
@@ -84,7 +84,7 @@
 	//
 	// This is equivalent to a sequence of MOVSQ but
 	// for some reason that is 3.5x slower than this code.
-	fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0")
+	fmt.Fprintln(w, "TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT, $0-0")
 	for i := 0; i < 64; i++ {
 		fmt.Fprintln(w, "\tMOVUPS\t(SI), X0")
 		fmt.Fprintln(w, "\tADDQ\t$16, SI")
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 428ff7f..abd1010 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -18,11 +18,11 @@
 
 //go:cgo_import_dynamic libc___n_pthreads __n_pthreads "libpthread.a/shr_xpg5_64.o"
 //go:cgo_import_dynamic libc___mod_init __mod_init "libc.a/shr_64.o"
-//go:linkname libc___n_pthreads libc___n_pthread
+//go:linkname libc___n_pthreads libc___n_pthreads
 //go:linkname libc___mod_init libc___mod_init
 
 var (
-	libc___n_pthread,
+	libc___n_pthreads,
 	libc___mod_init libFunc
 )
 
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 5b2ccdd..e320eaa 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -5,6 +5,7 @@
 package runtime
 
 import (
+	"internal/abi"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -874,7 +875,13 @@
 		p.pc = getcallerpc()
 		p.sp = unsafe.Pointer(getcallersp())
 	}
-	reflectcall(nil, fn, arg, argsize, argsize)
+	// Pass a dummy RegArgs for now since no function actually implements
+	// the register-based ABI.
+	//
+	// TODO(mknyszek): Implement this properly, setting up arguments in
+	// registers as necessary in the caller.
+	var regs abi.RegArgs
+	reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)
 	if p != nil {
 		p.pc = 0
 		p.sp = unsafe.Pointer(nil)
@@ -968,7 +975,9 @@
 			}
 		} else {
 			p.argp = unsafe.Pointer(getargp(0))
-			reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+
+			var regs abi.RegArgs
+			reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), &regs)
 		}
 		p.argp = nil
 
diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go
index 6949687..17dc320 100644
--- a/src/runtime/race/output_test.go
+++ b/src/runtime/race/output_test.go
@@ -7,6 +7,7 @@
 package race_test
 
 import (
+	"fmt"
 	"internal/testenv"
 	"os"
 	"os/exec"
@@ -71,9 +72,24 @@
 			"GORACE="+test.gorace,
 		)
 		got, _ := cmd.CombinedOutput()
-		if !regexp.MustCompile(test.re).MatchString(string(got)) {
-			t.Fatalf("failed test case %v, expect:\n%v\ngot:\n%s",
-				test.name, test.re, got)
+		matched := false
+		for _, re := range test.re {
+			if regexp.MustCompile(re).MatchString(string(got)) {
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			exp := fmt.Sprintf("expect:\n%v\n", test.re[0])
+			if len(test.re) > 1 {
+				exp = fmt.Sprintf("expected one of %d patterns:\n",
+					len(test.re))
+				for k, re := range test.re {
+					exp += fmt.Sprintf("pattern %d:\n%v\n", k, re)
+				}
+			}
+			t.Fatalf("failed test case %v, %sgot:\n%s",
+				test.name, exp, got)
 		}
 	}
 }
@@ -84,7 +100,7 @@
 	goos   string
 	gorace string
 	source string
-	re     string
+	re     []string
 }{
 	{"simple", "run", "", "atexit_sleep_ms=0", `
 package main
@@ -107,7 +123,7 @@
 	store(x, 42)
 	done <- true
 }
-`, `==================
+`, []string{`==================
 WARNING: DATA RACE
 Write at 0x[0-9,a-f]+ by goroutine [0-9]:
   main\.store\(\)
@@ -129,7 +145,7 @@
 ==================
 Found 1 data race\(s\)
 exit status 66
-`},
+`}},
 
 	{"exitcode", "run", "", "atexit_sleep_ms=0 exitcode=13", `
 package main
@@ -143,7 +159,7 @@
 	x = 43
 	<-done
 }
-`, `exit status 13`},
+`, []string{`exit status 13`}},
 
 	{"strip_path_prefix", "run", "", "atexit_sleep_ms=0 strip_path_prefix=/main.", `
 package main
@@ -157,9 +173,9 @@
 	x = 43
 	<-done
 }
-`, `
+`, []string{`
       go:7 \+0x[0-9,a-f]+
-`},
+`}},
 
 	{"halt_on_error", "run", "", "atexit_sleep_ms=0 halt_on_error=1", `
 package main
@@ -173,10 +189,10 @@
 	x = 43
 	<-done
 }
-`, `
+`, []string{`
 ==================
 exit status 66
-`},
+`}},
 
 	{"test_fails_on_race", "test", "", "atexit_sleep_ms=0", `
 package main_test
@@ -193,12 +209,12 @@
 	<-done
 	t.Log(t.Failed())
 }
-`, `
+`, []string{`
 ==================
 --- FAIL: TestFail \(0...s\)
 .*main_test.go:14: true
 .*testing.go:.*: race detected during execution of test
-FAIL`},
+FAIL`}},
 
 	{"slicebytetostring_pc", "run", "", "atexit_sleep_ms=0", `
 package main
@@ -211,11 +227,11 @@
 	data[0] = 1
 	<-done
 }
-`, `
+`, []string{`
   runtime\.slicebytetostring\(\)
       .*/runtime/string\.go:.*
   main\.main\.func1\(\)
-      .*/main.go:7`},
+      .*/main.go:7`}},
 
 	// Test for https://golang.org/issue/33309
 	{"midstack_inlining_traceback", "run", "linux", "atexit_sleep_ms=0", `
@@ -241,7 +257,7 @@
 func h(c chan int) {
 	c <- x
 }
-`, `==================
+`, []string{`==================
 WARNING: DATA RACE
 Read at 0x[0-9,a-f]+ by goroutine [0-9]:
   main\.h\(\)
@@ -261,7 +277,7 @@
 ==================
 Found 1 data race\(s\)
 exit status 66
-`},
+`}},
 
 	// Test for https://golang.org/issue/17190
 	{"external_cgo_thread", "run", "linux", "atexit_sleep_ms=0", `
@@ -300,7 +316,25 @@
 	racy++
 	<- done
 }
-`, `==================
+`, []string{`==================
+WARNING: DATA RACE
+Read at 0x[0-9,a-f]+ by main goroutine:
+  main\.main\(\)
+      .*/main\.go:34 \+0x[0-9,a-f]+
+
+Previous write at 0x[0-9,a-f]+ by goroutine [0-9]:
+  main\.goCallback\(\)
+      .*/main\.go:27 \+0x[0-9,a-f]+
+  _cgoexp_[0-9a-z]+_goCallback\(\)
+      .*_cgo_gotypes\.go:[0-9]+ \+0x[0-9,a-f]+
+  _cgoexp_[0-9a-z]+_goCallback\(\)
+      <autogenerated>:1 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+  runtime\.newextram\(\)
+      .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
+==================`,
+		`==================
 WARNING: DATA RACE
 Read at 0x[0-9,a-f]+ by .*:
   main\..*
@@ -313,7 +347,7 @@
 Goroutine [0-9] \(running\) created at:
   runtime\.newextram\(\)
       .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
-==================`},
+==================`}},
 	{"second_test_passes", "test", "", "atexit_sleep_ms=0", `
 package main_test
 import "testing"
@@ -331,11 +365,11 @@
 
 func TestPass(t *testing.T) {
 }
-`, `
+`, []string{`
 ==================
 --- FAIL: TestFail \(0...s\)
 .*testing.go:.*: race detected during execution of test
-FAIL`},
+FAIL`}},
 	{"mutex", "run", "", "atexit_sleep_ms=0", `
 package main
 import (
@@ -366,7 +400,7 @@
 	}
 	wg.Wait()
 	if (data == iterations*(threads+1)) { fmt.Println("pass") }
-}`, `pass`},
+}`, []string{`pass`}},
 	// Test for https://github.com/golang/go/issues/37355
 	{"chanmm", "run", "", "atexit_sleep_ms=0", `
 package main
@@ -395,7 +429,7 @@
 	wg.Wait()
 	_ = data
 }
-`, `==================
+`, []string{`==================
 WARNING: DATA RACE
 Write at 0x[0-9,a-f]+ by goroutine [0-9]:
   main\.main\.func2\(\)
@@ -408,5 +442,5 @@
 Goroutine [0-9] \(running\) created at:
   main\.main\(\)
       .*/main.go:[0-9]+ \+0x[0-9,a-f]+
-==================`},
+==================`}},
 }
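The race output tests now accept a slice of patterns per case, since traceback shape can legitimately differ (see the added _cgoexp_ alternative for external_cgo_thread), and the failure message lists every candidate pattern. The matching logic reduces to this standalone restatement:

	package main

	import (
		"fmt"
		"regexp"
	)

	// A case passes if any one of its patterns matches the combined
	// output of the child process.
	func matchAny(got string, patterns []string) bool {
		for _, re := range patterns {
			if regexp.MustCompile(re).MatchString(got) {
				return true
			}
		}
		return false
	}

	func main() {
		out := "exit status 66"
		fmt.Println(matchAny(out, []string{`exit status 13`, `exit status 66`}))
	}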
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 9818bc6..c3b7bbf 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -146,8 +146,10 @@
 // If addr (RARG1) is out of range, do nothing.
 // Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
 TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
+#ifndef GOEXPERIMENT_REGABI
 	get_tls(R12)
 	MOVQ	g(R12), R14
+#endif
 	MOVQ	g_racectx(R14), RARG0	// goroutine context
 	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
 	CMPQ	RARG1, runtime·racearenastart(SB)
@@ -183,8 +185,10 @@
 // R11 = caller's return address
 TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
 	MOVQ	DX, R15		// save function entry context (for closures)
+#ifndef GOEXPERIMENT_REGABI
 	get_tls(R12)
 	MOVQ	g(R12), R14
+#endif
 	MOVQ	g_racectx(R14), RARG0	// goroutine context
 	MOVQ	R11, RARG1
 	// void __tsan_func_enter(ThreadState *thr, void *pc);
@@ -197,8 +201,10 @@
 // func runtime·racefuncexit()
 // Called from instrumented code.
 TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
+#ifndef GOEXPERIMENT_REGABI
 	get_tls(R12)
 	MOVQ	g(R12), R14
+#endif
 	MOVQ	g_racectx(R14), RARG0	// goroutine context
 	// void __tsan_func_exit(ThreadState *thr);
 	MOVQ	$__tsan_func_exit(SB), AX
@@ -357,8 +363,10 @@
 	JAE	racecallatomic_ignore
 racecallatomic_ok:
 	// Addr is within the good range, call the atomic function.
+#ifndef GOEXPERIMENT_REGABI
 	get_tls(R12)
 	MOVQ	g(R12), R14
+#endif
 	MOVQ	g_racectx(R14), RARG0	// goroutine context
 	MOVQ	8(SP), RARG1	// caller pc
 	MOVQ	(SP), RARG2	// pc
@@ -370,8 +378,10 @@
 	// An attempt to synchronize on the address would cause crash.
 	MOVQ	AX, R15	// remember the original function
 	MOVQ	$__tsan_go_ignore_sync_begin(SB), AX
+#ifndef GOEXPERIMENT_REGABI
 	get_tls(R12)
 	MOVQ	g(R12), R14
+#endif
 	MOVQ	g_racectx(R14), RARG0	// goroutine context
 	CALL	racecall<>(SB)
 	MOVQ	R15, AX	// restore the original function
@@ -399,8 +409,10 @@
 
 // Switches SP to g0 stack and calls (AX). Arguments already set.
 TEXT	racecall<>(SB), NOSPLIT, $0-0
+#ifndef GOEXPERIMENT_REGABI
 	get_tls(R12)
 	MOVQ	g(R12), R14
+#endif
 	MOVQ	g_m(R14), R13
 	// Switch to g0 stack.
 	MOVQ	SP, R12		// callee-saved, preserved across the CALL
@@ -412,6 +424,9 @@
 	ANDQ	$~15, SP	// alignment for gcc ABI
 	CALL	AX
 	MOVQ	R12, SP
+	// Back to Go world, set special registers.
+	// The g register (R14) is preserved in C.
+	XORPS	X15, X15
 	RET
 
 // C->Go callback thunk that allows to call runtime·racesymbolize from C code.
@@ -419,7 +434,9 @@
 // The overall effect of Go->C->Go call chain is similar to that of mcall.
 // RARG0 contains command code. RARG1 contains command-specific context.
 // See racecallback for command codes.
-TEXT	runtime·racecallbackthunk(SB), NOSPLIT, $56-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// because its address is passed to C via funcPC.
+TEXT	runtime·racecallbackthunk<ABIInternal>(SB), NOSPLIT, $56-8
 	// Handle command raceGetProcCmd (0) here.
 	// First, code below assumes that we are on curg, while raceGetProcCmd
 	// can be executed on g0. Second, it is called frequently, so will
@@ -447,12 +464,13 @@
 	PUSHQ	R15
 	// Set g = g0.
 	get_tls(R12)
-	MOVQ	g(R12), R13
-	MOVQ	g_m(R13), R14
-	MOVQ	m_g0(R14), R15
+	MOVQ	g(R12), R14
+	MOVQ	g_m(R14), R13
+	MOVQ	m_g0(R13), R15
 	CMPQ	R13, R15
 	JEQ	noswitch	// branch if already on g0
 	MOVQ	R15, g(R12)	// g = m->g0
+	MOVQ	R15, R14	// set g register
 	PUSHQ	RARG1	// func arg
 	PUSHQ	RARG0	// func arg
 	CALL	runtime·racecallback(SB)
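With GOEXPERIMENT_REGABI the current g is pinned in R14, so these thunks skip the get_tls reload, restore the X15 zero register when coming back from C, and racecallbackthunk keeps R14 in sync when it switches to g0; it is declared <ABIInternal> because C holds its address via funcPC and a wrapper would break that. Any -race build drives these thunks; a minimal racy program:

	package main

	// Build and run with -race (on this branch, optionally with
	// GOEXPERIMENT=regabi) to drive the instrumentation thunks above.
	func main() {
		done := make(chan bool)
		x := 0
		go func() { x = 1; done <- true }() // racy write
		x = 2                               // racy write from main
		<-done
		_ = x
	}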
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 109f0da..b7c7b4c 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -853,7 +853,7 @@
 // layout of Itab known to compilers
 // allocated in non-garbage-collected memory
 // Needs to be in sync with
-// ../cmd/compile/internal/gc/reflect.go:/^func.dumptabs.
+// ../cmd/compile/internal/gc/reflect.go:/^func.WriteTabs.
 type itab struct {
 	inter *interfacetype
 	_type *_type
diff --git a/src/runtime/signal_amd64.go b/src/runtime/signal_amd64.go
index 6ab1f75..3eeb5e0 100644
--- a/src/runtime/signal_amd64.go
+++ b/src/runtime/signal_amd64.go
@@ -65,11 +65,14 @@
 	pc := uintptr(c.rip())
 	sp := uintptr(c.rsp())
 
+	// In case we are panicking from external code, we need to initialize
+	// Go special registers. We inject sigpanic0 (instead of sigpanic),
+	// which takes care of that.
 	if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
-		c.pushCall(funcPC(sigpanic), pc)
+		c.pushCall(funcPC(sigpanic0), pc)
 	} else {
 		// Not safe to push the call. Just clobber the frame.
-		c.set_rip(uint64(funcPC(sigpanic)))
+		c.set_rip(uint64(funcPC(sigpanic0)))
 	}
 }
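sigpanic0 exists because the faulting code may not have been maintaining the Go register invariants (R14, X15) at the moment of the signal; it establishes them and then proceeds like sigpanic (see its declaration at the end of stubs.go below). A nil dereference reaches this injection path:

	package main

	// A nil dereference arrives as a hardware fault; the handler injects
	// sigpanic0, which sets up the special registers before the usual
	// sigpanic machinery turns the fault into a Go panic.
	func main() {
		defer func() { println("recovered:", recover() != nil) }()
		var p *int
		_ = *p
	}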
 
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 2ee2c74..c0cc95e 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -4,7 +4,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+	"internal/abi"
+	"unsafe"
+)
 
 // Should be a built-in for unsafe.Pointer?
 //go:nosplit
@@ -167,7 +170,6 @@
 // pointer-declared arguments.
 func cgocallback(fn, frame, ctxt uintptr)
 func gogo(buf *gobuf)
-func gosave(buf *gobuf)
 
 //go:noescape
 func jmpdefer(fv *funcval, argp uintptr)
@@ -175,19 +177,50 @@
 func setg(gg *g)
 func breakpoint()
 
-// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
-// After fn returns, reflectcall copies n-retoffset result bytes
-// back into arg+retoffset before returning. If copying result bytes back,
-// the caller should pass the argument frame type as argtype, so that
-// call can execute appropriate write barriers during the copy.
+// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
+// frameSize, and regArgs.
 //
-// Package reflect always passes a frame type. In package runtime,
-// Windows callbacks are the only use of this that copies results
-// back, and those cannot have pointers in their results, so runtime
-// passes nil for the frame type.
+// Arguments passed on the stack and space for return values passed on the stack
+// must be laid out at the space pointed to by stackArgs (with total length
+// stackArgsSize) according to the ABI.
+//
+// stackRetOffset must be some value <= stackArgsSize that indicates the
+// offset within stackArgs where the return value space begins.
+//
+// frameSize is the total size of the argument frame at stackArgs and must
+// therefore be >= stackArgsSize. It must include additional space for spilling
+// register arguments for stack growth and preemption.
+//
+// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
+// since frameSize will be redundant with stackArgsSize.
+//
+// Arguments passed in registers must be laid out in regArgs according to the ABI.
+// regArgs will hold any return values passed in registers after the call.
+//
+// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
+// then copies stackArgsSize-stackRetOffset result bytes back to the return space
+// in stackArgs once fn has completed. It also "unspills" argument registers from
+// regArgs before calling fn, and spills them back into regArgs immediately
+// following the call to fn. If there are results being returned on the stack,
+// the caller should pass the argument frame type as stackArgsType so that
+// reflectcall can execute appropriate write barriers during the copy.
+//
+// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
+// registers on the return path will contain Go pointers. It will then store
+// these pointers in regArgs.Ptrs such that they are visible to the GC.
+//
+// Package reflect passes a frame type. In package runtime, there is only
+// one call that copies results back, in callbackWrap in syscall_windows.go, and it
+// does NOT pass a frame type, meaning there are no write barriers invoked. See that
+// call site for justification.
 //
 // Package reflect accesses this symbol through a linkname.
-func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
+//
+// Arguments passed through to reflectcall do not escape. The type is used
+// only in a very limited callee of reflectcall, the stackArgs are copied, and
+// regArgs is only used in the reflectcall frame.
+//go:noescape
+func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
 
 func procyield(cycles uint32)
 
@@ -357,3 +390,7 @@
 
 // Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
 func addmoduledata()
+
+// Injected by the signal handler for panicking signals. On many platforms it just
+// jumps to sigpanic.
+func sigpanic0()
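Since reflect reaches this symbol through a linkname, ordinary reflect.Value.Call traffic exercises the expanded contract end to end:

	package main

	import (
		"fmt"
		"reflect"
	)

	// reflect.Value.Call ultimately funnels into runtime.reflectcall, so
	// this exercises the new stack-argument path documented above.
	func main() {
		add := reflect.ValueOf(func(a, b int) int { return a + b })
		out := add.Call([]reflect.Value{reflect.ValueOf(2), reflect.ValueOf(3)})
		fmt.Println(out[0].Int()) // 5
	}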
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 630fb5d..0fe8c7e 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -5,6 +5,8 @@
 // System calls and other sys.stuff for AMD64, Darwin
 // System calls are implemented in libSystem, this file contains
 // trampolines that convert from Go to C calling convention.
+// The trampolines are ABIInternal as they are referenced from
+// Go code with funcPC.
 
 #include "go_asm.h"
 #include "go_tls.h"
@@ -13,7 +15,7 @@
 #define CLOCK_REALTIME		0
 
 // Exit the entire program (like C exit)
-TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
+TEXT runtime·exit_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), DI		// arg 1 exit status
@@ -22,7 +24,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·open_trampoline(SB),NOSPLIT,$0
+TEXT runtime·open_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	8(DI), SI		// arg 2 flags
@@ -33,7 +35,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·close_trampoline(SB),NOSPLIT,$0
+TEXT runtime·close_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), DI		// arg 1 fd
@@ -41,7 +43,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·read_trampoline(SB),NOSPLIT,$0
+TEXT runtime·read_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 buf
@@ -57,7 +59,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·write_trampoline(SB),NOSPLIT,$0
+TEXT runtime·write_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 buf
@@ -73,7 +75,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pipe_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	CALL	libc_pipe(SB)		// pointer already in DI
@@ -84,7 +86,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
+TEXT runtime·setitimer_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 new
@@ -94,7 +96,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
+TEXT runtime·madvise_trampoline<ABIInternal>(SB), NOSPLIT, $0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 len
@@ -105,12 +107,12 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·mlock_trampoline(SB), NOSPLIT, $0
+TEXT runtime·mlock_trampoline<ABIInternal>(SB), NOSPLIT, $0
 	UNDEF // unimplemented
 
 GLOBL timebase<>(SB),NOPTR,$(machTimebaseInfo__size)
 
-TEXT runtime·nanotime_trampoline(SB),NOSPLIT,$0
+TEXT runtime·nanotime_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	DI, BX
@@ -139,7 +141,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0
+TEXT runtime·walltime_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP			// make a frame; keep stack aligned
 	MOVQ	SP, BP
 	MOVQ	DI, SI			// arg 2 timespec
@@ -148,7 +150,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sigaction_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 new
@@ -161,7 +163,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sigprocmask_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 new
@@ -174,7 +176,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sigaltstack_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 old
@@ -186,7 +188,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0
+TEXT runtime·raiseproc_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), BX	// signal
@@ -212,7 +214,7 @@
 
 // This is the function registered during sigaction and is invoked when
 // a signal is received. It just redirects to the Go function sigtrampgo.
-TEXT runtime·sigtramp(SB),NOSPLIT,$0
+TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
 	// This runs on the signal stack, so we have lots of stack available.
 	// We allocate our own stack space, because if we tell the linker
 	// how much we're using, the NOSPLIT check fails.
@@ -246,7 +248,7 @@
 
 // Used instead of sigtramp in programs that use cgo.
 // Arguments from kernel are in DI, SI, DX.
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
+TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
 	// If no traceback function, do usual sigtramp.
 	MOVQ	runtime·cgoTraceback(SB), AX
 	TESTQ	AX, AX
@@ -289,12 +291,12 @@
 	// The first three arguments, and the fifth, are already in registers.
 	// Set the two remaining arguments now.
 	MOVQ	runtime·cgoTraceback(SB), CX
-	MOVQ	$runtime·sigtramp(SB), R9
+	MOVQ	$runtime·sigtramp<ABIInternal>(SB), R9
 	MOVQ	_cgo_callers(SB), AX
 	JMP	AX
 
 sigtramp:
-	JMP	runtime·sigtramp(SB)
+	JMP	runtime·sigtramp<ABIInternal>(SB)
 
 sigtrampnog:
 	// Signal arrived on a non-Go thread. If this is SIGPROF, get a
@@ -320,7 +322,7 @@
 	MOVQ	_cgo_callers(SB), AX
 	JMP	AX
 
-TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
+TEXT runtime·mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP			// make a frame; keep stack aligned
 	MOVQ	SP, BP
 	MOVQ	DI, BX
@@ -343,7 +345,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
+TEXT runtime·munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 len
@@ -355,7 +357,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
+TEXT runtime·usleep_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), DI	// arg 1 usec
@@ -367,7 +369,7 @@
 	// Nothing to do on Darwin, pthread already set thread-local storage up.
 	RET
 
-TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	8(DI), SI		// arg 2 miblen
@@ -380,7 +382,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sysctlbyname_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 oldp
@@ -392,14 +394,14 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
+TEXT runtime·kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	CALL	libc_kqueue(SB)
 	POPQ	BP
 	RET
 
-TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
+TEXT runtime·kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI		// arg 2 keventt
@@ -418,7 +420,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
+TEXT runtime·fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	4(DI), SI		// arg 2 cmd
@@ -475,7 +477,7 @@
 // A pointer to the arguments is passed in DI.
 // A single int32 result is returned in AX.
 // (For more results, make an args/results structure.)
-TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP	// make frame, keep stack 16-byte aligned.
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI // arg 1 attr
@@ -483,7 +485,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_getstacksize_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 size
@@ -492,7 +494,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_setdetachstate_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 state
@@ -501,7 +503,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_create_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -514,7 +516,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
+TEXT runtime·raise_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVL	0(DI), DI	// arg 1 signal
@@ -522,7 +524,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 attr
@@ -531,7 +533,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_lock_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI	// arg 1 mutex
@@ -539,7 +541,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_unlock_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI	// arg 1 mutex
@@ -547,7 +549,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 attr
@@ -556,7 +558,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_wait_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 mutex
@@ -565,7 +567,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_timedwait_relative_np_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 mutex
@@ -575,7 +577,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_signal_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	0(DI), DI	// arg 1 cond
@@ -583,7 +585,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_self_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	DI, BX		// BX is caller-save
@@ -592,7 +594,7 @@
 	POPQ	BP
 	RET
 
-TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	MOVQ	8(DI), SI	// arg 2 sig
@@ -617,7 +619,7 @@
 //
 // syscall expects a 32-bit result and tests for 32-bit -1
 // to decide there was an error.
-TEXT runtime·syscall(SB),NOSPLIT,$0
+TEXT runtime·syscall<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -667,7 +669,7 @@
 //
 // syscallX is like syscall but expects a 64-bit result
 // and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscallX(SB),NOSPLIT,$0
+TEXT runtime·syscallX<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -703,7 +705,7 @@
 
 // syscallPtr is like syscallX except that the libc function reports an
 // error by returning NULL and setting errno.
-TEXT runtime·syscallPtr(SB),NOSPLIT,$0
+TEXT runtime·syscallPtr<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -756,7 +758,7 @@
 //
 // syscall6 expects a 32-bit result and tests for 32-bit -1
 // to decide there was an error.
-TEXT runtime·syscall6(SB),NOSPLIT,$0
+TEXT runtime·syscall6<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -809,7 +811,7 @@
 //
 // syscall6X is like syscall6 but expects a 64-bit result
 // and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscall6X(SB),NOSPLIT,$0
+TEXT runtime·syscall6X<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
@@ -845,7 +847,7 @@
 
 // syscallNoErr is like syscall6 but does not check for errors, and
 // only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+TEXT runtime·syscallNoErr<ABIInternal>(SB),NOSPLIT,$0
 	PUSHQ	BP
 	MOVQ	SP, BP
 	SUBQ	$16, SP
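
[Editor's note: the comments in the hunks above describe three distinct error conventions for the darwin syscall helpers: syscall/syscall6 test for a 32-bit -1, syscallX/syscall6X for a 64-bit -1, and syscallPtr for a NULL result. A minimal standalone sketch of those checks (illustrative only, not runtime code; assumes a 64-bit platform):]

package main

import "fmt"

// failed32 mirrors syscall's convention: a 32-bit routine reports failure
// by returning -1 in the low 32 bits.
func failed32(ret uintptr) bool { return int32(ret) == -1 }

// failed64 mirrors syscallX's convention: failure is a full 64-bit -1.
func failed64(ret uintptr) bool { return int64(ret) == -1 }

// failedPtr mirrors syscallPtr's convention: failure is a NULL pointer.
func failedPtr(ret uintptr) bool { return ret == 0 }

func main() {
	fmt.Println(failed32(uintptr(0xFFFFFFFF))) // true: low 32 bits are -1
	fmt.Println(failed64(uintptr(0xFFFFFFFF))) // false: not a 64-bit -1
	fmt.Println(failed64(^uintptr(0)))         // true: all bits set
	fmt.Println(failedPtr(0))                  // true: NULL result
}
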
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 37cb8da..d48573c 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -215,9 +215,13 @@
 
 	MOVQ	SP, R12	// Save old SP; R12 unchanged by C code.
 
+#ifdef GOEXPERIMENT_REGABI
+	MOVQ	g_m(R14), BX // BX unchanged by C code.
+#else
 	get_tls(CX)
 	MOVQ	g(CX), AX
 	MOVQ	g_m(AX), BX // BX unchanged by C code.
+#endif
 
 	// Set vdsoPC and vdsoSP for SIGPROF traceback.
 	// Save the old values on stack and restore them on exit,
@@ -232,7 +236,11 @@
 	MOVQ	CX, m_vdsoPC(BX)
 	MOVQ	DX, m_vdsoSP(BX)
 
+#ifdef GOEXPERIMENT_REGABI
+	CMPQ	R14, m_curg(BX)	// Only switch if on curg.
+#else
 	CMPQ	AX, m_curg(BX)	// Only switch if on curg.
+#endif
 	JNE	noswitch
 
 	MOVQ	m_g0(BX), DX
@@ -275,9 +283,13 @@
 
 	MOVQ	SP, R12	// Save old SP; R12 unchanged by C code.
 
+#ifdef GOEXPERIMENT_REGABI
+	MOVQ	g_m(R14), BX // BX unchanged by C code.
+#else
 	get_tls(CX)
 	MOVQ	g(CX), AX
 	MOVQ	g_m(AX), BX // BX unchanged by C code.
+#endif
 
 	// Set vdsoPC and vdsoSP for SIGPROF traceback.
 	// Save the old values on stack and restore them on exit,
@@ -292,7 +304,11 @@
 	MOVQ	CX, m_vdsoPC(BX)
 	MOVQ	DX, m_vdsoSP(BX)
 
+#ifdef GOEXPERIMENT_REGABI
+	CMPQ	R14, m_curg(BX)	// Only switch if on curg.
+#else
 	CMPQ	AX, m_curg(BX)	// Only switch if on curg.
+#endif
 	JNE	noswitch
 
 	MOVQ	m_g0(BX), DX
@@ -632,6 +648,7 @@
 	get_tls(CX)
 	MOVQ	R13, g_m(R9)
 	MOVQ	R9, g(CX)
+	MOVQ	R9, R14 // set g register
 	CALL	runtime·stackcheck(SB)
 
 nog2:
diff --git a/src/runtime/syscall2_solaris.go b/src/runtime/syscall2_solaris.go
index e098e80..3310489 100644
--- a/src/runtime/syscall2_solaris.go
+++ b/src/runtime/syscall2_solaris.go
@@ -15,7 +15,6 @@
 //go:cgo_import_dynamic libc_gethostname gethostname "libc.so"
 //go:cgo_import_dynamic libc_getpid getpid "libc.so"
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-//go:cgo_import_dynamic libc_pipe pipe "libc.so"
 //go:cgo_import_dynamic libc_setgid setgid "libc.so"
 //go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
 //go:cgo_import_dynamic libc_setsid setsid "libc.so"
@@ -33,7 +32,6 @@
 //go:linkname libc_gethostname libc_gethostname
 //go:linkname libc_getpid libc_getpid
 //go:linkname libc_ioctl libc_ioctl
-//go:linkname libc_pipe libc_pipe
 //go:linkname libc_setgid libc_setgid
 //go:linkname libc_setgroups libc_setgroups
 //go:linkname libc_setsid libc_setsid
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 7835b49..add40bb 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -5,6 +5,7 @@
 package runtime
 
 import (
+	"internal/abi"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -242,7 +243,11 @@
 
 	// Even though this is copying back results, we can pass a nil
 	// type because those results must not require write barriers.
-	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset))
+	//
+	// Pass a dummy RegArgs for now.
+	// TODO(mknyszek): Pass arguments in registers.
+	var regs abi.RegArgs
+	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset), uint32(c.retOffset)+sys.PtrSize, &regs)
 
 	// Extract the result.
 	a.result = *(*uintptr)(unsafe.Pointer(&frame[c.retOffset]))
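
[Editor's note: the hunk above widens reflectcall's signature to take a frame size and a *abi.RegArgs, passing a zeroed RegArgs because the Windows callback arguments are still laid out in memory. A rough sketch of the idea, where the struct shape below is an illustrative assumption and not the real internal/abi definition:]

package main

import "fmt"

// regArgs loosely mimics internal/abi.RegArgs: scratch space for the
// integer and floating-point argument registers that reflectcall will
// eventually spill and reload. Array sizes here are placeholders.
type regArgs struct {
	Ints   [9]uintptr
	Floats [15]uint64
}

func main() {
	var regs regArgs // zero value: the "dummy" registers in the diff above
	fmt.Println(regs.Ints[0], regs.Floats[0]) // 0 0
}
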
diff --git a/src/runtime/textflag.h b/src/runtime/textflag.h
index daca36d..e727208 100644
--- a/src/runtime/textflag.h
+++ b/src/runtime/textflag.h
@@ -35,3 +35,5 @@
 // Function is the top of the call stack. Call stack unwinders should stop
 // at this function.
 #define TOPFRAME 2048
+// Function is an ABI wrapper.
+#define ABIWRAPPER 4096
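
[Editor's note: the text flags in textflag.h are single bits combined with |, so ABIWRAPPER takes the next free power of two after TOPFRAME. A quick demonstration of how the bits compose; TOPFRAME and ABIWRAPPER are from the hunk above, and NOSPLIT is the value 4 from the same header:]

package main

import "fmt"

const (
	NOSPLIT    = 4
	TOPFRAME   = 2048
	ABIWRAPPER = 4096
)

func main() {
	flags := NOSPLIT | ABIWRAPPER
	fmt.Println(flags&ABIWRAPPER != 0) // true: tools can identify ABI wrappers
	fmt.Println(flags&TOPFRAME != 0)   // false
}
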
diff --git a/src/runtime/type.go b/src/runtime/type.go
index 81455f3..18fc4bb 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -383,7 +383,7 @@
 }
 
 // Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
+// in ../cmd/compile/internal/gc/reflect.go:writeType.
 func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
 	return mt.flags&1 != 0
 }
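
[Editor's note: for context on the comment above, the maptype flags are bit tests that must agree with what the compiler writes out. A self-contained sketch of the pattern; bit 1 (indirect key) is from the diff, and the existence of further bits for indirect values and so on is an assumption here:]

package main

import "fmt"

const maptypeIndirectKey = 1 << 0

type maptype struct{ flags uint32 }

func (mt *maptype) indirectkey() bool { return mt.flags&maptypeIndirectKey != 0 }

func main() {
	mt := &maptype{flags: maptypeIndirectKey}
	fmt.Println(mt.indirectkey()) // true: keys are stored as pointers
}
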
diff --git a/src/strconv/bytealg.go b/src/strconv/bytealg.go
new file mode 100644
index 0000000..7f66f2a
--- /dev/null
+++ b/src/strconv/bytealg.go
@@ -0,0 +1,14 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !compiler_bootstrap
+
+package strconv
+
+import "internal/bytealg"
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	return bytealg.IndexByteString(s, c) != -1
+}
diff --git a/src/strconv/bytealg_bootstrap.go b/src/strconv/bytealg_bootstrap.go
new file mode 100644
index 0000000..a3a547d
--- /dev/null
+++ b/src/strconv/bytealg_bootstrap.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build compiler_bootstrap
+
+package strconv
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
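
[Editor's note: the two new files above select an implementation of contains by build tag: ordinary builds use the assembly-backed internal/bytealg.IndexByteString, while compiler_bootstrap builds fall back to a portable loop. A standalone demo of the fallback's behavior:]

package main

import "fmt"

// containsFallback is the same portable loop used under the
// compiler_bootstrap tag above.
func containsFallback(s string, c byte) bool {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(containsFallback("1.5e+10", '.')) // true
	fmt.Println(containsFallback("12345", '.'))   // false
}
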
diff --git a/src/strconv/eisel_lemire.go b/src/strconv/eisel_lemire.go
index 6c7f852..fecd1b9 100644
--- a/src/strconv/eisel_lemire.go
+++ b/src/strconv/eisel_lemire.go
@@ -29,7 +29,7 @@
 	// Exp10 Range.
 	if man == 0 {
 		if neg {
-			f = math.Float64frombits(0x80000000_00000000) // Negative zero.
+			f = math.Float64frombits(0x8000000000000000) // Negative zero.
 		}
 		return f, true
 	}
@@ -39,7 +39,7 @@
 
 	// Normalization.
 	clz := bits.LeadingZeros64(man)
-	man <<= clz
+	man <<= uint(clz)
 	const float64ExponentBias = 1023
 	retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz)
 
@@ -84,9 +84,9 @@
 	if retExp2-1 >= 0x7FF-1 {
 		return 0, false
 	}
-	retBits := retExp2<<52 | retMantissa&0x000FFFFF_FFFFFFFF
+	retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF
 	if neg {
-		retBits |= 0x80000000_00000000
+		retBits |= 0x8000000000000000
 	}
 	return math.Float64frombits(retBits), true
 }
@@ -114,7 +114,7 @@
 
 	// Normalization.
 	clz := bits.LeadingZeros64(man)
-	man <<= clz
+	man <<= uint(clz)
 	const float32ExponentBias = 127
 	retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz)
 
@@ -122,13 +122,13 @@
 	xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
 
 	// Wider Approximation.
-	if xHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && xLo+man < man {
+	if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man {
 		yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
 		mergedHi, mergedLo := xHi, xLo+yHi
 		if mergedLo < xLo {
 			mergedHi++
 		}
-		if mergedHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
+		if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
 			return 0, false
 		}
 		xHi, xLo = mergedHi, mergedLo
@@ -140,7 +140,7 @@
 	retExp2 -= 1 ^ msb
 
 	// Half-way Ambiguity.
-	if xLo == 0 && xHi&0x3F_FFFFFFFF == 0 && retMantissa&3 == 1 {
+	if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 {
 		return 0, false
 	}
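
[Editor's note: the literal and shift rewrites in this file look like bootstrap-toolchain compatibility fixes: digit-separator underscores in numeric literals and signed shift counts both require a Go 1.13+ compiler, which a bootstrap build of the toolchain cannot assume. A standalone sketch of the two equivalences:]

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Same value, two spellings; only the commented form needs Go 1.13+.
	const signBit uint64 = 0x8000000000000000
	// const signBit uint64 = 0x80000000_00000000

	// bits.LeadingZeros64 returns an int; shifting directly by it also
	// needs Go 1.13+ (signed shift counts), so the diff converts to uint.
	man := uint64(1)
	clz := bits.LeadingZeros64(man) // 63
	man <<= uint(clz)
	fmt.Println(man == signBit) // true
}
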
 
diff --git a/src/strconv/quote.go b/src/strconv/quote.go
index bcbdbc5..4ffa10b 100644
--- a/src/strconv/quote.go
+++ b/src/strconv/quote.go
@@ -7,7 +7,6 @@
 package strconv
 
 import (
-	"internal/bytealg"
 	"unicode/utf8"
 )
 
@@ -436,11 +435,6 @@
 	return string(buf), nil
 }
 
-// contains reports whether the string contains the byte c.
-func contains(s string, c byte) bool {
-	return bytealg.IndexByteString(s, c) != -1
-}
-
 // bsearch16 returns the smallest i such that a[i] >= x.
 // If there is no such i, bsearch16 returns len(a).
 func bsearch16(a []uint16, x uint16) int {
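
[Editor's note: for reference, a standard binary search satisfying the bsearch16 contract stated above (smallest i with a[i] >= x, or len(a) if none) — a sketch of the behavior, not necessarily the exact strconv body:]

package main

import "fmt"

// bsearch16 returns the smallest i such that a[i] >= x.
// If there is no such i, bsearch16 returns len(a).
func bsearch16(a []uint16, x uint16) int {
	i, j := 0, len(a)
	for i < j {
		h := i + (j-i)/2
		if a[h] < x {
			i = h + 1
		} else {
			j = h
		}
	}
	return i
}

func main() {
	a := []uint16{1, 3, 5, 7}
	fmt.Println(bsearch16(a, 4)) // 2: a[2] == 5 is the first element >= 4
	fmt.Println(bsearch16(a, 9)) // 4: len(a), since no element >= 9
}
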
diff --git a/src/syscall/mkasm.go b/src/syscall/mkasm.go
index 2ebaf8d..e53d14b 100644
--- a/src/syscall/mkasm.go
+++ b/src/syscall/mkasm.go
@@ -53,7 +53,8 @@
 		fn := line[5 : len(line)-13]
 		if !trampolines[fn] {
 			trampolines[fn] = true
-			fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
+			// The trampolines are ABIInternal as they are address-taken in Go code.
+			fmt.Fprintf(&out, "TEXT ·%s_trampoline<ABIInternal>(SB),NOSPLIT,$0-0\n", fn)
 			fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
 		}
 	}
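
[Editor's note: the change above makes mkasm.go stamp each generated trampoline as ABIInternal, since the trampolines' addresses are taken from Go code. A miniature reproduction of the emit loop, with hypothetical function names standing in for the parsed input:]

package main

import "fmt"

// Emit an ABIInternal trampoline stub per libc function, exactly as the
// amended Fprintf calls above do.
func main() {
	for _, fn := range []string{"libc_read", "libc_write"} {
		fmt.Printf("TEXT ·%s_trampoline<ABIInternal>(SB),NOSPLIT,$0-0\n", fn)
		fmt.Printf("\tJMP\t%s(SB)\n", fn)
	}
}
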
diff --git a/src/syscall/mksyscall.pl b/src/syscall/mksyscall.pl
index 67e8d1d..c1ed3a3 100755
--- a/src/syscall/mksyscall.pl
+++ b/src/syscall/mksyscall.pl
@@ -351,9 +351,6 @@
 			$trampolines{$funcname} = 1;
 			# The assembly trampoline that jumps to the libc routine.
 			$text .= "func ${funcname}_trampoline()\n";
-			# Map syscall.funcname to just plain funcname.
-			# (The jump to this function is in the assembly trampoline, generated by mkasm.go.)
-			$text .= "//go:linkname $funcname $funcname\n";
 			# Tell the linker that funcname can be found in libSystem using varname without the libc_ prefix.
 			my $basename = substr $funcname, 5;
 			my $libc = "libc.so";
diff --git a/src/syscall/syscall_darwin.go b/src/syscall/syscall_darwin.go
index afdadbf..162e944 100644
--- a/src/syscall/syscall_darwin.go
+++ b/src/syscall/syscall_darwin.go
@@ -115,7 +115,6 @@
 
 func libc_getfsstat_trampoline()
 
-//go:linkname libc_getfsstat libc_getfsstat
 //go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib"
 
 func setattrlistTimes(path string, times []Timespec) error {
@@ -148,7 +147,6 @@
 
 func libc_setattrlist_trampoline()
 
-//go:linkname libc_setattrlist libc_setattrlist
 //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib"
 
 func utimensat(dirfd int, path string, times *[2]Timespec, flag int) error {
@@ -276,7 +274,6 @@
 
 func libc_fdopendir_trampoline()
 
-//go:linkname libc_fdopendir libc_fdopendir
 //go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
 
 func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
diff --git a/src/syscall/syscall_darwin_amd64.go b/src/syscall/syscall_darwin_amd64.go
index 22ddb78..687efff 100644
--- a/src/syscall/syscall_darwin_amd64.go
+++ b/src/syscall/syscall_darwin_amd64.go
@@ -56,7 +56,6 @@
 
 func libc_sendfile_trampoline()
 
-//go:linkname libc_sendfile libc_sendfile
 //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib"
 
 // Implemented in the runtime package (runtime/sys_darwin_64.go)
diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go
index ecb9fff..ab57117 100644
--- a/src/syscall/syscall_darwin_arm64.go
+++ b/src/syscall/syscall_darwin_arm64.go
@@ -56,7 +56,6 @@
 
 func libc_sendfile_trampoline()
 
-//go:linkname libc_sendfile libc_sendfile
 //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib"
 
 // Implemented in the runtime package (runtime/sys_darwin_64.go)
diff --git a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go
index c246c3a..4f2cdf8 100644
--- a/src/syscall/zsyscall_darwin_amd64.go
+++ b/src/syscall/zsyscall_darwin_amd64.go
@@ -20,7 +20,6 @@
 
 func libc_getgroups_trampoline()
 
-//go:linkname libc_getgroups libc_getgroups
 //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -35,7 +34,6 @@
 
 func libc_setgroups_trampoline()
 
-//go:linkname libc_setgroups libc_setgroups
 //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -51,7 +49,6 @@
 
 func libc_wait4_trampoline()
 
-//go:linkname libc_wait4 libc_wait4
 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -67,7 +64,6 @@
 
 func libc_accept_trampoline()
 
-//go:linkname libc_accept libc_accept
 //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -82,7 +78,6 @@
 
 func libc_bind_trampoline()
 
-//go:linkname libc_bind libc_bind
 //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -97,7 +92,6 @@
 
 func libc_connect_trampoline()
 
-//go:linkname libc_connect libc_connect
 //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -113,7 +107,6 @@
 
 func libc_socket_trampoline()
 
-//go:linkname libc_socket libc_socket
 //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -128,7 +121,6 @@
 
 func libc_getsockopt_trampoline()
 
-//go:linkname libc_getsockopt libc_getsockopt
 //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -143,7 +135,6 @@
 
 func libc_setsockopt_trampoline()
 
-//go:linkname libc_setsockopt libc_setsockopt
 //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -158,7 +149,6 @@
 
 func libc_getpeername_trampoline()
 
-//go:linkname libc_getpeername libc_getpeername
 //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -173,7 +163,6 @@
 
 func libc_getsockname_trampoline()
 
-//go:linkname libc_getsockname libc_getsockname
 //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -188,7 +177,6 @@
 
 func libc_shutdown_trampoline()
 
-//go:linkname libc_shutdown libc_shutdown
 //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -203,7 +191,6 @@
 
 func libc_socketpair_trampoline()
 
-//go:linkname libc_socketpair libc_socketpair
 //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -225,7 +212,6 @@
 
 func libc_recvfrom_trampoline()
 
-//go:linkname libc_recvfrom libc_recvfrom
 //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -246,7 +232,6 @@
 
 func libc_sendto_trampoline()
 
-//go:linkname libc_sendto libc_sendto
 //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -262,7 +247,6 @@
 
 func libc_recvmsg_trampoline()
 
-//go:linkname libc_recvmsg libc_recvmsg
 //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -278,7 +262,6 @@
 
 func libc_sendmsg_trampoline()
 
-//go:linkname libc_sendmsg libc_sendmsg
 //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -294,7 +277,6 @@
 
 func libc_kevent_trampoline()
 
-//go:linkname libc_kevent libc_kevent
 //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -314,7 +296,6 @@
 
 func libc_utimes_trampoline()
 
-//go:linkname libc_utimes libc_utimes
 //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -329,7 +310,6 @@
 
 func libc_futimes_trampoline()
 
-//go:linkname libc_futimes libc_futimes
 //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -345,7 +325,6 @@
 
 func libc_fcntl_trampoline()
 
-//go:linkname libc_fcntl libc_fcntl
 //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -360,7 +339,6 @@
 
 func libc_pipe_trampoline()
 
-//go:linkname libc_pipe libc_pipe
 //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -375,7 +353,6 @@
 
 func libc_kill_trampoline()
 
-//go:linkname libc_kill libc_kill
 //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -395,7 +372,6 @@
 
 func libc_access_trampoline()
 
-//go:linkname libc_access libc_access
 //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -410,7 +386,6 @@
 
 func libc_adjtime_trampoline()
 
-//go:linkname libc_adjtime libc_adjtime
 //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -430,7 +405,6 @@
 
 func libc_chdir_trampoline()
 
-//go:linkname libc_chdir libc_chdir
 //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -450,7 +424,6 @@
 
 func libc_chflags_trampoline()
 
-//go:linkname libc_chflags libc_chflags
 //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -470,7 +443,6 @@
 
 func libc_chmod_trampoline()
 
-//go:linkname libc_chmod libc_chmod
 //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -490,7 +462,6 @@
 
 func libc_chown_trampoline()
 
-//go:linkname libc_chown libc_chown
 //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -510,7 +481,6 @@
 
 func libc_chroot_trampoline()
 
-//go:linkname libc_chroot libc_chroot
 //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -525,7 +495,6 @@
 
 func libc_close_trampoline()
 
-//go:linkname libc_close libc_close
 //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -540,7 +509,6 @@
 
 func libc_closedir_trampoline()
 
-//go:linkname libc_closedir libc_closedir
 //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -556,7 +524,6 @@
 
 func libc_dup_trampoline()
 
-//go:linkname libc_dup libc_dup
 //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -571,7 +538,6 @@
 
 func libc_dup2_trampoline()
 
-//go:linkname libc_dup2 libc_dup2
 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -596,7 +562,6 @@
 
 func libc_exchangedata_trampoline()
 
-//go:linkname libc_exchangedata libc_exchangedata
 //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -611,7 +576,6 @@
 
 func libc_fchdir_trampoline()
 
-//go:linkname libc_fchdir libc_fchdir
 //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -626,7 +590,6 @@
 
 func libc_fchflags_trampoline()
 
-//go:linkname libc_fchflags libc_fchflags
 //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -641,7 +604,6 @@
 
 func libc_fchmod_trampoline()
 
-//go:linkname libc_fchmod libc_fchmod
 //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -656,7 +618,6 @@
 
 func libc_fchown_trampoline()
 
-//go:linkname libc_fchown libc_fchown
 //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -671,7 +632,6 @@
 
 func libc_flock_trampoline()
 
-//go:linkname libc_flock libc_flock
 //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -687,7 +647,6 @@
 
 func libc_fpathconf_trampoline()
 
-//go:linkname libc_fpathconf libc_fpathconf
 //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -702,7 +661,6 @@
 
 func libc_fsync_trampoline()
 
-//go:linkname libc_fsync libc_fsync
 //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -717,7 +675,6 @@
 
 func libc_ftruncate_trampoline()
 
-//go:linkname libc_ftruncate libc_ftruncate
 //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -730,7 +687,6 @@
 
 func libc_getdtablesize_trampoline()
 
-//go:linkname libc_getdtablesize libc_getdtablesize
 //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -743,7 +699,6 @@
 
 func libc_getegid_trampoline()
 
-//go:linkname libc_getegid libc_getegid
 //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -756,7 +711,6 @@
 
 func libc_geteuid_trampoline()
 
-//go:linkname libc_geteuid libc_geteuid
 //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -769,7 +723,6 @@
 
 func libc_getgid_trampoline()
 
-//go:linkname libc_getgid libc_getgid
 //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -785,7 +738,6 @@
 
 func libc_getpgid_trampoline()
 
-//go:linkname libc_getpgid libc_getpgid
 //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -798,7 +750,6 @@
 
 func libc_getpgrp_trampoline()
 
-//go:linkname libc_getpgrp libc_getpgrp
 //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -811,7 +762,6 @@
 
 func libc_getpid_trampoline()
 
-//go:linkname libc_getpid libc_getpid
 //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -824,7 +774,6 @@
 
 func libc_getppid_trampoline()
 
-//go:linkname libc_getppid libc_getppid
 //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -840,7 +789,6 @@
 
 func libc_getpriority_trampoline()
 
-//go:linkname libc_getpriority libc_getpriority
 //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -855,7 +803,6 @@
 
 func libc_getrlimit_trampoline()
 
-//go:linkname libc_getrlimit libc_getrlimit
 //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -870,7 +817,6 @@
 
 func libc_getrusage_trampoline()
 
-//go:linkname libc_getrusage libc_getrusage
 //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -886,7 +832,6 @@
 
 func libc_getsid_trampoline()
 
-//go:linkname libc_getsid libc_getsid
 //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -899,7 +844,6 @@
 
 func libc_getuid_trampoline()
 
-//go:linkname libc_getuid libc_getuid
 //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -912,7 +856,6 @@
 
 func libc_issetugid_trampoline()
 
-//go:linkname libc_issetugid libc_issetugid
 //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -928,7 +871,6 @@
 
 func libc_kqueue_trampoline()
 
-//go:linkname libc_kqueue libc_kqueue
 //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -948,7 +890,6 @@
 
 func libc_lchown_trampoline()
 
-//go:linkname libc_lchown libc_lchown
 //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -973,7 +914,6 @@
 
 func libc_link_trampoline()
 
-//go:linkname libc_link libc_link
 //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -988,7 +928,6 @@
 
 func libc_listen_trampoline()
 
-//go:linkname libc_listen libc_listen
 //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1008,7 +947,6 @@
 
 func libc_mkdir_trampoline()
 
-//go:linkname libc_mkdir libc_mkdir
 //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1028,7 +966,6 @@
 
 func libc_mkfifo_trampoline()
 
-//go:linkname libc_mkfifo libc_mkfifo
 //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1048,7 +985,6 @@
 
 func libc_mknod_trampoline()
 
-//go:linkname libc_mknod libc_mknod
 //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1069,7 +1005,6 @@
 
 func libc_mlock_trampoline()
 
-//go:linkname libc_mlock libc_mlock
 //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1084,7 +1019,6 @@
 
 func libc_mlockall_trampoline()
 
-//go:linkname libc_mlockall libc_mlockall
 //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1105,7 +1039,6 @@
 
 func libc_mprotect_trampoline()
 
-//go:linkname libc_mprotect libc_mprotect
 //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1126,7 +1059,6 @@
 
 func libc_munlock_trampoline()
 
-//go:linkname libc_munlock libc_munlock
 //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1141,7 +1073,6 @@
 
 func libc_munlockall_trampoline()
 
-//go:linkname libc_munlockall libc_munlockall
 //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1162,7 +1093,6 @@
 
 func libc_open_trampoline()
 
-//go:linkname libc_open libc_open
 //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1183,7 +1113,6 @@
 
 func libc_pathconf_trampoline()
 
-//go:linkname libc_pathconf libc_pathconf
 //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1205,7 +1134,6 @@
 
 func libc_pread_trampoline()
 
-//go:linkname libc_pread libc_pread
 //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1227,7 +1155,6 @@
 
 func libc_pwrite_trampoline()
 
-//go:linkname libc_pwrite libc_pwrite
 //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1249,7 +1176,6 @@
 
 func libc_read_trampoline()
 
-//go:linkname libc_read libc_read
 //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1262,7 +1188,6 @@
 
 func libc_readdir_r_trampoline()
 
-//go:linkname libc_readdir_r libc_readdir_r
 //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1289,7 +1214,6 @@
 
 func libc_readlink_trampoline()
 
-//go:linkname libc_readlink libc_readlink
 //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1314,7 +1238,6 @@
 
 func libc_rename_trampoline()
 
-//go:linkname libc_rename libc_rename
 //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1334,7 +1257,6 @@
 
 func libc_revoke_trampoline()
 
-//go:linkname libc_revoke libc_revoke
 //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1354,7 +1276,6 @@
 
 func libc_rmdir_trampoline()
 
-//go:linkname libc_rmdir libc_rmdir
 //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1370,7 +1291,6 @@
 
 func libc_lseek_trampoline()
 
-//go:linkname libc_lseek libc_lseek
 //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1385,7 +1305,6 @@
 
 func libc_select_trampoline()
 
-//go:linkname libc_select libc_select
 //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1400,7 +1319,6 @@
 
 func libc_setegid_trampoline()
 
-//go:linkname libc_setegid libc_setegid
 //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1415,7 +1333,6 @@
 
 func libc_seteuid_trampoline()
 
-//go:linkname libc_seteuid libc_seteuid
 //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1430,7 +1347,6 @@
 
 func libc_setgid_trampoline()
 
-//go:linkname libc_setgid libc_setgid
 //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1450,7 +1366,6 @@
 
 func libc_setlogin_trampoline()
 
-//go:linkname libc_setlogin libc_setlogin
 //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1465,7 +1380,6 @@
 
 func libc_setpgid_trampoline()
 
-//go:linkname libc_setpgid libc_setpgid
 //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1480,7 +1394,6 @@
 
 func libc_setpriority_trampoline()
 
-//go:linkname libc_setpriority libc_setpriority
 //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1495,7 +1408,6 @@
 
 func libc_setprivexec_trampoline()
 
-//go:linkname libc_setprivexec libc_setprivexec
 //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1510,7 +1422,6 @@
 
 func libc_setregid_trampoline()
 
-//go:linkname libc_setregid libc_setregid
 //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1525,7 +1436,6 @@
 
 func libc_setreuid_trampoline()
 
-//go:linkname libc_setreuid libc_setreuid
 //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1540,7 +1450,6 @@
 
 func libc_setrlimit_trampoline()
 
-//go:linkname libc_setrlimit libc_setrlimit
 //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1556,7 +1465,6 @@
 
 func libc_setsid_trampoline()
 
-//go:linkname libc_setsid libc_setsid
 //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1571,7 +1479,6 @@
 
 func libc_settimeofday_trampoline()
 
-//go:linkname libc_settimeofday libc_settimeofday
 //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1586,7 +1493,6 @@
 
 func libc_setuid_trampoline()
 
-//go:linkname libc_setuid libc_setuid
 //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1611,7 +1517,6 @@
 
 func libc_symlink_trampoline()
 
-//go:linkname libc_symlink libc_symlink
 //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1626,7 +1531,6 @@
 
 func libc_sync_trampoline()
 
-//go:linkname libc_sync libc_sync
 //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1646,7 +1550,6 @@
 
 func libc_truncate_trampoline()
 
-//go:linkname libc_truncate libc_truncate
 //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1659,7 +1562,6 @@
 
 func libc_umask_trampoline()
 
-//go:linkname libc_umask libc_umask
 //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1679,7 +1581,6 @@
 
 func libc_undelete_trampoline()
 
-//go:linkname libc_undelete libc_undelete
 //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1699,7 +1600,6 @@
 
 func libc_unlink_trampoline()
 
-//go:linkname libc_unlink libc_unlink
 //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1719,7 +1619,6 @@
 
 func libc_unmount_trampoline()
 
-//go:linkname libc_unmount libc_unmount
 //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1741,7 +1640,6 @@
 
 func libc_write_trampoline()
 
-//go:linkname libc_write libc_write
 //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1763,7 +1661,6 @@
 
 func libc_writev_trampoline()
 
-//go:linkname libc_writev libc_writev
 //go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1779,7 +1676,6 @@
 
 func libc_mmap_trampoline()
 
-//go:linkname libc_mmap libc_mmap
 //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1794,7 +1690,6 @@
 
 func libc_munmap_trampoline()
 
-//go:linkname libc_munmap libc_munmap
 //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1810,7 +1705,6 @@
 
 func libc_fork_trampoline()
 
-//go:linkname libc_fork libc_fork
 //go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1825,7 +1719,6 @@
 
 func libc_ioctl_trampoline()
 
-//go:linkname libc_ioctl libc_ioctl
 //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1850,7 +1743,6 @@
 
 func libc_execve_trampoline()
 
-//go:linkname libc_execve libc_execve
 //go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1865,7 +1757,6 @@
 
 func libc_exit_trampoline()
 
-//go:linkname libc_exit libc_exit
 //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1886,7 +1777,6 @@
 
 func libc_sysctl_trampoline()
 
-//go:linkname libc_sysctl libc_sysctl
 //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1917,7 +1807,6 @@
 
 func libc_unlinkat_trampoline()
 
-//go:linkname libc_unlinkat libc_unlinkat
 //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1938,7 +1827,6 @@
 
 func libc_openat_trampoline()
 
-//go:linkname libc_openat libc_openat
 //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1960,7 +1848,6 @@
 
 func libc_getcwd_trampoline()
 
-//go:linkname libc_getcwd libc_getcwd
 //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1975,7 +1862,6 @@
 
 func libc_fstat64_trampoline()
 
-//go:linkname libc_fstat64 libc_fstat64
 //go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1990,7 +1876,6 @@
 
 func libc_fstatfs64_trampoline()
 
-//go:linkname libc_fstatfs64 libc_fstatfs64
 //go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2005,7 +1890,6 @@
 
 func libc_gettimeofday_trampoline()
 
-//go:linkname libc_gettimeofday libc_gettimeofday
 //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2025,7 +1909,6 @@
 
 func libc_lstat64_trampoline()
 
-//go:linkname libc_lstat64 libc_lstat64
 //go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2045,7 +1928,6 @@
 
 func libc_stat64_trampoline()
 
-//go:linkname libc_stat64 libc_stat64
 //go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2065,7 +1947,6 @@
 
 func libc_statfs64_trampoline()
 
-//go:linkname libc_statfs64 libc_statfs64
 //go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2085,7 +1966,6 @@
 
 func libc_fstatat64_trampoline()
 
-//go:linkname libc_fstatat64 libc_fstatat64
 //go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2101,5 +1981,4 @@
 
 func libc_ptrace_trampoline()
 
-//go:linkname libc_ptrace libc_ptrace
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
diff --git a/src/syscall/zsyscall_darwin_amd64.s b/src/syscall/zsyscall_darwin_amd64.s
index 492f947..5eb48ce 100644
--- a/src/syscall/zsyscall_darwin_amd64.s
+++ b/src/syscall/zsyscall_darwin_amd64.s
@@ -1,253 +1,253 @@
 // go run mkasm.go darwin amd64
 // Code generated by the command above; DO NOT EDIT.
 #include "textflag.h"
-TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getfsstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getfsstat(SB)
-TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setattrlist_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setattrlist(SB)
-TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fdopendir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fdopendir(SB)
-TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendfile_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendfile(SB)
-TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_wait4_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_accept_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_bind_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_connect_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socket_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpeername_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpeername(SB)
-TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockname_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockname(SB)
-TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_shutdown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_shutdown(SB)
-TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socketpair_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socketpair(SB)
-TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvfrom_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvfrom(SB)
-TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendto_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendto(SB)
-TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvmsg(SB)
-TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendmsg(SB)
-TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kevent(SB)
-TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_utimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_utimes(SB)
-TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_futimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_futimes(SB)
-TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
-TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pipe_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pipe(SB)
-TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kill(SB)
-TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_access_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_access(SB)
-TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_adjtime_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_adjtime(SB)
-TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chdir(SB)
-TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chflags(SB)
-TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chmod(SB)
-TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chown(SB)
-TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chroot_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chroot(SB)
-TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_close_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_close(SB)
-TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_closedir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_closedir(SB)
-TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup(SB)
-TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup2_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup2(SB)
-TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_exchangedata_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_exchangedata(SB)
-TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchdir(SB)
-TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchflags(SB)
-TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmod(SB)
-TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchown(SB)
-TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_flock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_flock(SB)
-TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fpathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fpathconf(SB)
-TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fsync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fsync(SB)
-TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ftruncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ftruncate(SB)
-TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getdtablesize_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getdtablesize(SB)
-TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getegid(SB)
-TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_geteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_geteuid(SB)
-TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgid(SB)
-TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgid(SB)
-TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgrp_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgrp(SB)
-TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpid(SB)
-TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getppid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getppid(SB)
-TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpriority(SB)
-TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrlimit(SB)
-TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrusage_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrusage(SB)
-TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsid(SB)
-TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getuid(SB)
-TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_issetugid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_issetugid(SB)
-TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kqueue(SB)
-TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lchown(SB)
-TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_link_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_link(SB)
-TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_listen_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_listen(SB)
-TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdir(SB)
-TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkfifo_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkfifo(SB)
-TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mknod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mknod(SB)
-TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mlock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mlock(SB)
-TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mlockall_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mlockall(SB)
-TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mprotect_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mprotect(SB)
-TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munlock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munlock(SB)
-TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munlockall_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munlockall(SB)
-TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_open_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_open(SB)
-TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pathconf(SB)
-TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pread_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pread(SB)
-TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pwrite_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pwrite(SB)
-TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_read_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_read(SB)
-TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_readdir_r_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_readdir_r(SB)
-TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_readlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_readlink(SB)
-TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rename_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rename(SB)
-TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_revoke_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_revoke(SB)
-TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rmdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rmdir(SB)
-TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lseek_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lseek(SB)
-TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_select_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_select(SB)
-TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setegid(SB)
-TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_seteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_seteuid(SB)
-TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgid(SB)
-TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setlogin_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setlogin(SB)
-TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpgid(SB)
-TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpriority(SB)
-TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setprivexec_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setprivexec(SB)
-TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setregid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setregid(SB)
-TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setreuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setreuid(SB)
-TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setrlimit(SB)
-TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsid(SB)
-TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_settimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_settimeofday(SB)
-TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setuid(SB)
-TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_symlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_symlink(SB)
-TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sync(SB)
-TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_truncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_truncate(SB)
-TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_umask_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_umask(SB)
-TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_undelete_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_undelete(SB)
-TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlink(SB)
-TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unmount_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unmount(SB)
-TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_write_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_write(SB)
-TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_writev_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_writev(SB)
-TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fork_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ioctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ioctl(SB)
-TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_execve_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
-TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_exit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_exit(SB)
-TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sysctl(SB)
-TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlinkat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_openat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)
-TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getcwd_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getcwd(SB)
-TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstat64_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat64(SB)
-TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatfs64_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatfs64(SB)
-TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_gettimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_gettimeofday(SB)
-TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lstat64_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lstat64(SB)
-TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_stat64_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_stat64(SB)
-TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_statfs64_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_statfs64(SB)
-TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatat64_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat64(SB)
-TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ptrace_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ptrace(SB)
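
The assembly half of this change is uniform: each libc trampoline's TEXT symbol gains an explicit <ABIInternal> marker, while its one-instruction body, a tail JMP to the dynamically imported libc entry point, is untouched. Without the marker a TEXT symbol defaults to ABI0, so the annotation records these trampolines under the internal ABI, consistent with the register-ABI work this branch merges into. The Go half of the change, in the hunks below, drops the matching //go:linkname self-aliases; a small checker sketch for confirming that removal follows those hunks.
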
diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go
index ede0091..0d8598d 100644
--- a/src/syscall/zsyscall_darwin_arm64.go
+++ b/src/syscall/zsyscall_darwin_arm64.go
@@ -20,7 +20,6 @@
 
 func libc_getgroups_trampoline()
 
-//go:linkname libc_getgroups libc_getgroups
 //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -35,7 +34,6 @@
 
 func libc_setgroups_trampoline()
 
-//go:linkname libc_setgroups libc_setgroups
 //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -51,7 +49,6 @@
 
 func libc_wait4_trampoline()
 
-//go:linkname libc_wait4 libc_wait4
 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -67,7 +64,6 @@
 
 func libc_accept_trampoline()
 
-//go:linkname libc_accept libc_accept
 //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -82,7 +78,6 @@
 
 func libc_bind_trampoline()
 
-//go:linkname libc_bind libc_bind
 //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -97,7 +92,6 @@
 
 func libc_connect_trampoline()
 
-//go:linkname libc_connect libc_connect
 //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -113,7 +107,6 @@
 
 func libc_socket_trampoline()
 
-//go:linkname libc_socket libc_socket
 //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -128,7 +121,6 @@
 
 func libc_getsockopt_trampoline()
 
-//go:linkname libc_getsockopt libc_getsockopt
 //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -143,7 +135,6 @@
 
 func libc_setsockopt_trampoline()
 
-//go:linkname libc_setsockopt libc_setsockopt
 //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -158,7 +149,6 @@
 
 func libc_getpeername_trampoline()
 
-//go:linkname libc_getpeername libc_getpeername
 //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -173,7 +163,6 @@
 
 func libc_getsockname_trampoline()
 
-//go:linkname libc_getsockname libc_getsockname
 //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -188,7 +177,6 @@
 
 func libc_shutdown_trampoline()
 
-//go:linkname libc_shutdown libc_shutdown
 //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -203,7 +191,6 @@
 
 func libc_socketpair_trampoline()
 
-//go:linkname libc_socketpair libc_socketpair
 //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -225,7 +212,6 @@
 
 func libc_recvfrom_trampoline()
 
-//go:linkname libc_recvfrom libc_recvfrom
 //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -246,7 +232,6 @@
 
 func libc_sendto_trampoline()
 
-//go:linkname libc_sendto libc_sendto
 //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -262,7 +247,6 @@
 
 func libc_recvmsg_trampoline()
 
-//go:linkname libc_recvmsg libc_recvmsg
 //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -278,7 +262,6 @@
 
 func libc_sendmsg_trampoline()
 
-//go:linkname libc_sendmsg libc_sendmsg
 //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -294,7 +277,6 @@
 
 func libc_kevent_trampoline()
 
-//go:linkname libc_kevent libc_kevent
 //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -314,7 +296,6 @@
 
 func libc_utimes_trampoline()
 
-//go:linkname libc_utimes libc_utimes
 //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -329,7 +310,6 @@
 
 func libc_futimes_trampoline()
 
-//go:linkname libc_futimes libc_futimes
 //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -345,7 +325,6 @@
 
 func libc_fcntl_trampoline()
 
-//go:linkname libc_fcntl libc_fcntl
 //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -360,7 +339,6 @@
 
 func libc_pipe_trampoline()
 
-//go:linkname libc_pipe libc_pipe
 //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -375,7 +353,6 @@
 
 func libc_kill_trampoline()
 
-//go:linkname libc_kill libc_kill
 //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -395,7 +372,6 @@
 
 func libc_access_trampoline()
 
-//go:linkname libc_access libc_access
 //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -410,7 +386,6 @@
 
 func libc_adjtime_trampoline()
 
-//go:linkname libc_adjtime libc_adjtime
 //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -430,7 +405,6 @@
 
 func libc_chdir_trampoline()
 
-//go:linkname libc_chdir libc_chdir
 //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -450,7 +424,6 @@
 
 func libc_chflags_trampoline()
 
-//go:linkname libc_chflags libc_chflags
 //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -470,7 +443,6 @@
 
 func libc_chmod_trampoline()
 
-//go:linkname libc_chmod libc_chmod
 //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -490,7 +462,6 @@
 
 func libc_chown_trampoline()
 
-//go:linkname libc_chown libc_chown
 //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -510,7 +481,6 @@
 
 func libc_chroot_trampoline()
 
-//go:linkname libc_chroot libc_chroot
 //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -525,7 +495,6 @@
 
 func libc_close_trampoline()
 
-//go:linkname libc_close libc_close
 //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -540,7 +509,6 @@
 
 func libc_closedir_trampoline()
 
-//go:linkname libc_closedir libc_closedir
 //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -556,7 +524,6 @@
 
 func libc_dup_trampoline()
 
-//go:linkname libc_dup libc_dup
 //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -571,7 +538,6 @@
 
 func libc_dup2_trampoline()
 
-//go:linkname libc_dup2 libc_dup2
 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -596,7 +562,6 @@
 
 func libc_exchangedata_trampoline()
 
-//go:linkname libc_exchangedata libc_exchangedata
 //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -611,7 +576,6 @@
 
 func libc_fchdir_trampoline()
 
-//go:linkname libc_fchdir libc_fchdir
 //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -626,7 +590,6 @@
 
 func libc_fchflags_trampoline()
 
-//go:linkname libc_fchflags libc_fchflags
 //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -641,7 +604,6 @@
 
 func libc_fchmod_trampoline()
 
-//go:linkname libc_fchmod libc_fchmod
 //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -656,7 +618,6 @@
 
 func libc_fchown_trampoline()
 
-//go:linkname libc_fchown libc_fchown
 //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -671,7 +632,6 @@
 
 func libc_flock_trampoline()
 
-//go:linkname libc_flock libc_flock
 //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -687,7 +647,6 @@
 
 func libc_fpathconf_trampoline()
 
-//go:linkname libc_fpathconf libc_fpathconf
 //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -702,7 +661,6 @@
 
 func libc_fsync_trampoline()
 
-//go:linkname libc_fsync libc_fsync
 //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -717,7 +675,6 @@
 
 func libc_ftruncate_trampoline()
 
-//go:linkname libc_ftruncate libc_ftruncate
 //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -730,7 +687,6 @@
 
 func libc_getdtablesize_trampoline()
 
-//go:linkname libc_getdtablesize libc_getdtablesize
 //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -743,7 +699,6 @@
 
 func libc_getegid_trampoline()
 
-//go:linkname libc_getegid libc_getegid
 //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -756,7 +711,6 @@
 
 func libc_geteuid_trampoline()
 
-//go:linkname libc_geteuid libc_geteuid
 //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -769,7 +723,6 @@
 
 func libc_getgid_trampoline()
 
-//go:linkname libc_getgid libc_getgid
 //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -785,7 +738,6 @@
 
 func libc_getpgid_trampoline()
 
-//go:linkname libc_getpgid libc_getpgid
 //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -798,7 +750,6 @@
 
 func libc_getpgrp_trampoline()
 
-//go:linkname libc_getpgrp libc_getpgrp
 //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -811,7 +762,6 @@
 
 func libc_getpid_trampoline()
 
-//go:linkname libc_getpid libc_getpid
 //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -824,7 +774,6 @@
 
 func libc_getppid_trampoline()
 
-//go:linkname libc_getppid libc_getppid
 //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -840,7 +789,6 @@
 
 func libc_getpriority_trampoline()
 
-//go:linkname libc_getpriority libc_getpriority
 //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -855,7 +803,6 @@
 
 func libc_getrlimit_trampoline()
 
-//go:linkname libc_getrlimit libc_getrlimit
 //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -870,7 +817,6 @@
 
 func libc_getrusage_trampoline()
 
-//go:linkname libc_getrusage libc_getrusage
 //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -886,7 +832,6 @@
 
 func libc_getsid_trampoline()
 
-//go:linkname libc_getsid libc_getsid
 //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -899,7 +844,6 @@
 
 func libc_getuid_trampoline()
 
-//go:linkname libc_getuid libc_getuid
 //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -912,7 +856,6 @@
 
 func libc_issetugid_trampoline()
 
-//go:linkname libc_issetugid libc_issetugid
 //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -928,7 +871,6 @@
 
 func libc_kqueue_trampoline()
 
-//go:linkname libc_kqueue libc_kqueue
 //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -948,7 +890,6 @@
 
 func libc_lchown_trampoline()
 
-//go:linkname libc_lchown libc_lchown
 //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -973,7 +914,6 @@
 
 func libc_link_trampoline()
 
-//go:linkname libc_link libc_link
 //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -988,7 +928,6 @@
 
 func libc_listen_trampoline()
 
-//go:linkname libc_listen libc_listen
 //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1008,7 +947,6 @@
 
 func libc_mkdir_trampoline()
 
-//go:linkname libc_mkdir libc_mkdir
 //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1028,7 +966,6 @@
 
 func libc_mkfifo_trampoline()
 
-//go:linkname libc_mkfifo libc_mkfifo
 //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1048,7 +985,6 @@
 
 func libc_mknod_trampoline()
 
-//go:linkname libc_mknod libc_mknod
 //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1069,7 +1005,6 @@
 
 func libc_mlock_trampoline()
 
-//go:linkname libc_mlock libc_mlock
 //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1084,7 +1019,6 @@
 
 func libc_mlockall_trampoline()
 
-//go:linkname libc_mlockall libc_mlockall
 //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1105,7 +1039,6 @@
 
 func libc_mprotect_trampoline()
 
-//go:linkname libc_mprotect libc_mprotect
 //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1126,7 +1059,6 @@
 
 func libc_munlock_trampoline()
 
-//go:linkname libc_munlock libc_munlock
 //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1141,7 +1073,6 @@
 
 func libc_munlockall_trampoline()
 
-//go:linkname libc_munlockall libc_munlockall
 //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1162,7 +1093,6 @@
 
 func libc_open_trampoline()
 
-//go:linkname libc_open libc_open
 //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1183,7 +1113,6 @@
 
 func libc_pathconf_trampoline()
 
-//go:linkname libc_pathconf libc_pathconf
 //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1205,7 +1134,6 @@
 
 func libc_pread_trampoline()
 
-//go:linkname libc_pread libc_pread
 //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1227,7 +1155,6 @@
 
 func libc_pwrite_trampoline()
 
-//go:linkname libc_pwrite libc_pwrite
 //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1249,7 +1176,6 @@
 
 func libc_read_trampoline()
 
-//go:linkname libc_read libc_read
 //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1262,7 +1188,6 @@
 
 func libc_readdir_r_trampoline()
 
-//go:linkname libc_readdir_r libc_readdir_r
 //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1289,7 +1214,6 @@
 
 func libc_readlink_trampoline()
 
-//go:linkname libc_readlink libc_readlink
 //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1314,7 +1238,6 @@
 
 func libc_rename_trampoline()
 
-//go:linkname libc_rename libc_rename
 //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1334,7 +1257,6 @@
 
 func libc_revoke_trampoline()
 
-//go:linkname libc_revoke libc_revoke
 //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1354,7 +1276,6 @@
 
 func libc_rmdir_trampoline()
 
-//go:linkname libc_rmdir libc_rmdir
 //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1370,7 +1291,6 @@
 
 func libc_lseek_trampoline()
 
-//go:linkname libc_lseek libc_lseek
 //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1385,7 +1305,6 @@
 
 func libc_select_trampoline()
 
-//go:linkname libc_select libc_select
 //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1400,7 +1319,6 @@
 
 func libc_setegid_trampoline()
 
-//go:linkname libc_setegid libc_setegid
 //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1415,7 +1333,6 @@
 
 func libc_seteuid_trampoline()
 
-//go:linkname libc_seteuid libc_seteuid
 //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1430,7 +1347,6 @@
 
 func libc_setgid_trampoline()
 
-//go:linkname libc_setgid libc_setgid
 //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1450,7 +1366,6 @@
 
 func libc_setlogin_trampoline()
 
-//go:linkname libc_setlogin libc_setlogin
 //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1465,7 +1380,6 @@
 
 func libc_setpgid_trampoline()
 
-//go:linkname libc_setpgid libc_setpgid
 //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1480,7 +1394,6 @@
 
 func libc_setpriority_trampoline()
 
-//go:linkname libc_setpriority libc_setpriority
 //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1495,7 +1408,6 @@
 
 func libc_setprivexec_trampoline()
 
-//go:linkname libc_setprivexec libc_setprivexec
 //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1510,7 +1422,6 @@
 
 func libc_setregid_trampoline()
 
-//go:linkname libc_setregid libc_setregid
 //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1525,7 +1436,6 @@
 
 func libc_setreuid_trampoline()
 
-//go:linkname libc_setreuid libc_setreuid
 //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1540,7 +1450,6 @@
 
 func libc_setrlimit_trampoline()
 
-//go:linkname libc_setrlimit libc_setrlimit
 //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1556,7 +1465,6 @@
 
 func libc_setsid_trampoline()
 
-//go:linkname libc_setsid libc_setsid
 //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1571,7 +1479,6 @@
 
 func libc_settimeofday_trampoline()
 
-//go:linkname libc_settimeofday libc_settimeofday
 //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1586,7 +1493,6 @@
 
 func libc_setuid_trampoline()
 
-//go:linkname libc_setuid libc_setuid
 //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1611,7 +1517,6 @@
 
 func libc_symlink_trampoline()
 
-//go:linkname libc_symlink libc_symlink
 //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1626,7 +1531,6 @@
 
 func libc_sync_trampoline()
 
-//go:linkname libc_sync libc_sync
 //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1646,7 +1550,6 @@
 
 func libc_truncate_trampoline()
 
-//go:linkname libc_truncate libc_truncate
 //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1659,7 +1562,6 @@
 
 func libc_umask_trampoline()
 
-//go:linkname libc_umask libc_umask
 //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1679,7 +1581,6 @@
 
 func libc_undelete_trampoline()
 
-//go:linkname libc_undelete libc_undelete
 //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1699,7 +1600,6 @@
 
 func libc_unlink_trampoline()
 
-//go:linkname libc_unlink libc_unlink
 //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1719,7 +1619,6 @@
 
 func libc_unmount_trampoline()
 
-//go:linkname libc_unmount libc_unmount
 //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1741,7 +1640,6 @@
 
 func libc_write_trampoline()
 
-//go:linkname libc_write libc_write
 //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1763,7 +1661,6 @@
 
 func libc_writev_trampoline()
 
-//go:linkname libc_writev libc_writev
 //go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1779,7 +1676,6 @@
 
 func libc_mmap_trampoline()
 
-//go:linkname libc_mmap libc_mmap
 //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1794,7 +1690,6 @@
 
 func libc_munmap_trampoline()
 
-//go:linkname libc_munmap libc_munmap
 //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1810,7 +1705,6 @@
 
 func libc_fork_trampoline()
 
-//go:linkname libc_fork libc_fork
 //go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1825,7 +1719,6 @@
 
 func libc_ioctl_trampoline()
 
-//go:linkname libc_ioctl libc_ioctl
 //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1850,7 +1743,6 @@
 
 func libc_execve_trampoline()
 
-//go:linkname libc_execve libc_execve
 //go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1865,7 +1757,6 @@
 
 func libc_exit_trampoline()
 
-//go:linkname libc_exit libc_exit
 //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1886,7 +1777,6 @@
 
 func libc_sysctl_trampoline()
 
-//go:linkname libc_sysctl libc_sysctl
 //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1917,7 +1807,6 @@
 
 func libc_unlinkat_trampoline()
 
-//go:linkname libc_unlinkat libc_unlinkat
 //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1938,7 +1827,6 @@
 
 func libc_openat_trampoline()
 
-//go:linkname libc_openat libc_openat
 //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1960,7 +1848,6 @@
 
 func libc_getcwd_trampoline()
 
-//go:linkname libc_getcwd libc_getcwd
 //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1975,7 +1862,6 @@
 
 func libc_fstat_trampoline()
 
-//go:linkname libc_fstat libc_fstat
 //go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -1990,7 +1876,6 @@
 
 func libc_fstatfs_trampoline()
 
-//go:linkname libc_fstatfs libc_fstatfs
 //go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2005,7 +1890,6 @@
 
 func libc_gettimeofday_trampoline()
 
-//go:linkname libc_gettimeofday libc_gettimeofday
 //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2025,7 +1909,6 @@
 
 func libc_lstat_trampoline()
 
-//go:linkname libc_lstat libc_lstat
 //go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2045,7 +1928,6 @@
 
 func libc_stat_trampoline()
 
-//go:linkname libc_stat libc_stat
 //go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2065,7 +1947,6 @@
 
 func libc_statfs_trampoline()
 
-//go:linkname libc_statfs libc_statfs
 //go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2085,7 +1966,6 @@
 
 func libc_fstatat_trampoline()
 
-//go:linkname libc_fstatat libc_fstatat
 //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -2101,5 +1981,4 @@
 
 func libc_ptrace_trampoline()
 
-//go:linkname libc_ptrace libc_ptrace
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
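
In the Go file, every generated stanza loses its "//go:linkname libc_X libc_X" line, a self-alias that evidently existed only to keep the dynamically imported symbol visible to the linker; after this change the trampoline declaration plus the //go:cgo_import_dynamic pragma suffice. A throwaway checker (a hypothetical helper, not part of this CL) can scan the regenerated files for any directives the regeneration missed:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// main reports every remaining //go:linkname directive, with file and
// line number, in the files named on the command line.
func main() {
	for _, path := range os.Args[1:] {
		f, err := os.Open(path)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		sc := bufio.NewScanner(f)
		for n := 1; sc.Scan(); n++ {
			if strings.HasPrefix(strings.TrimSpace(sc.Text()), "//go:linkname") {
				fmt.Printf("%s:%d: %s\n", path, n, sc.Text())
			}
		}
		f.Close()
	}
}

Run as "go run check.go src/syscall/zsyscall_darwin_*.go"; after this change it should print nothing.
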
diff --git a/src/syscall/zsyscall_darwin_arm64.s b/src/syscall/zsyscall_darwin_arm64.s
index b606c6e..73e4a3f 100644
--- a/src/syscall/zsyscall_darwin_arm64.s
+++ b/src/syscall/zsyscall_darwin_arm64.s
@@ -1,253 +1,253 @@
 // go run mkasm.go darwin arm64
 // Code generated by the command above; DO NOT EDIT.
 #include "textflag.h"
-TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getfsstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getfsstat(SB)
-TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setattrlist_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setattrlist(SB)
-TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fdopendir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fdopendir(SB)
-TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendfile_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendfile(SB)
-TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_wait4_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_accept_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_bind_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_connect_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socket_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpeername_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpeername(SB)
-TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockname_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockname(SB)
-TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_shutdown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_shutdown(SB)
-TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socketpair_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socketpair(SB)
-TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvfrom_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvfrom(SB)
-TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendto_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendto(SB)
-TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvmsg(SB)
-TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendmsg(SB)
-TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kevent(SB)
-TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_utimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_utimes(SB)
-TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_futimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_futimes(SB)
-TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
-TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pipe_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pipe(SB)
-TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kill(SB)
-TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_access_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_access(SB)
-TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_adjtime_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_adjtime(SB)
-TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chdir(SB)
-TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chflags(SB)
-TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chmod(SB)
-TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chown(SB)
-TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chroot_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chroot(SB)
-TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_close_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_close(SB)
-TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_closedir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_closedir(SB)
-TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup(SB)
-TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup2_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup2(SB)
-TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_exchangedata_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_exchangedata(SB)
-TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchdir(SB)
-TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchflags(SB)
-TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmod(SB)
-TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchown(SB)
-TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_flock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_flock(SB)
-TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fpathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fpathconf(SB)
-TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fsync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fsync(SB)
-TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ftruncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ftruncate(SB)
-TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getdtablesize_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getdtablesize(SB)
-TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getegid(SB)
-TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_geteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_geteuid(SB)
-TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgid(SB)
-TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgid(SB)
-TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgrp_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgrp(SB)
-TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpid(SB)
-TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getppid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getppid(SB)
-TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpriority(SB)
-TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrlimit(SB)
-TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrusage_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrusage(SB)
-TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsid(SB)
-TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getuid(SB)
-TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_issetugid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_issetugid(SB)
-TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kqueue(SB)
-TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lchown(SB)
-TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_link_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_link(SB)
-TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_listen_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_listen(SB)
-TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdir(SB)
-TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkfifo_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkfifo(SB)
-TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mknod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mknod(SB)
-TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mlock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mlock(SB)
-TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mlockall_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mlockall(SB)
-TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mprotect_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mprotect(SB)
-TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munlock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munlock(SB)
-TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munlockall_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munlockall(SB)
-TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_open_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_open(SB)
-TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pathconf(SB)
-TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pread_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pread(SB)
-TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pwrite_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pwrite(SB)
-TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_read_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_read(SB)
-TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_readdir_r_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_readdir_r(SB)
-TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_readlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_readlink(SB)
-TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rename_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rename(SB)
-TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_revoke_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_revoke(SB)
-TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rmdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rmdir(SB)
-TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lseek_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lseek(SB)
-TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_select_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_select(SB)
-TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setegid(SB)
-TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_seteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_seteuid(SB)
-TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgid(SB)
-TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setlogin_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setlogin(SB)
-TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpgid(SB)
-TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpriority(SB)
-TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setprivexec_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setprivexec(SB)
-TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setregid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setregid(SB)
-TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setreuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setreuid(SB)
-TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setrlimit(SB)
-TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsid(SB)
-TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_settimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_settimeofday(SB)
-TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setuid(SB)
-TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_symlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_symlink(SB)
-TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sync(SB)
-TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_truncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_truncate(SB)
-TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_umask_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_umask(SB)
-TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_undelete_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_undelete(SB)
-TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlink(SB)
-TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unmount_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unmount(SB)
-TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_write_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_write(SB)
-TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_writev_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_writev(SB)
-TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fork_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ioctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ioctl(SB)
-TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_execve_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
-TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_exit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_exit(SB)
-TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sysctl(SB)
-TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlinkat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_openat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)
-TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getcwd_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getcwd(SB)
-TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat(SB)
-TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatfs_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatfs(SB)
-TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_gettimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_gettimeofday(SB)
-TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lstat(SB)
-TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_stat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_stat(SB)
-TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_statfs_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_statfs(SB)
-TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat(SB)
-TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ptrace_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ptrace(SB)
diff --git a/src/syscall/zsyscall_openbsd_amd64.s b/src/syscall/zsyscall_openbsd_amd64.s
index e5c5dde..8256a45 100644
--- a/src/syscall/zsyscall_openbsd_amd64.s
+++ b/src/syscall/zsyscall_openbsd_amd64.s
@@ -1,233 +1,233 @@
 // go run mkasm.go openbsd amd64
 // Code generated by the command above; DO NOT EDIT.
 #include "textflag.h"
-TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_wait4_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_accept_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_bind_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_connect_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socket_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpeername_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpeername(SB)
-TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockname_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockname(SB)
-TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_shutdown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_shutdown(SB)
-TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socketpair_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socketpair(SB)
-TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvfrom_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvfrom(SB)
-TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendto_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendto(SB)
-TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvmsg(SB)
-TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendmsg(SB)
-TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kevent(SB)
-TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_utimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_utimes(SB)
-TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_futimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_futimes(SB)
-TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
-TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pipe2_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pipe2(SB)
-TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_accept4_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_accept4(SB)
-TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getdents_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getdents(SB)
-TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_access_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_access(SB)
-TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_adjtime_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_adjtime(SB)
-TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chdir(SB)
-TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chflags(SB)
-TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chmod(SB)
-TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chown(SB)
-TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chroot_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chroot(SB)
-TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_close_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_close(SB)
-TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup(SB)
-TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup2_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup2(SB)
-TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchdir(SB)
-TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchflags(SB)
-TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmod(SB)
-TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchown(SB)
-TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_flock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_flock(SB)
-TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fpathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fpathconf(SB)
-TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat(SB)
-TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatfs_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatfs(SB)
-TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fsync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fsync(SB)
-TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ftruncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ftruncate(SB)
-TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getegid(SB)
-TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_geteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_geteuid(SB)
-TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgid(SB)
-TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgid(SB)
-TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgrp_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgrp(SB)
-TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpid(SB)
-TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getppid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getppid(SB)
-TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpriority(SB)
-TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrlimit(SB)
-TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrusage_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrusage(SB)
-TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsid(SB)
-TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_gettimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_gettimeofday(SB)
-TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getuid(SB)
-TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_issetugid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_issetugid(SB)
-TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kill(SB)
-TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kqueue(SB)
-TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lchown(SB)
-TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_link_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_link(SB)
-TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_listen_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_listen(SB)
-TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lstat(SB)
-TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdir(SB)
-TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkfifo_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkfifo(SB)
-TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mknod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mknod(SB)
-TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_nanosleep_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_nanosleep(SB)
-TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_open_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_open(SB)
-TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pathconf(SB)
-TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pread_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pread(SB)
-TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pwrite_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pwrite(SB)
-TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_read_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_read(SB)
-TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_readlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_readlink(SB)
-TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rename_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rename(SB)
-TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_revoke_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_revoke(SB)
-TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rmdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rmdir(SB)
-TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_select_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_select(SB)
-TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setegid(SB)
-TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_seteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_seteuid(SB)
-TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgid(SB)
-TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setlogin_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setlogin(SB)
-TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpgid(SB)
-TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpriority(SB)
-TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setregid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setregid(SB)
-TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setreuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setreuid(SB)
-TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setrlimit(SB)
-TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsid(SB)
-TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_settimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_settimeofday(SB)
-TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setuid(SB)
-TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_stat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_stat(SB)
-TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_statfs_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_statfs(SB)
-TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_symlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_symlink(SB)
-TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sync(SB)
-TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_truncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_truncate(SB)
-TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_umask_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_umask(SB)
-TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlink(SB)
-TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unmount_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unmount(SB)
-TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_write_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_write(SB)
-TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_utimensat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
-TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_syscall_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_syscall(SB)
-TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lseek_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lseek(SB)
-TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getcwd_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getcwd(SB)
-TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sysctl(SB)
-TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fork_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ioctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ioctl(SB)
-TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_execve_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
-TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_exit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_exit(SB)
-TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ptrace_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ptrace(SB)
-TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getentropy_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getentropy(SB)
-TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat(SB)
-TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlinkat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_openat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)
diff --git a/src/syscall/zsyscall_openbsd_arm64.s b/src/syscall/zsyscall_openbsd_arm64.s
index 37778b1..f6e0a8d 100644
--- a/src/syscall/zsyscall_openbsd_arm64.s
+++ b/src/syscall/zsyscall_openbsd_arm64.s
@@ -1,233 +1,233 @@
 // go run mkasm.go openbsd arm64
 // Code generated by the command above; DO NOT EDIT.
 #include "textflag.h"
-TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgroups_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_wait4_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_accept_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_accept(SB)
-TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_bind_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_bind(SB)
-TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_connect_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_connect(SB)
-TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socket_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socket(SB)
-TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockopt(SB)
-TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsockopt_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsockopt(SB)
-TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpeername_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpeername(SB)
-TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsockname_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsockname(SB)
-TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_shutdown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_shutdown(SB)
-TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_socketpair_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_socketpair(SB)
-TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvfrom_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvfrom(SB)
-TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendto_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendto(SB)
-TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_recvmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_recvmsg(SB)
-TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sendmsg_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sendmsg(SB)
-TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kevent(SB)
-TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_utimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_utimes(SB)
-TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_futimes_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_futimes(SB)
-TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fcntl(SB)
-TEXT ·libc_pipe2_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pipe2_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pipe2(SB)
-TEXT ·libc_accept4_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_accept4_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_accept4(SB)
-TEXT ·libc_getdents_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getdents_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getdents(SB)
-TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_access_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_access(SB)
-TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_adjtime_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_adjtime(SB)
-TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chdir(SB)
-TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chflags(SB)
-TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chmod(SB)
-TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chown(SB)
-TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_chroot_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_chroot(SB)
-TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_close_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_close(SB)
-TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup(SB)
-TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_dup2_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_dup2(SB)
-TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchdir(SB)
-TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchflags_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchflags(SB)
-TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchmod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchmod(SB)
-TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fchown(SB)
-TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_flock_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_flock(SB)
-TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fpathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fpathconf(SB)
-TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat(SB)
-TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatfs_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatfs(SB)
-TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fsync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fsync(SB)
-TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ftruncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ftruncate(SB)
-TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getegid(SB)
-TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_geteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_geteuid(SB)
-TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getgid(SB)
-TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgid(SB)
-TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpgrp_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpgrp(SB)
-TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpid(SB)
-TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getppid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getppid(SB)
-TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getpriority(SB)
-TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrlimit(SB)
-TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getrusage_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getrusage(SB)
-TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getsid(SB)
-TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_gettimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_gettimeofday(SB)
-TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getuid(SB)
-TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_issetugid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_issetugid(SB)
-TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kill(SB)
-TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_kqueue(SB)
-TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lchown_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lchown(SB)
-TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_link_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_link(SB)
-TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_listen_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_listen(SB)
-TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lstat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lstat(SB)
-TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkdir(SB)
-TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mkfifo_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mkfifo(SB)
-TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mknod_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mknod(SB)
-TEXT ·libc_nanosleep_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_nanosleep_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_nanosleep(SB)
-TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_open_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_open(SB)
-TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pathconf_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pathconf(SB)
-TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pread_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pread(SB)
-TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_pwrite_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_pwrite(SB)
-TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_read_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_read(SB)
-TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_readlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_readlink(SB)
-TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rename_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rename(SB)
-TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_revoke_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_revoke(SB)
-TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_rmdir_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_rmdir(SB)
-TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_select_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_select(SB)
-TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setegid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setegid(SB)
-TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_seteuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_seteuid(SB)
-TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setgid(SB)
-TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setlogin_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setlogin(SB)
-TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpgid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpgid(SB)
-TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setpriority_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setpriority(SB)
-TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setregid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setregid(SB)
-TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setreuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setreuid(SB)
-TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setrlimit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setrlimit(SB)
-TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setsid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setsid(SB)
-TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_settimeofday_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_settimeofday(SB)
-TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_setuid_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_setuid(SB)
-TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_stat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_stat(SB)
-TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_statfs_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_statfs(SB)
-TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_symlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_symlink(SB)
-TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sync_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sync(SB)
-TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_truncate_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_truncate(SB)
-TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_umask_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_umask(SB)
-TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlink_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlink(SB)
-TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unmount_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unmount(SB)
-TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_write_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_write(SB)
-TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_mmap(SB)
-TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_munmap(SB)
-TEXT ·libc_utimensat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_utimensat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_utimensat(SB)
-TEXT ·libc_syscall_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_syscall_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_syscall(SB)
-TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_lseek_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_lseek(SB)
-TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getcwd_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getcwd(SB)
-TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_sysctl(SB)
-TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fork_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fork(SB)
-TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ioctl_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ioctl(SB)
-TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_execve_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_execve(SB)
-TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_exit_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_exit(SB)
-TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_ptrace_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_ptrace(SB)
-TEXT ·libc_getentropy_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_getentropy_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_getentropy(SB)
-TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_fstatat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_fstatat(SB)
-TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_unlinkat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_unlinkat(SB)
-TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0
+TEXT ·libc_openat_trampoline<ABIInternal>(SB),NOSPLIT,$0-0
 	JMP	libc_openat(SB)
diff --git a/test/abi/regabipragma.dir/main.go b/test/abi/regabipragma.dir/main.go
new file mode 100644
index 0000000..d663337
--- /dev/null
+++ b/test/abi/regabipragma.dir/main.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"regabipragma.dir/tmp"
+)
+
+type S string
+
+//go:noinline
+func (s S) ff(t string) string {
+	return string(s) + " " + t
+}
+
+//go:noinline
+//go:registerparams
+func f(s, t string) string { // ERROR "Declared function f has register params"
+	return s + " " + t
+}
+
+func check(s string) {
+	if s != "Hello world!" {
+		fmt.Printf("FAIL, wanted 'Hello world!' but got '%s'\n", s)
+	}
+}
+
+func main() {
+	check(f("Hello", "world!"))   // ERROR "Called function ...f has register params"
+	check(tmp.F("Hello", "world!"))  // ERROR "Called function regabipragma.dir/tmp.F has register params"
+	check(S("Hello").ff("world!"))
+	check(tmp.S("Hello").FF("world!"))
+}
diff --git a/test/abi/regabipragma.dir/tmp/foo.go b/test/abi/regabipragma.dir/tmp/foo.go
new file mode 100644
index 0000000..cff989b
--- /dev/null
+++ b/test/abi/regabipragma.dir/tmp/foo.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tmp
+
+
+type S string
+
+//go:noinline
+func (s S) FF(t string) string {
+	return string(s) + " " + t
+}
+
+//go:noinline
+//go:registerparams
+func F(s, t string) string {
+	return s + " " + t
+}
diff --git a/test/abi/regabipragma.go b/test/abi/regabipragma.go
new file mode 100644
index 0000000..6a1b193
--- /dev/null
+++ b/test/abi/regabipragma.go
@@ -0,0 +1,10 @@
+// runindir
+// +build !windows
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: may delete or adapt this test once regabi is the default.
+
+package ignore
diff --git a/test/abi/regabipragma.out b/test/abi/regabipragma.out
new file mode 100644
index 0000000..321b1ad
--- /dev/null
+++ b/test/abi/regabipragma.out
@@ -0,0 +1,6 @@
+# regabipragma.dir/tmp
+tmp/foo.go:17:6: declared function F has register params
+# regabipragma.dir
+./main.go:21:6: declared function f has register params
+./main.go:32:9: called function f has register params
+./main.go:33:13: called function tmp.F has register params
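
The three regabipragma files above pin down the diagnostics expected from the experimental //go:registerparams pragma: one line per declared function and one per call site that uses the register-based convention. A minimal self-contained sketch of the pragma follows; the names are illustrative, not part of the test, and the diagnostic wording in the comments is taken from regabipragma.out:

package main

//go:noinline
//go:registerparams
func concat(s, t string) string { // expected: "declared function concat has register params"
	return s + " " + t
}

func main() {
	// expected at the call site: "called function concat has register params"
	println(concat("Hello", "world!"))
}

On a compiler without regabi support the unknown pragma is ignored and the program simply runs, which is why the real test is gated behind runindir with its own expected output.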
diff --git a/test/closure2.go b/test/closure2.go
index e4db05d..812d41f 100644
--- a/test/closure2.go
+++ b/test/closure2.go
@@ -9,6 +9,8 @@
 
 package main
 
+var never bool
+
 func main() {
 	{
 		type X struct {
@@ -115,4 +117,16 @@
 			panic("g() != 2")
 		}
 	}
+
+	{
+		var g func() int
+		q := 0
+		q, g = 1, func() int { return q }
+		if never {
+			g = func() int { return 2 }
+		}
+		if g() != 1 {
+			panic("g() != 1")
+		}
+	}
 }
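
The new closure2.go case depends on closures capturing variables by reference: g must see the value q holds at call time even though q and g are assigned in one statement, and the never branch keeps the compiler from folding g away. The capture rule in isolation, as a runnable sketch:

package main

func main() {
	q := 0
	g := func() int { return q } // g captures the variable q itself, not its current value
	q = 1
	if g() != 1 {
		panic("q is captured by reference")
	}
}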
diff --git a/test/closure3.dir/main.go b/test/closure3.dir/main.go
index 5694673..2fc3375 100644
--- a/test/closure3.dir/main.go
+++ b/test/closure3.dir/main.go
@@ -93,11 +93,11 @@
 		y := func(x int) int { // ERROR "can inline main.func11" "func literal does not escape"
 			return x + 2
 		}
-		y, sink = func() (func(int) int, int) { // ERROR "func literal does not escape"
-			return func(x int) int { // ERROR "can inline main.func12" "func literal escapes"
+		y, sink = func() (func(int) int, int) { // ERROR "can inline main.func12"
+			return func(x int) int { // ERROR "can inline main.func12"
 				return x + 1
 			}, 42
-		}()
+		}() // ERROR "func literal does not escape" "inlining call to main.func12"
 		if y(40) != 41 {
 			ppanic("y(40) != 41")
 		}
@@ -105,14 +105,14 @@
 
 	{
 		func() { // ERROR "func literal does not escape"
-			y := func(x int) int { // ERROR "can inline main.func13.1" "func literal does not escape"
+			y := func(x int) int { // ERROR "func literal does not escape" "can inline main.func13.1"
 				return x + 2
 			}
-			y, sink = func() (func(int) int, int) { // ERROR "func literal does not escape"
-				return func(x int) int { // ERROR "can inline main.func13.2" "func literal escapes"
+			y, sink = func() (func(int) int, int) { // ERROR "can inline main.func13.2"
+				return func(x int) int { // ERROR "can inline main.func13.2"
 					return x + 1
 				}, 42
-			}()
+			}() // ERROR "inlining call to main.func13.2" "func literal does not escape"
 			if y(40) != 41 {
 				ppanic("y(40) != 41")
 			}
@@ -187,29 +187,29 @@
 
 	{
 		x := 42
-		if z := func(y int) int { // ERROR "func literal does not escape"
-			return func() int { // ERROR "can inline main.func22.1"
+		if z := func(y int) int { // ERROR "can inline main.func22"
+			return func() int { // ERROR "can inline main.func22.1" "can inline main.func30"
 				return x + y
 			}() // ERROR "inlining call to main.func22.1"
-		}(1); z != 43 {
+		}(1); z != 43 { // ERROR "inlining call to main.func22" "inlining call to main.func30"
 			ppanic("z != 43")
 		}
-		if z := func(y int) int { // ERROR "func literal does not escape"
-			return func() int { // ERROR "can inline main.func23.1"
+		if z := func(y int) int { // ERROR "func literal does not escape" "can inline main.func23"
+			return func() int { // ERROR "can inline main.func23.1" "can inline main.func31"
 				return x + y
 			}() // ERROR "inlining call to main.func23.1"
-		}; z(1) != 43 {
+		}; z(1) != 43 { // ERROR "inlining call to main.func23" "inlining call to main.func31"
 			ppanic("z(1) != 43")
 		}
 	}
 
 	{
 		a := 1
-		func() { // ERROR "func literal does not escape"
-			func() { // ERROR "can inline main.func24"
+		func() { // ERROR "can inline main.func24"
+			func() { // ERROR "can inline main.func24" "can inline main.func32"
 				a = 2
 			}() // ERROR "inlining call to main.func24"
-		}()
+		}() // ERROR "inlining call to main.func24" "inlining call to main.func32"
 		if a != 2 {
 			ppanic("a != 2")
 		}
@@ -250,12 +250,12 @@
 		a := 2
 		if r := func(x int) int { // ERROR "func literal does not escape"
 			b := 3
-			return func(y int) int { // ERROR "func literal does not escape"
+			return func(y int) int { // ERROR "can inline main.func27.1"
 				c := 5
-				return func(z int) int { // ERROR "can inline main.func27.1.1"
+				return func(z int) int { // ERROR "can inline main.func27.1.1" "can inline main.func27.2"
 					return a*x + b*y + c*z
 				}(10) // ERROR "inlining call to main.func27.1.1"
-			}(100)
+			}(100) // ERROR "inlining call to main.func27.1" "inlining call to main.func27.2"
 		}(1000); r != 2350 {
 			ppanic("r != 2350")
 		}
@@ -265,15 +265,15 @@
 		a := 2
 		if r := func(x int) int { // ERROR "func literal does not escape"
 			b := 3
-			return func(y int) int { // ERROR "func literal does not escape"
+			return func(y int) int { // ERROR "can inline main.func28.1"
 				c := 5
-				func(z int) { // ERROR "can inline main.func28.1.1"
+				func(z int) { // ERROR "can inline main.func28.1.1" "can inline main.func28.2"
 					a = a * x
 					b = b * y
 					c = c * z
 				}(10) // ERROR "inlining call to main.func28.1.1"
 				return a + c
-			}(100) + b
+			}(100) + b // ERROR "inlining call to main.func28.1" "inlining call to main.func28.2"
 		}(1000); r != 2350 {
 			ppanic("r != 2350")
 		}
@@ -285,5 +285,5 @@
 
 //go:noinline
 func ppanic(s string) { // ERROR "leaking param: s"
-	panic(s)
+	panic(s) // ERROR "s escapes to heap"
 }
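
The reshuffled ERROR annotations in closure3 track gc's -m output as more closure calls become inlinable under this work. The same kind of diagnostics can be observed on a toy program with -gcflags=-m; exact wording and the func numbering vary by compiler version, so treat the comments below as approximate:

// go build -gcflags=-m toy.go
package main

func main() {
	add := func(x int) int { return x + 1 } // -m: "can inline main.func1"
	println(add(41))                        // -m: "inlining call to main.func1"
}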
diff --git a/test/closure5.dir/a.go b/test/closure5.dir/a.go
new file mode 100644
index 0000000..de8082b
--- /dev/null
+++ b/test/closure5.dir/a.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check correctness of various closure corner cases
+// that are expected to be inlined
+
+package a
+
+func f() bool               { return true }
+func G() func() func() bool { return func() func() bool { return f } }
diff --git a/test/closure5.dir/main.go b/test/closure5.dir/main.go
new file mode 100644
index 0000000..ee5dba6
--- /dev/null
+++ b/test/closure5.dir/main.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check correctness of various closure corner cases
+// that are expected to be inlined
+package main
+
+import "a"
+
+func main() {
+	if !a.G()()() {
+		panic("FAIL")
+	}
+}
diff --git a/test/closure5.go b/test/closure5.go
new file mode 100644
index 0000000..a7022b2
--- /dev/null
+++ b/test/closure5.go
@@ -0,0 +1,10 @@
+// compiledir
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check correctness of various closure corner cases
+// that are expected to be inlined
+
+package ignored
diff --git a/test/closure6.go b/test/closure6.go
new file mode 100644
index 0000000..b5592ad
--- /dev/null
+++ b/test/closure6.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Float64Slice []float64
+
+func (a Float64Slice) Search1(x float64) int {
+	f := func(q int) bool { return a[q] >= x }
+	i := 0
+	if !f(3) {
+		i = 5
+	}
+	return i
+}
diff --git a/test/closure7.go b/test/closure7.go
new file mode 100644
index 0000000..823333f
--- /dev/null
+++ b/test/closure7.go
@@ -0,0 +1,28 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func g(f func()) {
+}
+
+// Must have exportable name
+func F() {
+	g(func() {
+		ch := make(chan int)
+		for {
+			select {
+			case <-ch:
+				return
+			default:
+			}
+		}
+	})
+}
+
+func main() {
+	F()
+}
diff --git a/test/codegen/condmove.go b/test/codegen/condmove.go
index f86da34..7579dd1 100644
--- a/test/codegen/condmove.go
+++ b/test/codegen/condmove.go
@@ -31,7 +31,7 @@
 	if x < y {
 		x = -y
 	}
-	// amd64:"CMOVQCS"
+	// amd64:"CMOVQ(HI|CS)"
 	// arm64:"CSEL\t(LO|HI)"
 	// wasm:"Select"
 	return x
@@ -41,7 +41,7 @@
 	if x < y {
 		x = -y
 	}
-	// amd64:"CMOVLCS"
+	// amd64:"CMOVL(HI|CS)"
 	// arm64:"CSEL\t(LO|HI)"
 	// wasm:"Select"
 	return x
@@ -51,7 +51,7 @@
 	if x < y {
 		x = -y
 	}
-	// amd64:"CMOVWCS"
+	// amd64:"CMOVW(HI|CS)"
 	// arm64:"CSEL\t(LO|HI)"
 	// wasm:"Select"
 	return x
diff --git a/test/codegen/spectre.go b/test/codegen/spectre.go
index 3753498..d845da3 100644
--- a/test/codegen/spectre.go
+++ b/test/codegen/spectre.go
@@ -13,12 +13,12 @@
 }
 
 func IndexString(x string, i int) byte {
-	// amd64:`CMOVQCC`
+	// amd64:`CMOVQLS`
 	return x[i]
 }
 
 func IndexSlice(x []float64, i int) float64 {
-	// amd64:`CMOVQCC`
+	// amd64:`CMOVQLS`
 	return x[i]
 }
 
diff --git a/test/codegen/structs.go b/test/codegen/structs.go
index 9eddc5b..c4bcb55 100644
--- a/test/codegen/structs.go
+++ b/test/codegen/structs.go
@@ -18,7 +18,7 @@
 }
 
 func Zero1(t *Z1) { // Issue #18370
-	// amd64:`XORPS\tX., X`,`MOVUPS\tX., \(.*\)`,`MOVQ\t\$0, 16\(.*\)`
+	// amd64:`MOVUPS\tX[0-9]+, \(.*\)`,`MOVQ\t\$0, 16\(.*\)`
 	*t = Z1{}
 }
 
@@ -27,7 +27,7 @@
 }
 
 func Zero2(t *Z2) {
-	// amd64:`XORPS\tX., X`,`MOVUPS\tX., \(.*\)`,`MOVQ\t\$0, 16\(.*\)`
+	// amd64:`MOVUPS\tX[0-9]+, \(.*\)`,`MOVQ\t\$0, 16\(.*\)`
 	// amd64:`.*runtime[.]gcWriteBarrier.*\(SB\)`
 	*t = Z2{}
 }
diff --git a/test/const2.go b/test/const2.go
index 048d0cb..d104a2f 100644
--- a/test/const2.go
+++ b/test/const2.go
@@ -19,3 +19,14 @@
 const LargeC = LargeB * LargeB * LargeB // GC_ERROR "constant multiplication overflow"
 
 const AlsoLargeA = LargeA << 400 << 400 >> 400 >> 400 // GC_ERROR "constant shift overflow"
+
+// Issue #42732.
+
+const a = 1e+500000000
+const b = a * a // ERROR "constant multiplication overflow"
+const c = b * b
+
+const MaxInt512 = (1<<256 - 1) * (1<<256 + 1)
+const _ = MaxInt512 + 1  // ERROR "constant addition overflow"
+const _ = MaxInt512 ^ -1 // ERROR "constant bitwise XOR overflow"
+const _ = ^MaxInt512     // ERROR "constant bitwise complement overflow"
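
The new const2.go cases (issue #42732) make gc report overflow on enormous constant expressions rather than misbehaving. Untyped constant arithmetic is exact up to an implementation limit, so within that limit intermediate values may far exceed any machine type, as this sketch shows:

package main

const huge = 1 << 256 // exact as an untyped constant; far too large for any integer type

func main() {
	// Only the final value must fit the target type; intermediates may be huge.
	println(huge / (1 << 248)) // prints 256, computed exactly at compile time
}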
diff --git a/test/deferfin.go b/test/deferfin.go
index 8037291..1312bbb 100644
--- a/test/deferfin.go
+++ b/test/deferfin.go
@@ -18,12 +18,8 @@
 var sink func()
 
 func main() {
-	// Does not work on 32-bits due to partially conservative GC.
+	// Does not work with gccgo, due to partially conservative GC.
 	// Try to enable when we have fully precise GC.
-	if runtime.GOARCH != "amd64" {
-		return
-	}
-	// Likewise for gccgo.
 	if runtime.Compiler == "gccgo" {
 		return
 	}
@@ -60,4 +56,3 @@
 		panic("not all finalizers are called")
 	}
 }
-
diff --git a/test/escape2.go b/test/escape2.go
index 5c6eb55..b9b723d 100644
--- a/test/escape2.go
+++ b/test/escape2.go
@@ -1547,7 +1547,7 @@
 	case int: // ERROR "moved to heap: x$"
 		return &x
 	}
-	panic(0)
+	panic(0) // ERROR "0 escapes to heap"
 }
 
 // issue 8185 - &result escaping into result
diff --git a/test/escape2n.go b/test/escape2n.go
index 46e58f8..7c8208a 100644
--- a/test/escape2n.go
+++ b/test/escape2n.go
@@ -1547,7 +1547,7 @@
 	case int: // ERROR "moved to heap: x$"
 		return &x
 	}
-	panic(0)
+	panic(0) // ERROR "0 escapes to heap"
 }
 
 // issue 8185 - &result escaping into result
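
escape2.go and escape2n.go now also expect a diagnostic on panic's argument (as does f3 in escape4.go below): panic takes an interface{}, so even a constant operand is boxed and escapes. A minimal reproduction for go build -gcflags=-m; the message wording may differ across versions:

package p

func boom() {
	panic(0) // -m is expected to report that 0 escapes to the heap
}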
diff --git a/test/escape4.go b/test/escape4.go
index a4a9c14..4e50231 100644
--- a/test/escape4.go
+++ b/test/escape4.go
@@ -35,14 +35,14 @@
 func f2() {} // ERROR "can inline f2"
 
 // No inline for recover; panic now allowed to inline.
-func f3() { panic(1) } // ERROR "can inline f3"
+func f3() { panic(1) } // ERROR "can inline f3" "1 escapes to heap"
 func f4() { recover() }
 
 func f5() *byte {
 	type T struct {
 		x [1]byte
 	}
-	t := new(T)    // ERROR "new.T. escapes to heap"
+	t := new(T) // ERROR "new.T. escapes to heap"
 	return &t.x[0]
 }
 
@@ -52,6 +52,6 @@
 			y byte
 		}
 	}
-	t := new(T)   // ERROR "new.T. escapes to heap"
+	t := new(T) // ERROR "new.T. escapes to heap"
 	return &t.x.y
 }
diff --git a/test/escape_param.go b/test/escape_param.go
index 993e914..dc93f68 100644
--- a/test/escape_param.go
+++ b/test/escape_param.go
@@ -212,7 +212,7 @@
 
 // **in -> heap
 func param8(i **int) { // ERROR "i does not escape$"
-	sink = **i // ERROR "\* \(\*i\) escapes to heap"
+	sink = **i // ERROR "\*\(\*i\) escapes to heap"
 }
 
 func caller8() {
@@ -402,7 +402,7 @@
 	var p *int
 	v := &Val{&p} // ERROR "&Val{...} does not escape$"
 	v.param13(&i)
-	sink = **v.p // ERROR "\* \(\*v\.p\) escapes to heap"
+	sink = **v.p // ERROR "\*\(\*v\.p\) escapes to heap"
 }
 
 type Node struct {
diff --git a/test/fixedbugs/bug340.go b/test/fixedbugs/bug340.go
index 8c543c9..542a6ea 100644
--- a/test/fixedbugs/bug340.go
+++ b/test/fixedbugs/bug340.go
@@ -12,6 +12,7 @@
 	var x interface{}
 	switch t := x.(type) {
 	case 0:		// ERROR "type"
-		t.x = 1 // ERROR "type interface \{\}|reference to undefined field or method|interface with no methods"
+		t.x = 1
+		x.x = 1 // ERROR "type interface \{\}|reference to undefined field or method|interface with no methods"
 	}
 }
diff --git a/test/fixedbugs/bug462.go b/test/fixedbugs/bug462.go
index 3df63b0..bae5ee0 100644
--- a/test/fixedbugs/bug462.go
+++ b/test/fixedbugs/bug462.go
@@ -13,7 +13,7 @@
 }
 
 func main() {
-	_ = T {
-		os.File: 1, // ERROR "unknown T? ?field"
+	_ = T{
+		os.File: 1, // ERROR "invalid field name os.File|unknown field"
 	}
 }
diff --git a/test/fixedbugs/issue11362.go b/test/fixedbugs/issue11362.go
index 964e5fd..9492ec1 100644
--- a/test/fixedbugs/issue11362.go
+++ b/test/fixedbugs/issue11362.go
@@ -8,7 +8,7 @@
 
 package main
 
-import _ "unicode//utf8" // GC_ERROR "non-canonical import path .unicode//utf8. \(should be .unicode/utf8.\)" "can't find import: .unicode//utf8."
+import _ "unicode//utf8" // GC_ERROR "non-canonical import path .unicode//utf8. \(should be .unicode/utf8.\)"
 
 func main() {
 }
diff --git a/test/fixedbugs/issue13799.go b/test/fixedbugs/issue13799.go
index fbdd4c3..c8ecfc5 100644
--- a/test/fixedbugs/issue13799.go
+++ b/test/fixedbugs/issue13799.go
@@ -60,7 +60,7 @@
 	}
 
 	if len(m) != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$"
+		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -84,7 +84,7 @@
 	}
 
 	if len(m) != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$"
+		panic(fmt.Sprintf("iter %d: maxI = %d, len(m) = %d", iter, maxI, len(m))) // ERROR "iter escapes to heap$" "len\(m\) escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -110,7 +110,7 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -136,7 +136,7 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -167,7 +167,7 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
 
@@ -185,6 +185,6 @@
 	}
 
 	if *m != maxI {
-		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$"
+		panic(fmt.Sprintf("iter %d: maxI = %d, *m = %d", iter, maxI, *m)) // ERROR "\*m escapes to heap$" "iter escapes to heap$" "maxI escapes to heap$" "... argument does not escape$" "fmt.Sprintf\(.*\) escapes to heap"
 	}
 }
diff --git a/test/fixedbugs/issue15055.go b/test/fixedbugs/issue15055.go
index e58047e..33cf63a 100644
--- a/test/fixedbugs/issue15055.go
+++ b/test/fixedbugs/issue15055.go
@@ -8,10 +8,12 @@
 
 func main() {
 	type name string
-	_ = []byte("abc", "def", 12)    // ERROR "too many arguments to conversion to \[\]byte: \(\[\]byte\)\(.abc., .def., 12\)"
+	_ = []byte("abc", "def", 12)    // ERROR "too many arguments to conversion to \[\]byte: \[\]byte\(.abc., .def., 12\)"
 	_ = string("a", "b", nil)       // ERROR "too many arguments to conversion to string: string\(.a., .b., nil\)"
-	_ = []byte()                    // ERROR "missing argument to conversion to \[\]byte: \(\[\]byte\)\(\)"
+	_ = []byte()                    // ERROR "missing argument to conversion to \[\]byte: \[\]byte\(\)"
 	_ = string()                    // ERROR "missing argument to conversion to string: string\(\)"
+	_ = *int()                      // ERROR "missing argument to conversion to int: int\(\)"
+	_ = (*int)()                    // ERROR "missing argument to conversion to \*int: \(\*int\)\(\)"
 	_ = name("a", 1, 3.3)           // ERROR "too many arguments to conversion to name: name\(.a., 1, 3.3\)"
-	_ = map[string]string(nil, nil) // ERROR "too many arguments to conversion to map\[string\]string: \(map\[string\]string\)\(nil, nil\)"
+	_ = map[string]string(nil, nil) // ERROR "too many arguments to conversion to map\[string\]string: map\[string\]string\(nil, nil\)"
 }
diff --git a/test/fixedbugs/issue20232.go b/test/fixedbugs/issue20232.go
index f91c749..7a0300a 100644
--- a/test/fixedbugs/issue20232.go
+++ b/test/fixedbugs/issue20232.go
@@ -6,6 +6,7 @@
 
 package main
 
-const _ = 6e5518446744 // ERROR "malformed constant: 6e5518446744 \(exponent overflow\)"
+const x = 6e5518446744 // ERROR "malformed constant: 6e5518446744"
+const _ = x * x
 const _ = 1e-1000000000
-const _ = 1e+1000000000 // ERROR "constant too large"
+const _ = 1e+1000000000 // ERROR "malformed constant: 1e\+1000000000"
diff --git a/test/fixedbugs/issue20250.go b/test/fixedbugs/issue20250.go
index c190515..1a513be 100644
--- a/test/fixedbugs/issue20250.go
+++ b/test/fixedbugs/issue20250.go
@@ -1,4 +1,4 @@
-// errorcheck -0 -live -l -d=compilelater
+// errorcheck -0 -live -l
 
 // Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue20415.go b/test/fixedbugs/issue20415.go
index 9e7649f..1d9a745 100644
--- a/test/fixedbugs/issue20415.go
+++ b/test/fixedbugs/issue20415.go
@@ -11,7 +11,7 @@
 // 1
 var f byte
 
-var f interface{} // ERROR "previous declaration at issue20415.go:12|redefinition"
+var f interface{} // ERROR "issue20415.go:12: previous declaration|redefinition"
 
 func _(f int) {
 }
@@ -22,7 +22,7 @@
 func _(g int) {
 }
 
-var g interface{} // ERROR "previous declaration at issue20415.go:20|redefinition"
+var g interface{} // ERROR "issue20415.go:20: previous declaration|redefinition"
 
 // 3
 func _(h int) {
@@ -30,4 +30,4 @@
 
 var h byte
 
-var h interface{} // ERROR "previous declaration at issue20415.go:31|redefinition"
+var h interface{} // ERROR "issue20415.go:31: previous declaration|redefinition"
diff --git a/test/fixedbugs/issue20780.go b/test/fixedbugs/issue20780.go
index 53c4f61..f73e6d1 100644
--- a/test/fixedbugs/issue20780.go
+++ b/test/fixedbugs/issue20780.go
@@ -9,11 +9,17 @@
 
 package main
 
+type Big = [400e6]byte
+
 func f() { // GC_ERROR "stack frame too large"
-	var x [800e6]byte
-	g(x)
-	return
+	// Note: This test relies on the fact that we currently always
+	// spill function results to the stack, even if they're so
+	// large that we would normally heap allocate them. If we ever
+	// improve the backend to spill temporaries to the heap, this
+	// test will probably need updating to find some new way to
+	// construct an overly large stack frame.
+	g(h(), h())
 }
 
-//go:noinline
-func g([800e6]byte) {}
+func g(Big, Big)
+func h() Big
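
The rewritten issue20780.go leans on the behavior its note describes: results return by value and are spilled to the caller's frame, so two 400e6-byte results overflow the stack-frame limit. The by-value copying itself is easy to see at a friendlier scale; the size below is illustrative:

package main

type big [1 << 20]byte // 1 MiB: large enough to matter, small enough to run anywhere

//go:noinline
func produce() (x big) { // the result travels by value
	x[0] = 1
	return
}

func main() {
	a := produce() // each call's result lands as a distinct copy in the caller's frame
	b := produce()
	println(a[0] + b[0]) // 2
}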
diff --git a/test/fixedbugs/issue20780b.go b/test/fixedbugs/issue20780b.go
new file mode 100644
index 0000000..c8bf1f8
--- /dev/null
+++ b/test/fixedbugs/issue20780b.go
@@ -0,0 +1,62 @@
+// +build cgo,linux,amd64
+// run -race
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that CL 281293 doesn't interfere with race detector
+// instrumentation.
+
+package main
+
+import "fmt"
+
+const N = 2e6
+
+type Big = [N]int
+
+var sink interface{}
+
+func main() {
+	g(0, f(0))
+
+	x1 := f(1)
+	sink = &x1
+	g(1, x1)
+	g(7, f(7))
+	g(1, x1)
+
+	x3 := f(3)
+	sink = &x3
+	g(1, x1)
+	g(3, x3)
+
+	h(f(0), x1, f(2), x3, f(4))
+}
+
+//go:noinline
+func f(k int) (x Big) {
+	for i := range x {
+		x[i] = k*N + i
+	}
+	return
+}
+
+//go:noinline
+func g(k int, x Big) {
+	for i := range x {
+		if x[i] != k*N+i {
+			panic(fmt.Sprintf("x%d[%d] = %d", k, i, x[i]))
+		}
+	}
+}
+
+//go:noinline
+func h(x0, x1, x2, x3, x4 Big) {
+	g(0, x0)
+	g(1, x1)
+	g(2, x2)
+	g(3, x3)
+	g(4, x4)
+}
diff --git a/test/fixedbugs/issue22822.go b/test/fixedbugs/issue22822.go
index ea53452..dc86c97 100644
--- a/test/fixedbugs/issue22822.go
+++ b/test/fixedbugs/issue22822.go
@@ -12,5 +12,7 @@
 func F() {
 	slice := []int{1, 2, 3}
 	len := int(2)
-	println(len(slice)) // ERROR "cannot call non-function len .type int., declared at|expected function"
+	println(len(slice)) // ERROR "cannot call non-function len .type int., declared at LINE-1|expected function"
+	const iota = 1
+	println(iota(slice)) // ERROR "cannot call non-function iota .type int., declared at LINE-1|expected function"
 }
diff --git a/test/fixedbugs/issue23017.go b/test/fixedbugs/issue23017.go
new file mode 100644
index 0000000..770c48e
--- /dev/null
+++ b/test/fixedbugs/issue23017.go
@@ -0,0 +1,113 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test assignment order in multiple assignments.
+// See issue #23017
+
+package main
+
+import "fmt"
+
+func main() {}
+
+func init() {
+	var m = map[int]int{}
+	var p *int
+
+	defer func() {
+		recover()
+		check(1, len(m))
+		check(42, m[2])
+	}()
+	m[2], *p = 42, 2
+}
+
+func init() {
+	var m = map[int]int{}
+	p := []int{}
+
+	defer func() {
+		recover()
+		check(1, len(m))
+		check(2, m[2])
+	}()
+	m[2], p[1] = 2, 2
+}
+
+func init() {
+	type P struct{ i int }
+	var m = map[int]int{}
+	var p *P
+
+	defer func() {
+		recover()
+		check(1, len(m))
+		check(3, m[2])
+	}()
+	m[2], p.i = 3, 2
+}
+
+func init() {
+	type T struct{ i int }
+	var x T
+	p := &x
+	p, p.i = new(T), 4
+	check(4, x.i)
+}
+
+func init() {
+	var m map[int]int
+	var a int
+	var p = &a
+
+	defer func() {
+		recover()
+		check(5, *p)
+	}()
+	*p, m[2] = 5, 2
+}
+
+var g int
+
+func init() {
+	var m map[int]int
+	defer func() {
+		recover()
+		check(0, g)
+	}()
+	m[0], g = 1, 2
+}
+
+func init() {
+	type T struct{ x struct{ y int } }
+	var x T
+	p := &x
+	p, p.x.y = new(T), 7
+	check(7, x.x.y)
+	check(0, p.x.y)
+}
+
+func init() {
+	type T *struct{ x struct{ y int } }
+	x := struct{ y int }{0}
+	var q T = &struct{ x struct{ y int } }{x}
+	p := q
+	p, p.x.y = nil, 7
+	check(7, q.x.y)
+}
+
+func init() {
+	x, y := 1, 2
+	x, y = y, x
+	check(2, x)
+	check(1, y)
+}
+
+func check(want, got int) {
+	if want != got {
+		panic(fmt.Sprintf("wanted %d, but got %d", want, got))
+	}
+}
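
issue23017.go pins down the spec's two-phase tuple assignment: operands are evaluated first, then the assignments are carried out left to right, so earlier assignments complete even when a later one panics. The first case in miniature:

package main

func main() {
	m := map[int]int{}
	var p *int
	defer func() {
		recover()
		println(len(m), m[2]) // 1 42: m[2] = 42 happened before *p = 2 panicked
	}()
	m[2], *p = 42, 2 // assignments run left to right; the nil dereference comes second
}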
diff --git a/test/fixedbugs/issue27595.go b/test/fixedbugs/issue27595.go
index af5c7a1..b9328a6 100644
--- a/test/fixedbugs/issue27595.go
+++ b/test/fixedbugs/issue27595.go
@@ -8,7 +8,7 @@
 
 var a = twoResults()       // ERROR "assignment mismatch: 1 variable but twoResults returns 2 values"
 var b, c, d = twoResults() // ERROR "assignment mismatch: 3 variables but twoResults returns 2 values"
-var e, f = oneResult()     // ERROR "assignment mismatch: 2 variables but oneResult returns 1 values"
+var e, f = oneResult()     // ERROR "assignment mismatch: 2 variables but oneResult returns 1 value"
 
 func twoResults() (int, int) {
 	return 1, 2
diff --git a/test/fixedbugs/issue28079b.go b/test/fixedbugs/issue28079b.go
index d1992e1..54c9db9 100644
--- a/test/fixedbugs/issue28079b.go
+++ b/test/fixedbugs/issue28079b.go
@@ -13,5 +13,5 @@
 type T [uintptr(unsafe.Pointer(nil))]int // ERROR "non-constant array bound|array bound is not constant"
 
 func f() {
-	_ = complex(1<<uintptr(unsafe.Pointer(nil)), 0) // GCCGO_ERROR "non-integer type for left operand of shift"
+	_ = complex(1<<uintptr(unsafe.Pointer(nil)), 0) // ERROR "shift of type float64|non-integer type for left operand of shift"
 }
diff --git a/test/fixedbugs/issue30087.go b/test/fixedbugs/issue30087.go
index 3ad9c8c..a8f6202 100644
--- a/test/fixedbugs/issue30087.go
+++ b/test/fixedbugs/issue30087.go
@@ -7,8 +7,8 @@
 package main
 
 func main() {
-	var a, b = 1    // ERROR "assignment mismatch: 2 variables but 1 values|wrong number of initializations"
-	_ = 1, 2        // ERROR "assignment mismatch: 1 variables but 2 values|number of variables does not match"
-	c, d := 1       // ERROR "assignment mismatch: 2 variables but 1 values|wrong number of initializations"
+	var a, b = 1    // ERROR "assignment mismatch: 2 variables but 1 value|wrong number of initializations"
+	_ = 1, 2        // ERROR "assignment mismatch: 1 variable but 2 values|number of variables does not match"
+	c, d := 1       // ERROR "assignment mismatch: 2 variables but 1 value|wrong number of initializations"
 	e, f := 1, 2, 3 // ERROR "assignment mismatch: 2 variables but 3 values|wrong number of initializations"
 }
diff --git a/test/fixedbugs/issue31053.dir/main.go b/test/fixedbugs/issue31053.dir/main.go
index 895c262..3bc75d1 100644
--- a/test/fixedbugs/issue31053.dir/main.go
+++ b/test/fixedbugs/issue31053.dir/main.go
@@ -35,8 +35,8 @@
 	_ = f.Exported
 	_ = f.exported    // ERROR "f.exported undefined .type f1.Foo has no field or method exported, but does have Exported."
 	_ = f.Unexported  // ERROR "f.Unexported undefined .type f1.Foo has no field or method Unexported."
-	_ = f.unexported  // ERROR "f.unexported undefined .cannot refer to unexported field or method f1..\*Foo..unexported."
-	f.unexported = 10 // ERROR "f.unexported undefined .cannot refer to unexported field or method f1..\*Foo..unexported."
-	f.unexported()    // ERROR "f.unexported undefined .cannot refer to unexported field or method f1..\*Foo..unexported."
+	_ = f.unexported  // ERROR "f.unexported undefined .cannot refer to unexported field or method unexported."
+	f.unexported = 10 // ERROR "f.unexported undefined .cannot refer to unexported field or method unexported."
+	f.unexported()    // ERROR "f.unexported undefined .cannot refer to unexported field or method unexported."
 	_ = f.hook        // ERROR "f.hook undefined .cannot refer to unexported field or method hook."
 }
diff --git a/test/fixedbugs/issue43164.dir/a.go b/test/fixedbugs/issue43164.dir/a.go
new file mode 100644
index 0000000..fa10e85
--- /dev/null
+++ b/test/fixedbugs/issue43164.dir/a.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "strings"
+
+var _ = Index // use strings
+
+type t struct{ Index int }
+
+var _ = t{Index: 0}
diff --git a/test/fixedbugs/issue43164.dir/b.go b/test/fixedbugs/issue43164.dir/b.go
new file mode 100644
index 0000000..b025927
--- /dev/null
+++ b/test/fixedbugs/issue43164.dir/b.go
@@ -0,0 +1,11 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "bytes"
+
+var _ = Index // use bytes
+
+var _ = t{Index: 0}
diff --git a/test/fixedbugs/issue43164.go b/test/fixedbugs/issue43164.go
new file mode 100644
index 0000000..f21d1d5
--- /dev/null
+++ b/test/fixedbugs/issue43164.go
@@ -0,0 +1,7 @@
+// compiledir
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/fixedbugs/issue43167.go b/test/fixedbugs/issue43167.go
new file mode 100644
index 0000000..1d1b69a
--- /dev/null
+++ b/test/fixedbugs/issue43167.go
@@ -0,0 +1,13 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "bytes"
+
+var _ Buffer // use package bytes
+
+var Index byte // ERROR "Index redeclared.*\n\tLINE-4: previous declaration during import .bytes.|already declared|redefinition"
diff --git a/test/fixedbugs/issue43384.go b/test/fixedbugs/issue43384.go
new file mode 100644
index 0000000..1bd793b
--- /dev/null
+++ b/test/fixedbugs/issue43384.go
@@ -0,0 +1,124 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.  Use of this
+// source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+package p
+
+type T int
+
+func (T) Mv()  {}
+func (*T) Mp() {}
+
+type P1 struct{ T }
+type P2 struct{ *T }
+type P3 *struct{ T }
+type P4 *struct{ *T }
+
+func _() {
+	{
+		var p P1
+		p.Mv()
+		(&p).Mv()
+		(*&p).Mv()
+		p.Mp()
+		(&p).Mp()
+		(*&p).Mp()
+	}
+	{
+		var p P2
+		p.Mv()
+		(&p).Mv()
+		(*&p).Mv()
+		p.Mp()
+		(&p).Mp()
+		(*&p).Mp()
+	}
+	{
+		var p P3
+		p.Mv()     // ERROR "undefined"
+		(&p).Mv()  // ERROR "undefined"
+		(*&p).Mv() // ERROR "undefined"
+		(**&p).Mv()
+		(*p).Mv()
+		(&*p).Mv()
+		p.Mp()     // ERROR "undefined"
+		(&p).Mp()  // ERROR "undefined"
+		(*&p).Mp() // ERROR "undefined"
+		(**&p).Mp()
+		(*p).Mp()
+		(&*p).Mp()
+	}
+	{
+		var p P4
+		p.Mv()     // ERROR "undefined"
+		(&p).Mv()  // ERROR "undefined"
+		(*&p).Mv() // ERROR "undefined"
+		(**&p).Mv()
+		(*p).Mv()
+		(&*p).Mv()
+		p.Mp()     // ERROR "undefined"
+		(&p).Mp()  // ERROR "undefined"
+		(*&p).Mp() // ERROR "undefined"
+		(**&p).Mp()
+		(*p).Mp()
+		(&*p).Mp()
+	}
+}
+
+func _() {
+	type P5 struct{ T }
+	type P6 struct{ *T }
+	type P7 *struct{ T }
+	type P8 *struct{ *T }
+
+	{
+		var p P5
+		p.Mv()
+		(&p).Mv()
+		(*&p).Mv()
+		p.Mp()
+		(&p).Mp()
+		(*&p).Mp()
+	}
+	{
+		var p P6
+		p.Mv()
+		(&p).Mv()
+		(*&p).Mv()
+		p.Mp()
+		(&p).Mp()
+		(*&p).Mp()
+	}
+	{
+		var p P7
+		p.Mv()     // ERROR "undefined"
+		(&p).Mv()  // ERROR "undefined"
+		(*&p).Mv() // ERROR "undefined"
+		(**&p).Mv()
+		(*p).Mv()
+		(&*p).Mv()
+		p.Mp()     // ERROR "undefined"
+		(&p).Mp()  // ERROR "undefined"
+		(*&p).Mp() // ERROR "undefined"
+		(**&p).Mp()
+		(*p).Mp()
+		(&*p).Mp()
+	}
+	{
+		var p P8
+		p.Mv()     // ERROR "undefined"
+		(&p).Mv()  // ERROR "undefined"
+		(*&p).Mv() // ERROR "undefined"
+		(**&p).Mv()
+		(*p).Mv()
+		(&*p).Mv()
+		p.Mp()     // ERROR "undefined"
+		(&p).Mp()  // ERROR "undefined"
+		(*&p).Mp() // ERROR "undefined"
+		(**&p).Mp()
+		(*p).Mp()
+		(&*p).Mp()
+	}
+}
diff --git a/test/fixedbugs/issue43428.go b/test/fixedbugs/issue43428.go
new file mode 100644
index 0000000..773a3f3
--- /dev/null
+++ b/test/fixedbugs/issue43428.go
@@ -0,0 +1,25 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "time"
+
+type T int
+
+func (T) Mv()  {}
+func (*T) Mp() {}
+
+var _ = []int{
+	T.Mv,    // ERROR "cannot use T\.Mv|incompatible type"
+	(*T).Mv, // ERROR "cannot use \(\*T\)\.Mv|incompatible type"
+	(*T).Mp, // ERROR "cannot use \(\*T\)\.Mp|incompatible type"
+
+	time.Time.GobEncode,    // ERROR "cannot use time\.Time\.GobEncode|incompatible type"
+	(*time.Time).GobEncode, // ERROR "cannot use \(\*time\.Time\)\.GobEncode|incompatible type"
+	(*time.Time).GobDecode, // ERROR "cannot use \(\*time\.Time\)\.GobDecode|incompatible type"
+
+}
diff --git a/test/fixedbugs/issue43444.go b/test/fixedbugs/issue43444.go
new file mode 100644
index 0000000..c430e1b
--- /dev/null
+++ b/test/fixedbugs/issue43444.go
@@ -0,0 +1,28 @@
+// run
+
+package main
+
+var sp = ""
+
+func f(name string, _ ...interface{}) int {
+	print(sp, name)
+	sp = " "
+	return 0
+}
+
+var a = f("a", x)
+var b = f("b", y)
+var c = f("c", z)
+var d = func() int {
+	if false {
+		_ = z
+	}
+	return f("d")
+}()
+var e = f("e")
+
+var x int
+var y int = 42
+var z int = func() int { return 42 }()
+
+func main() { println() }
diff --git a/test/fixedbugs/issue43444.out b/test/fixedbugs/issue43444.out
new file mode 100644
index 0000000..22d6a0d
--- /dev/null
+++ b/test/fixedbugs/issue43444.out
@@ -0,0 +1 @@
+e a b c d
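
issue43444 exercises package-level initialization order: among variables whose dependencies are ready, initialization proceeds in declaration order, and a reference inside a function-literal body counts as a dependency even when unreachable (hence d waits for z, while dependency-free e runs first). The core ordering rule in a smaller program:

package main

import "fmt"

var a = step("a")      // no dependencies: initialized first
var b = step("b:" + c) // depends on c, so it waits for c
var c = step("c")

func step(s string) string { fmt.Print(s, " "); return s }

func main() { fmt.Println() } // prints: a c b:c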
diff --git a/test/fixedbugs/issue43479.dir/a.go b/test/fixedbugs/issue43479.dir/a.go
new file mode 100644
index 0000000..ed3e6a5
--- /dev/null
+++ b/test/fixedbugs/issue43479.dir/a.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Here struct{ stuff int }
+type Info struct{ Dir string }
+
+func New() Here { return Here{} }
+func (h Here) Dir(p string) (Info, error)
+
+type I interface{ M(x string) }
+
+type T = struct {
+	Here
+	I
+}
+
+var X T
+
+var A = (*T).Dir
+var B = T.Dir
+var C = X.Dir
+var D = (*T).M
+var E = T.M
+var F = X.M
diff --git a/test/fixedbugs/issue43479.dir/b.go b/test/fixedbugs/issue43479.dir/b.go
new file mode 100644
index 0000000..02d1690
--- /dev/null
+++ b/test/fixedbugs/issue43479.dir/b.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+var Here = a.New()
+var Dir = Here.Dir
+
+type T = struct {
+	a.Here
+	a.I
+}
+
+var X T
+
+// Test exporting the type of method values for anonymous structs with
+// promoted methods.
+var A = a.A
+var B = a.B
+var C = a.C
+var D = a.D
+var E = a.E
+var F = a.F
+var G = (*a.T).Dir
+var H = a.T.Dir
+var I = a.X.Dir
+var J = (*a.T).M
+var K = a.T.M
+var L = a.X.M
+var M = (*T).Dir
+var N = T.Dir
+var O = X.Dir
+var P = (*T).M
+var Q = T.M
+var R = X.M
diff --git a/test/fixedbugs/issue43479.go b/test/fixedbugs/issue43479.go
new file mode 100644
index 0000000..f21d1d5
--- /dev/null
+++ b/test/fixedbugs/issue43479.go
@@ -0,0 +1,7 @@
+// compiledir
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/fixedbugs/issue43480.go b/test/fixedbugs/issue43480.go
new file mode 100644
index 0000000..d98ad3a
--- /dev/null
+++ b/test/fixedbugs/issue43480.go
@@ -0,0 +1,33 @@
+// run
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue #43480: ICE on large uint64 constants in switch cases.
+
+package main
+
+func isPow10(x uint64) bool {
+	switch x {
+	case 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19:
+		return true
+	}
+	return false
+}
+
+func main() {
+	var x uint64 = 1
+
+	for {
+		if !isPow10(x) || isPow10(x-1) || isPow10(x+1) {
+			panic(x)
+		}
+		next := x * 10
+		if next/10 != x {
+			break // overflow
+		}
+		x = next
+	}
+}
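
The guard in main's loop above (next/10 != x) is the standard division-based
overflow check for unsigned multiplication. As a standalone sketch, separate
from the patch and with a helper name of my own choosing:

	package main

	// mulOverflows reports whether a*b wraps around uint64, using the same
	// identity the test relies on: if the product is exact, dividing it by
	// one factor recovers the other.
	func mulOverflows(a, b uint64) bool {
		if a == 0 || b == 0 {
			return false
		}
		return a*b/b != a
	}

	func main() {
		if !mulOverflows(1e19, 10) || mulOverflows(1e18, 10) {
			panic("unexpected overflow behavior")
		}
	}
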
diff --git a/test/fixedbugs/issue43633.dir/a.go b/test/fixedbugs/issue43633.dir/a.go
new file mode 100644
index 0000000..946a37e
--- /dev/null
+++ b/test/fixedbugs/issue43633.dir/a.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func F() bool {
+	{
+		x := false
+		_ = x
+	}
+	if false {
+		_ = func(x bool) {}
+	}
+	x := true
+	return x
+}
+
+func G() func() bool {
+	x := true
+	return func() bool {
+		{
+			x := false
+			_ = x
+		}
+		return x
+	}
+}
diff --git a/test/fixedbugs/issue43633.dir/main.go b/test/fixedbugs/issue43633.dir/main.go
new file mode 100644
index 0000000..320e000
--- /dev/null
+++ b/test/fixedbugs/issue43633.dir/main.go
@@ -0,0 +1,18 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "./a"
+
+var g = a.G()
+
+func main() {
+	if !a.F() {
+		panic("FAIL")
+	}
+	if !g() {
+		panic("FAIL")
+	}
+}
diff --git a/test/fixedbugs/issue43633.go b/test/fixedbugs/issue43633.go
new file mode 100644
index 0000000..40df49f
--- /dev/null
+++ b/test/fixedbugs/issue43633.go
@@ -0,0 +1,7 @@
+// rundir
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/fixedbugs/issue43677.go b/test/fixedbugs/issue43677.go
new file mode 100644
index 0000000..1a68c8b
--- /dev/null
+++ b/test/fixedbugs/issue43677.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue #43677: ICE during compilation of dynamic initializers for
+// composite blank variables.
+
+package p
+
+func f() *int
+
+var _ = [2]*int{nil, f()}
+
+var _ = struct{ x, y *int }{nil, f()}
+
+var _ interface{} = f()
diff --git a/test/fixedbugs/issue43701.go b/test/fixedbugs/issue43701.go
new file mode 100644
index 0000000..6e16180
--- /dev/null
+++ b/test/fixedbugs/issue43701.go
@@ -0,0 +1,18 @@
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f() {
+	var st struct {
+		s string
+		i int16
+	}
+	_ = func() {
+		var m map[int16]int
+		m[st.i] = 0
+	}
+}
diff --git a/test/fixedbugs/issue43762.go b/test/fixedbugs/issue43762.go
new file mode 100644
index 0000000..4544b6e
--- /dev/null
+++ b/test/fixedbugs/issue43762.go
@@ -0,0 +1,11 @@
+// errorcheck
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = true == '\\' // ERROR "invalid operation: true == '\\\\'"
+var _ = true == '\'' // ERROR "invalid operation: true == '\\''"
+var _ = true == '\n' // ERROR "invalid operation: true == '\\n'"
diff --git a/test/fixedbugs/issue43835.go b/test/fixedbugs/issue43835.go
index 449eb72..29a5194 100644
--- a/test/fixedbugs/issue43835.go
+++ b/test/fixedbugs/issue43835.go
@@ -13,6 +13,9 @@
 	if bad, _ := g(); bad {
 		panic("FAIL")
 	}
+	if bad, _ := h(); bad {
+		panic("FAIL")
+	}
 }
 
 func f() (bad bool) {
@@ -31,3 +34,12 @@
 	var p *int
 	return true, *p
 }
+
+
+func h() (_ bool, _ int) {
+	defer func() {
+		recover()
+	}()
+	var p *int
+	return true, *p
+}
diff --git a/test/fixedbugs/issue43962.dir/a.go b/test/fixedbugs/issue43962.dir/a.go
new file mode 100644
index 0000000..168b206
--- /dev/null
+++ b/test/fixedbugs/issue43962.dir/a.go
@@ -0,0 +1,5 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package init
diff --git a/test/fixedbugs/issue43962.dir/b.go b/test/fixedbugs/issue43962.dir/b.go
new file mode 100644
index 0000000..f55fea1
--- /dev/null
+++ b/test/fixedbugs/issue43962.dir/b.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a" // ERROR "cannot import package as init"
diff --git a/test/fixedbugs/issue43962.go b/test/fixedbugs/issue43962.go
new file mode 100644
index 0000000..dca4d07
--- /dev/null
+++ b/test/fixedbugs/issue43962.go
@@ -0,0 +1,9 @@
+// errorcheckdir
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 43962: Importing a package called "init" is an error.
+
+package ignored
diff --git a/test/fixedbugs/issue5493.go b/test/fixedbugs/issue5493.go
index 2ee0398..8f771bc 100644
--- a/test/fixedbugs/issue5493.go
+++ b/test/fixedbugs/issue5493.go
@@ -14,6 +14,7 @@
 )
 
 const N = 10
+
 var count int64
 
 func run() error {
@@ -31,10 +32,9 @@
 }
 
 func main() {
-	// Does not work on 32-bits, or with gccgo, due to partially
-	// conservative GC.
+	// Does not work with gccgo, due to partially conservative GC.
 	// Try to enable when we have fully precise GC.
-	if runtime.GOARCH != "amd64" || runtime.Compiler == "gccgo" {
+	if runtime.Compiler == "gccgo" {
 		return
 	}
 	count = N
@@ -56,4 +56,3 @@
 		panic("not all finalizers are called")
 	}
 }
-
diff --git a/test/fixedbugs/issue6428.go b/test/fixedbugs/issue6428.go
new file mode 100644
index 0000000..c3f7b20
--- /dev/null
+++ b/test/fixedbugs/issue6428.go
@@ -0,0 +1,15 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import . "testing" // ERROR "imported and not used"
+
+type S struct {
+	T int
+}
+
+var _ = S{T: 0}
diff --git a/test/fixedbugs/issue7740.go b/test/fixedbugs/issue7740.go
index 8f1afe8..6bc6249 100644
--- a/test/fixedbugs/issue7740.go
+++ b/test/fixedbugs/issue7740.go
@@ -21,7 +21,7 @@
 	var prec float64
 	switch runtime.Compiler {
 	case "gc":
-		prec = 512
+		prec = math.Inf(1) // exact precision using rational arithmetic
 	case "gccgo":
 		prec = 256
 	default:
diff --git a/test/fixedbugs/issue7921.go b/test/fixedbugs/issue7921.go
index 5dce557..65be4b5 100644
--- a/test/fixedbugs/issue7921.go
+++ b/test/fixedbugs/issue7921.go
@@ -41,7 +41,7 @@
 
 func bufferNoEscape4() []byte {
 	var b bytes.Buffer
-	b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m·3\]$" "inlining call to bytes.\(\*Buffer\).Grow$"
+	b.Grow(64) // ERROR "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m\]$" "inlining call to bytes.\(\*Buffer\).Grow$" "string\(.*\) escapes to heap"
 	useBuffer(&b)
 	return b.Bytes() // ERROR "inlining call to bytes.\(\*Buffer\).Bytes$"
 }
diff --git a/test/float_lit3.go b/test/float_lit3.go
index c4d1aa5..850d02c 100644
--- a/test/float_lit3.go
+++ b/test/float_lit3.go
@@ -37,12 +37,11 @@
 
 	// If the compiler's internal floating point representation
 	// is shorter than 1024 bits, it cannot distinguish max64+ulp64/2-1 and max64+ulp64/2.
-	// gc uses fewer than 1024 bits, so allow it to print the overflow error for the -1 case.
 	float64(max64 + ulp64/2 - two1024/two256), // ok
-	float64(max64 + ulp64/2 - 1),              // GC_ERROR "constant 1\.79769e\+308 overflows float64"
+	float64(max64 + ulp64/2 - 1),              // ok
 	float64(max64 + ulp64/2),                  // ERROR "constant 1\.79769e\+308 overflows float64"
 
 	float64(-max64 - ulp64/2 + two1024/two256), // ok
-	float64(-max64 - ulp64/2 + 1),              // GC_ERROR "constant -1\.79769e\+308 overflows float64"
+	float64(-max64 - ulp64/2 + 1),              // ok
 	float64(-max64 - ulp64/2),                  // ERROR "constant -1\.79769e\+308 overflows float64"
 }
diff --git a/test/inline.go b/test/inline.go
index d754f06..37965c0 100644
--- a/test/inline.go
+++ b/test/inline.go
@@ -58,7 +58,7 @@
 var somethingWrong error
 
 // local closures can be inlined
-func l(x, y int) (int, int, error) {
+func l(x, y int) (int, int, error) { // ERROR "can inline l"
 	e := func(err error) (int, int, error) { // ERROR "can inline l.func1" "func literal does not escape" "leaking param: err to result"
 		return 0, 0, err
 	}
@@ -90,19 +90,19 @@
 // make sure assignment inside closure is detected
 func o() int {
 	foo := func() int { return 1 } // ERROR "can inline o.func1" "func literal does not escape"
-	func(x int) {                  // ERROR "func literal does not escape"
+	func(x int) {                  // ERROR "can inline o.func2"
 		if x > 10 {
-			foo = func() int { return 2 } // ERROR "can inline o.func2" "func literal escapes"
+			foo = func() int { return 2 } // ERROR "can inline o.func2"
 		}
-	}(11)
+	}(11) // ERROR "func literal does not escape" "inlining call to o.func2"
 	return foo()
 }
 
-func p() int {
+func p() int { // ERROR "can inline p"
 	return func() int { return 42 }() // ERROR "can inline p.func1" "inlining call to p.func1"
 }
 
-func q(x int) int {
+func q(x int) int { // ERROR "can inline q"
 	foo := func() int { return x * 2 } // ERROR "can inline q.func1" "func literal does not escape"
 	return foo()                       // ERROR "inlining call to q.func1"
 }
@@ -111,15 +111,15 @@
 	foo := func(x int) int { // ERROR "can inline r.func1" "func literal does not escape"
 		return x + z
 	}
-	bar := func(x int) int { // ERROR "func literal does not escape"
-		return x + func(y int) int { // ERROR "can inline r.func2.1"
+	bar := func(x int) int { // ERROR "func literal does not escape" "can inline r.func2"
+		return x + func(y int) int { // ERROR "can inline r.func2.1" "can inline r.func3"
 			return 2*y + x*z
 		}(x) // ERROR "inlining call to r.func2.1"
 	}
-	return foo(42) + bar(42) // ERROR "inlining call to r.func1"
+	return foo(42) + bar(42) // ERROR "inlining call to r.func1" "inlining call to r.func2" "inlining call to r.func3"
 }
 
-func s0(x int) int {
+func s0(x int) int { // ERROR "can inline s0"
 	foo := func() { // ERROR "can inline s0.func1" "func literal does not escape"
 		x = x + 1
 	}
@@ -127,7 +127,7 @@
 	return x
 }
 
-func s1(x int) int {
+func s1(x int) int { // ERROR "can inline s1"
 	foo := func() int { // ERROR "can inline s1.func1" "func literal does not escape"
 		return x
 	}
diff --git a/test/interface/explicit.go b/test/interface/explicit.go
index 3f9451e..b705b97 100644
--- a/test/interface/explicit.go
+++ b/test/interface/explicit.go
@@ -100,6 +100,7 @@
 func (t *T2) M() {}
 func (t *T2) _() {}
 
-// Check that nothing satisfies an interface with blank methods.
-var b1 B1 = &T2{} // ERROR "incompatible|missing _ method"
-var b2 B2 = &T2{} // ERROR "incompatible|missing _ method"
+// The invalid blank interface method was already reported above;
+// there is no need to also report that it is not implemented.
+var b1 B1 = &T2{}
+var b2 B2 = &T2{}
diff --git a/test/linkname2.go b/test/linkname2.go
new file mode 100644
index 0000000..43e66a5
--- /dev/null
+++ b/test/linkname2.go
@@ -0,0 +1,30 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests that errors are reported for misuse of linkname.
+package p
+
+import _ "unsafe"
+
+type t int
+
+var x, y int
+
+//go:linkname x ok
+
+// ERROR "//go:linkname requires linkname argument or -p compiler flag"
+// BAD: want error "//go:linkname must refer to declared function or variable"
+// BAD: want error "//go:linkname must refer to declared function or variable"
+// ERROR "duplicate //go:linkname for x"
+
+// The two BAD lines are just waiting for #42938 before we can
+// re-enable the errors.
+
+//line linkname2.go:18
+//go:linkname y
+//go:linkname nonexist nonexist
+//go:linkname t notvarfunc
+//go:linkname x duplicate
diff --git a/test/live.go b/test/live.go
index 3df7ab0..d52ce7f 100644
--- a/test/live.go
+++ b/test/live.go
@@ -718,5 +718,5 @@
 	}
 	ret := T{}
 	ret.s[0] = f()
-	return ret // ERROR "stack object .autotmp_5 T"
+	return ret // ERROR "stack object .autotmp_[0-9]+ T"
 }
diff --git a/test/mainsig.go b/test/mainsig.go
new file mode 100644
index 0000000..d006d9c
--- /dev/null
+++ b/test/mainsig.go
@@ -0,0 +1,13 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main(int)  {}           // ERROR "func main must have no arguments and no return values"
+func main() int { return 1 } // ERROR "func main must have no arguments and no return values" "main redeclared in this block"
+
+func init(int)  {}           // ERROR "func init must have no arguments and no return values"
+func init() int { return 1 } // ERROR "func init must have no arguments and no return values"
diff --git a/test/nilptr3.go b/test/nilptr3.go
index e0f2ed9..3345cfa 100644
--- a/test/nilptr3.go
+++ b/test/nilptr3.go
@@ -214,14 +214,6 @@
 	return p[5] // ERROR "removed nil check"
 }
 
-// make sure not to do nil check for access of PAUTOHEAP
-//go:noinline
-func (p *Struct) m() {}
-func c1() {
-	var x Struct
-	func() { x.m() }() // ERROR "removed nil check"
-}
-
 type SS struct {
 	x byte
 }
diff --git a/test/nosplit.go b/test/nosplit.go
index faa7b8c..8a3fa9b 100644
--- a/test/nosplit.go
+++ b/test/nosplit.go
@@ -353,7 +353,14 @@
 			log.Fatal(err)
 		}
 
-		cmd := exec.Command("go", "build")
+		// Turn off ABI0 wrapper generation for now. The problem is that
+		// in these test cases main.main is an assembly routine, so calls
+		// to it must go through an ABI wrapper. The wrapper consumes some
+		// stack space, which throws off the numbers.
+		workaround := "-gcflags=-abiwrap=0"
+
+		cmd := exec.Command("go", "build", workaround)
 		cmd.Dir = dir
 		output, err := cmd.CombinedOutput()
 		if err == nil {
diff --git a/test/reorder.go b/test/reorder.go
index 3a87d02..57892f8 100644
--- a/test/reorder.go
+++ b/test/reorder.go
@@ -20,6 +20,8 @@
 	p7()
 	p8()
 	p9()
+	p10()
+	p11()
 }
 
 var gx []int
@@ -149,3 +151,17 @@
 		panic("failed")
 	}
 }
+
+//go:noinline
+func fp() (*int, int) { return nil, 42 }
+
+func p10() {
+	p := new(int)
+	p, *p = fp()
+}
+
+func p11() {
+	var i interface{}
+	p := new(bool)
+	p, *p = i.(*bool)
+}
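
The new p10 and p11 cases lean on the Go spec's two-phase assignment rule:
pointer indirections on the left-hand side and all right-hand operands are
evaluated first, and only then are the assignments carried out left to right.
A minimal self-contained illustration of the same rule, not part of the test:

	package main

	func main() {
		p := new(int)
		q := p // keep a second reference to the original cell
		// *p below is resolved to the cell q also points at before p
		// itself is overwritten with nil in the same statement.
		p, *p = nil, 42
		if p != nil || *q != 42 {
			panic("unexpected assignment order")
		}
	}
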
diff --git a/test/run.go b/test/run.go
index 624f223..116f983 100644
--- a/test/run.go
+++ b/test/run.go
@@ -59,7 +59,7 @@
 
 	// dirs are the directories to look for *.go files in.
 	// TODO(bradfitz): just use all directories?
-	dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime"}
+	dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime", "abi"}
 
 	// ratec controls the max number of tests running at a time.
 	ratec chan bool
@@ -491,7 +491,7 @@
 	// Execution recipe stops at first blank line.
 	pos := strings.Index(t.src, "\n\n")
 	if pos == -1 {
-		t.err = errors.New("double newline not found")
+		t.err = fmt.Errorf("double newline ending execution recipe not found in %s", t.goFileName())
 		return
 	}
 	action := t.src[:pos]
@@ -868,9 +868,7 @@
 					t.err = err
 					return
 				}
-				if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
-					t.err = fmt.Errorf("incorrect output\n%s", out)
-				}
+				t.checkExpectedOutput(out)
 			}
 		}
 
@@ -910,9 +908,7 @@
 			t.err = err
 			return
 		}
-		if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
-			t.err = fmt.Errorf("incorrect output\n%s", out)
-		}
+		t.checkExpectedOutput(out)
 
 	case "build":
 		// Build Go file.
@@ -997,9 +993,7 @@
 				t.err = err
 				break
 			}
-			if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
-				t.err = fmt.Errorf("incorrect output\n%s", out)
-			}
+			t.checkExpectedOutput(out)
 		}
 
 	case "buildrun":
@@ -1025,9 +1019,7 @@
 			return
 		}
 
-		if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
-			t.err = fmt.Errorf("incorrect output\n%s", out)
-		}
+		t.checkExpectedOutput(out)
 
 	case "run":
 		// Run Go file if no special go command flags are provided;
@@ -1070,9 +1062,7 @@
 			t.err = err
 			return
 		}
-		if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
-			t.err = fmt.Errorf("incorrect output\n%s", out)
-		}
+		t.checkExpectedOutput(out)
 
 	case "runoutput":
 		// Run Go file and write its output into temporary Go file.
@@ -1107,9 +1097,7 @@
 			t.err = err
 			return
 		}
-		if string(out) != t.expectedOutput() {
-			t.err = fmt.Errorf("incorrect output\n%s", out)
-		}
+		t.checkExpectedOutput(out)
 
 	case "errorcheckoutput":
 		// Run Go file and write its output into temporary Go file.
@@ -1183,12 +1171,24 @@
 	}
 }
 
-func (t *test) expectedOutput() string {
+// checkExpectedOutput compares the output from compiling and/or running with the
+// contents of the corresponding reference output file, if any (the test file's
+// ".go" suffix replaced by ".out"). If they don't match, fail with an informative message.
+func (t *test) checkExpectedOutput(gotBytes []byte) {
+	got := string(gotBytes)
 	filename := filepath.Join(t.dir, t.gofile)
 	filename = filename[:len(filename)-len(".go")]
 	filename += ".out"
-	b, _ := ioutil.ReadFile(filename)
-	return string(b)
+	b, err := ioutil.ReadFile(filename)
+	// The file is allowed to be missing (err != nil), in which case the output should be empty.
+	got = strings.Replace(got, "\r\n", "\n", -1)
+	if got != string(b) {
+		if err == nil {
+			t.err = fmt.Errorf("output does not match expected in %s. Instead saw\n%s", filename, got)
+		} else {
+			t.err = fmt.Errorf("output should be empty when (optional) expected-output file %s is not present. Instead saw\n%s", filename, got)
+		}
+	}
 }
 
 func splitOutput(out string, wantAuto bool) []string {
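
For reference, the behavior the new checkExpectedOutput encodes, written as a
self-contained sketch (the helper name and signature here are mine, not the
test driver's): CRLF is normalized before comparing, and a missing reference
file simply means the output must be empty.

	package main

	import (
		"fmt"
		"io/ioutil"
		"strings"
	)

	func expectOutput(got []byte, outFile string) error {
		// ReadFile failing is fine: the reference file is optional, and a
		// nil slice compares equal to empty output.
		want, err := ioutil.ReadFile(outFile)
		g := strings.Replace(string(got), "\r\n", "\n", -1)
		if g == string(want) {
			return nil
		}
		if err == nil {
			return fmt.Errorf("output does not match %s:\n%s", outFile, g)
		}
		return fmt.Errorf("output should be empty when %s is absent:\n%s", outFile, g)
	}

	func main() {
		fmt.Println(expectOutput([]byte("hi\r\n"), "missing.out"))
	}
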
diff --git a/test/used.go b/test/used.go
new file mode 100644
index 0000000..76f3fc9
--- /dev/null
+++ b/test/used.go
@@ -0,0 +1,144 @@
+// errorcheck
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+const C = 1
+
+var x, x1, x2 int
+var b bool
+var s string
+var c chan int
+var cp complex128
+var slice []int
+var array [2]int
+var bytes []byte
+var runes []rune
+var r rune
+
+func f0()            {}
+func f1() int        { return 1 }
+func f2() (int, int) { return 1, 1 }
+
+type T struct{ X int }
+
+func (T) M1() int { return 1 }
+func (T) M0()     {}
+func (T) M()      {}
+
+var t T
+var tp *T
+
+type I interface{ M() }
+
+var i I
+
+var m map[int]int
+
+func _() {
+	// Note: if the next line changes to x, the error silences the x+x etc. below!
+	x1 // ERROR "x1 evaluated but not used"
+
+	nil                    // ERROR "nil evaluated but not used"
+	C                      // ERROR  "C evaluated but not used"
+	1                      // ERROR "1 evaluated but not used"
+	x + x                  // ERROR "x \+ x evaluated but not used"
+	x - x                  // ERROR "x - x evaluated but not used"
+	x | x                  // ERROR "x \| x evaluated but not used"
+	"a" + s                // ERROR ".a. \+ s evaluated but not used"
+	&x                     // ERROR "&x evaluated but not used"
+	b && b                 // ERROR "b && b evaluated but not used"
+	append(slice, 1)       // ERROR "append\(slice, 1\) evaluated but not used"
+	string(bytes)          // ERROR "string\(bytes\) evaluated but not used"
+	string(runes)          // ERROR "string\(runes\) evaluated but not used"
+	f0()                   // ok
+	f1()                   // ok
+	f2()                   // ok
+	_ = f0()               // ERROR "f0\(\) used as value"
+	_ = f1()               // ok
+	_, _ = f2()            // ok
+	_ = f2()               // ERROR "assignment mismatch: 1 variable but f2 returns 2 values"
+	_ = f1(), 0            // ERROR "assignment mismatch: 1 variable but 2 values"
+	T.M0                   // ERROR "T.M0 evaluated but not used"
+	t.M0                   // ERROR "t.M0 evaluated but not used"
+	cap                    // ERROR "use of builtin cap not in function call"
+	cap(slice)             // ERROR "cap\(slice\) evaluated but not used"
+	close(c)               // ok
+	_ = close(c)           // ERROR "close\(c\) used as value"
+	func() {}              // ERROR "func literal evaluated but not used"
+	X{}                    // ERROR "undefined: X"
+	map[string]int{}       // ERROR "map\[string\]int{} evaluated but not used"
+	struct{}{}             // ERROR "struct ?{}{} evaluated but not used"
+	[1]int{}               // ERROR "\[1\]int{} evaluated but not used"
+	[]int{}                // ERROR "\[\]int{} evaluated but not used"
+	&struct{}{}            // ERROR "&struct ?{}{} evaluated but not used"
+	float32(x)             // ERROR "float32\(x\) evaluated but not used"
+	I(t)                   // ERROR "I\(t\) evaluated but not used"
+	int(x)                 // ERROR "int\(x\) evaluated but not used"
+	copy(slice, slice)     // ok
+	_ = copy(slice, slice) // ok
+	delete(m, 1)           // ok
+	_ = delete(m, 1)       // ERROR "delete\(m, 1\) used as value"
+	t.X                    // ERROR "t.X evaluated but not used"
+	tp.X                   // ERROR "tp.X evaluated but not used"
+	t.M                    // ERROR "t.M evaluated but not used"
+	I.M                    // ERROR "I.M evaluated but not used"
+	i.(T)                  // ERROR "i.\(T\) evaluated but not used"
+	x == x                 // ERROR "x == x evaluated but not used"
+	x != x                 // ERROR "x != x evaluated but not used"
+	x != x                 // ERROR "x != x evaluated but not used"
+	x < x                  // ERROR "x < x evaluated but not used"
+	x >= x                 // ERROR "x >= x evaluated but not used"
+	x > x                  // ERROR "x > x evaluated but not used"
+	*tp                    // ERROR "\*tp evaluated but not used"
+	slice[0]               // ERROR "slice\[0\] evaluated but not used"
+	m[1]                   // ERROR "m\[1\] evaluated but not used"
+	len(slice)             // ERROR "len\(slice\) evaluated but not used"
+	make(chan int)         // ERROR "make\(chan int\) evaluated but not used"
+	make(map[int]int)      // ERROR "make\(map\[int\]int\) evaluated but not used"
+	make([]int, 1)         // ERROR "make\(\[\]int, 1\) evaluated but not used"
+	x * x                  // ERROR "x \* x evaluated but not used"
+	x / x                  // ERROR "x / x evaluated but not used"
+	x % x                  // ERROR "x % x evaluated but not used"
+	x << x                 // ERROR "x << x evaluated but not used"
+	x >> x                 // ERROR "x >> x evaluated but not used"
+	x & x                  // ERROR "x & x evaluated but not used"
+	x &^ x                 // ERROR "x &\^ x evaluated but not used"
+	new(int)               // ERROR "new\(int\) evaluated but not used"
+	!b                     // ERROR "!b evaluated but not used"
+	^x                     // ERROR "\^x evaluated but not used"
+	+x                     // ERROR "\+x evaluated but not used"
+	-x                     // ERROR "-x evaluated but not used"
+	b || b                 // ERROR "b \|\| b evaluated but not used"
+	panic(1)               // ok
+	_ = panic(1)           // ERROR "panic\(1\) used as value"
+	print(1)               // ok
+	_ = print(1)           // ERROR "print\(1\) used as value"
+	println(1)             // ok
+	_ = println(1)         // ERROR "println\(1\) used as value"
+	c <- 1                 // ok
+	slice[1:1]             // ERROR "slice\[1:1\] evaluated but not used"
+	array[1:1]             // ERROR "array\[1:1\] evaluated but not used"
+	s[1:1]                 // ERROR "s\[1:1\] evaluated but not used"
+	slice[1:1:1]           // ERROR "slice\[1:1:1\] evaluated but not used"
+	array[1:1:1]           // ERROR "array\[1:1:1\] evaluated but not used"
+	recover()              // ok
+	<-c                    // ok
+	string(r)              // ERROR "string\(r\) evaluated but not used"
+	iota                   // ERROR "undefined: iota"
+	real(cp)               // ERROR "real\(cp\) evaluated but not used"
+	imag(cp)               // ERROR "imag\(cp\) evaluated but not used"
+	complex(1, 2)          // ERROR "complex\(1, 2\) evaluated but not used"
+	unsafe.Alignof(t.X)    // ERROR "unsafe.Alignof\(t.X\) evaluated but not used"
+	unsafe.Offsetof(t.X)   // ERROR "unsafe.Offsetof\(t.X\) evaluated but not used"
+	unsafe.Sizeof(t)       // ERROR "unsafe.Sizeof\(t\) evaluated but not used"
+	_ = int                // ERROR "type int is not an expression"
+	(x)                    // ERROR "x evaluated but not used"
+	_ = new(x2)            // ERROR "x2 is not a type"
+	_ = new(1 + 1)         // ERROR "1 \+ 1 is not a type"
+}