all: merge branch dev.regabi (d3cd4830ad) into master

This CL merges the dev.regabi branch to the master branch.

In the dev.regabi branch we have refactored the compiler and laid
some of the groundwork for enabling a register-based ABI (issue #40724),
including improved late call/return lowering, improved ABI wrapper
generation, preparation of reflect calls for the new ABI, and
reservation of special registers in the internal ABI. The actual
register-based ABI is not yet enabled: the ABI-related changes are
behind GOEXPERIMENT=regabi and are currently off by default.

Updates #40724, #44222.
Fixes #44224.

Change-Id: Id5de9f734d14099267ab717167aaaeef31fdba70
diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go
index a62e551..026d8ab 100644
--- a/src/cmd/asm/internal/arch/arch.go
+++ b/src/cmd/asm/internal/arch/arch.go
@@ -109,6 +109,10 @@
 	register["SB"] = RSB
 	register["FP"] = RFP
 	register["PC"] = RPC
+	if linkArch == &x86.Linkamd64 {
+		// Alias g to R14
+		register["g"] = x86.REGG
+	}
 	// Register prefix not used on this architecture.
 
 	instructions := make(map[string]obj.As)
diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go
index 2e83e17..c6def15 100644
--- a/src/cmd/asm/internal/asm/operand_test.go
+++ b/src/cmd/asm/internal/asm/operand_test.go
@@ -259,6 +259,7 @@
 	{"R15", "R15"},
 	{"R8", "R8"},
 	{"R9", "R9"},
+	{"g", "R14"},
 	{"SI", "SI"},
 	{"SP", "SP"},
 	{"X0", "X0"},
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index 154cf9c..f1d37bc 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -305,7 +305,7 @@
 // references and writes symabis information to w.
 //
 // The symabis format is documented at
-// cmd/compile/internal/gc.readSymABIs.
+// cmd/compile/internal/ssagen.ReadSymABIs.
 func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) {
 	switch word {
 	case "TEXT":
diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go
deleted file mode 100644
index 6625ccf..0000000
--- a/src/cmd/compile/fmt_test.go
+++ /dev/null
@@ -1,599 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements TestFormats; a test that verifies
-// format strings in the compiler (this directory and all
-// subdirectories, recursively).
-//
-// TestFormats finds potential (Printf, etc.) format strings.
-// If they are used in a call, the format verbs are verified
-// based on the matching argument type against a precomputed
-// map of valid formats (knownFormats). This map can be used to
-// automatically rewrite format strings across all compiler
-// files with the -r flag.
-//
-// The format map needs to be updated whenever a new (type,
-// format) combination is found and the format verb is not
-// 'v' or 'T' (as in "%v" or "%T"). To update the map auto-
-// matically from the compiler source's use of format strings,
-// use the -u flag. (Whether formats are valid for the values
-// to be formatted must be verified manually, of course.)
-//
-// The -v flag prints out the names of all functions called
-// with a format string, the names of files that were not
-// processed, and any format rewrites made (with -r).
-//
-// Run as: go test -run Formats [-r][-u][-v]
-//
-// Known shortcomings:
-// - indexed format strings ("%[2]s", etc.) are not supported
-//   (the test will fail)
-// - format strings that are not simple string literals cannot
-//   be updated automatically
-//   (the test will fail with respective warnings)
-// - format strings in _test packages outside the current
-//   package are not processed
-//   (the test will report those files)
-//
-package main_test
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/ast"
-	"go/build"
-	"go/constant"
-	"go/format"
-	"go/importer"
-	"go/parser"
-	"go/token"
-	"go/types"
-	"internal/testenv"
-	"io"
-	"io/fs"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"testing"
-	"unicode/utf8"
-)
-
-var (
-	rewrite = flag.Bool("r", false, "rewrite format strings")
-	update  = flag.Bool("u", false, "update known formats")
-)
-
-// The following variables collect information across all processed files.
-var (
-	fset          = token.NewFileSet()
-	formatStrings = make(map[*ast.BasicLit]bool)      // set of all potential format strings found
-	foundFormats  = make(map[string]bool)             // set of all formats found
-	callSites     = make(map[*ast.CallExpr]*callSite) // map of all calls
-)
-
-// A File is a corresponding (filename, ast) pair.
-type File struct {
-	name string
-	ast  *ast.File
-}
-
-func TestFormats(t *testing.T) {
-	if testing.Short() && testenv.Builder() == "" {
-		t.Skip("Skipping in short mode")
-	}
-	testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok
-
-	// process all directories
-	filepath.WalkDir(".", func(path string, info fs.DirEntry, err error) error {
-		if info.IsDir() {
-			if info.Name() == "testdata" {
-				return filepath.SkipDir
-			}
-
-			importPath := filepath.Join("cmd/compile", path)
-			if ignoredPackages[filepath.ToSlash(importPath)] {
-				return filepath.SkipDir
-			}
-
-			pkg, err := build.Import(importPath, path, 0)
-			if err != nil {
-				if _, ok := err.(*build.NoGoError); ok {
-					return nil // nothing to do here
-				}
-				t.Fatal(err)
-			}
-			collectPkgFormats(t, pkg)
-		}
-		return nil
-	})
-
-	// test and rewrite formats
-	updatedFiles := make(map[string]File) // files that were rewritten
-	for _, p := range callSites {
-		// test current format literal and determine updated one
-		out := formatReplace(p.str, func(index int, in string) string {
-			if in == "*" {
-				return in // cannot rewrite '*' (as in "%*d")
-			}
-			// in != '*'
-			typ := p.types[index]
-			format := typ + " " + in // e.g., "*Node %n"
-
-			// check if format is known
-			out, known := knownFormats[format]
-
-			// record format if not yet found
-			_, found := foundFormats[format]
-			if !found {
-				foundFormats[format] = true
-			}
-
-			// report an error if the format is unknown and this is the first
-			// time we see it; ignore "%v" and "%T" which are always valid
-			if !known && !found && in != "%v" && in != "%T" {
-				t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ)
-			}
-
-			if out == "" {
-				out = in
-			}
-			return out
-		})
-
-		// replace existing format literal if it changed
-		if out != p.str {
-			// we cannot replace the argument if it's not a string literal for now
-			// (e.g., it may be "foo" + "bar")
-			lit, ok := p.arg.(*ast.BasicLit)
-			if !ok {
-				delete(callSites, p.call) // treat as if we hadn't found this site
-				continue
-			}
-
-			if testing.Verbose() {
-				fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out)
-			}
-
-			// find argument index of format argument
-			index := -1
-			for i, arg := range p.call.Args {
-				if p.arg == arg {
-					index = i
-					break
-				}
-			}
-			if index < 0 {
-				// we may have processed the same call site twice,
-				// but that shouldn't happen
-				panic("internal error: matching argument not found")
-			}
-
-			// replace literal
-			new := *lit                    // make a copy
-			new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes
-			p.call.Args[index] = &new
-			updatedFiles[p.file.name] = p.file
-		}
-	}
-
-	// write dirty files back
-	var filesUpdated bool
-	if len(updatedFiles) > 0 && *rewrite {
-		for _, file := range updatedFiles {
-			var buf bytes.Buffer
-			if err := format.Node(&buf, fset, file.ast); err != nil {
-				t.Errorf("WARNING: gofmt %s failed: %v", file.name, err)
-				continue
-			}
-			if err := ioutil.WriteFile(file.name, buf.Bytes(), 0x666); err != nil {
-				t.Errorf("WARNING: writing %s failed: %v", file.name, err)
-				continue
-			}
-			fmt.Printf("updated %s\n", file.name)
-			filesUpdated = true
-		}
-	}
-
-	// report the names of all functions called with a format string
-	if len(callSites) > 0 && testing.Verbose() {
-		set := make(map[string]bool)
-		for _, p := range callSites {
-			set[nodeString(p.call.Fun)] = true
-		}
-		var list []string
-		for s := range set {
-			list = append(list, s)
-		}
-		fmt.Println("\nFunctions called with a format string")
-		writeList(os.Stdout, list)
-	}
-
-	// update formats
-	if len(foundFormats) > 0 && *update {
-		var list []string
-		for s := range foundFormats {
-			list = append(list, fmt.Sprintf("%q: \"\",", s))
-		}
-		var buf bytes.Buffer
-		buf.WriteString(knownFormatsHeader)
-		writeList(&buf, list)
-		buf.WriteString("}\n")
-		out, err := format.Source(buf.Bytes())
-		const outfile = "fmtmap_test.go"
-		if err != nil {
-			t.Errorf("WARNING: gofmt %s failed: %v", outfile, err)
-			out = buf.Bytes() // continue with unformatted source
-		}
-		if err = ioutil.WriteFile(outfile, out, 0644); err != nil {
-			t.Errorf("WARNING: updating format map failed: %v", err)
-		}
-	}
-
-	// check that knownFormats is up to date
-	if !*rewrite && !*update {
-		var mismatch bool
-		for s := range foundFormats {
-			if _, ok := knownFormats[s]; !ok {
-				mismatch = true
-				break
-			}
-		}
-		if !mismatch {
-			for s := range knownFormats {
-				if _, ok := foundFormats[s]; !ok {
-					mismatch = true
-					break
-				}
-			}
-		}
-		if mismatch {
-			t.Errorf("format map is out of date; run 'go test -u' to update and manually verify correctness of change'")
-		}
-	}
-
-	// all format strings of calls must be in the formatStrings set (self-verification)
-	for _, p := range callSites {
-		if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
-			if formatStrings[lit] {
-				// ok
-				delete(formatStrings, lit)
-			} else {
-				// this should never happen
-				panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit)))
-			}
-		}
-	}
-
-	// if we have any strings left, we may need to update them manually
-	if len(formatStrings) > 0 && filesUpdated {
-		var list []string
-		for lit := range formatStrings {
-			list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit)))
-		}
-		fmt.Println("\nWARNING: Potentially missed format strings")
-		writeList(os.Stdout, list)
-		t.Fail()
-	}
-
-	fmt.Println()
-}
-
-// A callSite describes a function call that appears to contain
-// a format string.
-type callSite struct {
-	file  File
-	call  *ast.CallExpr // call containing the format string
-	arg   ast.Expr      // format argument (string literal or constant)
-	str   string        // unquoted format string
-	types []string      // argument types
-}
-
-func collectPkgFormats(t *testing.T, pkg *build.Package) {
-	// collect all files
-	var filenames []string
-	filenames = append(filenames, pkg.GoFiles...)
-	filenames = append(filenames, pkg.CgoFiles...)
-	filenames = append(filenames, pkg.TestGoFiles...)
-
-	// TODO(gri) verify _test files outside package
-	for _, name := range pkg.XTestGoFiles {
-		// don't process this test itself
-		if name != "fmt_test.go" && testing.Verbose() {
-			fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name))
-		}
-	}
-
-	// make filenames relative to .
-	for i, name := range filenames {
-		filenames[i] = filepath.Join(pkg.Dir, name)
-	}
-
-	// parse all files
-	files := make([]*ast.File, len(filenames))
-	for i, filename := range filenames {
-		f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
-		if err != nil {
-			t.Fatal(err)
-		}
-		files[i] = f
-	}
-
-	// typecheck package
-	conf := types.Config{Importer: importer.Default()}
-	etypes := make(map[ast.Expr]types.TypeAndValue)
-	if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil {
-		t.Fatal(err)
-	}
-
-	// collect all potential format strings (for extra verification later)
-	for _, file := range files {
-		ast.Inspect(file, func(n ast.Node) bool {
-			if s, ok := stringLit(n); ok && isFormat(s) {
-				formatStrings[n.(*ast.BasicLit)] = true
-			}
-			return true
-		})
-	}
-
-	// collect all formats/arguments of calls with format strings
-	for index, file := range files {
-		ast.Inspect(file, func(n ast.Node) bool {
-			if call, ok := n.(*ast.CallExpr); ok {
-				if ignoredFunctions[nodeString(call.Fun)] {
-					return true
-				}
-				// look for an arguments that might be a format string
-				for i, arg := range call.Args {
-					if s, ok := stringVal(etypes[arg]); ok && isFormat(s) {
-						// make sure we have enough arguments
-						n := numFormatArgs(s)
-						if i+1+n > len(call.Args) {
-							t.Errorf("%s: not enough format args (ignore %s?)", posString(call), nodeString(call.Fun))
-							break // ignore this call
-						}
-						// assume last n arguments are to be formatted;
-						// determine their types
-						argTypes := make([]string, n)
-						for i, arg := range call.Args[len(call.Args)-n:] {
-							if tv, ok := etypes[arg]; ok {
-								argTypes[i] = typeString(tv.Type)
-							}
-						}
-						// collect call site
-						if callSites[call] != nil {
-							panic("internal error: file processed twice?")
-						}
-						callSites[call] = &callSite{
-							file:  File{filenames[index], file},
-							call:  call,
-							arg:   arg,
-							str:   s,
-							types: argTypes,
-						}
-						break // at most one format per argument list
-					}
-				}
-			}
-			return true
-		})
-	}
-}
-
-// writeList writes list in sorted order to w.
-func writeList(w io.Writer, list []string) {
-	sort.Strings(list)
-	for _, s := range list {
-		fmt.Fprintln(w, "\t", s)
-	}
-}
-
-// posString returns a string representation of n's position
-// in the form filename:line:col: .
-func posString(n ast.Node) string {
-	if n == nil {
-		return ""
-	}
-	return fset.Position(n.Pos()).String()
-}
-
-// nodeString returns a string representation of n.
-func nodeString(n ast.Node) string {
-	var buf bytes.Buffer
-	if err := format.Node(&buf, fset, n); err != nil {
-		log.Fatal(err) // should always succeed
-	}
-	return buf.String()
-}
-
-// typeString returns a string representation of n.
-func typeString(typ types.Type) string {
-	return filepath.ToSlash(typ.String())
-}
-
-// stringLit returns the unquoted string value and true if
-// n represents a string literal; otherwise it returns ""
-// and false.
-func stringLit(n ast.Node) (string, bool) {
-	if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
-		s, err := strconv.Unquote(lit.Value)
-		if err != nil {
-			log.Fatal(err) // should not happen with correct ASTs
-		}
-		return s, true
-	}
-	return "", false
-}
-
-// stringVal returns the (unquoted) string value and true if
-// tv is a string constant; otherwise it returns "" and false.
-func stringVal(tv types.TypeAndValue) (string, bool) {
-	if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String {
-		return constant.StringVal(tv.Value), true
-	}
-	return "", false
-}
-
-// formatIter iterates through the string s in increasing
-// index order and calls f for each format specifier '%..v'.
-// The arguments for f describe the specifier's index range.
-// If a format specifier contains a "*", f is called with
-// the index range for "*" alone, before being called for
-// the entire specifier. The result of f is the index of
-// the rune at which iteration continues.
-func formatIter(s string, f func(i, j int) int) {
-	i := 0     // index after current rune
-	var r rune // current rune
-
-	next := func() {
-		r1, w := utf8.DecodeRuneInString(s[i:])
-		if w == 0 {
-			r1 = -1 // signal end-of-string
-		}
-		r = r1
-		i += w
-	}
-
-	flags := func() {
-		for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' {
-			next()
-		}
-	}
-
-	index := func() {
-		if r == '[' {
-			log.Fatalf("cannot handle indexed arguments: %s", s)
-		}
-	}
-
-	digits := func() {
-		index()
-		if r == '*' {
-			i = f(i-1, i)
-			next()
-			return
-		}
-		for '0' <= r && r <= '9' {
-			next()
-		}
-	}
-
-	for next(); r >= 0; next() {
-		if r == '%' {
-			i0 := i
-			next()
-			flags()
-			digits()
-			if r == '.' {
-				next()
-				digits()
-			}
-			index()
-			// accept any letter (a-z, A-Z) as format verb;
-			// ignore anything else
-			if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' {
-				i = f(i0-1, i)
-			}
-		}
-	}
-}
-
-// isFormat reports whether s contains format specifiers.
-func isFormat(s string) (yes bool) {
-	formatIter(s, func(i, j int) int {
-		yes = true
-		return len(s) // stop iteration
-	})
-	return
-}
-
-// oneFormat reports whether s is exactly one format specifier.
-func oneFormat(s string) (yes bool) {
-	formatIter(s, func(i, j int) int {
-		yes = i == 0 && j == len(s)
-		return j
-	})
-	return
-}
-
-// numFormatArgs returns the number of format specifiers in s.
-func numFormatArgs(s string) int {
-	count := 0
-	formatIter(s, func(i, j int) int {
-		count++
-		return j
-	})
-	return count
-}
-
-// formatReplace replaces the i'th format specifier s in the incoming
-// string in with the result of f(i, s) and returns the new string.
-func formatReplace(in string, f func(i int, s string) string) string {
-	var buf []byte
-	i0 := 0
-	index := 0
-	formatIter(in, func(i, j int) int {
-		if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers
-			buf = append(buf, in[i0:i]...)
-			buf = append(buf, f(index, sub)...)
-			i0 = j
-		}
-		index++
-		return j
-	})
-	return string(append(buf, in[i0:]...))
-}
-
-// ignoredPackages is the set of packages which can
-// be ignored.
-var ignoredPackages = map[string]bool{}
-
-// ignoredFunctions is the set of functions which may have
-// format-like arguments but which don't do any formatting and
-// thus may be ignored.
-var ignoredFunctions = map[string]bool{}
-
-func init() {
-	// verify that knownFormats entries are correctly formatted
-	for key, val := range knownFormats {
-		// key must be "typename format", and format starts with a '%'
-		// (formats containing '*' alone are not collected in this map)
-		i := strings.Index(key, "%")
-		if i < 0 || !oneFormat(key[i:]) {
-			log.Fatalf("incorrect knownFormats key: %q", key)
-		}
-		// val must be "format" or ""
-		if val != "" && !oneFormat(val) {
-			log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key)
-		}
-	}
-}
-
-const knownFormatsHeader = `// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the knownFormats map which records the valid
-// formats for a given type. The valid formats must correspond to
-// supported compiler formats implemented in fmt.go, or whatever
-// other format verbs are implemented for the given type. The map may
-// also be used to change the use of a format verb across all compiler
-// sources automatically (for instance, if the implementation of fmt.go
-// changes), by using the -r option together with the new formats in the
-// map. To generate this file automatically from the existing source,
-// run: go test -run Formats -u.
-//
-// See the package comment in fmt_test.go for additional information.
-
-package main_test
-
-// knownFormats entries are of the form "typename format" -> "newformat".
-// An absent entry means that the format is not recognized as valid.
-// An empty new format means that the format should remain unchanged.
-var knownFormats = map[string]string{
-`
diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go
deleted file mode 100644
index 0811df7..0000000
--- a/src/cmd/compile/fmtmap_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the knownFormats map which records the valid
-// formats for a given type. The valid formats must correspond to
-// supported compiler formats implemented in fmt.go, or whatever
-// other format verbs are implemented for the given type. The map may
-// also be used to change the use of a format verb across all compiler
-// sources automatically (for instance, if the implementation of fmt.go
-// changes), by using the -r option together with the new formats in the
-// map. To generate this file automatically from the existing source,
-// run: go test -run Formats -u.
-//
-// See the package comment in fmt_test.go for additional information.
-
-package main_test
-
-// knownFormats entries are of the form "typename format" -> "newformat".
-// An absent entry means that the format is not recognized as valid.
-// An empty new format means that the format should remain unchanged.
-var knownFormats = map[string]string{
-	"*bytes.Buffer %s":                                "",
-	"*cmd/compile/internal/gc.EscLocation %v":         "",
-	"*cmd/compile/internal/gc.Mpflt %v":               "",
-	"*cmd/compile/internal/gc.Mpint %v":               "",
-	"*cmd/compile/internal/gc.Node %#v":               "",
-	"*cmd/compile/internal/gc.Node %+S":               "",
-	"*cmd/compile/internal/gc.Node %+v":               "",
-	"*cmd/compile/internal/gc.Node %L":                "",
-	"*cmd/compile/internal/gc.Node %S":                "",
-	"*cmd/compile/internal/gc.Node %j":                "",
-	"*cmd/compile/internal/gc.Node %p":                "",
-	"*cmd/compile/internal/gc.Node %v":                "",
-	"*cmd/compile/internal/ssa.Block %s":              "",
-	"*cmd/compile/internal/ssa.Block %v":              "",
-	"*cmd/compile/internal/ssa.Func %s":               "",
-	"*cmd/compile/internal/ssa.Func %v":               "",
-	"*cmd/compile/internal/ssa.Register %s":           "",
-	"*cmd/compile/internal/ssa.Register %v":           "",
-	"*cmd/compile/internal/ssa.SparseTreeNode %v":     "",
-	"*cmd/compile/internal/ssa.Value %s":              "",
-	"*cmd/compile/internal/ssa.Value %v":              "",
-	"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
-	"*cmd/compile/internal/types.Field %p":            "",
-	"*cmd/compile/internal/types.Field %v":            "",
-	"*cmd/compile/internal/types.Sym %0S":             "",
-	"*cmd/compile/internal/types.Sym %S":              "",
-	"*cmd/compile/internal/types.Sym %p":              "",
-	"*cmd/compile/internal/types.Sym %v":              "",
-	"*cmd/compile/internal/types.Type %#L":            "",
-	"*cmd/compile/internal/types.Type %#v":            "",
-	"*cmd/compile/internal/types.Type %+v":            "",
-	"*cmd/compile/internal/types.Type %-S":            "",
-	"*cmd/compile/internal/types.Type %0S":            "",
-	"*cmd/compile/internal/types.Type %L":             "",
-	"*cmd/compile/internal/types.Type %S":             "",
-	"*cmd/compile/internal/types.Type %p":             "",
-	"*cmd/compile/internal/types.Type %s":             "",
-	"*cmd/compile/internal/types.Type %v":             "",
-	"*cmd/internal/obj.Addr %v":                       "",
-	"*cmd/internal/obj.LSym %v":                       "",
-	"*math/big.Float %f":                              "",
-	"*math/big.Int %#x":                               "",
-	"*math/big.Int %s":                                "",
-	"*math/big.Int %v":                                "",
-	"[16]byte %x":                                     "",
-	"[]*cmd/compile/internal/ssa.Block %v":            "",
-	"[]*cmd/compile/internal/ssa.Value %v":            "",
-	"[][]string %q":                                   "",
-	"[]byte %s":                                       "",
-	"[]byte %x":                                       "",
-	"[]cmd/compile/internal/ssa.Edge %v":              "",
-	"[]cmd/compile/internal/ssa.ID %v":                "",
-	"[]cmd/compile/internal/ssa.posetNode %v":         "",
-	"[]cmd/compile/internal/ssa.posetUndo %v":         "",
-	"[]cmd/compile/internal/syntax.token %s":          "",
-	"[]string %v":                                     "",
-	"[]uint32 %v":                                     "",
-	"bool %v":                                         "",
-	"byte %08b":                                       "",
-	"byte %c":                                         "",
-	"byte %q":                                         "",
-	"byte %v":                                         "",
-	"cmd/compile/internal/arm.shift %d":               "",
-	"cmd/compile/internal/gc.Class %d":                "",
-	"cmd/compile/internal/gc.Class %s":                "",
-	"cmd/compile/internal/gc.Class %v":                "",
-	"cmd/compile/internal/gc.Ctype %d":                "",
-	"cmd/compile/internal/gc.Ctype %v":                "",
-	"cmd/compile/internal/gc.Nodes %#v":               "",
-	"cmd/compile/internal/gc.Nodes %+v":               "",
-	"cmd/compile/internal/gc.Nodes %.v":               "",
-	"cmd/compile/internal/gc.Nodes %v":                "",
-	"cmd/compile/internal/gc.Op %#v":                  "",
-	"cmd/compile/internal/gc.Op %v":                   "",
-	"cmd/compile/internal/gc.Val %#v":                 "",
-	"cmd/compile/internal/gc.Val %T":                  "",
-	"cmd/compile/internal/gc.Val %v":                  "",
-	"cmd/compile/internal/gc.fmtMode %d":              "",
-	"cmd/compile/internal/gc.initKind %d":             "",
-	"cmd/compile/internal/gc.itag %v":                 "",
-	"cmd/compile/internal/ssa.BranchPrediction %d":    "",
-	"cmd/compile/internal/ssa.Edge %v":                "",
-	"cmd/compile/internal/ssa.GCNode %v":              "",
-	"cmd/compile/internal/ssa.ID %d":                  "",
-	"cmd/compile/internal/ssa.ID %v":                  "",
-	"cmd/compile/internal/ssa.LocalSlot %s":           "",
-	"cmd/compile/internal/ssa.LocalSlot %v":           "",
-	"cmd/compile/internal/ssa.Location %s":            "",
-	"cmd/compile/internal/ssa.Op %s":                  "",
-	"cmd/compile/internal/ssa.Op %v":                  "",
-	"cmd/compile/internal/ssa.Sym %v":                 "",
-	"cmd/compile/internal/ssa.ValAndOff %s":           "",
-	"cmd/compile/internal/ssa.domain %v":              "",
-	"cmd/compile/internal/ssa.flagConstant %s":        "",
-	"cmd/compile/internal/ssa.posetNode %v":           "",
-	"cmd/compile/internal/ssa.posetTestOp %v":         "",
-	"cmd/compile/internal/ssa.rbrank %d":              "",
-	"cmd/compile/internal/ssa.regMask %d":             "",
-	"cmd/compile/internal/ssa.register %d":            "",
-	"cmd/compile/internal/ssa.relation %s":            "",
-	"cmd/compile/internal/syntax.Error %q":            "",
-	"cmd/compile/internal/syntax.Expr %#v":            "",
-	"cmd/compile/internal/syntax.LitKind %d":          "",
-	"cmd/compile/internal/syntax.Node %T":             "",
-	"cmd/compile/internal/syntax.Operator %s":         "",
-	"cmd/compile/internal/syntax.Pos %s":              "",
-	"cmd/compile/internal/syntax.Pos %v":              "",
-	"cmd/compile/internal/syntax.position %s":         "",
-	"cmd/compile/internal/syntax.token %q":            "",
-	"cmd/compile/internal/syntax.token %s":            "",
-	"cmd/compile/internal/types.EType %d":             "",
-	"cmd/compile/internal/types.EType %s":             "",
-	"cmd/compile/internal/types.EType %v":             "",
-	"cmd/internal/obj.ABI %v":                         "",
-	"error %v":                                        "",
-	"float64 %.2f":                                    "",
-	"float64 %.3f":                                    "",
-	"float64 %.6g":                                    "",
-	"float64 %g":                                      "",
-	"int %#x":                                         "",
-	"int %-12d":                                       "",
-	"int %-6d":                                        "",
-	"int %-8o":                                        "",
-	"int %02d":                                        "",
-	"int %6d":                                         "",
-	"int %c":                                          "",
-	"int %d":                                          "",
-	"int %v":                                          "",
-	"int %x":                                          "",
-	"int16 %d":                                        "",
-	"int16 %x":                                        "",
-	"int32 %#x":                                       "",
-	"int32 %d":                                        "",
-	"int32 %v":                                        "",
-	"int32 %x":                                        "",
-	"int64 %#x":                                       "",
-	"int64 %+d":                                       "",
-	"int64 %-10d":                                     "",
-	"int64 %.5d":                                      "",
-	"int64 %d":                                        "",
-	"int64 %v":                                        "",
-	"int64 %x":                                        "",
-	"int8 %d":                                         "",
-	"int8 %v":                                         "",
-	"int8 %x":                                         "",
-	"interface{} %#v":                                 "",
-	"interface{} %T":                                  "",
-	"interface{} %p":                                  "",
-	"interface{} %q":                                  "",
-	"interface{} %s":                                  "",
-	"interface{} %v":                                  "",
-	"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
-	"map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
-	"map[cmd/compile/internal/ssa.ID]uint32 %v":                            "",
-	"map[int64]uint32 %v":  "",
-	"math/big.Accuracy %s": "",
-	"reflect.Type %s":      "",
-	"rune %#U":             "",
-	"rune %c":              "",
-	"rune %q":              "",
-	"string %-*s":          "",
-	"string %-16s":         "",
-	"string %-6s":          "",
-	"string %q":            "",
-	"string %s":            "",
-	"string %v":            "",
-	"time.Duration %d":     "",
-	"time.Duration %v":     "",
-	"uint %04x":            "",
-	"uint %5d":             "",
-	"uint %d":              "",
-	"uint %x":              "",
-	"uint16 %d":            "",
-	"uint16 %x":            "",
-	"uint32 %#U":           "",
-	"uint32 %#x":           "",
-	"uint32 %d":            "",
-	"uint32 %v":            "",
-	"uint32 %x":            "",
-	"uint64 %08x":          "",
-	"uint64 %b":            "",
-	"uint64 %d":            "",
-	"uint64 %x":            "",
-	"uint8 %#x":            "",
-	"uint8 %d":             "",
-	"uint8 %v":             "",
-	"uint8 %x":             "",
-	"uintptr %d":           "",
-}
diff --git a/src/cmd/compile/internal-abi.md b/src/cmd/compile/internal-abi.md
new file mode 100644
index 0000000..f4ef2cc
--- /dev/null
+++ b/src/cmd/compile/internal-abi.md
@@ -0,0 +1,628 @@
+# Go internal ABI specification
+
+This document describes Go’s internal application binary interface
+(ABI), known as ABIInternal.
+Go's ABI defines the layout of data in memory and the conventions for
+calling between Go functions.
+This ABI is *unstable* and will change between Go versions.
+If you’re writing assembly code, please instead refer to Go’s
+[assembly documentation](/doc/asm.html), which describes Go’s stable
+ABI, known as ABI0.
+
+All functions defined in Go source follow ABIInternal.
+However, ABIInternal and ABI0 functions are able to call each other
+through transparent *ABI wrappers*, described in the [internal calling
+convention proposal](https://golang.org/design/27539-internal-abi).
+
+Go uses a common ABI design across all architectures.
+We first describe the common ABI, and then cover per-architecture
+specifics.
+
+*Rationale*: For the reasoning behind using a common ABI across
+architectures instead of the platform ABI, see the [register-based Go
+calling convention proposal](https://golang.org/design/40724-register-calling).
+
+## Memory layout
+
+Go's built-in types have the following sizes and alignments.
+Many, though not all, of these sizes are guaranteed by the [language
+specification](/doc/go_spec.html#Size_and_alignment_guarantees).
+Those that aren't guaranteed may change in future versions of Go (for
+example, we've considered changing the alignment of int64 on 32-bit).
+
+| Type | 64-bit |       | 32-bit |       |
+| ---  | ---    | ---   | ---    | ---   |
+|      | Size   | Align | Size   | Align |
+| bool, uint8, int8  | 1  | 1 | 1  | 1 |
+| uint16, int16      | 2  | 2 | 2  | 2 |
+| uint32, int32      | 4  | 4 | 4  | 4 |
+| uint64, int64      | 8  | 8 | 8  | 4 |
+| int, uint          | 8  | 8 | 4  | 4 |
+| float32            | 4  | 4 | 4  | 4 |
+| float64            | 8  | 8 | 8  | 4 |
+| complex64          | 8  | 4 | 8  | 4 |
+| complex128         | 16 | 8 | 16 | 4 |
+| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |
+
+The types `byte` and `rune` are aliases for `uint8` and `int32`,
+respectively, and hence have the same size and alignment as these
+types.
+
+The layout of `map`, `chan`, and `func` types is equivalent to `*T`.
+
+To describe the layout of the remaining composite types, we first
+define the layout of a *sequence* S of N fields with types
+t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
+We define the byte offset at which each field begins relative to a
+base address of 0, as well as the size and alignment of the sequence
+as follows:
+
+```
+offset(S, i) = 0  if i = 1
+             = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
+alignof(S)   = 1  if N = 0
+             = max(alignof(t_i) | 1 <= i <= N)
+sizeof(S)    = 0  if N = 0
+             = align(offset(S, N) + sizeof(t_N), alignof(S))
+```
+
+Where sizeof(T) and alignof(T) are the size and alignment of type T,
+respectively, and align(x, y) rounds x up to a multiple of y.
+
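+For example (a worked illustration, not part of the specification),
+applying these definitions to the sequence (int8, int64, int16) on a
+64-bit architecture gives:
+
+```
+offset(S, 1) = 0
+offset(S, 2) = align(0 + 1, 8)  = 8
+offset(S, 3) = align(8 + 8, 2)  = 16
+alignof(S)   = max(1, 8, 2)     = 8
+sizeof(S)    = align(16 + 2, 8) = 24
+```
+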
+The `interface{}` type is a sequence of 1. a pointer to the runtime type
+description for the interface's dynamic type and 2. an `unsafe.Pointer`
+data field.
+Any other interface type (besides the empty interface) is a sequence
+of 1. a pointer to the runtime "itab" that gives the method pointers and
+the type of the data field and 2. an `unsafe.Pointer` data field.
+An interface can be "direct" or "indirect" depending on the dynamic
+type: a direct interface stores the value directly in the data field,
+and an indirect interface stores a pointer to the value in the data
+field.
+An interface can only be direct if the value consists of a single
+pointer word.
+
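+For illustration only (hypothetical declarations, not part of the
+specification), the following package-level variables show the
+distinction:
+
+```
+package example
+
+// A *int has a single-pointer representation, so the interface
+// stores the pointer directly; an int does not, so the interface's
+// data field points to a copy of the value.
+var p = new(int)
+var direct interface{} = p // direct
+
+var i = 42
+var indirect interface{} = i // indirect
+```
+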
+An array type `[N]T` is a sequence of N fields of type T.
+
+The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
+backing store, an `int` giving the `len` of the slice, and an `int`
+giving the `cap` of the slice.
+
+The `string` type is a sequence of a `*[len]byte` pointer to the
+string backing store, and an `int` giving the `len` of the string.
+
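+As an aside (not part of the specification), these slice and string
+layouts are mirrored by the header types declared in the `reflect`
+package:
+
+```
+// Declared in package reflect; the fields match the sequences
+// described above.
+type SliceHeader struct {
+	Data uintptr
+	Len  int
+	Cap  int
+}
+
+type StringHeader struct {
+	Data uintptr
+	Len  int
+}
+```
+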
+A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
+sequence t1, ..., tM, tP, where tP is either:
+
+- Type `byte` if sizeof(tM) = 0 and any of sizeof(t*i*) ≠ 0.
+- Empty (size 0 and align 1) otherwise.
+
+The padding byte prevents creating a past-the-end pointer by taking
+the address of the final, empty fM field.
+
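+As a small illustration (not part of the specification), the
+following program shows the effect of the padding byte on a 64-bit
+architecture:
+
+```
+package main
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// S ends in a zero-size field, so a padding byte follows y and the
+// struct's size rounds up to its 8-byte alignment.
+type S struct {
+	x int64
+	y struct{}
+}
+
+func main() {
+	fmt.Println(unsafe.Sizeof(S{})) // prints 16 on 64-bit, not 8
+}
+```
+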
+Note that user-written assembly code should generally not depend on Go
+type layout and should instead use the constants defined in
+[`go_asm.h`](/doc/asm.html#data-offsets).
+
+## Function call argument and result passing
+
+Function calls pass arguments and results using a combination of the
+stack and machine registers.
+Each argument or result is passed either entirely in registers or
+entirely on the stack.
+Because access to registers is generally faster than access to the
+stack, arguments and results are preferentially passed in registers.
+However, any argument or result that contains a non-trivial array or
+does not fit entirely in the remaining available registers is passed
+on the stack.
+
+Each architecture defines a sequence of integer registers and a
+sequence of floating-point registers.
+At a high level, arguments and results are recursively broken down
+into values of base types and these base values are assigned to
+registers from these sequences.
+
+Arguments and results can share the same registers, but do not share
+the same stack space.
+Beyond the arguments and results passed on the stack, the caller also
+reserves spill space on the stack for all register-based arguments
+(but does not populate this space).
+
+The receiver, arguments, and results of function or method F are
+assigned to registers or the stack using the following algorithm:
+
+1. Let NI and NFP be the lengths of the integer and floating-point
+   register sequences defined by the architecture.
+   Let I and FP be 0; these are the indexes of the next integer and
+   floating-point register.
+   Let S, the type sequence defining the stack frame, be empty.
+1. If F is a method, assign F’s receiver.
+1. For each argument A of F, assign A.
+1. Add a pointer-alignment field to S. This has size 0 and the same
+   alignment as `uintptr`.
+1. Reset I and FP to 0.
+1. For each result R of F, assign R.
+1. Add a pointer-alignment field to S.
+1. For each register-assigned receiver and argument of F, let T be its
+   type and add T to the stack sequence S.
+   This is the argument's (or receiver's) spill space and will be
+   uninitialized at the call.
+1. Add a pointer-alignment field to S.
+
+Assigning a receiver, argument, or result V of underlying type T works
+as follows:
+
+1. Remember I and FP.
+1. Try to register-assign V.
+1. If step 2 failed, reset I and FP to the values from step 1, add T
+   to the stack sequence S, and assign V to this field in S.
+
+Register-assignment of a value V of underlying type T works as follows:
+
+1. If T is a boolean or integral type that fits in an integer
+   register, assign V to register I and increment I.
+1. If T is an integral type that fits in two integer registers, assign
+   the least significant and most significant halves of V to registers
+   I and I+1, respectively, and increment I by 2.
+1. If T is a floating-point type and can be represented without loss
+   of precision in a floating-point register, assign V to register FP
+   and increment FP.
+1. If T is a complex type, recursively register-assign its real and
+   imaginary parts.
+1. If T is a pointer type, map type, channel type, or function type,
+   assign V to register I and increment I.
+1. If T is a string type, interface type, or slice type, recursively
+   register-assign V’s components (2 for strings and interfaces, 3 for
+   slices).
+1. If T is a struct type, recursively register-assign each field of V.
+1. If T is an array type of length 0, do nothing.
+1. If T is an array type of length 1, recursively register-assign its
+   one element.
+1. If T is an array type of length > 1, fail.
+1. If I > NI or FP > NFP, fail.
+1. If any recursive assignment above fails, fail.
+
+The above algorithm produces an assignment of each receiver, argument,
+and result to registers or to a field in the stack sequence.
+The final stack sequence looks like: stack-assigned receiver,
+stack-assigned arguments, pointer-alignment, stack-assigned results,
+pointer-alignment, spill space for each register-assigned argument,
+pointer-alignment.
+The following diagram shows what this stack frame looks like on the
+stack, using the typical convention where address 0 is at the bottom:
+
+    +------------------------------+
+    |             . . .            |
+    | 2nd reg argument spill space |
+    | 1st reg argument spill space |
+    | <pointer-sized alignment>    |
+    |             . . .            |
+    | 2nd stack-assigned result    |
+    | 1st stack-assigned result    |
+    | <pointer-sized alignment>    |
+    |             . . .            |
+    | 2nd stack-assigned argument  |
+    | 1st stack-assigned argument  |
+    | stack-assigned receiver      |
+    +------------------------------+ ↓ lower addresses
+
+To perform a call, the caller reserves space starting at the lowest
+address in its stack frame for the call stack frame, stores arguments
+in the registers and argument stack fields determined by the above
+algorithm, and performs the call.
+At the time of a call, spill space, result stack fields, and result
+registers are left uninitialized.
+Upon return, the callee must have stored results to all result
+registers and result stack fields determined by the above algorithm.
+
+There are no callee-save registers, so a call may overwrite any
+register that doesn’t have a fixed meaning, including argument
+registers.
+
+### Example
+
+Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
+struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
+architecture with hypothetical integer registers R0–R9.
+
+On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1`, and the
+stack frame is laid out in the following sequence:
+
+    a2      [2]uintptr
+    r1.x    uintptr
+    r1.y    [2]uintptr
+    a1Spill uint8
+    a3Spill uint8
+    _       [6]uint8  // alignment padding
+
+In the stack frame, only the `a2` field is initialized on entry; the
+rest of the frame is left uninitialized.
+
+On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
+and `r1.x` and `r1.y` are initialized in the stack frame.
+
+There are several things to note in this example.
+First, `a2` and `r1` are stack-assigned because they contain arrays.
+The other arguments and results are register-assigned.
+Result `r2` is decomposed into its components, which are individually
+register-assigned.
+On the stack, the stack-assigned arguments appear at lower addresses
+than the stack-assigned results, which appear at lower addresses than
+the argument spill area.
+Only arguments, not results, are assigned a spill area on the stack.
+
+### Rationale
+
+Each base value is assigned to its own register to optimize
+construction and access.
+An alternative would be to pack multiple sub-word values into
+registers, or to simply map an argument's in-memory layout to
+registers (this is common in C ABIs), but this typically adds cost to
+pack and unpack these values.
+Modern architectures have more than enough registers to pass all
+arguments and results this way for nearly all functions (see the
+appendix), so there’s little downside to spreading base values across
+registers.
+
+Arguments that can’t be fully assigned to registers are passed
+entirely on the stack in case the callee takes the address of that
+argument.
+If an argument could be split across the stack and registers and the
+callee took its address, it would need to be reconstructed in memory,
+a process that would be proportional to the size of the argument.
+
+Non-trivial arrays are always passed on the stack because indexing
+into an array typically requires a computed offset, which generally
+isn’t possible with registers.
+Arrays in general are rare in function signatures (only 0.7% of
+functions in the Go 1.15 standard library and 0.2% in kubelet).
+We considered allowing array fields to be passed on the stack while
+the rest of an argument’s fields are passed in registers, but this
+creates the same problems as other large structs if the callee takes
+the address of an argument, and would benefit <0.1% of functions in
+kubelet (and even these very little).
+
+We make exceptions for 0 and 1-element arrays because these don’t
+require computed offsets, and 1-element arrays are already decomposed
+in the compiler’s SSA representation.
+
+The ABI assignment algorithm above is equivalent to Go’s stack-based
+ABI0 calling convention if there are zero architecture registers.
+This is intended to ease the transition to the register-based internal
+ABI and make it easy for the compiler to generate either calling
+convention.
+An architecture may still define register meanings that aren’t
+compatible with ABI0, but these differences should be easy to account
+for in the compiler.
+
+The algorithm reserves spill space for arguments in the caller’s frame
+so that the compiler can generate a stack growth path that spills into
+this reserved space.
+If the callee has to grow the stack, it may not be able to reserve
+enough additional stack space in its own frame to spill these, which
+is why it’s important that the caller do so.
+These slots also act as the home location if these arguments need to
+be spilled for any other reason, which simplifies traceback printing.
+
+There are several options for how to lay out the argument spill space.
+We chose to lay out each argument according to its type's usual memory
+layout but to separate the spill space from the regular argument
+space.
+Using the usual memory layout simplifies the compiler because it
+already understands this layout.
+Also, if a function takes the address of a register-assigned argument,
+the compiler must spill that argument to memory in its usual memory
+layout and it's more convenient to use the argument spill space for
+this purpose.
+
+Alternatively, the spill space could be structured around argument
+registers.
+In this approach, the stack growth spill path would spill each
+argument register to a register-sized stack word.
+However, if the function takes the address of a register-assigned
+argument, the compiler would have to reconstruct it in memory layout
+elsewhere on the stack.
+
+The spill space could also be interleaved with the stack-assigned
+arguments so the arguments appear in order whether they are register-
+or stack-assigned.
+This would be close to ABI0, except that register-assigned arguments
+would be uninitialized on the stack and there's no need to reserve
+stack space for register-assigned results.
+We expect separating the spill space to perform better because of
+memory locality.
+Separating the space is also potentially simpler for `reflect` calls
+because this allows `reflect` to summarize the spill space as a single
+number.
+Finally, the long-term intent is to remove reserved spill slots
+entirely – allowing most functions to be called without any stack
+setup and easing the introduction of callee-save registers – and
+separating the spill space makes that transition easier.
+
+## Closures
+
+A func value (e.g., `var x func()`) is a pointer to a closure object.
+A closure object begins with a pointer-sized program counter
+representing the entry point of the function, followed by zero or more
+bytes containing the closed-over environment.
+
+Closure calls follow the same conventions as static function and
+method calls, with one addition. Each architecture specifies a
+*closure context pointer* register and calls to closures store the
+address of the closure object in the closure context pointer register
+prior to the call.
+
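+For illustration only (a hypothetical sketch, not the runtime's
+actual declarations), the closure object for a function value that
+captures a single `int` can be pictured as:
+
+```
+package example
+
+// closureObject is a hypothetical picture of a closure's in-memory
+// layout; real closure objects are laid out by the compiler, not
+// declared in Go source.
+type closureObject struct {
+	fn uintptr // entry PC of the closure's function
+	x  int     // closed-over variable (captured by value here)
+}
+```
+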
+## Software floating-point mode
+
+In "softfloat" mode, the ABI simply treats the hardware as having zero
+floating-point registers.
+As a result, any arguments containing floating-point values will be
+passed on the stack.
+
+*Rationale*: Softfloat mode is about compatibility over performance
+and is not commonly used.
+Hence, we keep the ABI as simple as possible in this case, rather than
+adding additional rules for passing floating-point values in integer
+registers.
+
+## Architecture specifics
+
+This section describes per-architecture register mappings, as well as
+other per-architecture special cases.
+
+### amd64 architecture
+
+The amd64 architecture uses the following sequence of 9 registers for
+integer arguments and results:
+
+    RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
+
+It uses X0 – X14 for floating-point arguments and results.
+
+*Rationale*: These sequences are chosen from the available registers
+to be relatively easy to remember.
+
+Registers R12 and R13 are permanent scratch registers.
+R15 is a scratch register except in dynamically linked binaries.
+
+*Rationale*: Some operations such as stack growth and reflection calls
+need dedicated scratch registers in order to manipulate call frames
+without corrupting arguments or results.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Body meaning |
+| --- | --- | --- |
+| RSP | Stack pointer | Fixed |
+| RBP | Frame pointer | Fixed |
+| RDX | Closure context pointer | Scratch |
+| R12 | None | Scratch |
+| R13 | None | Scratch |
+| R14 | Current goroutine | Scratch |
+| R15 | GOT reference temporary | Fixed if dynlink |
+| X15 | Zero value | Fixed |
+
+TODO: We may start with the existing TLS-based g and move to R14
+later.
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention except for R14 and X15, which will have
+to be restored on transitions from ABI0 code to ABIInternal code.
+In ABI0, these are undefined, so transitions from ABIInternal to ABI0
+can ignore these registers.
+
+*Rationale*: For the current goroutine pointer, we chose a register
+that requires an additional REX byte.
+While this adds one byte to every function prologue, it is hardly ever
+accessed outside the function prologue and we expect making more
+single-byte registers available to be a net win.
+
+*Rationale*: We designate X15 as a fixed zero register because
+functions often have to bulk zero their stack frames, and this is more
+efficient with a designated zero register.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 8 bytes.
+
+The amd64 architecture does not use a link register.
+
+A function's stack frame is laid out as follows:
+
+    +------------------------------+
+    | return PC                    |
+    | RBP on entry                 |
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    +------------------------------+ ↓ lower addresses
+
+The "return PC" is pushed as part of the standard amd64 `CALL`
+operation.
+On entry, a function subtracts from RSP to open its stack frame and
+saves the value of RBP directly below the return PC.
+A leaf function that does not require any stack space may omit the
+saved RBP.
+
+The Go ABI's use of RBP as a frame pointer register is compatible with
+amd64 platform conventions so that Go can inter-operate with platform
+debuggers and profilers.
+
+#### Flags
+
+The direction flag (D) is always cleared (set to the “forward”
+direction) at a call.
+The arithmetic status flags are treated like scratch registers and not
+preserved across calls.
+All other bits in RFLAGS are system flags.
+
+The CPU is always in MMX technology state (not x87 mode).
+
+*Rationale*: Go on amd64 uses the XMM registers and never uses the x87
+registers, so it makes sense to assume the CPU is in MMX mode.
+Otherwise, any function that used the XMM registers would have to
+execute an EMMS instruction before calling another function or
+returning (this is the case in the SysV ABI).
+
+At calls, the MXCSR control bits are always set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| FZ | 15 | 0 | Do not flush to zero |
+| RC | 14/13 | 0 (RN) | Round to nearest |
+| PM | 12 | 1 | Precision masked |
+| UM | 11 | 1 | Underflow masked |
+| OM | 10 | 1 | Overflow masked |
+| ZM | 9 | 1 | Divide-by-zero masked |
+| DM | 8 | 1 | Denormal operations masked |
+| IM | 7 | 1 | Invalid operations masked |
+| DAZ | 6 | 0 | Do not zero de-normals |
+
+The MXCSR status bits are callee-save.
+
+*Rationale*: Having a fixed MXCSR control configuration allows Go
+functions to use SSE operations without modifying or saving the MXCSR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+The above fixed configuration matches the process initialization
+control bits specified by the ELF AMD64 ABI.
+
+The x87 floating-point control word is not used by Go on amd64.
+
+## Future directions
+
+### Spill path improvements
+
+The ABI currently reserves spill space for argument registers so the
+compiler can statically generate an argument spill path before calling
+into `runtime.morestack` to grow the stack.
+This ensures there will be sufficient spill space even when the stack
+is nearly exhausted and keeps stack growth and stack scanning
+essentially unchanged from ABI0.
+
+However, this wastes stack space (the median wastage is 16 bytes per
+call), resulting in larger stacks and increased cache footprint.
+A better approach would be to reserve stack space only when spilling.
+One way to ensure enough space is available to spill would be for
+every function to ensure there is enough space for the function's own
+frame *as well as* the spill space of all functions it calls.
+For most functions, this would change the threshold for the prologue
+stack growth check.
+For `nosplit` functions, this would change the threshold used in the
+linker's static stack size check.
+
+Allocating spill space in the callee rather than the caller may also
+allow for faster reflection calls in the common case where a function
+takes only register arguments, since it would allow reflection to make
+these calls directly without allocating any frame.
+
+The statically-generated spill path also increases code size.
+It is possible to instead have a generic spill path in the runtime, as
+part of `morestack`.
+However, this complicates reserving the spill space, since spilling
+all possible register arguments would, in most cases, take
+significantly more space than spilling only those used by a particular
+function.
+Some options are to spill to a temporary space and copy back only the
+registers used by the function, or to grow the stack if necessary
+before spilling to it (using a temporary space if necessary), or to
+use a heap-allocated space if insufficient stack space is available.
+These options all add enough complexity that we will have to make this
+decision based on the actual code size growth caused by the static
+spill paths.
+
+### Clobber sets
+
+As defined, the ABI does not use callee-save registers.
+This significantly simplifies the garbage collector and the compiler's
+register allocator, but at some performance cost.
+A potentially better balance for Go code would be to use *clobber
+sets*: for each function, the compiler records the set of registers it
+clobbers (including those clobbered by functions it calls) and any
+register not clobbered by function F can remain live across calls to
+F.
+
+This is generally a good fit for Go because Go's package DAG allows
+function metadata like the clobber set to flow up the call graph, even
+across package boundaries.
+Clobber sets would require relatively little change to the garbage
+collector, unlike general callee-save registers.
+One disadvantage of clobber sets over callee-save registers is that
+they don't help with indirect function calls or interface method
+calls, since static information isn't available in these cases.
+
+### Large aggregates
+
+Go encourages passing composite values by value, and this simplifies
+reasoning about mutation and races.
+However, this comes at a performance cost for large composite values.
+It may be possible to instead transparently pass large composite
+values by reference and delay copying until it is actually necessary.
+
+## Appendix: Register usage analysis
+
+In order to understand the impacts of the above design on register
+usage, we
+[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
+impact of the above ABI on a large code base: cmd/kubelet from
+[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.
+
+The following table shows the impact of different numbers of available
+integer and floating-point registers on argument assignment:
+
+```
+|      |        |       |      stack args |          spills |     stack total |
+| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
+|    0 |      0 |  6.3% |  32 | 152 | 256 |   0 |   0 |   0 |  32 | 152 | 256 |
+|    0 |      8 |  6.4% |  32 | 152 | 256 |   0 |   0 |   0 |  32 | 152 | 256 |
+|    1 |      8 | 21.3% |  24 | 144 | 248 |   8 |   8 |   8 |  32 | 152 | 256 |
+|    2 |      8 | 38.9% |  16 | 128 | 224 |   8 |  16 |  16 |  24 | 136 | 240 |
+|    3 |      8 | 57.0% |   0 | 120 | 224 |  16 |  24 |  24 |  24 | 136 | 240 |
+|    4 |      8 | 73.0% |   0 | 120 | 216 |  16 |  32 |  32 |  24 | 136 | 232 |
+|    5 |      8 | 83.3% |   0 | 112 | 216 |  16 |  40 |  40 |  24 | 136 | 232 |
+|    6 |      8 | 87.5% |   0 | 112 | 208 |  16 |  48 |  48 |  24 | 136 | 232 |
+|    7 |      8 | 89.8% |   0 | 112 | 208 |  16 |  48 |  56 |  24 | 136 | 232 |
+|    8 |      8 | 91.3% |   0 | 112 | 200 |  16 |  56 |  64 |  24 | 136 | 232 |
+|    9 |      8 | 92.1% |   0 | 112 | 192 |  16 |  56 |  72 |  24 | 136 | 232 |
+|   10 |      8 | 92.6% |   0 | 104 | 192 |  16 |  56 |  72 |  24 | 136 | 232 |
+|   11 |      8 | 93.1% |   0 | 104 | 184 |  16 |  56 |  80 |  24 | 128 | 232 |
+|   12 |      8 | 93.4% |   0 | 104 | 176 |  16 |  56 |  88 |  24 | 128 | 232 |
+|   13 |      8 | 94.0% |   0 |  88 | 176 |  16 |  56 |  96 |  24 | 128 | 232 |
+|   14 |      8 | 94.4% |   0 |  80 | 152 |  16 |  64 | 104 |  24 | 128 | 232 |
+|   15 |      8 | 94.6% |   0 |  80 | 152 |  16 |  64 | 112 |  24 | 128 | 232 |
+|   16 |      8 | 94.9% |   0 |  16 | 152 |  16 |  64 | 112 |  24 | 128 | 232 |
+|    ∞ |      8 | 99.8% |   0 |   0 |   0 |  24 | 112 | 216 |  24 | 120 | 216 |
+```
+
+The first two columns show the number of available integer and
+floating-point registers.
+The first row shows the results for 0 integer and 0 floating-point
+registers, which is equivalent to ABI0.
+We found that any reasonable number of floating-point registers has
+the same effect, so we fixed it at 8 for all other rows.
+
+The “% fit” column gives the fraction of functions where all arguments
+and results are register-assigned and no arguments are passed on the
+stack.
+The three “stack args” columns give the median, 95th and 99th
+percentile number of bytes of stack arguments.
+The “spills” columns likewise summarize the number of bytes in
+on-stack spill space.
+And “stack total” summarizes the sum of stack arguments and on-stack
+spill slots.
+Note that these are three different distributions; for example,
+there’s no single function that takes 0 stack argument bytes, 16 spill
+bytes, and 24 total stack bytes.
+
+From this, we can see that the fraction of functions that fit entirely
+in registers grows very slowly once it reaches about 90%, though
+curiously there is a small minority of functions that could benefit
+from a huge number of registers.
+Making 9 integer registers available on amd64 puts it in this realm:
+at that point 92.1% of functions fit entirely in registers.
+We also see that the stack space required for most functions is fairly
+small.
+While the increasing space required for spills largely balances out
+the decreasing space required for stack arguments as the number of
+available registers increases, there is a general reduction in the
+total stack space required with more available registers.
+This does, however, suggest that eliminating spill slots in the future
+would noticeably reduce stack requirements.
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
new file mode 100644
index 0000000..e935821
--- /dev/null
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -0,0 +1,461 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import (
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+	"fmt"
+	"sync"
+)
+
+//......................................................................
+//
+// Public/exported bits of the ABI utilities.
+//
+
+// ABIParamResultInfo stores the results of processing a given
+// function type to compute stack layout and register assignments. For
+// each input and output parameter we capture whether the param was
+// register-assigned (and to which register(s)) or the stack offset
+// for the param if it is not going to be passed in registers according
+// to the rules in the Go internal ABI specification (1.17).
+type ABIParamResultInfo struct {
+	inparams          []ABIParamAssignment // Includes receiver for method calls.  Does NOT include hidden closure pointer.
+	outparams         []ABIParamAssignment
+	offsetToSpillArea int64
+	spillAreaSize     int64
+	config            *ABIConfig // to enable String() method
+}
+
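+// InParams returns the assignments for the input parameters, including
+// the receiver for method calls (but not the hidden closure pointer).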
+func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
+	return a.inparams
+}
+
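+// OutParams returns the assignments for the results.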
+func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
+	return a.outparams
+}
+
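+// InParam returns the i'th input parameter assignment.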
+func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
+	return a.inparams[i]
+}
+
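+// OutParam returns the i'th output (result) assignment.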
+func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
+	return a.outparams[i]
+}
+
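+// SpillAreaOffset returns the stack offset of the spill area for
+// register-assigned parameters.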
+func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
+	return a.offsetToSpillArea
+}
+
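+// SpillAreaSize returns the size, in bytes, of the spill area.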
+func (a *ABIParamResultInfo) SpillAreaSize() int64 {
+	return a.spillAreaSize
+}
+
+// RegIndex stores the index into the set of machine registers used by
+// the ABI on a specific architecture for parameter passing.  RegIndex
+// values 0 through N-1 (where N is the number of integer registers
+// used for param passing according to the ABI rules) describe integer
+// registers; values N through N+M-1 (where M is the number of floating
+// point registers used) describe floating point registers.  Thus if the
+// ABI says there are 5 integer registers and 7 floating point
+// registers, then a RegIndex value of 4
+// indicates the 5th integer register, and a RegIndex value of 11
+// indicates the 7th floating point register.
+type RegIndex uint8
+
+// ABIParamAssignment holds information about how a specific param or
+// result will be passed: in registers (in which case 'Registers' is
+// populated) or on the stack (in which case 'Offset' is set to a
+// non-negative stack offset). The values in 'Registers' are indices (as
+// described above), not architected registers.
+type ABIParamAssignment struct {
+	Type      *types.Type
+	Registers []RegIndex
+	offset    int32
+}
+
+// Offset returns the stack offset for addressing the parameter that "a" describes.
+// This will panic if "a" describes a register-allocated parameter.
+func (a *ABIParamAssignment) Offset() int32 {
+	if len(a.Registers) > 0 {
+		panic("Register allocated parameters have no offset")
+	}
+	return a.offset
+}
+
+// SpillOffset returns the offset *within the spill area* for the parameter that "a" describes.
+// Registers will be spilled here; if a memory home is needed (e.g., for a pointer method),
+// then that will be the address.
+// This will panic if "a" describes a stack-allocated parameter.
+func (a *ABIParamAssignment) SpillOffset() int32 {
+	if len(a.Registers) == 0 {
+		panic("Stack-allocated parameters have no spill offset")
+	}
+	return a.offset
+}
+
+// RegAmounts holds a specified number of integer/float registers.
+type RegAmounts struct {
+	intRegs   int
+	floatRegs int
+}
+
+// ABIConfig captures the number of registers made available
+// by the ABI rules for parameter passing and result returning.
+type ABIConfig struct {
+	// Do we need anything more than this?
+	regAmounts       RegAmounts
+	regsForTypeCache map[*types.Type]int
+}
+
+// NewABIConfig returns a new ABI configuration for an architecture with
+// iRegsCount integer/pointer registers and fRegsCount floating point registers.
+func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig {
+	return &ABIConfig{regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
+}
+
+// NumParamRegs returns the number of parameter registers used for a given type,
+// without regard for the number available.
+func (a *ABIConfig) NumParamRegs(t *types.Type) int {
+	if n, ok := a.regsForTypeCache[t]; ok {
+		return n
+	}
+
+	if t.IsScalar() || t.IsPtrShaped() {
+		var n int
+		if t.IsComplex() {
+			n = 2
+		} else {
+			n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
+		}
+		a.regsForTypeCache[t] = n
+		return n
+	}
+	typ := t.Kind()
+	n := 0
+	switch typ {
+	case types.TARRAY:
+		n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
+	case types.TSTRUCT:
+		for _, f := range t.FieldSlice() {
+			n += a.NumParamRegs(f.Type)
+		}
+	case types.TSLICE:
+		n = a.NumParamRegs(synthSlice)
+	case types.TSTRING:
+		n = a.NumParamRegs(synthString)
+	case types.TINTER:
+		n = a.NumParamRegs(synthIface)
+	}
+	a.regsForTypeCache[t] = n
+	return n
+}
+
+// ABIAnalyze takes a function type 't' and, using the ABI rules held in
+// the receiver 'config', analyzes the function to determine how its parameters
+// and results will be passed (in registers or on the stack), returning
+// an ABIParamResultInfo object that holds the results of the analysis.
+func (config *ABIConfig) ABIAnalyze(t *types.Type) ABIParamResultInfo {
+	setup()
+	s := assignState{
+		rTotal: config.regAmounts,
+	}
+	result := ABIParamResultInfo{config: config}
+
+	// Receiver
+	ft := t.FuncType()
+	if t.NumRecvs() != 0 {
+		rfsl := ft.Receiver.FieldSlice()
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(rfsl[0].Type, false))
+	}
+
+	// Inputs
+	ifsl := ft.Params.FieldSlice()
+	for _, f := range ifsl {
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(f.Type, false))
+	}
+	s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
+
+	// Outputs
+	s.rUsed = RegAmounts{}
+	ofsl := ft.Results.FieldSlice()
+	for _, f := range ofsl {
+		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, true))
+	}
+	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+	// TODO in theory could align offset only to minimum required by spilled data types.
+	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+
+	return result
+}
+
+//......................................................................
+//
+// Non-public portions.
+
+// regString produces a human-readable version of a RegIndex.
+func (c *RegAmounts) regString(r RegIndex) string {
+	if int(r) < c.intRegs {
+		return fmt.Sprintf("I%d", int(r))
+	} else if int(r) < c.intRegs+c.floatRegs {
+		return fmt.Sprintf("F%d", int(r)-c.intRegs)
+	}
+	return fmt.Sprintf("<?>%d", r)
+}
+
+// toString method renders an ABIParamAssignment in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
+	regs := "R{"
+	offname := "spilloffset" // offset is for spill for register(s)
+	if len(ri.Registers) == 0 {
+		offname = "offset" // offset is for memory arg
+	}
+	for _, r := range ri.Registers {
+		regs += " " + config.regAmounts.regString(r)
+	}
+	return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
+}
+
+// String renders an ABIParamResultInfo in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamResultInfo) String() string {
+	res := ""
+	for k, p := range ri.inparams {
+		res += fmt.Sprintf("IN %d: %s\n", k, p.toString(ri.config))
+	}
+	for k, r := range ri.outparams {
+		res += fmt.Sprintf("OUT %d: %s\n", k, r.toString(ri.config))
+	}
+	res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
+		ri.offsetToSpillArea, ri.spillAreaSize)
+	return res
+}
+
+// assignState holds intermediate state during the register assigning process
+// for a given function signature.
+type assignState struct {
+	rTotal      RegAmounts // total reg amounts from ABI rules
+	rUsed       RegAmounts // regs used by params completely assigned so far
+	pUsed       RegAmounts // regs used by the current param (or pieces therein)
+	stackOffset int64      // current stack offset
+	spillOffset int64      // current spill offset
+}
+
+// align returns 'a' rounded up to t's alignment.
+func align(a int64, t *types.Type) int64 {
+	return alignTo(a, int(t.Align))
+}
+
+// alignTo returns 'a' rounded up to t, where t must be 0 or a power of 2.
+func alignTo(a int64, t int) int64 {
+	if t == 0 {
+		return a
+	}
+	return types.Rnd(a, int64(t))
+}
+
+// stackSlot returns a stack offset for a param or result of the
+// specified type.
+func (state *assignState) stackSlot(t *types.Type) int64 {
+	rv := align(state.stackOffset, t)
+	state.stackOffset = rv + t.Width
+	return rv
+}
+
+// allocateRegs returns a set of register indices for a parameter or result
+// that we've just determined to be register-assignable. The number of registers
+// needed is assumed to be stored in state.pUsed.
+func (state *assignState) allocateRegs() []RegIndex {
+	regs := []RegIndex{}
+
+	// integer
+	for r := state.rUsed.intRegs; r < state.rUsed.intRegs+state.pUsed.intRegs; r++ {
+		regs = append(regs, RegIndex(r))
+	}
+	state.rUsed.intRegs += state.pUsed.intRegs
+
+	// floating
+	for r := state.rUsed.floatRegs; r < state.rUsed.floatRegs+state.pUsed.floatRegs; r++ {
+		regs = append(regs, RegIndex(r+state.rTotal.intRegs))
+	}
+	state.rUsed.floatRegs += state.pUsed.floatRegs
+
+	return regs
+}
+
+// regAllocate creates a register ABIParamAssignment object for a param
+// or result with the specified type, as a final step (this assumes
+// that all of the safety/suitability analysis is complete).
+func (state *assignState) regAllocate(t *types.Type, isReturn bool) ABIParamAssignment {
+	spillLoc := int64(-1)
+	if !isReturn {
+		// Spill for register-resident t must be aligned for storage of a t.
+		spillLoc = align(state.spillOffset, t)
+		state.spillOffset = spillLoc + t.Size()
+	}
+	return ABIParamAssignment{
+		Type:      t,
+		Registers: state.allocateRegs(),
+		offset:    int32(spillLoc),
+	}
+}
+
+// stackAllocate creates a stack memory ABIParamAssignment object for
+// a param or result with the specified type, as a final step (this
+// assumes that all of the safety/suitability analysis is complete).
+func (state *assignState) stackAllocate(t *types.Type) ABIParamAssignment {
+	return ABIParamAssignment{
+		Type:   t,
+		offset: int32(state.stackSlot(t)),
+	}
+}
+
+// intUsed returns the number of integer registers consumed
+// at a given point within an assignment stage.
+func (state *assignState) intUsed() int {
+	return state.rUsed.intRegs + state.pUsed.intRegs
+}
+
+// floatUsed returns the number of floating point registers consumed at
+// a given point within an assignment stage.
+func (state *assignState) floatUsed() int {
+	return state.rUsed.floatRegs + state.pUsed.floatRegs
+}
+
+// regassignIntegral examines a param/result of integral type 't' to
+// determine whether it can be register-assigned. Returns TRUE if we
+// can register allocate, FALSE otherwise (and updates state
+// accordingly).
+func (state *assignState) regassignIntegral(t *types.Type) bool {
+	regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
+	if t.IsComplex() {
+		regsNeeded = 2
+	}
+
+	// Floating point and complex.
+	if t.IsFloat() || t.IsComplex() {
+		if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
+			// not enough regs
+			return false
+		}
+		state.pUsed.floatRegs += regsNeeded
+		return true
+	}
+
+	// Non-floating point
+	if regsNeeded+state.intUsed() > state.rTotal.intRegs {
+		// not enough regs
+		return false
+	}
+	state.pUsed.intRegs += regsNeeded
+	return true
+}
+
+// regassignArray processes an array type (or array component within some
+// other enclosing type) to determine if it can be register assigned.
+// Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignArray(t *types.Type) bool {
+
+	nel := t.NumElem()
+	if nel == 0 {
+		return true
+	}
+	if nel > 1 {
+		// Not an array of length 1: stack assign
+		return false
+	}
+	// Visit element
+	return state.regassign(t.Elem())
+}
+
+// regassignStruct processes a struct type (or struct component within
+// some other enclosing type) to determine if it can be register
+// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignStruct(t *types.Type) bool {
+	for _, field := range t.FieldSlice() {
+		if !state.regassign(field.Type) {
+			return false
+		}
+	}
+	return true
+}
+
+// synthOnce ensures that we only create the synth* fake types once.
+var synthOnce sync.Once
+
+// synthSlice, synthString, and synthIface are synthesized struct types
+// meant to capture the underlying implementations of string/slice/interface.
+var synthSlice *types.Type
+var synthString *types.Type
+var synthIface *types.Type
+
+// setup performs setup for the register assignment utilities, manufacturing
+// a small set of synthesized types that we'll need along the way.
+func setup() {
+	synthOnce.Do(func() {
+		fname := types.BuiltinPkg.Lookup
+		nxp := src.NoXPos
+		unsp := types.Types[types.TUNSAFEPTR]
+		ui := types.Types[types.TUINTPTR]
+		synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(nxp, fname("ptr"), unsp),
+			types.NewField(nxp, fname("len"), ui),
+			types.NewField(nxp, fname("cap"), ui),
+		})
+		synthString = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(nxp, fname("data"), unsp),
+			types.NewField(nxp, fname("len"), ui),
+		})
+		synthIface = types.NewStruct(types.NoPkg, []*types.Field{
+			types.NewField(nxp, fname("f1"), unsp),
+			types.NewField(nxp, fname("f2"), unsp),
+		})
+	})
+}
+
+// regassign examines a given param type (or component within some
+// composite) to determine if it can be register assigned.  Returns
+// TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassign(pt *types.Type) bool {
+	typ := pt.Kind()
+	if pt.IsScalar() || pt.IsPtrShaped() {
+		return state.regassignIntegral(pt)
+	}
+	switch typ {
+	case types.TARRAY:
+		return state.regassignArray(pt)
+	case types.TSTRUCT:
+		return state.regassignStruct(pt)
+	case types.TSLICE:
+		return state.regassignStruct(synthSlice)
+	case types.TSTRING:
+		return state.regassignStruct(synthString)
+	case types.TINTER:
+		return state.regassignStruct(synthIface)
+	default:
+		panic("not expected")
+	}
+}
+
+// assignParamOrReturn processes a given receiver, param, or result
+// of type 'pt' to determine whether it can be register assigned.
+// The returned ABIParamAssignment records the decision; register and
+// stack usage is accumulated in 'state'.
+func (state *assignState) assignParamOrReturn(pt *types.Type, isReturn bool) ABIParamAssignment {
+	state.pUsed = RegAmounts{}
+	if pt.Width == types.BADWIDTH {
+		panic("should never happen")
+	} else if pt.Width == 0 {
+		return state.stackAllocate(pt)
+	} else if state.regassign(pt) {
+		return state.regAllocate(pt, isReturn)
+	} else {
+		return state.stackAllocate(pt)
+	}
+}
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index af58440..ce1c402 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -5,13 +5,13 @@
 package amd64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/x86"
 )
 
 var leaptr = x86.ALEAQ
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &x86.Linkamd64
 	arch.REGSP = x86.REGSP
 	arch.MAXWIDTH = 1 << 50
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 0c1456f..aefdb14 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -5,7 +5,10 @@
 package amd64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 	"cmd/internal/objabi"
@@ -19,8 +22,8 @@
 const (
 	dzBlocks    = 16 // number of MOV/ADD blocks
 	dzBlockLen  = 4  // number of clears per block
-	dzBlockSize = 19 // size of instructions in a single block
-	dzMovSize   = 4  // size of single MOV instruction w/ offset
+	dzBlockSize = 23 // size of instructions in a single block
+	dzMovSize   = 5  // size of single MOV instruction w/ offset
 	dzLeaqSize  = 4  // size of single LEAQ instruction
 	dzClearStep = 16 // number of bytes cleared by each MOV instruction
 
@@ -51,7 +54,7 @@
 	return -dzClearStep * (dzBlockLen - tailSteps)
 }
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	const (
 		ax = 1 << iota
 		x0
@@ -61,67 +64,67 @@
 		return p
 	}
 
-	if cnt%int64(gc.Widthreg) != 0 {
+	if cnt%int64(types.RegSize) != 0 {
 		// should only happen with nacl
-		if cnt%int64(gc.Widthptr) != 0 {
-			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+		if cnt%int64(types.PtrSize) != 0 {
+			base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
 		}
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
-		off += int64(gc.Widthptr)
-		cnt -= int64(gc.Widthptr)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+		off += int64(types.PtrSize)
+		cnt -= int64(types.PtrSize)
 	}
 
 	if cnt == 8 {
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
-	} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
+		p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+	} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
 		if *state&x0 == 0 {
-			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
 
 		for i := int64(0); i < cnt/16; i++ {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
 		}
 
 		if cnt%16 != 0 {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
-	} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+	} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
 		if *state&x0 == 0 {
-			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
-		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
-		p.To.Sym = gc.Duffzero
+		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+		p.To.Sym = ir.Syms.Duffzero
 
 		if cnt%16 != 0 {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
 		}
 	} else {
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
 
-		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
-		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// This is a hardware nop (1-byte 0x90) instruction,
 	// even though we describe it as an explicit XCHGL here.
 	// Particularly, this does not zero the high 32 bits
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 5ff05a0..4938e4b 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -8,16 +8,18 @@
 	"fmt"
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 )
 
 // markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 	flive := b.FlagsLiveAtEnd
 	for _, c := range b.ControlValues() {
 		flive = c.Type.IsFlags() || flive
@@ -110,7 +112,7 @@
 //     dest := dest(To) op src(From)
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
 	p := s.Prog(op)
 	p.From.Type = obj.TYPE_REG
 	p.To.Type = obj.TYPE_REG
@@ -164,7 +166,35 @@
 	return off, adj
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func getgFromTLS(s *ssagen.State, r int16) {
+	// See the comments in cmd/internal/obj/x86/obj6.go
+	// near CanUse1InsnTLS for a detailed explanation of these instructions.
+	if x86.CanUse1InsnTLS(base.Ctxt) {
+		// MOVQ (TLS), r
+		p := s.Prog(x86.AMOVQ)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = x86.REG_TLS
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = r
+	} else {
+		// MOVQ TLS, r
+		// MOVQ (r)(TLS*1), r
+		p := s.Prog(x86.AMOVQ)
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = x86.REG_TLS
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = r
+		q := s.Prog(x86.AMOVQ)
+		q.From.Type = obj.TYPE_MEM
+		q.From.Reg = r
+		q.From.Index = x86.REG_TLS
+		q.From.Scale = 1
+		q.To.Type = obj.TYPE_REG
+		q.To.Reg = r
+	}
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpAMD64VFMADD231SD:
 		p := s.Prog(v.Op.Asm())
@@ -630,12 +660,12 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = o
 		}
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 	case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -671,7 +701,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[1].Reg()
 	case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -679,20 +709,20 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.From, v, sc.Off())
+		ssagen.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Args[2].Reg()
 	case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
 		sc := v.AuxValAndOff()
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		gc.AddAux2(&p.From, v, sc.Off())
+		ssagen.AddAux2(&p.From, v, sc.Off())
 		p.To.Type = obj.TYPE_CONST
 		p.To.Offset = sc.Val()
 	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -732,14 +762,14 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
 		ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
 		p := s.Prog(v.Op.Asm())
 		memIdx(&p.From, v)
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -751,7 +781,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
 		ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
 		ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -763,7 +793,7 @@
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[2].Reg()
 		memIdx(&p.To, v)
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
 		sc := v.AuxValAndOff()
 		off := sc.Off()
@@ -786,7 +816,7 @@
 			p := s.Prog(asm)
 			p.To.Type = obj.TYPE_MEM
 			p.To.Reg = v.Args[0].Reg()
-			gc.AddAux2(&p.To, v, off)
+			ssagen.AddAux2(&p.To, v, off)
 			break
 		}
 		fallthrough
@@ -801,7 +831,7 @@
 		p.From.Offset = val
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, off)
+		ssagen.AddAux2(&p.To, v, off)
 
 	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
 		p := s.Prog(v.Op.Asm())
@@ -810,7 +840,21 @@
 		p.From.Offset = sc.Val()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
+	case ssa.OpAMD64MOVOstorezero:
+		if s.ABI != obj.ABIInternal {
+			v.Fatalf("MOVOstorezero can only be used in ABIInternal functions")
+		}
+		if !base.Flag.ABIWrap {
+			// zeroing X15 manually if wrappers are not used
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
+		p := s.Prog(v.Op.Asm())
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = x86.REG_X15
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = v.Args[0].Reg()
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
 		ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
 		ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -835,7 +879,7 @@
 			p.From.Type = obj.TYPE_NONE
 		}
 		memIdx(&p.To, v)
-		gc.AddAux2(&p.To, v, sc.Off())
+		ssagen.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
 		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
 		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -865,7 +909,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
@@ -891,13 +935,20 @@
 		p.From.Reg = r
 		p.From.Index = i
 
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 		if v.Reg() != v.Args[0].Reg() {
 			v.Fatalf("input[0] and output not in same register %s", v.LongString())
 		}
 	case ssa.OpAMD64DUFFZERO:
+		if s.ABI != obj.ABIInternal {
+			v.Fatalf("DUFFZERO can only be used in ABIInternal functions")
+		}
+		if !base.Flag.ABIWrap {
+			// zeroing X15 manually if wrappers are not used
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+		}
 		off := duffStart(v.AuxInt)
 		adj := duffAdj(v.AuxInt)
 		var p *obj.Prog
@@ -911,18 +962,12 @@
 		}
 		p = s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = off
-	case ssa.OpAMD64MOVOconst:
-		if v.AuxInt != 0 {
-			v.Fatalf("MOVOconst can only do constant=0")
-		}
-		r := v.Reg()
-		opregreg(s, x86.AXORPS, r, r)
 	case ssa.OpAMD64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		if v.AuxInt%16 != 0 {
 			v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
 		}
@@ -949,7 +994,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 
@@ -961,44 +1006,37 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpAMD64LoweredHasCPUFeature:
 		p := s.Prog(x86.AMOVBQZX)
 		p.From.Type = obj.TYPE_MEM
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64LoweredGetClosurePtr:
 		// Closure pointer is DX.
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpAMD64LoweredGetG:
-		r := v.Reg()
-		// See the comments in cmd/internal/obj/x86/obj6.go
-		// near CanUse1InsnTLS for a detailed explanation of these instructions.
-		if x86.CanUse1InsnTLS(gc.Ctxt) {
-			// MOVQ (TLS), r
-			p := s.Prog(x86.AMOVQ)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		} else {
-			// MOVQ TLS, r
-			// MOVQ (r)(TLS*1), r
-			p := s.Prog(x86.AMOVQ)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-			q := s.Prog(x86.AMOVQ)
-			q.From.Type = obj.TYPE_MEM
-			q.From.Reg = r
-			q.From.Index = x86.REG_TLS
-			q.From.Scale = 1
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = r
+		if base.Flag.ABIWrap {
+			v.Fatalf("LoweredGetG should not appear in new ABI")
 		}
-	case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+		r := v.Reg()
+		getgFromTLS(s, r)
+	case ssa.OpAMD64CALLstatic:
+		if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+			// set G register from TLS
+			getgFromTLS(s, x86.REG_R14)
+		}
+		s.Call(v)
+		if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+			// zeroing X15 when returning to ABIInternal from an ABI0 call
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+			// set G register from TLS
+			getgFromTLS(s, x86.REG_R14)
+		}
+	case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
 		s.Call(v)
 
 	case ssa.OpAMD64LoweredGetCallerPC:
@@ -1012,12 +1050,12 @@
 	case ssa.OpAMD64LoweredGetCallerSP:
 		// caller's SP is the address of the first arg
 		mov := x86.AMOVQ
-		if gc.Widthptr == 4 {
+		if types.PtrSize == 4 {
 			mov = x86.AMOVL
 		}
 		p := s.Prog(mov)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+		p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -1027,14 +1065,14 @@
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		// arg0 is in DI. Set sym to match where regalloc put arg1.
-		p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
+		p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
 
 	case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
-		s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+		s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
 
 	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
 		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
@@ -1115,7 +1153,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 
 	case ssa.OpAMD64SETNEF:
 		p := s.Prog(v.Op.Asm())
@@ -1164,14 +1202,14 @@
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 	case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1184,7 +1222,7 @@
 		p.From.Reg = r
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
 		r := v.Reg0()
 		if r != v.Args[0].Reg() {
@@ -1196,7 +1234,7 @@
 		p.From.Reg = r
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
 		if v.Args[1].Reg() != x86.REG_AX {
 			v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1207,7 +1245,7 @@
 		p.From.Reg = v.Args[2].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		p = s.Prog(x86.ASETEQ)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
@@ -1218,20 +1256,20 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpClobber:
 		p := s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		p = s.Prog(x86.AMOVL)
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = 0xdeaddead
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = x86.REG_SP
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 		p.To.Offset += 4
 	default:
 		v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1257,22 +1295,22 @@
 	ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
 }
 
-var eqfJumps = [2][2]gc.IndexJump{
+var eqfJumps = [2][2]ssagen.IndexJump{
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
 }
-var nefJumps = [2][2]gc.IndexJump{
+var nefJumps = [2][2]ssagen.IndexJump{
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
 	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockDefer:
 		// defer returns in rax:
@@ -1285,16 +1323,22 @@
 		p.To.Reg = x86.REG_AX
 		p = s.Prog(x86.AJNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 	case ssa.BlockExit:
 	case ssa.BlockRet:
 		s.Prog(obj.ARET)
 	case ssa.BlockRetJmp:
+		if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
+			// zeroing X15 when entering ABIInternal from ABI0
+			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+			// set G register from TLS
+			getgFromTLS(s, x86.REG_R14)
+		}
 		p := s.Prog(obj.ARET)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index 20e2f43..81959ae 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -5,13 +5,13 @@
 package arm
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/arm"
 	"cmd/internal/objabi"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &arm.Linkarm
 	arch.REGSP = arm.REGSP
 	arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
 
-	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
index bd8d7ff..f2c6763 100644
--- a/src/cmd/compile/internal/arm/ggen.go
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -5,49 +5,51 @@
 package arm
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if *r0 == 0 {
-		p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+		p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
 		*r0 = 1
 	}
 
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthptr) {
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+	} else if cnt <= int64(128*types.PtrSize) {
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
 	} else {
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
 		p.Reg = arm.REG_R1
-		p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+		p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
 		p1 := p
 		p.Scond |= arm.C_PBIT
-		p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm.REG_R2
-		p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(arm.AAND)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = arm.REG_R0
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 765a771..729d2da 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -9,9 +9,11 @@
 	"math"
 	"math/bits"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm"
@@ -91,7 +93,7 @@
 }
 
 // genshift generates a Prog for r = r0 op (r1 shifted by n)
-func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
 	p := s.Prog(as)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(makeshift(r1, typ, n))
@@ -109,7 +111,7 @@
 }
 
 // genregshift generates a Prog for r = r0 op (r1 shifted by r2)
-func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
 	p := s.Prog(as)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = int64(makeregshift(r1, typ, r2))
@@ -143,7 +145,7 @@
 	return 0xffffffff, 0
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpARMMOVWreg:
 		if v.Type.IsMemory() {
@@ -181,7 +183,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpStoreReg:
@@ -192,7 +194,7 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpARMADD,
 		ssa.OpARMADC,
 		ssa.OpARMSUB,
@@ -543,10 +545,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVW $off(SP), R
 			wantreg = "SP"
@@ -566,7 +568,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpARMMOVBstore,
@@ -579,7 +581,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
 		// this is just shift 0 bits
 		fallthrough
@@ -700,7 +702,7 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Udiv
+		p.To.Sym = ir.Syms.Udiv
 	case ssa.OpARMLoweredWB:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
@@ -710,39 +712,39 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(8) // space used in callee args area by assembly stubs
 	case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
 		s.UseArgs(12) // space used in callee args area by assembly stubs
 	case ssa.OpARMDUFFZERO:
 		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.OpARMDUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 	case ssa.OpARMLoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(arm.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = arm.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpARMLoweredZero:
 		// MOVW.P	Rarg2, 4(R1)
@@ -777,7 +779,7 @@
 		p2.Reg = arm.REG_R1
 		p3 := s.Prog(arm.ABLE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARMLoweredMove:
 		// MOVW.P	4(R1), Rtmp
 		// MOVW.P	Rtmp, 4(R2)
@@ -818,7 +820,7 @@
 		p3.Reg = arm.REG_R1
 		p4 := s.Prog(arm.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 	case ssa.OpARMEqual,
 		ssa.OpARMNotEqual,
 		ssa.OpARMLessThan,
@@ -844,12 +846,12 @@
 		p.To.Reg = v.Reg()
 	case ssa.OpARMLoweredGetClosurePtr:
 		// Closure pointer is R7 (arm.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpARMLoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(arm.AMOVW)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -899,24 +901,24 @@
 }
 
 // To model a 'LEnoov' ('<=' without overflow checking) branching
-var leJumps = [2][2]gc.IndexJump{
+var leJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
 }
 
 // To model a 'GTnoov' ('>' without overflow checking) branching
-var gtJumps = [2][2]gc.IndexJump{
+var gtJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockDefer:
@@ -929,11 +931,11 @@
 		p.Reg = arm.REG_R0
 		p = s.Prog(arm.ABNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockExit:
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index 40d6e17..d3db37e 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -5,12 +5,12 @@
 package arm64
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/internal/obj/arm64"
 )
 
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
 	arch.LinkArch = &arm64.Linkarm64
 	arch.REGSP = arm64.REGSP
 	arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@
 	arch.Ginsnop = ginsnop
 	arch.Ginsnopdefer = ginsnop
 
-	arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
 	arch.SSAGenValue = ssaGenValue
 	arch.SSAGenBlock = ssaGenBlock
 }
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index f3fec03..8364535 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -5,7 +5,9 @@
 package arm64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
+	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
 	"cmd/internal/objabi"
@@ -22,52 +24,52 @@
 	return frame
 }
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+	if cnt < int64(4*types.PtrSize) {
+		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+			p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
 		}
-	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
-		if cnt%(2*int64(gc.Widthptr)) != 0 {
-			p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
-			off += int64(gc.Widthptr)
-			cnt -= int64(gc.Widthptr)
+	} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+		if cnt%(2*int64(types.PtrSize)) != 0 {
+			p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+			off += int64(types.PtrSize)
+			cnt -= int64(types.PtrSize)
 		}
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
 		p.Reg = arm64.REG_R20
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
-		p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
+		p.To.Sym = ir.Syms.Duffzero
+		p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
 	} else {
 		// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
 		// We are at the function entry, where no register is live, so it is okay to clobber
 		// other registers
 		const rtmp = arm64.REG_R20
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
 		p.Reg = arm64.REGRT1
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
 		p.Reg = arm64.REGRT1
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
 		p.Scond = arm64.C_XPRE
 		p1 := p
-		p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm64.REGRT2
-		p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(arm64.AHINT)
 	p.From.Type = obj.TYPE_CONST
 	return p
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 4358851..73e74e1 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -7,9 +7,11 @@
 import (
 	"math"
 
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
 	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
@@ -81,7 +83,7 @@
 }
 
 // genshift generates a Prog for r = r0 op (r1 shifted by n)
-func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
 	p := s.Prog(as)
 	p.From.Type = obj.TYPE_SHIFT
 	p.From.Offset = makeshift(r1, typ, n)
@@ -110,7 +112,7 @@
 	return mop
 }
 
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 	switch v.Op {
 	case ssa.OpCopy, ssa.OpARM64MOVDreg:
 		if v.Type.IsMemory() {
@@ -148,7 +150,7 @@
 			return
 		}
 		p := s.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
+		ssagen.AddrAuto(&p.From, v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpStoreReg:
@@ -159,7 +161,7 @@
 		p := s.Prog(storeByType(v.Type))
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
+		ssagen.AddrAuto(&p.To, v)
 	case ssa.OpARM64ADD,
 		ssa.OpARM64SUB,
 		ssa.OpARM64AND,
@@ -393,10 +395,10 @@
 			v.Fatalf("aux is of unknown type %T", v.Aux)
 		case *obj.LSym:
 			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *gc.Node:
+			ssagen.AddAux(&p.From, v)
+		case *ir.Name:
 			wantreg = "SP"
-			gc.AddAux(&p.From, v)
+			ssagen.AddAux(&p.From, v)
 		case nil:
 			// No sym, just MOVD $off(SP), R
 			wantreg = "SP"
@@ -417,7 +419,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
 	case ssa.OpARM64MOVBloadidx,
@@ -444,7 +446,7 @@
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg0()
 	case ssa.OpARM64MOVBstore,
@@ -461,7 +463,7 @@
 		p.From.Reg = v.Args[1].Reg()
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64MOVBstoreidx,
 		ssa.OpARM64MOVHstoreidx,
 		ssa.OpARM64MOVWstoreidx,
@@ -482,7 +484,7 @@
 		p.From.Offset = int64(v.Args[2].Reg())
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64MOVBstorezero,
 		ssa.OpARM64MOVHstorezero,
 		ssa.OpARM64MOVWstorezero,
@@ -492,7 +494,7 @@
 		p.From.Reg = arm64.REGZERO
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64MOVBstorezeroidx,
 		ssa.OpARM64MOVHstorezeroidx,
 		ssa.OpARM64MOVWstorezeroidx,
@@ -511,7 +513,7 @@
 		p.From.Offset = int64(arm64.REGZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
+		ssagen.AddAux(&p.To, v)
 	case ssa.OpARM64BFI,
 		ssa.OpARM64BFXIL:
 		r := v.Reg()
@@ -580,7 +582,7 @@
 		p2.From.Type = obj.TYPE_REG
 		p2.From.Reg = arm64.REGTMP
 		p2.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p2, p)
+		p2.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicExchange64Variant,
 		ssa.OpARM64LoweredAtomicExchange32Variant:
 		swap := arm64.ASWPALD
@@ -634,7 +636,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = arm64.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicAdd64Variant,
 		ssa.OpARM64LoweredAtomicAdd32Variant:
 		// LDADDAL	Rarg1, (Rarg0), Rout
@@ -698,13 +700,13 @@
 		p4.From.Type = obj.TYPE_REG
 		p4.From.Reg = arm64.REGTMP
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 		p5 := s.Prog(arm64.ACSET)
 		p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
 		p5.From.Reg = arm64.COND_EQ
 		p5.To.Type = obj.TYPE_REG
 		p5.To.Reg = out
-		gc.Patch(p2, p5)
+		p2.To.SetTarget(p5)
 	case ssa.OpARM64LoweredAtomicCas64Variant,
 		ssa.OpARM64LoweredAtomicCas32Variant:
 		// Rarg0: ptr
@@ -792,7 +794,7 @@
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = arm64.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicAnd8Variant,
 		ssa.OpARM64LoweredAtomicAnd32Variant:
 		atomic_clear := arm64.ALDCLRALW
@@ -959,7 +961,7 @@
 		p := s.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffzero
+		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = v.AuxInt
 	case ssa.OpARM64LoweredZero:
 		// STP.P	(ZR,ZR), 16(R16)
@@ -980,12 +982,12 @@
 		p2.Reg = arm64.REG_R16
 		p3 := s.Prog(arm64.ABLE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Duffcopy
+		p.To.Sym = ir.Syms.Duffcopy
 		p.To.Offset = v.AuxInt
 	case ssa.OpARM64LoweredMove:
 		// MOVD.P	8(R16), Rtmp
@@ -1013,7 +1015,7 @@
 		p3.Reg = arm64.REG_R16
 		p4 := s.Prog(arm64.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 	case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
 		s.Call(v)
 	case ssa.OpARM64LoweredWB:
@@ -1025,21 +1027,21 @@
 		p := s.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
 		s.UseArgs(16) // space used in callee args area by assembly stubs
 	case ssa.OpARM64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(arm64.AMOVB)
 		p.From.Type = obj.TYPE_MEM
 		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
+		ssagen.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = arm64.REGTMP
 		if logopt.Enabled() {
 			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
 		}
-		if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Pos, "generated nil check")
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+			base.WarnfAt(v.Pos, "generated nil check")
 		}
 	case ssa.OpARM64Equal,
 		ssa.OpARM64NotEqual,
@@ -1067,12 +1069,12 @@
 		p.To.Reg = v.Reg()
 	case ssa.OpARM64LoweredGetClosurePtr:
 		// Closure pointer is R26 (arm64.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
+		ssagen.CheckLoweredGetClosurePtr(v)
 	case ssa.OpARM64LoweredGetCallerSP:
 		// caller's SP is FixedFrameSize below the address of the first arg
 		p := s.Prog(arm64.AMOVD)
 		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = -gc.Ctxt.FixedFrameSize()
+		p.From.Offset = -base.Ctxt.FixedFrameSize()
 		p.From.Name = obj.NAME_PARAM
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -1142,24 +1144,24 @@
 }
 
 // To model a 'LEnoov' ('<=' without overflow checking) branching
-var leJumps = [2][2]gc.IndexJump{
+var leJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
 }
 
 // To model a 'GTnoov' ('>' without overflow checking) branching
-var gtJumps = [2][2]gc.IndexJump{
+var gtJumps = [2][2]ssagen.IndexJump{
 	{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
 	{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
 }
 
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 	switch b.Kind {
 	case ssa.BlockPlain:
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockDefer:
@@ -1172,11 +1174,11 @@
 		p.Reg = arm64.REG_R0
 		p = s.Prog(arm64.ABNE)
 		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
 		if b.Succs[0].Block() != next {
 			p := s.Prog(obj.AJMP)
 			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
 		}
 
 	case ssa.BlockExit:
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
new file mode 100644
index 0000000..3b9bc3a
--- /dev/null
+++ b/src/cmd/compile/internal/base/base.go
@@ -0,0 +1,75 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"os"
+)
+
+var atExitFuncs []func()
+
+func AtExit(f func()) {
+	atExitFuncs = append(atExitFuncs, f)
+}
+
+func Exit(code int) {
+	for i := len(atExitFuncs) - 1; i >= 0; i-- {
+		f := atExitFuncs[i]
+		atExitFuncs = atExitFuncs[:i]
+		f()
+	}
+	os.Exit(code)
+}
+
+// To enable tracing support (-t flag), set EnableTrace to true.
+const EnableTrace = false
+
+func Compiling(pkgs []string) bool {
+	if Ctxt.Pkgpath != "" {
+		for _, p := range pkgs {
+			if Ctxt.Pkgpath == p {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// The racewalk pass is currently handled in three parts.
+//
+// First, for flag_race, it inserts calls to racefuncenter and
+// racefuncexit at the start and end (respectively) of each
+// function. This is handled below.
+//
+// Second, during buildssa, it inserts appropriate instrumentation
+// calls immediately before each memory load or store. This is handled
+// by the (*state).instrument method in ssa.go, so here we just set
+// the Func.InstrumentBody flag as needed. For background on why this
+// is done during SSA construction rather than a separate SSA pass,
+// see issue #19054.
+//
+// Third, we remove calls to racefuncenter and racefuncexit for leaf
+// functions without instrumented operations. This is done as part of
+// the ssa opt pass via a special rule.
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var NoInstrumentPkgs = []string{
+	"runtime/internal/atomic",
+	"runtime/internal/sys",
+	"runtime/internal/math",
+	"runtime",
+	"runtime/race",
+	"runtime/msan",
+	"internal/cpu",
+}
+
+// Don't insert racefuncenterfp/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var NoRacePkgs = []string{"sync", "sync/atomic"}
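
For illustration only (not part of the patch): a minimal sketch of the AtExit/Exit
hooks defined above. The temporary-file name is an assumption, and the standard os
package is assumed to be imported.

	// Register a cleanup hook; Exit runs hooks in LIFO order before os.Exit.
	base.AtExit(func() { os.Remove(tmpObj) }) // tmpObj is a hypothetical path
	// ... later, on a fatal condition:
	base.Exit(2)
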
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
new file mode 100644
index 0000000..164941b
--- /dev/null
+++ b/src/cmd/compile/internal/base/debug.go
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Debug arguments, set by -d flag.
+
+package base
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"cmd/internal/objabi"
+)
+
+// Debug holds the parsed debugging configuration values.
+var Debug = DebugFlags{
+	Fieldtrack: &objabi.Fieldtrack_enabled,
+}
+
+// DebugFlags defines the debugging configuration values (see var Debug).
+// Each struct field is a different value, named for the lower-case of the field name.
+// Each field must be an int or string and must have a `help` struct tag.
+//
+// The -d option takes a comma-separated list of settings.
+// Each setting is name=value; for ints, name is short for name=1.
+type DebugFlags struct {
+	Append        int    `help:"print information about append compilation"`
+	Checkptr      int    `help:"instrument unsafe pointer conversions"`
+	Closure       int    `help:"print information about closure compilation"`
+	DclStack      int    `help:"run internal dclstack check"`
+	Defer         int    `help:"print information about defer compilation"`
+	DisableNil    int    `help:"disable nil checks"`
+	DumpPtrs      int    `help:"show Node pointer values in dump output"`
+	DwarfInl      int    `help:"print information about DWARF inlined function creation"`
+	Export        int    `help:"print export data"`
+	Fieldtrack    *int   `help:"enable field tracking"`
+	GCProg        int    `help:"print dump of GC programs"`
+	Libfuzzer     int    `help:"enable coverage instrumentation for libfuzzer"`
+	LocationLists int    `help:"print information about DWARF location list creation"`
+	Nil           int    `help:"print information about nil checks"`
+	PCTab         string `help:"print named pc-value table"`
+	Panic         int    `help:"show all compiler panics"`
+	Slice         int    `help:"print information about slice compilation"`
+	SoftFloat     int    `help:"force compiler to emit soft-float code"`
+	TypeAssert    int    `help:"print information about type assertion inlining"`
+	TypecheckInl  int    `help:"eager typechecking of inline function bodies"`
+	WB            int    `help:"print information about write barriers"`
+	ABIWrap       int    `help:"print information about ABI wrapper generation"`
+
+	any bool // set when any of the values have been set
+}
+
+// Any reports whether any of the debug flags have been set.
+func (d *DebugFlags) Any() bool { return d.any }
+
+type debugField struct {
+	name string
+	help string
+	val  interface{} // *int or *string
+}
+
+var debugTab []debugField
+
+func init() {
+	v := reflect.ValueOf(&Debug).Elem()
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Name == "any" {
+			continue
+		}
+		name := strings.ToLower(f.Name)
+		help := f.Tag.Get("help")
+		if help == "" {
+			panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
+		}
+		ptr := v.Field(i).Addr().Interface()
+		switch ptr.(type) {
+		default:
+			panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
+		case *int, *string:
+			// ok
+		case **int:
+			ptr = *ptr.(**int) // record the *int itself
+		}
+		debugTab = append(debugTab, debugField{name, help, ptr})
+	}
+}
+
+// DebugSSA is called to set a -d ssa/... option.
+// If nil, those options are reported as invalid options.
+// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
+var DebugSSA func(phase, flag string, val int, valString string) string
+
+// parseDebug parses the -d debug string argument.
+func parseDebug(debugstr string) {
+	// parse -d argument
+	if debugstr == "" {
+		return
+	}
+	Debug.any = true
+Split:
+	for _, name := range strings.Split(debugstr, ",") {
+		if name == "" {
+			continue
+		}
+		// display help about the -d option itself and quit
+		if name == "help" {
+			fmt.Print(debugHelpHeader)
+			maxLen := len("ssa/help")
+			for _, t := range debugTab {
+				if len(t.name) > maxLen {
+					maxLen = len(t.name)
+				}
+			}
+			for _, t := range debugTab {
+				fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
+			}
+			// ssa options have their own help
+			fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
+			fmt.Print(debugHelpFooter)
+			os.Exit(0)
+		}
+		val, valstring, haveInt := 1, "", true
+		if i := strings.IndexAny(name, "=:"); i >= 0 {
+			var err error
+			name, valstring = name[:i], name[i+1:]
+			val, err = strconv.Atoi(valstring)
+			if err != nil {
+				val, haveInt = 1, false
+			}
+		}
+		for _, t := range debugTab {
+			if t.name != name {
+				continue
+			}
+			switch vp := t.val.(type) {
+			case nil:
+				// Ignore
+			case *string:
+				*vp = valstring
+			case *int:
+				if !haveInt {
+					log.Fatalf("invalid debug value %v", name)
+				}
+				*vp = val
+			default:
+				panic("bad debugtab type")
+			}
+			continue Split
+		}
+		// special case for ssa for now
+		if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
+			// expect form ssa/phase/flag
+			// e.g. -d=ssa/generic_cse/time
+			// _ in phase name also matches space
+			phase := name[4:]
+			flag := "debug" // default flag is debug
+			if i := strings.Index(phase, "/"); i >= 0 {
+				flag = phase[i+1:]
+				phase = phase[:i]
+			}
+			err := DebugSSA(phase, flag, val, valstring)
+			if err != "" {
+				log.Fatalf("%s", err)
+			}
+			continue Split
+		}
+		log.Fatalf("unknown debug key -d %s\n", name)
+	}
+}
+
+const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
+
+<key> is one of:
+
+`
+
+const debugHelpFooter = `
+<value> is key-specific.
+
+Key "checkptr" supports values:
+	"0": instrumentation disabled
+	"1": conversions involving unsafe.Pointer are instrumented
+	"2": conversions to unsafe.Pointer force heap allocation
+
+Key "pctab" supports values:
+	"pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
+`
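
For illustration only (not part of the patch): a sketch of how a pass might consult
these -d values through the exported struct. The surrounding pass, the src.XPos
variable pos, and the message text are assumptions.

	// Gate an optional diagnostic on -d=defer (hypothetical call site).
	if base.Debug.Defer != 0 {
		base.WarnfAt(pos, "defer: extra diagnostic")
	}
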
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
new file mode 100644
index 0000000..c38bbe6
--- /dev/null
+++ b/src/cmd/compile/internal/base/flag.go
@@ -0,0 +1,459 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"cmd/internal/objabi"
+	"cmd/internal/sys"
+)
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+	objabi.Flagprint(os.Stderr)
+	Exit(2)
+}
+
+// Flag holds the parsed command-line flags.
+// See ParseFlag for non-zero defaults.
+var Flag CmdFlags
+
+// A CountFlag is a counting integer flag.
+// It accepts -name=value to set the value directly,
+// but it also accepts -name with no =value to increment the count.
+type CountFlag int
+
+// CmdFlags defines the command-line flags (see var Flag).
+// Each struct field is a different flag, by default named for the lower-case of the field name.
+// If the flag name is a single letter, the default flag name is left upper-case.
+// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
+//
+// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
+// but this should be done only in exceptional circumstances: it helps everyone if the flag name
+// is obvious from the field name when the flag is used elsewhere in the compiler sources.
+// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
+//
+// Each field must have a `help` struct tag giving the flag help message.
+//
+// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
+// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
+type CmdFlags struct {
+	// Single letters
+	B CountFlag    "help:\"disable bounds checking\""
+	C CountFlag    "help:\"disable printing of columns in error messages\""
+	D string       "help:\"set relative `path` for local imports\""
+	E CountFlag    "help:\"debug symbol export\""
+	I func(string) "help:\"add `directory` to import search path\""
+	K CountFlag    "help:\"debug missing line numbers\""
+	L CountFlag    "help:\"show full file names in error messages\""
+	N CountFlag    "help:\"disable optimizations\""
+	S CountFlag    "help:\"print assembly listing\""
+	// V is added by objabi.AddVersionFlag
+	W CountFlag "help:\"debug parse tree after type checking\""
+
+	LowerC int          "help:\"concurrency during compilation (1 means no concurrency)\""
+	LowerD func(string) "help:\"enable debugging settings; try -d help\""
+	LowerE CountFlag    "help:\"no limit on number of errors reported\""
+	LowerH CountFlag    "help:\"halt on error\""
+	LowerJ CountFlag    "help:\"debug runtime-initialized variables\""
+	LowerL CountFlag    "help:\"disable inlining\""
+	LowerM CountFlag    "help:\"print optimization decisions\""
+	LowerO string       "help:\"write output to `file`\""
+	LowerP *string      "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
+	LowerR CountFlag    "help:\"debug generated wrappers\""
+	LowerT bool         "help:\"enable tracing for debugging the compiler\""
+	LowerW CountFlag    "help:\"debug type checking\""
+	LowerV *bool        "help:\"increase debug verbosity\""
+
+	// Special characters
+	Percent          int  "flag:\"%\" help:\"debug non-static initializers\""
+	CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
+
+	// Longer names
+	ABIWrap            bool         "help:\"enable generation of ABI wrappers\""
+	ABIWrapLimit       int          "help:\"emit at most N ABI wrappers (for debugging)\""
+	AsmHdr             string       "help:\"write assembly header to `file`\""
+	Bench              string       "help:\"append benchmark times to `file`\""
+	BlockProfile       string       "help:\"write block profile to `file`\""
+	BuildID            string       "help:\"record `id` as the build id in the export metadata\""
+	CPUProfile         string       "help:\"write cpu profile to `file`\""
+	Complete           bool         "help:\"compiling complete package (no C or assembly)\""
+	Dwarf              bool         "help:\"generate DWARF symbols\""
+	DwarfBASEntries    *bool        "help:\"use base address selection entries in DWARF\""                        // &Ctxt.UseBASEntries, set below
+	DwarfLocationLists *bool        "help:\"add location lists to DWARF in optimized mode\""                      // &Ctxt.Flag_locationlists, set below
+	Dynlink            *bool        "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
+	EmbedCfg           func(string) "help:\"read go:embed configuration from `file`\""
+	GenDwarfInl        int          "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
+	GoVersion          string       "help:\"required version of the runtime\""
+	ImportCfg          func(string) "help:\"read import configuration from `file`\""
+	ImportMap          func(string) "help:\"add `definition` of the form source=actual to import map\""
+	InstallSuffix      string       "help:\"set pkg directory `suffix`\""
+	JSON               string       "help:\"version,file for JSON compiler/optimizer detail output\""
+	Lang               string       "help:\"Go language version source code expects\""
+	LinkObj            string       "help:\"write linker-specific object to `file`\""
+	LinkShared         *bool        "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
+	Live               CountFlag    "help:\"debug liveness analysis\""
+	MSan               bool         "help:\"build code compatible with C/C++ memory sanitizer\""
+	MemProfile         string       "help:\"write memory profile to `file`\""
+	MemProfileRate     int64        "help:\"set runtime.MemProfileRate to `rate`\""
+	MutexProfile       string       "help:\"write mutex profile to `file`\""
+	NoLocalImports     bool         "help:\"reject local (relative) imports\""
+	Pack               bool         "help:\"write to file.a instead of file.o\""
+	Race               bool         "help:\"enable race detector\""
+	Shared             *bool        "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
+	SmallFrames        bool         "help:\"reduce the size limit for stack allocated objects\""      // small stacks, to diagnose GC latency; see golang.org/issue/27732
+	Spectre            string       "help:\"enable spectre mitigations in `list` (all, index, ret)\""
+	Std                bool         "help:\"compiling standard library\""
+	SymABIs            string       "help:\"read symbol ABIs from `file`\""
+	TraceProfile       string       "help:\"write an execution trace to `file`\""
+	TrimPath           string       "help:\"remove `prefix` from recorded source file paths\""
+	WB                 bool         "help:\"enable write barrier\"" // TODO: remove
+
+	// Configuration derived from flags; not a flag itself.
+	Cfg struct {
+		Embed struct { // set by -embedcfg
+			Patterns map[string][]string
+			Files    map[string]string
+		}
+		ImportDirs   []string          // appended to by -I
+		ImportMap    map[string]string // set by -importmap OR -importcfg
+		PackageFile  map[string]string // set by -importcfg; nil means not in use
+		SpectreIndex bool              // set by -spectre=index or -spectre=all
+		// Whether we are adding any sort of code instrumentation, such as
+		// when the race detector is enabled.
+		Instrumenting bool
+	}
+}
+
+// ParseFlags parses the command-line flags into Flag.
+func ParseFlags() {
+	Flag.I = addImportDir
+
+	Flag.LowerC = 1
+	Flag.LowerD = parseDebug
+	Flag.LowerP = &Ctxt.Pkgpath
+	Flag.LowerV = &Ctxt.Debugvlog
+
+	Flag.ABIWrap = objabi.Regabi_enabled != 0
+	Flag.Dwarf = objabi.GOARCH != "wasm"
+	Flag.DwarfBASEntries = &Ctxt.UseBASEntries
+	Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
+	*Flag.DwarfLocationLists = true
+	Flag.Dynlink = &Ctxt.Flag_dynlink
+	Flag.EmbedCfg = readEmbedCfg
+	Flag.GenDwarfInl = 2
+	Flag.ImportCfg = readImportCfg
+	Flag.ImportMap = addImportMap
+	Flag.LinkShared = &Ctxt.Flag_linkshared
+	Flag.Shared = &Ctxt.Flag_shared
+	Flag.WB = true
+
+	Flag.Cfg.ImportMap = make(map[string]string)
+
+	objabi.AddVersionFlag() // -V
+	registerFlags()
+	objabi.Flagparse(usage)
+
+	if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
+		log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
+	}
+	if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
+		log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
+	}
+	if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
+		log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
+	}
+	parseSpectre(Flag.Spectre) // left as string for RecordFlags
+
+	Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
+	Ctxt.Flag_optimize = Flag.N == 0
+	Ctxt.Debugasm = int(Flag.S)
+
+	if flag.NArg() < 1 {
+		usage()
+	}
+
+	if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
+		fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
+		Exit(2)
+	}
+
+	if Flag.LowerO == "" {
+		p := flag.Arg(0)
+		if i := strings.LastIndex(p, "/"); i >= 0 {
+			p = p[i+1:]
+		}
+		if runtime.GOOS == "windows" {
+			if i := strings.LastIndex(p, `\`); i >= 0 {
+				p = p[i+1:]
+			}
+		}
+		if i := strings.LastIndex(p, "."); i >= 0 {
+			p = p[:i]
+		}
+		suffix := ".o"
+		if Flag.Pack {
+			suffix = ".a"
+		}
+		Flag.LowerO = p + suffix
+	}
+
+	if Flag.Race && Flag.MSan {
+		log.Fatal("cannot use both -race and -msan")
+	}
+	if Flag.Race || Flag.MSan {
+		// -race and -msan imply -d=checkptr for now.
+		Debug.Checkptr = 1
+	}
+
+	if Flag.CompilingRuntime && Flag.N != 0 {
+		log.Fatal("cannot disable optimizations while compiling runtime")
+	}
+	if Flag.LowerC < 1 {
+		log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
+	}
+	if Flag.LowerC > 1 && !concurrentBackendAllowed() {
+		log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
+	}
+
+	if Flag.CompilingRuntime {
+		// Runtime can't use -d=checkptr, at least not yet.
+		Debug.Checkptr = 0
+
+		// Fuzzing the runtime isn't interesting either.
+		Debug.Libfuzzer = 0
+	}
+
+	// set via a -d flag
+	Ctxt.Debugpcln = Debug.PCTab
+}
+
+// registerFlags adds flag registrations for all the fields in Flag.
+// See the comment on type CmdFlags for the rules.
+func registerFlags() {
+	var (
+		boolType      = reflect.TypeOf(bool(false))
+		intType       = reflect.TypeOf(int(0))
+		stringType    = reflect.TypeOf(string(""))
+		ptrBoolType   = reflect.TypeOf(new(bool))
+		ptrIntType    = reflect.TypeOf(new(int))
+		ptrStringType = reflect.TypeOf(new(string))
+		countType     = reflect.TypeOf(CountFlag(0))
+		funcType      = reflect.TypeOf((func(string))(nil))
+	)
+
+	v := reflect.ValueOf(&Flag).Elem()
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Name == "Cfg" {
+			continue
+		}
+
+		var name string
+		if len(f.Name) == 1 {
+			name = f.Name
+		} else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
+			name = string(rune(f.Name[5] + 'a' - 'A'))
+		} else {
+			name = strings.ToLower(f.Name)
+		}
+		if tag := f.Tag.Get("flag"); tag != "" {
+			name = tag
+		}
+
+		help := f.Tag.Get("help")
+		if help == "" {
+			panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
+		}
+
+		if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
+			panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
+		}
+
+		switch f.Type {
+		case boolType:
+			p := v.Field(i).Addr().Interface().(*bool)
+			flag.BoolVar(p, name, *p, help)
+		case intType:
+			p := v.Field(i).Addr().Interface().(*int)
+			flag.IntVar(p, name, *p, help)
+		case stringType:
+			p := v.Field(i).Addr().Interface().(*string)
+			flag.StringVar(p, name, *p, help)
+		case ptrBoolType:
+			p := v.Field(i).Interface().(*bool)
+			flag.BoolVar(p, name, *p, help)
+		case ptrIntType:
+			p := v.Field(i).Interface().(*int)
+			flag.IntVar(p, name, *p, help)
+		case ptrStringType:
+			p := v.Field(i).Interface().(*string)
+			flag.StringVar(p, name, *p, help)
+		case countType:
+			p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
+			objabi.Flagcount(name, help, p)
+		case funcType:
+			f := v.Field(i).Interface().(func(string))
+			objabi.Flagfn1(name, help, f)
+		}
+	}
+}
+
+// concurrentFlagOk reports whether the current compiler flags
+// are compatible with concurrent compilation.
+func concurrentFlagOk() bool {
+	// TODO(rsc): Many of these are fine. Remove them.
+	return Flag.Percent == 0 &&
+		Flag.E == 0 &&
+		Flag.K == 0 &&
+		Flag.L == 0 &&
+		Flag.LowerH == 0 &&
+		Flag.LowerJ == 0 &&
+		Flag.LowerM == 0 &&
+		Flag.LowerR == 0
+}
+
+func concurrentBackendAllowed() bool {
+	if !concurrentFlagOk() {
+		return false
+	}
+
+	// Flag.S by itself is ok, because all printing occurs
+	// while writing the object file, and that is non-concurrent.
+	// Adding Ctxt.Debugvlog, however, causes Flag.S to also print
+	// while flushing the plist, which happens concurrently.
+	if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
+		return false
+	}
+	// TODO: Test and delete this condition.
+	if objabi.Fieldtrack_enabled != 0 {
+		return false
+	}
+	// TODO: fix races and enable the following flags
+	if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
+		return false
+	}
+	return true
+}
+
+func addImportDir(dir string) {
+	if dir != "" {
+		Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
+	}
+}
+
+func addImportMap(s string) {
+	if Flag.Cfg.ImportMap == nil {
+		Flag.Cfg.ImportMap = make(map[string]string)
+	}
+	if strings.Count(s, "=") != 1 {
+		log.Fatal("-importmap argument must be of the form source=actual")
+	}
+	i := strings.Index(s, "=")
+	source, actual := s[:i], s[i+1:]
+	if source == "" || actual == "" {
+		log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+	}
+	Flag.Cfg.ImportMap[source] = actual
+}
+
+func readImportCfg(file string) {
+	if Flag.Cfg.ImportMap == nil {
+		Flag.Cfg.ImportMap = make(map[string]string)
+	}
+	Flag.Cfg.PackageFile = map[string]string{}
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-importcfg: %v", err)
+	}
+
+	for lineNum, line := range strings.Split(string(data), "\n") {
+		lineNum++ // 1-based
+		line = strings.TrimSpace(line)
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		var verb, args string
+		if i := strings.Index(line, " "); i < 0 {
+			verb = line
+		} else {
+			verb, args = line[:i], strings.TrimSpace(line[i+1:])
+		}
+		var before, after string
+		if i := strings.Index(args, "="); i >= 0 {
+			before, after = args[:i], args[i+1:]
+		}
+		switch verb {
+		default:
+			log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+		case "importmap":
+			if before == "" || after == "" {
+				log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
+			}
+			Flag.Cfg.ImportMap[before] = after
+		case "packagefile":
+			if before == "" || after == "" {
+				log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+			}
+			Flag.Cfg.PackageFile[before] = after
+		}
+	}
+}
+
+func readEmbedCfg(file string) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Fatalf("-embedcfg: %v", err)
+	}
+	if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
+		log.Fatalf("%s: %v", file, err)
+	}
+	if Flag.Cfg.Embed.Patterns == nil {
+		log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+	}
+	if Flag.Cfg.Embed.Files == nil {
+		log.Fatalf("%s: invalid embedcfg: missing Files", file)
+	}
+}
+
+// parseSpectre parses the spectre configuration from the string s.
+func parseSpectre(s string) {
+	for _, f := range strings.Split(s, ",") {
+		f = strings.TrimSpace(f)
+		switch f {
+		default:
+			log.Fatalf("unknown setting -spectre=%s", f)
+		case "":
+			// nothing
+		case "all":
+			Flag.Cfg.SpectreIndex = true
+			Ctxt.Retpoline = true
+		case "index":
+			Flag.Cfg.SpectreIndex = true
+		case "ret":
+			Ctxt.Retpoline = true
+		}
+	}
+
+	if Flag.Cfg.SpectreIndex {
+		switch objabi.GOARCH {
+		case "amd64":
+			// ok
+		default:
+			log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
+		}
+	}
+}
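
A minimal standalone sketch (not part of the patch) of the field-name to flag-name
rule documented on CmdFlags above; it ignores the `flag` struct-tag override and the
special-character flags, and assumes the strings package is imported.

	func flagNameFor(field string) string {
		if len(field) == 1 {
			return field // "N" stays -N
		}
		if len(field) == 6 && field[:5] == "Lower" {
			return strings.ToLower(field[5:]) // "LowerM" becomes -m
		}
		return strings.ToLower(field) // "Race" becomes -race
	}
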
diff --git a/src/cmd/compile/internal/base/link.go b/src/cmd/compile/internal/base/link.go
new file mode 100644
index 0000000..49fe435
--- /dev/null
+++ b/src/cmd/compile/internal/base/link.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"cmd/internal/obj"
+)
+
+var Ctxt *obj.Link
+
+// TODO(mdempsky): These should probably be obj.Link methods.
+
+// PkgLinksym returns the linker symbol for name within the given
+// package prefix. For user packages, prefix should be the package
+// path encoded with objabi.PathToPrefix.
+func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
+	if name == "_" {
+		// TODO(mdempsky): Cleanup callers and Fatalf instead.
+		return linksym(prefix, "_", abi)
+	}
+	return linksym(prefix, prefix+"."+name, abi)
+}
+
+// Linkname returns the linker symbol for the given name as it might
+// appear within a //go:linkname directive.
+func Linkname(name string, abi obj.ABI) *obj.LSym {
+	return linksym("_", name, abi)
+}
+
+// linksym is an internal helper function for implementing the above
+// exported APIs.
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
+	return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
+}
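
Hedged usage sketch of the helpers above (not part of the patch; the symbol names
are examples only):

	// "runtime.mallocgc" under the internal ABI, via the package-prefix form.
	malloc := base.PkgLinksym("runtime", "mallocgc", obj.ABIInternal)
	// A name exactly as it would appear in a //go:linkname directive.
	gopanic := base.Linkname("runtime.gopanic", obj.ABI0)
	_, _ = malloc, gopanic
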
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
new file mode 100644
index 0000000..668c600
--- /dev/null
+++ b/src/cmd/compile/internal/base/print.go
@@ -0,0 +1,264 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+	"fmt"
+	"os"
+	"runtime/debug"
+	"sort"
+	"strings"
+
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+// An errorMsg is a queued error message, waiting to be printed.
+type errorMsg struct {
+	pos src.XPos
+	msg string
+}
+
+// Pos is the current source position being processed,
+// printed by Errorf, ErrorfVers, Fatalf, and Warn.
+var Pos src.XPos
+
+var (
+	errorMsgs       []errorMsg
+	numErrors       int // number of entries in errorMsgs that are errors (as opposed to warnings)
+	numSyntaxErrors int
+)
+
+// Errors returns the number of errors reported.
+func Errors() int {
+	return numErrors
+}
+
+// SyntaxErrors returns the number of syntax errors reported.
+func SyntaxErrors() int {
+	return numSyntaxErrors
+}
+
+// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
+func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	// Only add the position if we know the position.
+	// See issue golang.org/issue/11361.
+	if pos.IsKnown() {
+		msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
+	}
+	errorMsgs = append(errorMsgs, errorMsg{
+		pos: pos,
+		msg: msg + "\n",
+	})
+}
+
+// FmtPos formats pos as a file:line string.
+func FmtPos(pos src.XPos) string {
+	if Ctxt == nil {
+		return "???"
+	}
+	return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
+}
+
+// byPos sorts errors by source position.
+type byPos []errorMsg
+
+func (x byPos) Len() int           { return len(x) }
+func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
+func (x byPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+
+// FlushErrors sorts errors seen so far by line number, prints them to stdout,
+// and empties the errors array.
+func FlushErrors() {
+	if Ctxt != nil && Ctxt.Bso != nil {
+		Ctxt.Bso.Flush()
+	}
+	if len(errorMsgs) == 0 {
+		return
+	}
+	sort.Stable(byPos(errorMsgs))
+	for i, err := range errorMsgs {
+		if i == 0 || err.msg != errorMsgs[i-1].msg {
+			fmt.Printf("%s", err.msg)
+		}
+	}
+	errorMsgs = errorMsgs[:0]
+}
+
+// lasterror keeps track of the most recently issued error,
+// to avoid printing multiple error messages on the same line.
+var lasterror struct {
+	syntax src.XPos // source position of last syntax error
+	other  src.XPos // source position of last non-syntax error
+	msg    string   // error message of last non-syntax error
+}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+	p := Ctxt.PosTable.Pos(a)
+	q := Ctxt.PosTable.Pos(b)
+	return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
+// Errorf reports a formatted error at the current line.
+func Errorf(format string, args ...interface{}) {
+	ErrorfAt(Pos, format, args...)
+}
+
+// ErrorfAt reports a formatted error message at pos.
+func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+
+	if strings.HasPrefix(msg, "syntax error") {
+		numSyntaxErrors++
+		// only one syntax error per line, no matter what error
+		if sameline(lasterror.syntax, pos) {
+			return
+		}
+		lasterror.syntax = pos
+	} else {
+		// only one of multiple equal non-syntax errors per line
+		// (FlushErrors shows only one of them, so we filter duplicates
+		// here as best we can, even though they may not appear in order;
+		// otherwise we would count them toward the error limit, exit
+		// early, and then have nothing to show for it.)
+		if sameline(lasterror.other, pos) && lasterror.msg == msg {
+			return
+		}
+		lasterror.other = pos
+		lasterror.msg = msg
+	}
+
+	addErrorMsg(pos, "%s", msg)
+	numErrors++
+
+	hcrash()
+	if numErrors >= 10 && Flag.LowerE == 0 {
+		FlushErrors()
+		fmt.Printf("%v: too many errors\n", FmtPos(pos))
+		ErrorExit()
+	}
+}
+
+// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
+func ErrorfVers(lang string, format string, args ...interface{}) {
+	Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
+}
+
+// UpdateErrorDot is a clumsy hack that rewrites the last error,
+// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
+// It is used to give better error messages for dot (selector) expressions.
+func UpdateErrorDot(line string, name, expr string) {
+	if len(errorMsgs) == 0 {
+		return
+	}
+	e := &errorMsgs[len(errorMsgs)-1]
+	if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
+		e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
+	}
+}
+
+// Warn reports a formatted warning at the current line.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func Warn(format string, args ...interface{}) {
+	WarnfAt(Pos, format, args...)
+}
+
+// WarnfAt reports a formatted warning at pos.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func WarnfAt(pos src.XPos, format string, args ...interface{}) {
+	addErrorMsg(pos, format, args...)
+	if Flag.LowerM != 0 {
+		FlushErrors()
+	}
+}
+
+// Fatalf reports a fatal error - an internal problem - at the current line and exits.
+// If other errors have already been printed, then Fatalf just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// Fatalf prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, Fatalf panics to force the usual runtime info dump.
+func Fatalf(format string, args ...interface{}) {
+	FatalfAt(Pos, format, args...)
+}
+
+// FatalfAt reports a fatal error - an internal problem - at pos and exits.
+// If other errors have already been printed, then FatalfAt just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// FatalfAt prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
+func FatalfAt(pos src.XPos, format string, args ...interface{}) {
+	FlushErrors()
+
+	if Debug.Panic != 0 || numErrors == 0 {
+		fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
+		fmt.Printf(format, args...)
+		fmt.Printf("\n")
+
+		// If this is a released compiler version, ask for a bug report.
+		if strings.HasPrefix(objabi.Version, "go") {
+			fmt.Printf("\n")
+			fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+			fmt.Printf("https://golang.org/issue/new\n")
+		} else {
+			// Not a release; dump a stack trace, too.
+			fmt.Println()
+			os.Stdout.Write(debug.Stack())
+			fmt.Println()
+		}
+	}
+
+	hcrash()
+	ErrorExit()
+}
+
+// hcrash crashes the compiler when -h is set, to find out where a message is generated.
+func hcrash() {
+	if Flag.LowerH != 0 {
+		FlushErrors()
+		if Flag.LowerO != "" {
+			os.Remove(Flag.LowerO)
+		}
+		panic("-h")
+	}
+}
+
+// ErrorExit handles an error-status exit.
+// It flushes any pending errors, removes the output file, and exits.
+func ErrorExit() {
+	FlushErrors()
+	if Flag.LowerO != "" {
+		os.Remove(Flag.LowerO)
+	}
+	os.Exit(2)
+}
+
+// ExitIfErrors calls ErrorExit if any errors have been reported.
+func ExitIfErrors() {
+	if Errors() > 0 {
+		ErrorExit()
+	}
+}
+
+var AutogeneratedPos src.XPos
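
Illustrative reporting sequence using the API above (not part of the patch; n, x,
and t are hypothetical front-end values):

	base.Pos = n.Pos()                            // position used by Errorf and Fatalf
	base.Errorf("cannot use %v as type %v", x, t) // queued; printed by FlushErrors
	base.ExitIfErrors()                           // exits with status 2 if anything was reported
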
diff --git a/src/cmd/compile/internal/gc/timings.go b/src/cmd/compile/internal/base/timings.go
similarity index 99%
rename from src/cmd/compile/internal/gc/timings.go
rename to src/cmd/compile/internal/base/timings.go
index 56b3899..f599f4e 100644
--- a/src/cmd/compile/internal/gc/timings.go
+++ b/src/cmd/compile/internal/base/timings.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package base
 
 import (
 	"fmt"
@@ -11,6 +11,8 @@
 	"time"
 )
 
+var Timer Timings
+
 // Timings collects the execution times of labeled phases
 // which are added through a sequence of Start/Stop calls.
 // Events may be associated with each phase via AddEvent.
diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go
new file mode 100644
index 0000000..bcac1fe
--- /dev/null
+++ b/src/cmd/compile/internal/bitvec/bv.go
@@ -0,0 +1,190 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bitvec
+
+import (
+	"math/bits"
+
+	"cmd/compile/internal/base"
+)
+
+const (
+	wordBits  = 32
+	wordMask  = wordBits - 1
+	wordShift = 5
+)
+
+// A BitVec is a bit vector.
+type BitVec struct {
+	N int32    // number of bits in vector
+	B []uint32 // words holding bits
+}
+
+func New(n int32) BitVec {
+	nword := (n + wordBits - 1) / wordBits
+	return BitVec{n, make([]uint32, nword)}
+}
+
+type Bulk struct {
+	words []uint32
+	nbit  int32
+	nword int32
+}
+
+func NewBulk(nbit int32, count int32) Bulk {
+	nword := (nbit + wordBits - 1) / wordBits
+	size := int64(nword) * int64(count)
+	if int64(int32(size*4)) != size*4 {
+		base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+	}
+	return Bulk{
+		words: make([]uint32, size),
+		nbit:  nbit,
+		nword: nword,
+	}
+}
+
+func (b *Bulk) Next() BitVec {
+	out := BitVec{b.nbit, b.words[:b.nword]}
+	b.words = b.words[b.nword:]
+	return out
+}
+
+func (bv1 BitVec) Eq(bv2 BitVec) bool {
+	if bv1.N != bv2.N {
+		base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
+	}
+	for i, x := range bv1.B {
+		if x != bv2.B[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (dst BitVec) Copy(src BitVec) {
+	copy(dst.B, src.B)
+}
+
+func (bv BitVec) Get(i int32) bool {
+	if i < 0 || i >= bv.N {
+		base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
+	}
+	mask := uint32(1 << uint(i%wordBits))
+	return bv.B[i>>wordShift]&mask != 0
+}
+
+func (bv BitVec) Set(i int32) {
+	if i < 0 || i >= bv.N {
+		base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
+	}
+	mask := uint32(1 << uint(i%wordBits))
+	bv.B[i/wordBits] |= mask
+}
+
+func (bv BitVec) Unset(i int32) {
+	if i < 0 || i >= bv.N {
+		base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
+	}
+	mask := uint32(1 << uint(i%wordBits))
+	bv.B[i/wordBits] &^= mask
+}
+
+// Next returns the smallest index >= i for which Get(i) is true.
+// If there is no such index, Next returns -1.
+func (bv BitVec) Next(i int32) int32 {
+	if i >= bv.N {
+		return -1
+	}
+
+	// Jump i ahead to next word with bits.
+	if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
+		i &^= wordMask
+		i += wordBits
+		for i < bv.N && bv.B[i>>wordShift] == 0 {
+			i += wordBits
+		}
+	}
+
+	if i >= bv.N {
+		return -1
+	}
+
+	// Find 1 bit.
+	w := bv.B[i>>wordShift] >> uint(i&wordMask)
+	i += int32(bits.TrailingZeros32(w))
+
+	return i
+}
+
+func (bv BitVec) IsEmpty() bool {
+	for _, x := range bv.B {
+		if x != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (bv BitVec) Not() {
+	for i, x := range bv.B {
+		bv.B[i] = ^x
+	}
+}
+
+// union
+func (dst BitVec) Or(src1, src2 BitVec) {
+	if len(src1.B) == 0 {
+		return
+	}
+	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+	for i, x := range src1.B {
+		dst.B[i] = x | src2.B[i]
+	}
+}
+
+// intersection
+func (dst BitVec) And(src1, src2 BitVec) {
+	if len(src1.B) == 0 {
+		return
+	}
+	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+	for i, x := range src1.B {
+		dst.B[i] = x & src2.B[i]
+	}
+}
+
+// difference
+func (dst BitVec) AndNot(src1, src2 BitVec) {
+	if len(src1.B) == 0 {
+		return
+	}
+	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+	for i, x := range src1.B {
+		dst.B[i] = x &^ src2.B[i]
+	}
+}
+
+func (bv BitVec) String() string {
+	s := make([]byte, 2+bv.N)
+	copy(s, "#*")
+	for i := int32(0); i < bv.N; i++ {
+		ch := byte('0')
+		if bv.Get(i) {
+			ch = '1'
+		}
+		s[2+i] = ch
+	}
+	return string(s)
+}
+
+func (bv BitVec) Clear() {
+	for i := range bv.B {
+		bv.B[i] = 0
+	}
+}
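
A small usage sketch for the bit-vector API above (not part of the patch; the size
and indexes are arbitrary):

	bv := bitvec.New(64) // 64-bit vector, initially all zero
	bv.Set(3)
	bv.Set(17)
	for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
		// visits bit indexes 3 and 17, then Next returns -1
	}
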
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
new file mode 100644
index 0000000..5202037
--- /dev/null
+++ b/src/cmd/compile/internal/deadcode/deadcode.go
@@ -0,0 +1,152 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deadcode
+
+import (
+	"go/constant"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+)
+
+func Func(fn *ir.Func) {
+	stmts(&fn.Body)
+
+	if len(fn.Body) == 0 {
+		return
+	}
+
+	for _, n := range fn.Body {
+		if len(n.Init()) > 0 {
+			return
+		}
+		switch n.Op() {
+		case ir.OIF:
+			n := n.(*ir.IfStmt)
+			if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
+				return
+			}
+		case ir.OFOR:
+			n := n.(*ir.ForStmt)
+			if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
+				return
+			}
+		default:
+			return
+		}
+	}
+
+	fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
+}
+
+func stmts(nn *ir.Nodes) {
+	var lastLabel = -1
+	for i, n := range *nn {
+		if n != nil && n.Op() == ir.OLABEL {
+			lastLabel = i
+		}
+	}
+	for i, n := range *nn {
+		// cut is set to true when all nodes after the i'th position
+		// should be removed.
+		// In other words, it marks the whole slice "tail" as dead.
+		cut := false
+		if n == nil {
+			continue
+		}
+		if n.Op() == ir.OIF {
+			n := n.(*ir.IfStmt)
+			n.Cond = expr(n.Cond)
+			if ir.IsConst(n.Cond, constant.Bool) {
+				var body ir.Nodes
+				if ir.BoolVal(n.Cond) {
+					n.Else = ir.Nodes{}
+					body = n.Body
+				} else {
+					n.Body = ir.Nodes{}
+					body = n.Else
+				}
+				// If "then" or "else" branch ends with panic or return statement,
+				// it is safe to remove all statements after this node.
+				// isterminating is not used to avoid goto-related complications.
+				// We must be careful not to deadcode-remove labels, as they
+				// might be the target of a goto. See issue 28616.
+				if body := body; len(body) != 0 {
+					switch body[(len(body) - 1)].Op() {
+					case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
+						if i > lastLabel {
+							cut = true
+						}
+					}
+				}
+			}
+		}
+
+		if len(n.Init()) != 0 {
+			stmts(n.(ir.InitNode).PtrInit())
+		}
+		switch n.Op() {
+		case ir.OBLOCK:
+			n := n.(*ir.BlockStmt)
+			stmts(&n.List)
+		case ir.OFOR:
+			n := n.(*ir.ForStmt)
+			stmts(&n.Body)
+		case ir.OIF:
+			n := n.(*ir.IfStmt)
+			stmts(&n.Body)
+			stmts(&n.Else)
+		case ir.ORANGE:
+			n := n.(*ir.RangeStmt)
+			stmts(&n.Body)
+		case ir.OSELECT:
+			n := n.(*ir.SelectStmt)
+			for _, cas := range n.Cases {
+				stmts(&cas.Body)
+			}
+		case ir.OSWITCH:
+			n := n.(*ir.SwitchStmt)
+			for _, cas := range n.Cases {
+				stmts(&cas.Body)
+			}
+		}
+
+		if cut {
+			*nn = (*nn)[:i+1]
+			break
+		}
+	}
+}
+
+func expr(n ir.Node) ir.Node {
+	// Perform dead-code elimination on short-circuited boolean
+	// expressions involving constants with the intent of
+	// producing a constant 'if' condition.
+	switch n.Op() {
+	case ir.OANDAND:
+		n := n.(*ir.LogicalExpr)
+		n.X = expr(n.X)
+		n.Y = expr(n.Y)
+		if ir.IsConst(n.X, constant.Bool) {
+			if ir.BoolVal(n.X) {
+				return n.Y // true && x => x
+			} else {
+				return n.X // false && x => false
+			}
+		}
+	case ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		n.X = expr(n.X)
+		n.Y = expr(n.Y)
+		if ir.IsConst(n.X, constant.Bool) {
+			if ir.BoolVal(n.X) {
+				return n.X // true || x => true
+			} else {
+				return n.Y // false || x => x
+			}
+		}
+	}
+	return n
+}
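
Source-level sketch of the pass's effect on a constant condition (not part of the
patch; the function is made up for illustration):

	func f() int {
		if true { // constant condition: only the taken branch is kept
			return 1 // the kept branch ends in a return...
		}
		println("unreachable") // ...so the trailing statements are cut as dead
		return 0
	}
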
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
new file mode 100644
index 0000000..60ba208
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -0,0 +1,85 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package devirtualize implements a simple "devirtualization"
+// optimization pass, which replaces interface method calls with
+// direct concrete-type method calls where possible.
+package devirtualize
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+)
+
+// Func devirtualizes calls within fn where possible.
+func Func(fn *ir.Func) {
+	ir.CurFunc = fn
+	ir.VisitList(fn.Body, func(n ir.Node) {
+		if call, ok := n.(*ir.CallExpr); ok {
+			Call(call)
+		}
+	})
+}
+
+// Call devirtualizes the given call if possible.
+func Call(call *ir.CallExpr) {
+	if call.Op() != ir.OCALLINTER {
+		return
+	}
+	sel := call.X.(*ir.SelectorExpr)
+	r := ir.StaticValue(sel.X)
+	if r.Op() != ir.OCONVIFACE {
+		return
+	}
+	recv := r.(*ir.ConvExpr)
+
+	typ := recv.X.Type()
+	if typ.IsInterface() {
+		return
+	}
+
+	dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
+	dt.SetType(typ)
+	x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
+	switch x.Op() {
+	case ir.ODOTMETH:
+		x := x.(*ir.SelectorExpr)
+		if base.Flag.LowerM != 0 {
+			base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
+		}
+		call.SetOp(ir.OCALLMETH)
+		call.X = x
+	case ir.ODOTINTER:
+		// Promoted method from embedded interface-typed field (#42279).
+		x := x.(*ir.SelectorExpr)
+		if base.Flag.LowerM != 0 {
+			base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
+		}
+		call.SetOp(ir.OCALLINTER)
+		call.X = x
+	default:
+		// TODO(mdempsky): Turn back into Fatalf after more testing.
+		if base.Flag.LowerM != 0 {
+			base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
+		}
+		return
+	}
+
+	// Duplicated logic from typecheck for function call return
+	// value types.
+	//
+	// Receiver parameter size may have changed; need to update
+	// call.Type to get correct stack offsets for result
+	// parameters.
+	types.CheckSize(x.Type())
+	switch ft := x.Type(); ft.NumResults() {
+	case 0:
+	case 1:
+		call.SetType(ft.Results().Field(0).Type)
+	default:
+		call.SetType(ft.Results())
+	}
+}
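
Source-level sketch of what the pass rewrites (not part of the patch; assumes the
standard bytes and io packages):

	func example(p []byte) {
		var b bytes.Buffer
		var w io.Writer = &b // concrete type visible through ir.StaticValue
		w.Write(p)           // interface call rewritten to a direct (*bytes.Buffer).Write call
	}
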
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
new file mode 100644
index 0000000..dd22c03
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -0,0 +1,458 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/reflectdata"
+	"cmd/compile/internal/ssa"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/types"
+	"cmd/internal/dwarf"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
+	fn := curfn.(*ir.Func)
+
+	if fn.Nname != nil {
+		expect := fn.Linksym()
+		if fnsym.ABI() == obj.ABI0 {
+			expect = fn.LinksymABI(obj.ABI0)
+		}
+		if fnsym != expect {
+			base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+		}
+	}
+
+	// Back when there were two different *Funcs for a function, this code
+	// was not consistent about whether a particular *Node being processed
+	// was an ODCLFUNC or ONAME node. Partly this is because inlined function
+// bodies have no ODCLFUNC node, which was its own inconsistency.
+	// In any event, the handling of the two different nodes for DWARF purposes
+	// was subtly different, likely in unintended ways. CL 272253 merged the
+	// two nodes' Func fields, so that code sees the same *Func whether it is
+	// holding the ODCLFUNC or the ONAME. This resulted in changes in the
+	// DWARF output. To preserve the existing DWARF output and leave an
+	// intentional change for a future CL, this code does the following when
+	// fn.Op == ONAME:
+	//
+	// 1. Disallow use of createComplexVars in createDwarfVars.
+	//    It was not possible to reach that code for an ONAME before,
+	//    because the DebugInfo was set only on the ODCLFUNC Func.
+	//    Calling into it in the ONAME case causes an index out of bounds panic.
+	//
+	// 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+	//    not the ONAME Func. Populating apdecls for the ONAME case results
+	//    in selected being populated after createSimpleVars is called in
+	//    createDwarfVars, and then that causes the loop to skip all the entries
+	//    in dcl, meaning that the RecordAutoType calls don't happen.
+	//
+	// These two adjustments keep toolstash -cmp working for now.
+	// Deciding the right answer is, as they say, future work.
+	//
+	// We can tell the difference between the old ODCLFUNC and ONAME
+	// cases by looking at the infosym.Name. If it's empty, DebugInfo is
+	// being called from (*obj.Link).populateDWARF, which used to use
+	// the ODCLFUNC. If it's non-empty (the name will end in $abstract),
+	// DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
+	// which used to use the ONAME form.
+	isODCLFUNC := infosym.Name == ""
+
+	var apdecls []*ir.Name
+	// Populate decls for fn.
+	if isODCLFUNC {
+		for _, n := range fn.Dcl {
+			if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+				continue
+			}
+			switch n.Class {
+			case ir.PAUTO:
+				if !n.Used() {
+					// Text == nil -> generating abstract function
+					if fnsym.Func().Text != nil {
+						base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+					}
+					continue
+				}
+			case ir.PPARAM, ir.PPARAMOUT:
+			default:
+				continue
+			}
+			apdecls = append(apdecls, n)
+			fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+		}
+	}
+
+	decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
+
+	// For each type referenced by the function's auto vars but not
+	// already referenced by a dwarf var, attach an R_USETYPE relocation to
+	// the function symbol to ensure that the type is included in DWARF
+	// processing during linking.
+	typesyms := []*obj.LSym{}
+	for t := range fnsym.Func().Autot {
+		typesyms = append(typesyms, t)
+	}
+	sort.Sort(obj.BySymName(typesyms))
+	for _, sym := range typesyms {
+		r := obj.Addrel(infosym)
+		r.Sym = sym
+		r.Type = objabi.R_USETYPE
+	}
+	fnsym.Func().Autot = nil
+
+	var varScopes []ir.ScopeID
+	for _, decl := range decls {
+		pos := declPos(decl)
+		varScopes = append(varScopes, findScope(fn.Marks, pos))
+	}
+
+	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
+	var inlcalls dwarf.InlCalls
+	if base.Flag.GenDwarfInl > 0 {
+		inlcalls = assembleInlines(fnsym, dwarfVars)
+	}
+	return scopes, inlcalls
+}
+
+func declPos(decl *ir.Name) src.XPos {
+	return decl.Canonical().Pos()
+}
+
+// createDwarfVars processes fn, returning a list of DWARF variables and the
+// Nodes they represent.
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
+	// Collect a raw list of DWARF vars.
+	var vars []*dwarf.Var
+	var decls []*ir.Name
+	var selected ir.NameSet
+	if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
+		decls, vars, selected = createComplexVars(fnsym, fn)
+	} else {
+		decls, vars, selected = createSimpleVars(fnsym, apDecls)
+	}
+
+	dcl := apDecls
+	if fnsym.WasInlined() {
+		dcl = preInliningDcls(fnsym)
+	}
+
+	// If optimization is enabled, the list above will typically be
+	// missing some of the original pre-optimization variables in the
+	// function (they may have been promoted to registers, folded into
+	// constants, dead-coded away, etc).  Input arguments not eligible
+	// for SSA optimization are also missing.  Here we add back in entries
+	// for selected missing vars. Note that the recipe below creates a
+	// conservative location. The idea here is that we want to
+	// communicate to the user that "yes, there is a variable named X
+	// in this function, but no, I don't have enough information to
+	// reliably report its contents."
+	// For non-SSA-able arguments, however, the correct information
+	// is known -- they have a single home on the stack.
+	for _, n := range dcl {
+		if selected.Has(n) {
+			continue
+		}
+		c := n.Sym().Name[0]
+		if c == '.' || n.Type().IsUntyped() {
+			continue
+		}
+		if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
+			// SSA-able args get location lists, and may move in and
+			// out of registers, so those are handled elsewhere.
+			// Autos and named output params seem to get handled
+			// with VARDEF, which creates location lists.
+			// Args not of SSA-able type are treated here; they
+			// are homed on the stack in a single place for the
+			// entire call.
+			vars = append(vars, createSimpleVar(fnsym, n))
+			decls = append(decls, n)
+			continue
+		}
+		typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+		decls = append(decls, n)
+		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
+		isReturnValue := (n.Class == ir.PPARAMOUT)
+		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+		}
+		if n.Esc() == ir.EscHeap {
+			// The variable in question has been promoted to the heap.
+			// Its address is in n.Heapaddr.
+			// TODO(thanm): generate a better location expression
+		}
+		inlIndex := 0
+		if base.Flag.GenDwarfInl > 1 {
+			if n.InlFormal() || n.InlLocal() {
+				inlIndex = posInlIndex(n.Pos()) + 1
+				if n.InlFormal() {
+					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+				}
+			}
+		}
+		declpos := base.Ctxt.InnermostPos(n.Pos())
+		vars = append(vars, &dwarf.Var{
+			Name:          n.Sym().Name,
+			IsReturnValue: isReturnValue,
+			Abbrev:        abbrev,
+			StackOffset:   int32(n.FrameOffset()),
+			Type:          base.Ctxt.Lookup(typename),
+			DeclFile:      declpos.RelFilename(),
+			DeclLine:      declpos.RelLine(),
+			DeclCol:       declpos.Col(),
+			InlIndex:      int32(inlIndex),
+			ChildIndex:    -1,
+		})
+		// Record the Go type to ensure that it gets emitted by the linker.
+		fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+	}
+
+	return decls, vars
+}
+
+// Given a function that was inlined at some point during the
+// compilation, return a sorted list of nodes corresponding to the
+// autos/locals in that function prior to inlining. If this is a
+// function that is not local to the package being compiled, then the
+// names of the variables may have been "versioned" to avoid conflicts
+// with local vars; disregard this versioning when sorting.
+func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
+	fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
+	var rdcl []*ir.Name
+	for _, n := range fn.Inl.Dcl {
+		c := n.Sym().Name[0]
+		// Avoid reporting "_" parameters, since if there are more than
+		// one, it can result in a collision later on, as in #23179.
+		if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
+			continue
+		}
+		rdcl = append(rdcl, n)
+	}
+	return rdcl
+}
+
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+	var vars []*dwarf.Var
+	var decls []*ir.Name
+	var selected ir.NameSet
+	for _, n := range apDecls {
+		if ir.IsAutoTmp(n) {
+			continue
+		}
+
+		decls = append(decls, n)
+		vars = append(vars, createSimpleVar(fnsym, n))
+		selected.Add(n)
+	}
+	return decls, vars, selected
+}
+
+func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
+	var abbrev int
+	var offs int64
+
+	switch n.Class {
+	case ir.PAUTO:
+		offs = n.FrameOffset()
+		abbrev = dwarf.DW_ABRV_AUTO
+		if base.Ctxt.FixedFrameSize() == 0 {
+			offs -= int64(types.PtrSize)
+		}
+		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
+			// There is a word space for FP on ARM64 even if the frame pointer is disabled
+			offs -= int64(types.PtrSize)
+		}
+
+	case ir.PPARAM, ir.PPARAMOUT:
+		abbrev = dwarf.DW_ABRV_PARAM
+		offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+	default:
+		base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
+	}
+
+	typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+	delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
+	inlIndex := 0
+	if base.Flag.GenDwarfInl > 1 {
+		if n.InlFormal() || n.InlLocal() {
+			inlIndex = posInlIndex(n.Pos()) + 1
+			if n.InlFormal() {
+				abbrev = dwarf.DW_ABRV_PARAM
+			}
+		}
+	}
+	declpos := base.Ctxt.InnermostPos(declPos(n))
+	return &dwarf.Var{
+		Name:          n.Sym().Name,
+		IsReturnValue: n.Class == ir.PPARAMOUT,
+		IsInlFormal:   n.InlFormal(),
+		Abbrev:        abbrev,
+		StackOffset:   int32(offs),
+		Type:          base.Ctxt.Lookup(typename),
+		DeclFile:      declpos.RelFilename(),
+		DeclLine:      declpos.RelLine(),
+		DeclCol:       declpos.Col(),
+		InlIndex:      int32(inlIndex),
+		ChildIndex:    -1,
+	}
+}
+
+// createComplexVars creates recomposed DWARF vars with location lists,
+// suitable for describing optimized code.
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+	debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+
+	// Produce a DWARF variable entry for each user variable.
+	var decls []*ir.Name
+	var vars []*dwarf.Var
+	var ssaVars ir.NameSet
+
+	for varID, dvar := range debugInfo.Vars {
+		n := dvar
+		ssaVars.Add(n)
+		for _, slot := range debugInfo.VarSlots[varID] {
+			ssaVars.Add(debugInfo.Slots[slot].N)
+		}
+
+		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
+			decls = append(decls, n)
+			vars = append(vars, dvar)
+		}
+	}
+
+	return decls, vars, ssaVars
+}
+
+// createComplexVar builds a single DWARF variable entry and location list.
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+	debug := fn.DebugInfo.(*ssa.FuncDebug)
+	n := debug.Vars[varID]
+
+	var abbrev int
+	switch n.Class {
+	case ir.PAUTO:
+		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
+	case ir.PPARAM, ir.PPARAMOUT:
+		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+	default:
+		return nil
+	}
+
+	gotype := reflectdata.TypeLinksym(n.Type())
+	delete(fnsym.Func().Autot, gotype)
+	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
+	inlIndex := 0
+	if base.Flag.GenDwarfInl > 1 {
+		if n.InlFormal() || n.InlLocal() {
+			inlIndex = posInlIndex(n.Pos()) + 1
+			if n.InlFormal() {
+				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+			}
+		}
+	}
+	declpos := base.Ctxt.InnermostPos(n.Pos())
+	dvar := &dwarf.Var{
+		Name:          n.Sym().Name,
+		IsReturnValue: n.Class == ir.PPARAMOUT,
+		IsInlFormal:   n.InlFormal(),
+		Abbrev:        abbrev,
+		Type:          base.Ctxt.Lookup(typename),
+		// The stack offset is used as a sorting key, so for decomposed
+		// variables just give it the first one. It's not used otherwise.
+		// This won't work well if the first slot hasn't been assigned a stack
+		// location, but it's not obvious how to do better.
+		StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
+		DeclFile:    declpos.RelFilename(),
+		DeclLine:    declpos.RelLine(),
+		DeclCol:     declpos.Col(),
+		InlIndex:    int32(inlIndex),
+		ChildIndex:  -1,
+	}
+	list := debug.LocationLists[varID]
+	if len(list) != 0 {
+		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
+			debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+		}
+	}
+	return dvar
+}
+
+// RecordFlags records the specified command-line flags to be placed
+// in the DWARF info.
+func RecordFlags(flags ...string) {
+	if base.Ctxt.Pkgpath == "" {
+		// We can't record the flags if we don't know what the
+		// package name is.
+		return
+	}
+
+	type BoolFlag interface {
+		IsBoolFlag() bool
+	}
+	type CountFlag interface {
+		IsCountFlag() bool
+	}
+	var cmd bytes.Buffer
+	for _, name := range flags {
+		f := flag.Lookup(name)
+		if f == nil {
+			continue
+		}
+		getter := f.Value.(flag.Getter)
+		if getter.String() == f.DefValue {
+			// Flag has default value, so omit it.
+			continue
+		}
+		if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+			val, ok := getter.Get().(bool)
+			if ok && val {
+				fmt.Fprintf(&cmd, " -%s", f.Name)
+				continue
+			}
+		}
+		if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+			val, ok := getter.Get().(int)
+			if ok && val == 1 {
+				fmt.Fprintf(&cmd, " -%s", f.Name)
+				continue
+			}
+		}
+		fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
+	}
+
+	if cmd.Len() == 0 {
+		return
+	}
+	s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
+	s.Type = objabi.SDWARFCUINFO
+	// Sometimes (for example when building tests) we can link
+	// together two package main archives. So allow dups.
+	s.Set(obj.AttrDuplicateOK, true)
+	base.Ctxt.Data = append(base.Ctxt.Data, s)
+	s.P = cmd.Bytes()[1:]
+}
+
+// RecordPackageName records the name of the package being
+// compiled, so that the linker can save it in the compile unit's DIE.
+func RecordPackageName() {
+	s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
+	s.Type = objabi.SDWARFCUINFO
+	// Sometimes (for example when building tests) we can link
+	// together two package main archives. So allow dups.
+	s.Set(obj.AttrDuplicateOK, true)
+	base.Ctxt.Data = append(base.Ctxt.Data, s)
+	s.P = []byte(types.LocalPkg.Name)
+}
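
A hedged, standalone sketch of the flag-classification logic used by RecordFlags above (the helper name and flag set below are hypothetical, not the compiler's): flags still at their default value are omitted, boolean flags that are set are rendered as -name, and everything else as -name=value; the compiler's count-style flags follow the analogous CountFlag branch.

package main

import (
	"bytes"
	"flag"
	"fmt"
)

// producerSuffix mirrors the formatting in RecordFlags for a plain flag set.
func producerSuffix(names ...string) string {
	var cmd bytes.Buffer
	for _, name := range names {
		f := flag.Lookup(name)
		if f == nil {
			continue
		}
		getter := f.Value.(flag.Getter)
		if getter.String() == f.DefValue {
			continue // default value: omit
		}
		if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); ok && bf.IsBoolFlag() {
			if v, ok := getter.Get().(bool); ok && v {
				fmt.Fprintf(&cmd, " -%s", f.Name)
				continue
			}
		}
		fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
	}
	if cmd.Len() == 0 {
		return ""
	}
	return cmd.String()[1:] // drop the leading space, as RecordFlags does
}

func main() {
	flag.Bool("N", false, "disable optimizations")
	flag.Int("spectre", 0, "spectre mitigation level")
	flag.CommandLine.Parse([]string{"-N", "-spectre=2"})
	fmt.Println(producerSuffix("N", "spectre")) // -N -spectre=2
}
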
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go
similarity index 85%
rename from src/cmd/compile/internal/gc/dwinl.go
rename to src/cmd/compile/internal/dwarfgen/dwinl.go
index bb5ae61..d5687cb 100644
--- a/src/cmd/compile/internal/gc/dwinl.go
+++ b/src/cmd/compile/internal/dwarfgen/dwinl.go
@@ -2,14 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package dwarfgen
 
 import (
+	"fmt"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/internal/dwarf"
 	"cmd/internal/obj"
 	"cmd/internal/src"
-	"fmt"
-	"strings"
 )
 
 // To identify variables by original source position.
@@ -26,8 +29,8 @@
 func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
 	var inlcalls dwarf.InlCalls
 
-	if Debug_gendwarfinl != 0 {
-		Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+	if base.Debug.DwarfInl != 0 {
+		base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
 	}
 
 	// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@@ -106,7 +109,7 @@
 			}
 			m = makePreinlineDclMap(fnsym)
 		} else {
-			ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
+			ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
 			m = makePreinlineDclMap(ifnlsym)
 		}
 
@@ -181,7 +184,7 @@
 	}
 
 	// Debugging
-	if Debug_gendwarfinl != 0 {
+	if base.Debug.DwarfInl != 0 {
 		dumpInlCalls(inlcalls)
 		dumpInlVars(dwVars)
 	}
@@ -204,16 +207,17 @@
 // late in the compilation when it is determined that we need an
 // abstract function DIE for an inlined routine imported from a
 // previously compiled package.
-func genAbstractFunc(fn *obj.LSym) {
-	ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
+func AbstractFunc(fn *obj.LSym) {
+	ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
 	if ifn == nil {
-		Ctxt.Diag("failed to locate precursor fn for %v", fn)
+		base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
 		return
 	}
-	if Debug_gendwarfinl != 0 {
-		Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+	_ = ifn.(*ir.Func)
+	if base.Debug.DwarfInl != 0 {
+		base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
 	}
-	Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
+	base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
 }
 
 // Undo any versioning performed when a name was written
@@ -235,15 +239,15 @@
 	dcl := preInliningDcls(fnsym)
 	m := make(map[varPos]int)
 	for i, n := range dcl {
-		pos := Ctxt.InnermostPos(n.Pos)
+		pos := base.Ctxt.InnermostPos(n.Pos())
 		vp := varPos{
-			DeclName: unversion(n.Sym.Name),
+			DeclName: unversion(n.Sym().Name),
 			DeclFile: pos.RelFilename(),
 			DeclLine: pos.RelLine(),
 			DeclCol:  pos.Col(),
 		}
 		if _, found := m[vp]; found {
-			Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
+			base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
 		}
 		m[vp] = i
 	}
@@ -260,17 +264,17 @@
 	// is one. We do this first so that parents appear before their
 	// children in the resulting table.
 	parCallIdx := -1
-	parInlIdx := Ctxt.InlTree.Parent(inlIdx)
+	parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
 	if parInlIdx >= 0 {
 		parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
 	}
 
 	// Create new entry for this inline
-	inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
-	callXPos := Ctxt.InlTree.CallPos(inlIdx)
-	absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
-	pb := Ctxt.PosTable.Pos(callXPos).Base()
-	callFileSym := Ctxt.Lookup(pb.SymFilename())
+	inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
+	callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
+	absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+	pb := base.Ctxt.PosTable.Pos(callXPos).Base()
+	callFileSym := base.Ctxt.Lookup(pb.SymFilename())
 	ic := dwarf.InlCall{
 		InlIndex:  inlIdx,
 		CallFile:  callFileSym,
@@ -298,7 +302,7 @@
 // the index for a node from the inlined body of D will refer to the
 // call to D from C. Whew.
 func posInlIndex(xpos src.XPos) int {
-	pos := Ctxt.PosTable.Pos(xpos)
+	pos := base.Ctxt.PosTable.Pos(xpos)
 	if b := pos.Base(); b != nil {
 		ii := b.InliningIndex()
 		if ii >= 0 {
@@ -324,7 +328,7 @@
 	// Append range to correct inlined call
 	callIdx, found := imap[ii]
 	if !found {
-		Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+		base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
 	}
 	call := &calls[callIdx]
 	call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@@ -332,23 +336,23 @@
 
 func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
 	for i := 0; i < ilevel; i++ {
-		Ctxt.Logf("  ")
+		base.Ctxt.Logf("  ")
 	}
 	ic := inlcalls.Calls[idx]
-	callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
-	Ctxt.Logf("  %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+	callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+	base.Ctxt.Logf("  %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
 	for _, f := range ic.InlVars {
-		Ctxt.Logf(" %v", f.Name)
+		base.Ctxt.Logf(" %v", f.Name)
 	}
-	Ctxt.Logf(" ) C: (")
+	base.Ctxt.Logf(" ) C: (")
 	for _, k := range ic.Children {
-		Ctxt.Logf(" %v", k)
+		base.Ctxt.Logf(" %v", k)
 	}
-	Ctxt.Logf(" ) R:")
+	base.Ctxt.Logf(" ) R:")
 	for _, r := range ic.Ranges {
-		Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+		base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
 	}
-	Ctxt.Logf("\n")
+	base.Ctxt.Logf("\n")
 	for _, k := range ic.Children {
 		dumpInlCall(inlcalls, k, ilevel+1)
 	}
@@ -373,7 +377,7 @@
 		if dwv.IsInAbstract {
 			ia = 1
 		}
-		Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+		base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
 	}
 }
 
@@ -410,7 +414,7 @@
 
 	// Callee
 	ic := inlCalls.Calls[idx]
-	callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+	callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
 	calleeRanges := ic.Ranges
 
 	// Caller
@@ -418,14 +422,14 @@
 	parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
 	if parentIdx != -1 {
 		pic := inlCalls.Calls[parentIdx]
-		caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+		caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
 		parentRanges = pic.Ranges
 	}
 
 	// Callee ranges contained in caller ranges?
 	c, m := rangesContainsAll(parentRanges, calleeRanges)
 	if !c {
-		Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+		base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
 	}
 
 	// Now visit kids
diff --git a/src/cmd/compile/internal/dwarfgen/marker.go b/src/cmd/compile/internal/dwarfgen/marker.go
new file mode 100644
index 0000000..ec6ce45
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/marker.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/internal/src"
+)
+
+// A ScopeMarker tracks scope nesting and boundaries for later use
+// during DWARF generation.
+type ScopeMarker struct {
+	parents []ir.ScopeID
+	marks   []ir.Mark
+}
+
+// checkPos validates the given position and returns the current scope.
+func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
+	if !pos.IsKnown() {
+		base.Fatalf("unknown scope position")
+	}
+
+	if len(m.marks) == 0 {
+		return 0
+	}
+
+	last := &m.marks[len(m.marks)-1]
+	if xposBefore(pos, last.Pos) {
+		base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
+	}
+	return last.Scope
+}
+
+// Push records a transition to a new child scope of the current scope.
+func (m *ScopeMarker) Push(pos src.XPos) {
+	current := m.checkPos(pos)
+
+	m.parents = append(m.parents, current)
+	child := ir.ScopeID(len(m.parents))
+
+	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
+}
+
+// Pop records a transition back to the current scope's parent.
+func (m *ScopeMarker) Pop(pos src.XPos) {
+	current := m.checkPos(pos)
+
+	parent := m.parents[current-1]
+
+	m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
+}
+
+// Unpush removes the current scope, which must be empty.
+func (m *ScopeMarker) Unpush() {
+	i := len(m.marks) - 1
+	current := m.marks[i].Scope
+
+	if current != ir.ScopeID(len(m.parents)) {
+		base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
+	}
+
+	m.parents = m.parents[:current-1]
+	m.marks = m.marks[:i]
+}
+
+// WriteTo writes the recorded scope marks to the given function,
+// and resets the marker for reuse.
+func (m *ScopeMarker) WriteTo(fn *ir.Func) {
+	m.compactMarks()
+
+	fn.Parents = make([]ir.ScopeID, len(m.parents))
+	copy(fn.Parents, m.parents)
+	m.parents = m.parents[:0]
+
+	fn.Marks = make([]ir.Mark, len(m.marks))
+	copy(fn.Marks, m.marks)
+	m.marks = m.marks[:0]
+}
+
+func (m *ScopeMarker) compactMarks() {
+	n := 0
+	for _, next := range m.marks {
+		if n > 0 && next.Pos == m.marks[n-1].Pos {
+			m.marks[n-1].Scope = next.Scope
+			continue
+		}
+		m.marks[n] = next
+		n++
+	}
+	m.marks = m.marks[:n]
+}
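
A hedged sketch of how a front end inside cmd/compile might drive the ScopeMarker above; the function below and its traversal are hypothetical, but the Push/Pop/WriteTo sequence is the intended protocol: push when entering a lexical block, pop when leaving it, and write the accumulated marks to the *ir.Func once the body is done.

package example // illustrative only; assumes the cmd/compile source tree

import (
	"cmd/compile/internal/dwarfgen"
	"cmd/compile/internal/ir"
	"cmd/internal/src"
)

func markScopes(fn *ir.Func, openPos, closePos src.XPos) {
	var marks dwarfgen.ScopeMarker
	marks.Push(openPos) // enter a child lexical scope
	// ... visit the declarations and statements of the block ...
	marks.Pop(closePos) // back to the parent scope
	marks.WriteTo(fn)   // after the whole body: fills fn.Parents and fn.Marks
}
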
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/dwarfgen/scope.go
similarity index 78%
rename from src/cmd/compile/internal/gc/scope.go
rename to src/cmd/compile/internal/dwarfgen/scope.go
index e66b859..1c040ed 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/dwarfgen/scope.go
@@ -2,21 +2,24 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc
+package dwarfgen
 
 import (
+	"sort"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
 	"cmd/internal/dwarf"
 	"cmd/internal/obj"
 	"cmd/internal/src"
-	"sort"
 )
 
 // See golang.org/issue/20390.
 func xposBefore(p, q src.XPos) bool {
-	return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
+	return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
 }
 
-func findScope(marks []Mark, pos src.XPos) ScopeID {
+func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
 	i := sort.Search(len(marks), func(i int) bool {
 		return xposBefore(pos, marks[i].Pos)
 	})
@@ -26,20 +29,20 @@
 	return marks[i-1].Scope
 }
 
-func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
+func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
 	// Initialize the DWARF scope tree based on lexical scopes.
-	dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
-	for i, parent := range fn.Func.Parents {
+	dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
+	for i, parent := range fn.Parents {
 		dwarfScopes[i+1].Parent = int32(parent)
 	}
 
 	scopeVariables(dwarfVars, varScopes, dwarfScopes)
-	scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
+	scopePCs(fnsym, fn.Marks, dwarfScopes)
 	return compactScopes(dwarfScopes)
 }
 
 // scopeVariables assigns DWARF variable records to their scopes.
-func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) {
 	sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
 
 	i0 := 0
@@ -56,7 +59,7 @@
 }
 
 // scopePCs assigns PC ranges to their scopes.
-func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
+func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
 	// If there aren't any child scopes (in particular, when scope
 	// tracking is disabled), we can skip a whole lot of work.
 	if len(marks) == 0 {
@@ -89,7 +92,7 @@
 
 type varsByScopeAndOffset struct {
 	vars   []*dwarf.Var
-	scopes []ScopeID
+	scopes []ir.ScopeID
 }
 
 func (v varsByScopeAndOffset) Len() int {
diff --git a/src/cmd/compile/internal/gc/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go
similarity index 99%
rename from src/cmd/compile/internal/gc/scope_test.go
rename to src/cmd/compile/internal/dwarfgen/scope_test.go
index b0e038d..fcfcf85 100644
--- a/src/cmd/compile/internal/gc/scope_test.go
+++ b/src/cmd/compile/internal/dwarfgen/scope_test.go
@@ -2,10 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package gc_test
+package dwarfgen
 
 import (
-	"cmd/internal/objfile"
 	"debug/dwarf"
 	"fmt"
 	"internal/testenv"
@@ -18,6 +17,8 @@
 	"strconv"
 	"strings"
 	"testing"
+
+	"cmd/internal/objfile"
 )
 
 type testline struct {
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
new file mode 100644
index 0000000..58cad73
--- /dev/null
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -0,0 +1,2137 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/src"
+)
+
+// Escape analysis.
+//
+// Here we analyze functions to determine which Go variables
+// (including implicit allocations such as calls to "new" or "make",
+// composite literals, etc.) can be allocated on the stack. The two
+// key invariants we have to ensure are: (1) pointers to stack objects
+// cannot be stored in the heap, and (2) pointers to a stack object
+// cannot outlive that object (e.g., because the declaring function
+// returned and destroyed the object's stack frame, or its space is
+// reused across loop iterations for logically distinct variables).
+//
+// We implement this with a static data-flow analysis of the AST.
+// First, we construct a directed weighted graph where vertices
+// (termed "locations") represent variables allocated by statements
+// and expressions, and edges represent assignments between variables
+// (with weights representing addressing/dereference counts).
+//
+// Next we walk the graph looking for assignment paths that might
+// violate the invariants stated above. If a variable v's address is
+// stored in the heap or elsewhere that may outlive it, then v is
+// marked as requiring heap allocation.
+//
+// To support interprocedural analysis, we also record data-flow from
+// each function's parameters to the heap and to its result
+// parameters. This information is summarized as "parameter tags",
+// which are used at static call sites to improve escape analysis of
+// function arguments.
+
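A hedged illustration of the parameter tags mentioned above (function names hypothetical): strlen's parameter is only dereferenced, so its tag records that p does not leak, and callers can keep their argument on the stack. With -gcflags=-m the compiler would be expected to report that neither p nor s escapes.

package main

// strlen only reads through p, so escape analysis tags p as non-leaking.
func strlen(p *string) int { return len(*p) }

func caller() int {
	s := "hello" // may stay on the caller's stack: strlen's tag says p does not escape
	return strlen(&s)
}

func main() { println(caller()) }
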
+// Constructing the location graph.
+//
+// Every allocating statement (e.g., variable declaration) or
+// expression (e.g., "new" or "make") is first mapped to a unique
+// "location."
+//
+// We also model every Go assignment as a directed edge between
+// locations. The number of dereference operations minus the number of
+// addressing operations is recorded as the edge's weight (termed
+// "derefs"). For example:
+//
+//     p = &q    // -1
+//     p = q     //  0
+//     p = *q    //  1
+//     p = **q   //  2
+//
+//     p = **&**&q  // 2
+//
+// Note that the & operator can only be applied to addressable
+// expressions, and the expression &x itself is not addressable, so
+// derefs cannot go below -1.
+//
+// Every Go language construct is lowered into this representation,
+// generally without sensitivity to flow, path, or context; and
+// without distinguishing elements within a compound variable. For
+// example:
+//
+//     var x struct { f, g *int }
+//     var u []*int
+//
+//     x.f = u[0]
+//
+// is modeled simply as
+//
+//     x = *u
+//
+// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
+// u[2], etc. However, we do record the implicit dereference involved
+// in indexing a slice.
+
+// A batch holds escape analysis state that's shared across an entire
+// batch of functions being analyzed at once.
+type batch struct {
+	allLocs  []*location
+	closures []closure
+
+	heapLoc  location
+	blankLoc location
+}
+
+// A closure holds a closure expression and its spill hole (i.e.,
+// the hole representing storing into its closure record).
+type closure struct {
+	k   hole
+	clo *ir.ClosureExpr
+}
+
+// An escape holds state specific to a single function being analyzed
+// within a batch.
+type escape struct {
+	*batch
+
+	curfn *ir.Func // function being analyzed
+
+	labels map[*types.Sym]labelState // known labels
+
+	// loopDepth counts the current loop nesting depth within
+	// curfn. It increments within each "for" loop and at each
+	// label with a corresponding backwards "goto" (i.e.,
+	// unstructured loop).
+	loopDepth int
+}
+
+// A location represents an abstract location that stores a Go
+// variable.
+type location struct {
+	n         ir.Node  // represented variable or expression, if any
+	curfn     *ir.Func // enclosing function
+	edges     []edge   // incoming edges
+	loopDepth int      // loopDepth at declaration
+
+	// resultIndex records the tuple index (starting at 1) for
+	// PPARAMOUT variables within their function's result type.
+	// For non-PPARAMOUT variables it's 0.
+	resultIndex int
+
+	// derefs and walkgen are used during walkOne to track the
+	// minimal dereferences from the walk root.
+	derefs  int // >= -1
+	walkgen uint32
+
+	// dst and dstEdgeIdx track the next immediate assignment
+	// destination location during walkOne, along with the index
+	// of the edge pointing back to this location.
+	dst        *location
+	dstEdgeIdx int
+
+	// queued is used by walkAll to track whether this location is
+	// in the walk queue.
+	queued bool
+
+	// escapes reports whether the represented variable's address
+	// escapes; that is, whether the variable must be heap
+	// allocated.
+	escapes bool
+
+	// transient reports whether the represented expression's
+	// address does not outlive the statement; that is, whether
+	// its storage can be immediately reused.
+	transient bool
+
+	// paramEsc records the represented parameter's leak set.
+	paramEsc leaks
+
+	captured   bool // has a closure captured this variable?
+	reassigned bool // has this variable been reassigned?
+	addrtaken  bool // has this variable's address been taken?
+}
+
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+	src    *location
+	derefs int // >= -1
+	notes  *note
+}
+
+// Fmt is called from node printing to print information about escape analysis results.
+func Fmt(n ir.Node) string {
+	text := ""
+	switch n.Esc() {
+	case ir.EscUnknown:
+		break
+
+	case ir.EscHeap:
+		text = "esc(h)"
+
+	case ir.EscNone:
+		text = "esc(no)"
+
+	case ir.EscNever:
+		text = "esc(N)"
+
+	default:
+		text = fmt.Sprintf("esc(%d)", n.Esc())
+	}
+
+	if n.Op() == ir.ONAME {
+		n := n.(*ir.Name)
+		if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
+			if text != "" {
+				text += " "
+			}
+			text += fmt.Sprintf("ld(%d)", loc.loopDepth)
+		}
+	}
+
+	return text
+}
+
+// Batch performs escape analysis on a minimal batch of
+// functions.
+func Batch(fns []*ir.Func, recursive bool) {
+	for _, fn := range fns {
+		if fn.Op() != ir.ODCLFUNC {
+			base.Fatalf("unexpected node: %v", fn)
+		}
+	}
+
+	var b batch
+	b.heapLoc.escapes = true
+
+	// Construct data-flow graph from syntax trees.
+	for _, fn := range fns {
+		if base.Flag.W > 1 {
+			s := fmt.Sprintf("\nbefore escape %v", fn)
+			ir.Dump(s, fn)
+		}
+		b.initFunc(fn)
+	}
+	for _, fn := range fns {
+		if !fn.IsHiddenClosure() {
+			b.walkFunc(fn)
+		}
+	}
+
+	// We've walked the function bodies, so we've seen everywhere a
+	// variable might be reassigned or have its address taken. Now we
+	// can decide whether closures should capture their free variables
+	// by value or reference.
+	for _, closure := range b.closures {
+		b.flowClosure(closure.k, closure.clo)
+	}
+	b.closures = nil
+
+	for _, loc := range b.allLocs {
+		if why := HeapAllocReason(loc.n); why != "" {
+			b.flow(b.heapHole().addr(loc.n, why), loc)
+		}
+	}
+
+	b.walkAll()
+	b.finish(fns)
+}
+
+func (b *batch) with(fn *ir.Func) *escape {
+	return &escape{
+		batch:     b,
+		curfn:     fn,
+		loopDepth: 1,
+	}
+}
+
+func (b *batch) initFunc(fn *ir.Func) {
+	e := b.with(fn)
+	if fn.Esc() != escFuncUnknown {
+		base.Fatalf("unexpected node: %v", fn)
+	}
+	fn.SetEsc(escFuncPlanned)
+	if base.Flag.LowerM > 3 {
+		ir.Dump("escAnalyze", fn)
+	}
+
+	// Allocate locations for local variables.
+	for _, n := range fn.Dcl {
+		if n.Op() == ir.ONAME {
+			e.newLoc(n, false)
+		}
+	}
+
+	// Initialize resultIndex for result parameters.
+	for i, f := range fn.Type().Results().FieldSlice() {
+		e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
+	}
+}
+
+func (b *batch) walkFunc(fn *ir.Func) {
+	e := b.with(fn)
+	fn.SetEsc(escFuncStarted)
+
+	// Identify labels that mark the head of an unstructured loop.
+	ir.Visit(fn, func(n ir.Node) {
+		switch n.Op() {
+		case ir.OLABEL:
+			n := n.(*ir.LabelStmt)
+			if e.labels == nil {
+				e.labels = make(map[*types.Sym]labelState)
+			}
+			e.labels[n.Label] = nonlooping
+
+		case ir.OGOTO:
+			// If we visited the label before the goto,
+			// then this is a looping label.
+			n := n.(*ir.BranchStmt)
+			if e.labels[n.Label] == nonlooping {
+				e.labels[n.Label] = looping
+			}
+		}
+	})
+
+	e.block(fn.Body)
+
+	if len(e.labels) != 0 {
+		base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
+	}
+}
+
+func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
+	for _, cv := range clo.Func.ClosureVars {
+		n := cv.Canonical()
+		loc := b.oldLoc(cv)
+		if !loc.captured {
+			base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv)
+		}
+
+		// Capture by value for variables <= 128 bytes that are never reassigned.
+		n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
+		if !n.Byval() {
+			n.SetAddrtaken(true)
+		}
+
+		if base.Flag.LowerM > 1 {
+			how := "ref"
+			if n.Byval() {
+				how = "value"
+			}
+			base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size())
+		}
+
+		// Flow captured variables to closure.
+		k := k
+		if !cv.Byval() {
+			k = k.addr(cv, "reference")
+		}
+		b.flow(k.note(cv, "captured by a closure"), loc)
+	}
+}
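
A hedged example (not part of this CL) of the capture decision flowClosure makes: x is small and never reassigned or address-taken, so it is eligible for capture by value; y is reassigned after the closure is created, which forces capture by reference. Compiling with -gcflags='-m -m' should show the "capturing by value"/"capturing by ref" diagnostics printed above.

package main

func sum() int {
	x := 1
	y := 2
	f := func() int { return x + y } // x: small, never reassigned -> capture by value
	y = 3                            // y reassigned after capture -> capture by reference
	return f()                       // 1 + 3 = 4
}

func main() { println(sum()) }
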
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+//     if x {
+//         e.discard(n.Left)
+//     } else {
+//         e.value(k, n.Left)
+//     }
+//
+// or
+//
+//     if x {
+//         k = e.discardHole()
+//     }
+//     e.value(k, n.Left)
+//
+// Do NOT write:
+//
+//    // BAD: possibly loses side-effects within n.Left
+//    if !x {
+//        e.value(k, n.Left)
+//    }
+
+// stmt evaluates a single Go statement.
+func (e *escape) stmt(n ir.Node) {
+	if n == nil {
+		return
+	}
+
+	lno := ir.SetPos(n)
+	defer func() {
+		base.Pos = lno
+	}()
+
+	if base.Flag.LowerM > 2 {
+		fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
+	}
+
+	e.stmts(n.Init())
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected stmt: %v", n)
+
+	case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+		// nop
+
+	case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+		// TODO(mdempsky): Handle dead code?
+
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		e.stmts(n.List)
+
+	case ir.ODCL:
+		// Record loop depth at declaration.
+		n := n.(*ir.Decl)
+		if !ir.IsBlank(n.X) {
+			e.dcl(n.X)
+		}
+
+	case ir.OLABEL:
+		n := n.(*ir.LabelStmt)
+		switch e.labels[n.Label] {
+		case nonlooping:
+			if base.Flag.LowerM > 2 {
+				fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
+			}
+		case looping:
+			if base.Flag.LowerM > 2 {
+				fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
+			}
+			e.loopDepth++
+		default:
+			base.Fatalf("label missing tag")
+		}
+		delete(e.labels, n.Label)
+
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		e.discard(n.Cond)
+		e.block(n.Body)
+		e.block(n.Else)
+
+	case ir.OFOR, ir.OFORUNTIL:
+		n := n.(*ir.ForStmt)
+		e.loopDepth++
+		e.discard(n.Cond)
+		e.stmt(n.Post)
+		e.block(n.Body)
+		e.loopDepth--
+
+	case ir.ORANGE:
+		// for Key, Value = range X { Body }
+		n := n.(*ir.RangeStmt)
+
+		// X is evaluated outside the loop.
+		tmp := e.newLoc(nil, false)
+		e.expr(tmp.asHole(), n.X)
+
+		e.loopDepth++
+		ks := e.addrs([]ir.Node{n.Key, n.Value})
+		if n.X.Type().IsArray() {
+			e.flow(ks[1].note(n, "range"), tmp)
+		} else {
+			e.flow(ks[1].deref(n, "range-deref"), tmp)
+		}
+		e.reassigned(ks, n)
+
+		e.block(n.Body)
+		e.loopDepth--
+
+	case ir.OSWITCH:
+		n := n.(*ir.SwitchStmt)
+
+		if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+			var ks []hole
+			if guard.Tag != nil {
+				for _, cas := range n.Cases {
+					cv := cas.Var
+					k := e.dcl(cv) // type switch variables have no ODCL.
+					if cv.Type().HasPointers() {
+						ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+					}
+				}
+			}
+			e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
+		} else {
+			e.discard(n.Tag)
+		}
+
+		for _, cas := range n.Cases {
+			e.discards(cas.List)
+			e.block(cas.Body)
+		}
+
+	case ir.OSELECT:
+		n := n.(*ir.SelectStmt)
+		for _, cas := range n.Cases {
+			e.stmt(cas.Comm)
+			e.block(cas.Body)
+		}
+	case ir.ORECV:
+		// TODO(mdempsky): Consider e.discard(n.Left).
+		n := n.(*ir.UnaryExpr)
+		e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+	case ir.OSEND:
+		n := n.(*ir.SendStmt)
+		e.discard(n.Chan)
+		e.assignHeap(n.Value, "send", n)
+
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		// TODO(mdempsky): Worry about OLSH/ORSH?
+		e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+	case ir.OAS2:
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
+
+	case ir.OAS2DOTTYPE: // v, ok = x.(type)
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
+	case ir.OAS2MAPR: // v, ok = m[k]
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
+	case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
+		n := n.(*ir.AssignListStmt)
+		e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
+
+	case ir.OAS2FUNC:
+		n := n.(*ir.AssignListStmt)
+		e.stmts(n.Rhs[0].Init())
+		ks := e.addrs(n.Lhs)
+		e.call(ks, n.Rhs[0], nil)
+		e.reassigned(ks, n)
+	case ir.ORETURN:
+		n := n.(*ir.ReturnStmt)
+		results := e.curfn.Type().Results().FieldSlice()
+		dsts := make([]ir.Node, len(results))
+		for i, res := range results {
+			dsts[i] = res.Nname.(*ir.Name)
+		}
+		e.assignList(dsts, n.Results, "return", n)
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+		e.call(nil, n, nil)
+	case ir.OGO, ir.ODEFER:
+		n := n.(*ir.GoDeferStmt)
+		e.stmts(n.Call.Init())
+		e.call(nil, n.Call, n)
+
+	case ir.OTAILCALL:
+		// TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
+	}
+}
+
+func (e *escape) stmts(l ir.Nodes) {
+	for _, n := range l {
+		e.stmt(n)
+	}
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *escape) block(l ir.Nodes) {
+	old := e.loopDepth
+	e.stmts(l)
+	e.loopDepth = old
+}
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *escape) expr(k hole, n ir.Node) {
+	if n == nil {
+		return
+	}
+	e.stmts(n.Init())
+	e.exprSkipInit(k, n)
+}
+
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
+	if n == nil {
+		return
+	}
+
+	lno := ir.SetPos(n)
+	defer func() {
+		base.Pos = lno
+	}()
+
+	uintptrEscapesHack := k.uintptrEscapesHack
+	k.uintptrEscapesHack = false
+
+	if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
+		// nop
+	} else if k.derefs >= 0 && !n.Type().HasPointers() {
+		k.dst = &e.blankLoc
+	}
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected expr: %v", n)
+
+	case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
+		// nop
+
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
+			return
+		}
+		if n.IsClosureVar() && n.Defn == nil {
+			return // ".this" from method value wrapper
+		}
+		e.flow(k, e.oldLoc(n))
+
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+		n := n.(*ir.UnaryExpr)
+		e.discard(n.X)
+	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+		n := n.(*ir.BinaryExpr)
+		e.discard(n.X)
+		e.discard(n.Y)
+	case ir.OANDAND, ir.OOROR:
+		n := n.(*ir.LogicalExpr)
+		e.discard(n.X)
+		e.discard(n.Y)
+	case ir.OADDR:
+		n := n.(*ir.AddrExpr)
+		e.expr(k.addr(n, "address-of"), n.X) // "address-of"
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		e.expr(k.deref(n, "indirection"), n.X) // "indirection"
+	case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+		n := n.(*ir.SelectorExpr)
+		e.expr(k.note(n, "dot"), n.X)
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
+	case ir.ODOTTYPE, ir.ODOTTYPE2:
+		n := n.(*ir.TypeAssertExpr)
+		e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		if n.X.Type().IsArray() {
+			e.expr(k.note(n, "fixed-array-index-of"), n.X)
+		} else {
+			// TODO(mdempsky): Fix why reason text.
+			e.expr(k.deref(n, "dot of pointer"), n.X)
+		}
+		e.discard(n.Index)
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		e.discard(n.X)
+		e.discard(n.Index)
+	case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+		n := n.(*ir.SliceExpr)
+		e.expr(k.note(n, "slice"), n.X)
+		e.discard(n.Low)
+		e.discard(n.High)
+		e.discard(n.Max)
+
+	case ir.OCONV, ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
+			// When -d=checkptr=2 is enabled, treat
+			// conversions to unsafe.Pointer as an
+			// escaping operation. This allows better
+			// runtime instrumentation, since we can more
+			// easily detect object boundaries on the heap
+			// than the stack.
+			e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+		} else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+			e.unsafeValue(k, n.X)
+		} else {
+			e.expr(k, n.X)
+		}
+	case ir.OCONVIFACE:
+		n := n.(*ir.ConvExpr)
+		if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
+			k = e.spill(k, n)
+		}
+		e.expr(k.note(n, "interface-converted"), n.X)
+
+	case ir.ORECV:
+		n := n.(*ir.UnaryExpr)
+		e.discard(n.X)
+
+	case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
+		e.call([]hole{k}, n, nil)
+
+	case ir.ONEW:
+		n := n.(*ir.UnaryExpr)
+		e.spill(k, n)
+
+	case ir.OMAKESLICE:
+		n := n.(*ir.MakeExpr)
+		e.spill(k, n)
+		e.discard(n.Len)
+		e.discard(n.Cap)
+	case ir.OMAKECHAN:
+		n := n.(*ir.MakeExpr)
+		e.discard(n.Len)
+	case ir.OMAKEMAP:
+		n := n.(*ir.MakeExpr)
+		e.spill(k, n)
+		e.discard(n.Len)
+
+	case ir.ORECOVER:
+		// nop
+
+	case ir.OCALLPART:
+		// Flow the receiver argument to both the closure and
+		// to the receiver parameter.
+
+		n := n.(*ir.SelectorExpr)
+		closureK := e.spill(k, n)
+
+		m := n.Selection
+
+		// We don't know how the method value will be called
+		// later, so conservatively assume the result
+		// parameters all flow to the heap.
+		//
+		// TODO(mdempsky): Change ks into a callback, so that
+		// we don't have to create this slice?
+		var ks []hole
+		for i := m.Type.NumResults(); i > 0; i-- {
+			ks = append(ks, e.heapHole())
+		}
+		name, _ := m.Nname.(*ir.Name)
+		paramK := e.tagHole(ks, name, m.Type.Recv())
+
+		e.expr(e.teeHole(paramK, closureK), n.X)
+
+	case ir.OPTRLIT:
+		n := n.(*ir.AddrExpr)
+		e.expr(e.spill(k, n), n.X)
+
+	case ir.OARRAYLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, elt := range n.List {
+			if elt.Op() == ir.OKEY {
+				elt = elt.(*ir.KeyExpr).Value
+			}
+			e.expr(k.note(n, "array literal element"), elt)
+		}
+
+	case ir.OSLICELIT:
+		n := n.(*ir.CompLitExpr)
+		k = e.spill(k, n)
+		k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
+
+		for _, elt := range n.List {
+			if elt.Op() == ir.OKEY {
+				elt = elt.(*ir.KeyExpr).Value
+			}
+			e.expr(k.note(n, "slice-literal-element"), elt)
+		}
+
+	case ir.OSTRUCTLIT:
+		n := n.(*ir.CompLitExpr)
+		for _, elt := range n.List {
+			e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+		}
+
+	case ir.OMAPLIT:
+		n := n.(*ir.CompLitExpr)
+		e.spill(k, n)
+
+		// Map keys and values are always stored in the heap.
+		for _, elt := range n.List {
+			elt := elt.(*ir.KeyExpr)
+			e.assignHeap(elt.Key, "map literal key", n)
+			e.assignHeap(elt.Value, "map literal value", n)
+		}
+
+	case ir.OCLOSURE:
+		n := n.(*ir.ClosureExpr)
+		k = e.spill(k, n)
+		e.closures = append(e.closures, closure{k, n})
+
+		if fn := n.Func; fn.IsHiddenClosure() {
+			for _, cv := range fn.ClosureVars {
+				if loc := e.oldLoc(cv); !loc.captured {
+					loc.captured = true
+
+					// Ignore reassignments to the variable in straightline code
+					// preceding the first capture by a closure.
+					if loc.loopDepth == e.loopDepth {
+						loc.reassigned = false
+					}
+				}
+			}
+
+			for _, n := range fn.Dcl {
+				// Add locations for local variables of the
+				// closure, if needed, in case we're not including
+				// the closure func in the batch for escape
+				// analysis (happens for escape analysis called
+				// from reflectdata.methodWrapper)
+				if n.Op() == ir.ONAME && n.Opt == nil {
+					e.with(fn).newLoc(n, false)
+				}
+			}
+			e.walkFunc(fn)
+		}
+
+	case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+		n := n.(*ir.ConvExpr)
+		e.spill(k, n)
+		e.discard(n.X)
+
+	case ir.OADDSTR:
+		n := n.(*ir.AddStringExpr)
+		e.spill(k, n)
+
+		// Arguments of OADDSTR never escape;
+		// runtime.concatstrings makes sure of that.
+		e.discards(n.List)
+	}
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
+func (e *escape) unsafeValue(k hole, n ir.Node) {
+	if n.Type().Kind() != types.TUINTPTR {
+		base.Fatalf("unexpected type %v for %v", n.Type(), n)
+	}
+	if k.addrtaken {
+		base.Fatalf("unexpected addrtaken")
+	}
+
+	e.stmts(n.Init())
+
+	switch n.Op() {
+	case ir.OCONV, ir.OCONVNOP:
+		n := n.(*ir.ConvExpr)
+		if n.X.Type().IsUnsafePtr() {
+			e.expr(k, n.X)
+		} else {
+			e.discard(n.X)
+		}
+	case ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		if ir.IsReflectHeaderDataField(n) {
+			e.expr(k.deref(n, "reflect.Header.Data"), n.X)
+		} else {
+			e.discard(n.X)
+		}
+	case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+		n := n.(*ir.UnaryExpr)
+		e.unsafeValue(k, n.X)
+	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+		n := n.(*ir.BinaryExpr)
+		e.unsafeValue(k, n.X)
+		e.unsafeValue(k, n.Y)
+	case ir.OLSH, ir.ORSH:
+		n := n.(*ir.BinaryExpr)
+		e.unsafeValue(k, n.X)
+		// RHS need not be uintptr-typed (#32959) and can't meaningfully
+		// flow pointers anyway.
+		e.discard(n.Y)
+	default:
+		e.exprSkipInit(e.discardHole(), n)
+	}
+}
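
A hedged illustration of the uintptr arithmetic unsafeValue follows (helper name hypothetical): the conversion from unsafe.Pointer keeps the pointer flowing through the addition, while the integer offset is discarded, so the derived pointer is still accounted for by escape analysis.

package main

import "unsafe"

func secondElem(xs *[4]int64) unsafe.Pointer {
	p := unsafe.Pointer(xs)
	// unsafeValue walks uintptr(p) + 8: the OCONV from unsafe.Pointer
	// carries the pointer; the constant offset cannot.
	return unsafe.Pointer(uintptr(p) + 8)
}

func main() {
	xs := new([4]int64)
	xs[1] = 7
	println(*(*int64)(secondElem(xs))) // 7
}
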
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *escape) discard(n ir.Node) {
+	e.expr(e.discardHole(), n)
+}
+
+func (e *escape) discards(l ir.Nodes) {
+	for _, n := range l {
+		e.discard(n)
+	}
+}
+
+// addr evaluates an addressable expression n and returns a hole
+// that represents storing into the represented location.
+func (e *escape) addr(n ir.Node) hole {
+	if n == nil || ir.IsBlank(n) {
+		// Can happen in select case, range, maybe others.
+		return e.discardHole()
+	}
+
+	k := e.heapHole()
+
+	switch n.Op() {
+	default:
+		base.Fatalf("unexpected addr: %v", n)
+	case ir.ONAME:
+		n := n.(*ir.Name)
+		if n.Class == ir.PEXTERN {
+			break
+		}
+		k = e.oldLoc(n).asHole()
+	case ir.OLINKSYMOFFSET:
+		break
+	case ir.ODOT:
+		n := n.(*ir.SelectorExpr)
+		k = e.addr(n.X)
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		e.discard(n.Index)
+		if n.X.Type().IsArray() {
+			k = e.addr(n.X)
+		} else {
+			e.discard(n.X)
+		}
+	case ir.ODEREF, ir.ODOTPTR:
+		e.discard(n)
+	case ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		e.discard(n.X)
+		e.assignHeap(n.Index, "key of map put", n)
+	}
+
+	return k
+}
+
+func (e *escape) addrs(l ir.Nodes) []hole {
+	var ks []hole
+	for _, n := range l {
+		ks = append(ks, e.addr(n))
+	}
+	return ks
+}
+
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+	if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+		if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+			// Zero-value assignment for variable declared without an
+			// explicit initial value. Assume this is its initialization
+			// statement.
+			return
+		}
+	}
+
+	for _, k := range ks {
+		loc := k.dst
+		// Variables declared by range statements are assigned on every iteration.
+		if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+			continue
+		}
+		loc.reassigned = true
+	}
+}
+
+// assignList evaluates the assignment dsts... = srcs....
+func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
+	ks := e.addrs(dsts)
+	for i, k := range ks {
+		var src ir.Node
+		if i < len(srcs) {
+			src = srcs[i]
+		}
+
+		if dst := dsts[i]; dst != nil {
+			// Detect implicit conversion of uintptr to unsafe.Pointer when
+			// storing into reflect.{Slice,String}Header.
+			if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+				e.unsafeValue(e.heapHole().note(where, why), src)
+				continue
+			}
+
+			// Filter out some no-op assignments for escape analysis.
+			if src != nil && isSelfAssign(dst, src) {
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+				}
+				k = e.discardHole()
+			}
+		}
+
+		e.expr(k.note(where, why), src)
+	}
+
+	e.reassigned(ks, where)
+}
+
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
+	e.expr(e.heapHole().note(where, why), src)
+}
+
+// call evaluates a call expression, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flow; where is the OGO/ODEFER context of the call, if any.
+func (e *escape) call(ks []hole, call, where ir.Node) {
+	topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
+	if topLevelDefer {
+		// force stack allocation of defer record, unless
+		// open-coded defers are used (see ssa.go)
+		where.SetEsc(ir.EscNever)
+	}
+
+	argument := func(k hole, arg ir.Node) {
+		if topLevelDefer {
+			// Top-level defer arguments don't escape to the
+			// heap, but they do need to last until the end of
+			// the function.
+			k = e.later(k)
+		} else if where != nil {
+			k = e.heapHole()
+		}
+
+		e.expr(k.note(call, "call parameter"), arg)
+	}
+
+	switch call.Op() {
+	default:
+		ir.Dump("esc", call)
+		base.Fatalf("unexpected call op: %v", call.Op())
+
+	case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+		call := call.(*ir.CallExpr)
+		typecheck.FixVariadicCall(call)
+
+		// Pick out the function callee, if statically known.
+		var fn *ir.Name
+		switch call.Op() {
+		case ir.OCALLFUNC:
+			switch v := ir.StaticValue(call.X); {
+			case v.Op() == ir.ONAME && v.(*ir.Name).Class == ir.PFUNC:
+				fn = v.(*ir.Name)
+			case v.Op() == ir.OCLOSURE:
+				fn = v.(*ir.ClosureExpr).Func.Nname
+			}
+		case ir.OCALLMETH:
+			fn = ir.MethodExprName(call.X)
+		}
+
+		fntype := call.X.Type()
+		if fn != nil {
+			fntype = fn.Type()
+		}
+
+		if ks != nil && fn != nil && e.inMutualBatch(fn) {
+			for i, result := range fn.Type().Results().FieldSlice() {
+				e.expr(ks[i], ir.AsNode(result.Nname))
+			}
+		}
+
+		if r := fntype.Recv(); r != nil {
+			argument(e.tagHole(ks, fn, r), call.X.(*ir.SelectorExpr).X)
+		} else {
+			// Evaluate callee function expression.
+			argument(e.discardHole(), call.X)
+		}
+
+		args := call.Args
+		for i, param := range fntype.Params().FieldSlice() {
+			argument(e.tagHole(ks, fn, param), args[i])
+		}
+
+	case ir.OAPPEND:
+		call := call.(*ir.CallExpr)
+		args := call.Args
+
+		// Appendee slice may flow directly to the result, if
+		// it has enough capacity. Alternatively, a new heap
+		// slice might be allocated, and all slice elements
+		// might flow to heap.
+		appendeeK := ks[0]
+		if args[0].Type().Elem().HasPointers() {
+			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+		}
+		argument(appendeeK, args[0])
+
+		if call.IsDDD {
+			appendedK := e.discardHole()
+			if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
+				appendedK = e.heapHole().deref(call, "appended slice...")
+			}
+			argument(appendedK, args[1])
+		} else {
+			for _, arg := range args[1:] {
+				argument(e.heapHole(), arg)
+			}
+		}
+
+	case ir.OCOPY:
+		call := call.(*ir.BinaryExpr)
+		argument(e.discardHole(), call.X)
+
+		copiedK := e.discardHole()
+		if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
+			copiedK = e.heapHole().deref(call, "copied slice")
+		}
+		argument(copiedK, call.Y)
+
+	case ir.OPANIC:
+		call := call.(*ir.UnaryExpr)
+		argument(e.heapHole(), call.X)
+
+	case ir.OCOMPLEX:
+		call := call.(*ir.BinaryExpr)
+		argument(e.discardHole(), call.X)
+		argument(e.discardHole(), call.Y)
+	case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+		call := call.(*ir.CallExpr)
+		for _, arg := range call.Args {
+			argument(e.discardHole(), arg)
+		}
+	case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+		call := call.(*ir.UnaryExpr)
+		argument(e.discardHole(), call.X)
+	}
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flow. fn is the statically-known callee function,
+// if any.
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
+	// If this is a dynamic call, we can't rely on param.Note.
+	if fn == nil {
+		return e.heapHole()
+	}
+
+	if e.inMutualBatch(fn) {
+		return e.addr(ir.AsNode(param.Nname))
+	}
+
+	// Call to previously tagged function.
+
+	if param.Note == UintptrEscapesNote {
+		k := e.heapHole()
+		k.uintptrEscapesHack = true
+		return k
+	}
+
+	var tagKs []hole
+
+	esc := parseLeaks(param.Note)
+	if x := esc.Heap(); x >= 0 {
+		tagKs = append(tagKs, e.heapHole().shift(x))
+	}
+
+	if ks != nil {
+		for i := 0; i < numEscResults; i++ {
+			if x := esc.Result(i); x >= 0 {
+				tagKs = append(tagKs, ks[i].shift(x))
+			}
+		}
+	}
+
+	return e.teeHole(tagKs...)
+}
+
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (e *escape) inMutualBatch(fn *ir.Name) bool {
+	if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+		if fn.Defn.Esc() == escFuncUnknown {
+			base.Fatalf("graph inconsistency: %v", fn)
+		}
+		return true
+	}
+	return false
+}
+
+// A hole represents a context for evaluating a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
+type hole struct {
+	dst    *location
+	derefs int // >= -1
+	notes  *note
+
+	// addrtaken indicates whether this context is taking the address of
+	// the expression, independent of whether the address will actually
+	// be stored into a variable.
+	addrtaken bool
+
+	// uintptrEscapesHack indicates this context is evaluating an
+	// argument for a //go:uintptrescapes function.
+	uintptrEscapesHack bool
+}
+
+type note struct {
+	next  *note
+	where ir.Node
+	why   string
+}
+
+func (k hole) note(where ir.Node, why string) hole {
+	if where == nil || why == "" {
+		base.Fatalf("note: missing where/why")
+	}
+	if base.Flag.LowerM >= 2 || logopt.Enabled() {
+		k.notes = &note{
+			next:  k.notes,
+			where: where,
+			why:   why,
+		}
+	}
+	return k
+}
+
+func (k hole) shift(delta int) hole {
+	k.derefs += delta
+	if k.derefs < -1 {
+		base.Fatalf("derefs underflow: %v", k.derefs)
+	}
+	k.addrtaken = delta < 0
+	return k
+}
+
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole  { return k.shift(-1).note(where, why) }
+
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+	if !t.IsInterface() && !types.IsDirectIface(t) {
+		k = k.shift(1)
+	}
+	return k.note(where, why)
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
+func (e *escape) teeHole(ks ...hole) hole {
+	if len(ks) == 0 {
+		return e.discardHole()
+	}
+	if len(ks) == 1 {
+		return ks[0]
+	}
+	// TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+	// new temporary location ltmp, wire it into place, and return
+	// a hole for "ltmp = _".
+	loc := e.newLoc(nil, true)
+	for _, k := range ks {
+		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
+		// semantically equivalent. To combine holes like "l1
+		// = _" and "l2 = &_", we'd need to wire them as "l1 =
+		// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+		// instead.
+		if k.derefs < 0 {
+			base.Fatalf("teeHole: negative derefs")
+		}
+
+		e.flow(k, loc)
+	}
+	return loc.asHole()
+}
+
+func (e *escape) dcl(n *ir.Name) hole {
+	if n.Curfn != e.curfn || n.IsClosureVar() {
+		base.Fatalf("bad declaration of %v", n)
+	}
+	loc := e.oldLoc(n)
+	loc.loopDepth = e.loopDepth
+	return loc.asHole()
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *escape) spill(k hole, n ir.Node) hole {
+	loc := e.newLoc(n, true)
+	e.flow(k.addr(n, "spill"), loc)
+	return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *escape) later(k hole) hole {
+	loc := e.newLoc(nil, false)
+	e.flow(k, loc)
+	return loc.asHole()
+}
+
+func (e *escape) newLoc(n ir.Node, transient bool) *location {
+	if e.curfn == nil {
+		base.Fatalf("e.curfn isn't set")
+	}
+	if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+		base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
+	}
+
+	if n != nil && n.Op() == ir.ONAME {
+		n = n.(*ir.Name).Canonical()
+	}
+	loc := &location{
+		n:         n,
+		curfn:     e.curfn,
+		loopDepth: e.loopDepth,
+		transient: transient,
+	}
+	e.allLocs = append(e.allLocs, loc)
+	if n != nil {
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			if n.Curfn != e.curfn {
+				base.Fatalf("curfn mismatch: %v != %v", n.Curfn, e.curfn)
+			}
+
+			if n.Opt != nil {
+				base.Fatalf("%v already has a location", n)
+			}
+			n.Opt = loc
+		}
+	}
+	return loc
+}
+
+func (b *batch) oldLoc(n *ir.Name) *location {
+	return n.Canonical().Opt.(*location)
+}
+
+func (l *location) asHole() hole {
+	return hole{dst: l}
+}
+
+func (b *batch) flow(k hole, src *location) {
+	if k.addrtaken {
+		src.addrtaken = true
+	}
+
+	dst := k.dst
+	if dst == &b.blankLoc {
+		return
+	}
+	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+		return
+	}
+	if dst.escapes && k.derefs < 0 { // dst = &src
+		if base.Flag.LowerM >= 2 || logopt.Enabled() {
+			pos := base.FmtPos(src.n.Pos())
+			if base.Flag.LowerM >= 2 {
+				fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+			}
+			explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+			if logopt.Enabled() {
+				var e_curfn *ir.Func // TODO(mdempsky): Fix.
+				logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+			}
+
+		}
+		src.escapes = true
+		return
+	}
+
+	// TODO(mdempsky): Deduplicate edges?
+	dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
+func (b *batch) heapHole() hole    { return b.heapLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (b *batch) walkAll() {
+	// We use a work queue to keep track of locations that we need
+	// to visit, and repeatedly walk until we reach a fixed point.
+	//
+	// We walk once from each location (including the heap), and
+	// then re-enqueue each location on its transition from
+	// transient->!transient and !escapes->escapes, which can each
+	// happen at most once. So we take Θ(len(b.allLocs)) walks.
+
+	// LIFO queue, has enough room for b.allLocs and b.heapLoc.
+	todo := make([]*location, 0, len(b.allLocs)+1)
+	enqueue := func(loc *location) {
+		if !loc.queued {
+			todo = append(todo, loc)
+			loc.queued = true
+		}
+	}
+
+	for _, loc := range b.allLocs {
+		enqueue(loc)
+	}
+	enqueue(&b.heapLoc)
+
+	var walkgen uint32
+	for len(todo) > 0 {
+		root := todo[len(todo)-1]
+		todo = todo[:len(todo)-1]
+		root.queued = false
+
+		walkgen++
+		b.walkOne(root, walkgen, enqueue)
+	}
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
+	// The data flow graph has negative edges (from addressing
+	// operations), so we use the Bellman-Ford algorithm. However,
+	// we don't have to worry about infinite negative cycles since
+	// we bound intermediate dereference counts to 0.
+
+	root.walkgen = walkgen
+	root.derefs = 0
+	root.dst = nil
+
+	todo := []*location{root} // LIFO queue
+	for len(todo) > 0 {
+		l := todo[len(todo)-1]
+		todo = todo[:len(todo)-1]
+
+		derefs := l.derefs
+
+		// If l.derefs < 0, then l's address flows to root.
+		addressOf := derefs < 0
+		if addressOf {
+			// For a flow path like "root = &l; l = x",
+			// l's address flows to root, but x's does
+			// not. We recognize this by lower bounding
+			// derefs at 0.
+			derefs = 0
+
+			// If l's address flows to a non-transient
+			// location, then l can't be transiently
+			// allocated.
+			if !root.transient && l.transient {
+				l.transient = false
+				enqueue(l)
+			}
+		}
+
+		if b.outlives(root, l) {
+			// l's value flows to root. If l is a function
+			// parameter and root is the heap or a
+			// corresponding result parameter, then record
+			// that value flow for tagging the function
+			// later.
+			if l.isName(ir.PPARAM) {
+				if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+					if base.Flag.LowerM >= 2 {
+						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+					}
+					explanation := b.explainPath(root, l)
+					if logopt.Enabled() {
+						var e_curfn *ir.Func // TODO(mdempsky): Fix.
+						logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+					}
+				}
+				l.leakTo(root, derefs)
+			}
+
+			// If l's address flows somewhere that
+			// outlives it, then l needs to be heap
+			// allocated.
+			if addressOf && !l.escapes {
+				if logopt.Enabled() || base.Flag.LowerM >= 2 {
+					if base.Flag.LowerM >= 2 {
+						fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+					}
+					explanation := b.explainPath(root, l)
+					if logopt.Enabled() {
+						var e_curfn *ir.Func // TODO(mdempsky): Fix.
+						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+					}
+				}
+				l.escapes = true
+				enqueue(l)
+				continue
+			}
+		}
+
+		for i, edge := range l.edges {
+			if edge.src.escapes {
+				continue
+			}
+			d := derefs + edge.derefs
+			if edge.src.walkgen != walkgen || edge.src.derefs > d {
+				edge.src.walkgen = walkgen
+				edge.src.derefs = d
+				edge.src.dst = l
+				edge.src.dstEdgeIdx = i
+				todo = append(todo, edge.src)
+			}
+		}
+	}
+}
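
The walk above is easier to follow on a toy graph. The standalone sketch below (it does not use the compiler's types) mirrors the key detail: intermediate deref counts are clamped at 0, so for a flow like "root = &l; l = x" only l, not x, is treated as address-taken.

	package main

	import "fmt"

	type loc struct {
		name   string
		edges  []edge // assignments flowing into this location
		derefs int
		seen   bool
	}

	type edge struct {
		src    *loc
		derefs int // negative means "address of src"
	}

	// walk computes, for every location reachable from root, the minimal
	// number of dereferences along any flow path to root.
	func walk(root *loc) {
		root.derefs, root.seen = 0, true
		todo := []*loc{root} // LIFO queue
		for len(todo) > 0 {
			l := todo[len(todo)-1]
			todo = todo[:len(todo)-1]

			d := l.derefs
			if d < 0 {
				d = 0 // clamp: only l itself is address-taken, not its sources
			}
			for _, e := range l.edges {
				if nd := d + e.derefs; !e.src.seen || nd < e.src.derefs {
					e.src.seen, e.src.derefs = true, nd
					todo = append(todo, e.src)
				}
			}
		}
	}

	func main() {
		x := &loc{name: "x"}
		l := &loc{name: "l", edges: []edge{{src: x, derefs: 0}}}        // l = x
		root := &loc{name: "root", edges: []edge{{src: l, derefs: -1}}} // root = &l

		walk(root)
		fmt.Println(l.name, l.derefs) // l -1: l's address flows to root
		fmt.Println(x.name, x.derefs) // x 0: x's value flows to root, but not its address
	}
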
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
+	visited := make(map[*location]bool)
+	pos := base.FmtPos(src.n.Pos())
+	var explanation []*logopt.LoggedOpt
+	for {
+		// Prevent infinite loop.
+		if visited[src] {
+			if base.Flag.LowerM >= 2 {
+				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+			}
+			break
+		}
+		visited[src] = true
+		dst := src.dst
+		edge := &dst.edges[src.dstEdgeIdx]
+		if edge.src != src {
+			base.Fatalf("path inconsistency: %v != %v", edge.src, src)
+		}
+
+		explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+		if dst == root {
+			break
+		}
+		src = dst
+	}
+
+	return explanation
+}
+
+func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+	ops := "&"
+	if derefs >= 0 {
+		ops = strings.Repeat("*", derefs)
+	}
+	print := base.Flag.LowerM >= 2
+
+	flow := fmt.Sprintf("   flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
+	if print {
+		fmt.Printf("%s:%s\n", pos, flow)
+	}
+	if logopt.Enabled() {
+		var epos src.XPos
+		if notes != nil {
+			epos = notes.where.Pos()
+		} else if srcloc != nil && srcloc.n != nil {
+			epos = srcloc.n.Pos()
+		}
+		var e_curfn *ir.Func // TODO(mdempsky): Fix.
+		explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
+	}
+
+	for note := notes; note != nil; note = note.next {
+		if print {
+			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
+		}
+		if logopt.Enabled() {
+			var e_curfn *ir.Func // TODO(mdempsky): Fix.
+			explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
+				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
+		}
+	}
+	return explanation
+}
+
+func (b *batch) explainLoc(l *location) string {
+	if l == &b.heapLoc {
+		return "{heap}"
+	}
+	if l.n == nil {
+		// TODO(mdempsky): Omit entirely.
+		return "{temp}"
+	}
+	if l.n.Op() == ir.ONAME {
+		return fmt.Sprintf("%v", l.n)
+	}
+	return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (b *batch) outlives(l, other *location) bool {
+	// The heap outlives everything.
+	if l.escapes {
+		return true
+	}
+
+	// We don't know what callers do with returned values, so
+	// pessimistically we need to assume they flow to the heap and
+	// outlive everything too.
+	if l.isName(ir.PPARAMOUT) {
+		// Exception: Directly called closures can return
+		// locations allocated outside of them without forcing
+		// them to the heap. For example:
+		//
+		//    var u int  // okay to stack allocate
+		//    *(func() *int { return &u }()) = 42
+		if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+			return false
+		}
+
+		return true
+	}
+
+	// If l and other are within the same function, then l
+	// outlives other if it was declared outside other's loop
+	// scope. For example:
+	//
+	//    var l *int
+	//    for {
+	//        l = new(int)
+	//    }
+	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+		return true
+	}
+
+	// If other is declared within a child closure of where l is
+	// declared, then l outlives it. For example:
+	//
+	//    var l *int
+	//    func() {
+	//        l = new(int)
+	//    }
+	if containsClosure(l.curfn, other.curfn) {
+		return true
+	}
+
+	return false
+}
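
Both cases in the comments above can be reproduced with go build -gcflags=-m on a sketch like this one: x is moved to the heap because l, declared outside the loop, outlives it, while u stays on the stack because the closure returning &u is called directly.

	package main

	func loopDepth() *int {
		var l *int
		for i := 0; i < 3; i++ {
			x := i
			l = &x // l outlives x: "moved to heap: x"
		}
		return l
	}

	func directClosure() int {
		var u int // stays on the stack: the closure below is called directly
		*(func() *int { return &u }()) = 42
		return u
	}

	func main() {
		println(*loopDepth(), directClosure())
	}
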
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *ir.Func) bool {
+	// Common case.
+	if f == c {
+		return false
+	}
+
+	// Closures within function Foo are named like "Foo.funcN..."
+	// TODO(mdempsky): Better way to recognize this.
+	fn := f.Sym().Name
+	cn := c.Sym().Name
+	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
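
A toy version of the same prefix test, with the "Foo.funcN" naming it assumes (the helper name here is illustrative only):

	package main

	func nameContains(fn, cn string) bool {
		return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
	}

	func main() {
		println(nameContains("Foo", "Foo.func1")) // true
		println(nameContains("Foo", "Foobar"))    // false
	}
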
+
+// leakTo records that parameter l leaks to sink.
+func (l *location) leakTo(sink *location, derefs int) {
+	// If sink is a result parameter and we can fit return bits
+	// into the escape analysis tag, then record a return leak.
+	if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+		ri := sink.resultIndex - 1
+		if ri < numEscResults {
+			// Leak to result parameter.
+			l.paramEsc.AddResult(ri, derefs)
+			return
+		}
+	}
+
+	// Otherwise, record as heap leak.
+	l.paramEsc.AddHeap(derefs)
+}
+
+func (b *batch) finish(fns []*ir.Func) {
+	// Record parameter tags for package export data.
+	for _, fn := range fns {
+		fn.SetEsc(escFuncTagged)
+
+		narg := 0
+		for _, fs := range &types.RecvsParams {
+			for _, f := range fs(fn.Type()).Fields().Slice() {
+				narg++
+				f.Note = b.paramTag(fn, narg, f)
+			}
+		}
+	}
+
+	for _, loc := range b.allLocs {
+		n := loc.n
+		if n == nil {
+			continue
+		}
+		if n.Op() == ir.ONAME {
+			n := n.(*ir.Name)
+			n.Opt = nil
+		}
+
+		// Update n.Esc based on escape analysis results.
+
+		if loc.escapes {
+			if n.Op() == ir.ONAME {
+				if base.Flag.CompilingRuntime {
+					base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
+				}
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+				}
+			} else {
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(n.Pos(), "%v escapes to heap", n)
+				}
+				if logopt.Enabled() {
+					var e_curfn *ir.Func // TODO(mdempsky): Fix.
+					logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
+				}
+			}
+			n.SetEsc(ir.EscHeap)
+		} else {
+			if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+				base.WarnfAt(n.Pos(), "%v does not escape", n)
+			}
+			n.SetEsc(ir.EscNone)
+			if loc.transient {
+				switch n.Op() {
+				case ir.OCLOSURE:
+					n := n.(*ir.ClosureExpr)
+					n.SetTransient(true)
+				case ir.OCALLPART:
+					n := n.(*ir.SelectorExpr)
+					n.SetTransient(true)
+				case ir.OSLICELIT:
+					n := n.(*ir.CompLitExpr)
+					n.SetTransient(true)
+				}
+			}
+		}
+	}
+}
+
+func (l *location) isName(c ir.Class) bool {
+	return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
+}
+
+const numEscResults = 7
+
+// A leaks represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
+type leaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l leaks) Empty() bool { return l == leaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l leaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l leaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *leaks) add(i, derefs int) {
+	if old := l.get(i); old < 0 || derefs < old {
+		l.set(i, derefs)
+	}
+}
+
+func (l *leaks) set(i, derefs int) {
+	v := derefs + 1
+	if v < 0 {
+		base.Fatalf("invalid derefs count: %v", derefs)
+	}
+	if v > math.MaxUint8 {
+		v = math.MaxUint8
+	}
+
+	l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *leaks) Optimize() {
+	// If we have a path to the heap, then there's no use in
+	// keeping equal or longer paths elsewhere.
+	if x := l.Heap(); x >= 0 {
+		for i := 0; i < numEscResults; i++ {
+			if l.Result(i) >= x {
+				l.setResult(i, -1)
+			}
+		}
+	}
+}
+
+var leakTagCache = map[leaks]string{}
+
+// Encode converts l into a binary string for export data.
+func (l leaks) Encode() string {
+	if l.Heap() == 0 {
+		// Space optimization: empty string encodes more
+		// efficiently in export data.
+		return ""
+	}
+	if s, ok := leakTagCache[l]; ok {
+		return s
+	}
+
+	n := len(l)
+	for n > 0 && l[n-1] == 0 {
+		n--
+	}
+	s := "esc:" + string(l[:n])
+	leakTagCache[l] = s
+	return s
+}
+
+// parseLeaks parses a binary string representing a leaks value.
+func parseLeaks(s string) leaks {
+	var l leaks
+	if !strings.HasPrefix(s, "esc:") {
+		l.AddHeap(0)
+		return l
+	}
+	copy(l[:], s[4:])
+	return l
+}
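
A standalone sketch of the tag format produced by Encode and consumed by parseLeaks (omitting the empty-string shortcut for a direct heap leak): slot 0 stores the heap deref count plus one, slots 1..7 store result-parameter counts, and trailing zero slots are trimmed.

	package main

	import (
		"fmt"
		"strings"
	)

	const numEscResults = 7

	type leaks [1 + numEscResults]uint8

	func (l *leaks) addHeap(derefs int)      { l.add(0, derefs) }
	func (l *leaks) addResult(i, derefs int) { l.add(1+i, derefs) }

	func (l *leaks) add(i, derefs int) {
		if old := int(l[i]) - 1; old < 0 || derefs < old {
			l[i] = uint8(derefs + 1)
		}
	}

	func (l leaks) encode() string {
		n := len(l)
		for n > 0 && l[n-1] == 0 {
			n-- // trim trailing zero slots
		}
		return "esc:" + string(l[:n])
	}

	func parse(s string) leaks {
		var l leaks
		if !strings.HasPrefix(s, "esc:") {
			l.addHeap(0) // unknown tag: assume the parameter leaks to the heap
			return l
		}
		copy(l[:], s[4:])
		return l
	}

	func main() {
		var l leaks
		l.addResult(0, 2) // flows to result 0 through two dereferences
		tag := l.encode()
		fmt.Printf("%q\n", tag)             // "esc:\x00\x03"
		fmt.Println(int(parse(tag)[1]) - 1) // 2
	}
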
+
+func Funcs(all []ir.Node) {
+	ir.VisitFuncsBottomUp(all, Batch)
+}
+
+const (
+	escFuncUnknown = 0 + iota
+	escFuncPlanned
+	escFuncStarted
+	escFuncTagged
+)
+
+// Mark labels that have no backjumps to them as not increasing e.loopDepth.
+type labelState int
+
+const (
+	looping labelState = 1 + iota
+	nonlooping
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+	// Detect the following special case.
+	//
+	//	func (b *Buffer) Foo() {
+	//		n, m := ...
+	//		b.buf = b.buf[n:m]
+	//	}
+	//
+	// This assignment is a no-op for escape analysis:
+	// it does not store any new pointers into b that were not already there.
+	// However, without this special case b will escape, because we assign to ODEREF/ODOTPTR.
+	// Here we assume that the statement will not contain calls,
+	// that is, that order will move any calls to init.
+	// Otherwise base ONAME value could change between the moments
+	// when we evaluate it for dst and for src.
+
+	// dst is ONAME dereference.
+	var dstX ir.Node
+	switch dst.Op() {
+	default:
+		return false
+	case ir.ODEREF:
+		dst := dst.(*ir.StarExpr)
+		dstX = dst.X
+	case ir.ODOTPTR:
+		dst := dst.(*ir.SelectorExpr)
+		dstX = dst.X
+	}
+	if dstX.Op() != ir.ONAME {
+		return false
+	}
+	// src is a slice operation.
+	switch src.Op() {
+	case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+		// OK.
+	case ir.OSLICEARR, ir.OSLICE3ARR:
+		// Since arrays are embedded in their containing object,
+		// slicing a non-pointer array introduces a new pointer into b that was not already there
+		// (a pointer to b itself). After such an assignment, if b's contents escape,
+		// b escapes as well. If we ignored such an OSLICEARR, we would conclude
+		// that b does not escape when its contents do.
+		//
+		// Pointer to an array is OK since it's not stored inside b directly.
+		// For slicing an array (not pointer to array), there is an implicit OADDR.
+		// We check that to determine non-pointer array slicing.
+		src := src.(*ir.SliceExpr)
+		if src.X.Op() == ir.OADDR {
+			return false
+		}
+	default:
+		return false
+	}
+	// slice is applied to ONAME dereference.
+	var baseX ir.Node
+	switch base := src.(*ir.SliceExpr).X; base.Op() {
+	default:
+		return false
+	case ir.ODEREF:
+		base := base.(*ir.StarExpr)
+		baseX = base.X
+	case ir.ODOTPTR:
+		base := base.(*ir.SelectorExpr)
+		baseX = base.X
+	}
+	if baseX.Op() != ir.ONAME {
+		return false
+	}
+	// dst and src reference the same base ONAME.
+	return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+	if isSliceSelfAssign(dst, src) {
+		return true
+	}
+
+	// Detect trivial assignments that assign back to the same object.
+	//
+	// It covers these cases:
+	//	val.x = val.y
+	//	val.x[i] = val.y[j]
+	//	val.x1.x2 = val.x1.y2
+	//	... etc
+	//
+	// These assignments do not change assigned object lifetime.
+
+	if dst == nil || src == nil || dst.Op() != src.Op() {
+		return false
+	}
+
+	// The expression prefix must be both "safe" and identical.
+	switch dst.Op() {
+	case ir.ODOT, ir.ODOTPTR:
+		// Safe trailing accessors that are permitted to differ.
+		dst := dst.(*ir.SelectorExpr)
+		src := src.(*ir.SelectorExpr)
+		return ir.SameSafeExpr(dst.X, src.X)
+	case ir.OINDEX:
+		dst := dst.(*ir.IndexExpr)
+		src := src.(*ir.IndexExpr)
+		if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
+			return false
+		}
+		return ir.SameSafeExpr(dst.X, src.X)
+	default:
+		return false
+	}
+}
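
The Buffer example from the comment in isSliceSelfAssign can be checked directly; with go build -gcflags=-m the receiver should be reported as "b does not escape", because the assignment is recognized as a self-assignment and ignored.

	package main

	type Buffer struct{ buf []byte }

	func (b *Buffer) Shrink(n, m int) {
		b.buf = b.buf[n:m] // self-assignment: ignored by escape analysis
	}

	func main() {
		b := Buffer{buf: make([]byte, 8)}
		b.Shrink(1, 4)
		println(len(b.buf))
	}
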
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+	// We may want to use a list of "memory safe" ops instead of generally
+	// "side-effect free", which would include all calls and other ops that can
+	// allocate or change global state. For now, it's safer to start with the latter.
+	//
+	// We're ignoring things like division by zero, index out of range,
+	// and nil pointer dereference here.
+
+	// TODO(rsc): It seems like it should be possible to replace this with
+	// an ir.Any looking for any op that's not the ones in the case statement.
+	// But that produces changes in the compiled output detected by buildall.
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return false
+
+	case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+		n := n.(*ir.BinaryExpr)
+		return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
+
+	case ir.OINDEX:
+		n := n.(*ir.IndexExpr)
+		return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
+
+	case ir.OCONVNOP, ir.OCONV:
+		n := n.(*ir.ConvExpr)
+		return mayAffectMemory(n.X)
+
+	case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+		n := n.(*ir.UnaryExpr)
+		return mayAffectMemory(n.X)
+
+	case ir.ODOT, ir.ODOTPTR:
+		n := n.(*ir.SelectorExpr)
+		return mayAffectMemory(n.X)
+
+	case ir.ODEREF:
+		n := n.(*ir.StarExpr)
+		return mayAffectMemory(n.X)
+
+	default:
+		return true
+	}
+}
+
+// HeapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
+func HeapAllocReason(n ir.Node) string {
+	if n == nil || n.Type() == nil {
+		return ""
+	}
+
+	// Parameters are always passed via the stack.
+	if n.Op() == ir.ONAME {
+		n := n.(*ir.Name)
+		if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+			return ""
+		}
+	}
+
+	if n.Type().Width > ir.MaxStackVarSize {
+		return "too large for stack"
+	}
+
+	if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
+		return "too large for stack"
+	}
+
+	if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize {
+		return "too large for stack"
+	}
+	if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.SelectorExpr)).Size() >= ir.MaxImplicitStackVarSize {
+		return "too large for stack"
+	}
+
+	if n.Op() == ir.OMAKESLICE {
+		n := n.(*ir.MakeExpr)
+		r := n.Cap
+		if r == nil {
+			r = n.Len
+		}
+		if !ir.IsSmallIntConst(r) {
+			return "non-constant size"
+		}
+		if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= ir.MaxImplicitStackVarSize/t.Elem().Width {
+			return "too large for stack"
+		}
+	}
+
+	return ""
+}
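
The size- and constant-based reasons above correspond to diagnostics visible with go build -gcflags=-m; in this sketch (assuming the usual 64KiB limit for implicit stack allocations) only the first slice stays on the stack.

	package main

	func sizes(n int) byte {
		small := make([]byte, 64)  // constant and small: stack allocated
		big := make([]byte, 1<<20) // constant but too large for stack: heap allocated
		dyn := make([]byte, n)     // non-constant size: heap allocated
		return small[0] + big[0] + dyn[0]
	}

	func main() { println(sizes(3)) }
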
+
+// UnsafeUintptrNote is a special tag applied to uintptr variables
+// that we believe may hold unsafe.Pointers for
+// calls into assembly functions.
+const UnsafeUintptrNote = "unsafe-uintptr"
+
+// UintptrEscapesNote is a special tag applied to uintptr parameters of
+// functions marked //go:uintptrescapes.
+const UintptrEscapesNote = "uintptr-escapes"
+
+func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+	name := func() string {
+		if f.Sym != nil {
+			return f.Sym.Name
+		}
+		return fmt.Sprintf("arg#%d", narg)
+	}
+
+	if len(fn.Body) == 0 {
+		// Assume that uintptr arguments must be held live across the call.
+		// This is most important for syscall.Syscall.
+		// See golang.org/issue/13372.
+		// This really doesn't have much to do with escape analysis per se,
+		// but we are reusing the ability to annotate an individual function
+		// argument and pass those annotations along to importing code.
+		if f.Type.IsUintptr() {
+			if base.Flag.LowerM != 0 {
+				base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+			}
+			return UnsafeUintptrNote
+		}
+
+		if !f.Type.HasPointers() { // don't bother tagging for scalars
+			return ""
+		}
+
+		var esc leaks
+
+		// External functions are assumed unsafe, unless
+		// //go:noescape is given before the declaration.
+		if fn.Pragma&ir.Noescape != 0 {
+			if base.Flag.LowerM != 0 && f.Sym != nil {
+				base.WarnfAt(f.Pos, "%v does not escape", name())
+			}
+		} else {
+			if base.Flag.LowerM != 0 && f.Sym != nil {
+				base.WarnfAt(f.Pos, "leaking param: %v", name())
+			}
+			esc.AddHeap(0)
+		}
+
+		return esc.Encode()
+	}
+
+	if fn.Pragma&ir.UintptrEscapes != 0 {
+		if f.Type.IsUintptr() {
+			if base.Flag.LowerM != 0 {
+				base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
+			}
+			return UintptrEscapesNote
+		}
+		if f.IsDDD() && f.Type.Elem().IsUintptr() {
+			// final argument is ...uintptr.
+			if base.Flag.LowerM != 0 {
+				base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
+			}
+			return UintptrEscapesNote
+		}
+	}
+
+	if !f.Type.HasPointers() { // don't bother tagging for scalars
+		return ""
+	}
+
+	// Unnamed and blank parameters are unused and therefore do not escape.
+	if f.Sym == nil || f.Sym.IsBlank() {
+		var esc leaks
+		return esc.Encode()
+	}
+
+	n := f.Nname.(*ir.Name)
+	loc := b.oldLoc(n)
+	esc := loc.paramEsc
+	esc.Optimize()
+
+	if base.Flag.LowerM != 0 && !loc.escapes {
+		if esc.Empty() {
+			base.WarnfAt(f.Pos, "%v does not escape", name())
+		}
+		if x := esc.Heap(); x >= 0 {
+			if x == 0 {
+				base.WarnfAt(f.Pos, "leaking param: %v", name())
+			} else {
+				// TODO(mdempsky): Mention level=x like below?
+				base.WarnfAt(f.Pos, "leaking param content: %v", name())
+			}
+		}
+		for i := 0; i < numEscResults; i++ {
+			if x := esc.Result(i); x >= 0 {
+				res := fn.Type().Results().Field(i).Sym
+				base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
+			}
+		}
+	}
+
+	return esc.Encode()
+}
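
The warnings emitted by paramTag line up with the familiar -m diagnostics. A hedged sketch of the three common cases on a small package (names are illustrative only):

	package sketch

	var sink *int

	func keep(p *int) *int { return p } // -m: leaking param: p to result ... level=0

	func store(p *int) { sink = p } // -m: leaking param: p

	func read(p *int) int { return *p } // -m: p does not escape
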
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
deleted file mode 100644
index 2f7fa27..0000000
--- a/src/cmd/compile/internal/gc/alg.go
+++ /dev/null
@@ -1,959 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"fmt"
-	"sort"
-)
-
-// AlgKind describes the kind of algorithms used for comparing and
-// hashing a Type.
-type AlgKind int
-
-//go:generate stringer -type AlgKind -trimprefix A
-
-const (
-	// These values are known by runtime.
-	ANOEQ AlgKind = iota
-	AMEM0
-	AMEM8
-	AMEM16
-	AMEM32
-	AMEM64
-	AMEM128
-	ASTRING
-	AINTER
-	ANILINTER
-	AFLOAT32
-	AFLOAT64
-	ACPLX64
-	ACPLX128
-
-	// Type can be compared/hashed as regular memory.
-	AMEM AlgKind = 100
-
-	// Type needs special comparison/hashing functions.
-	ASPECIAL AlgKind = -1
-)
-
-// IsComparable reports whether t is a comparable type.
-func IsComparable(t *types.Type) bool {
-	a, _ := algtype1(t)
-	return a != ANOEQ
-}
-
-// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func IsRegularMemory(t *types.Type) bool {
-	a, _ := algtype1(t)
-	return a == AMEM
-}
-
-// IncomparableField returns an incomparable Field of struct Type t, if any.
-func IncomparableField(t *types.Type) *types.Field {
-	for _, f := range t.FieldSlice() {
-		if !IsComparable(f.Type) {
-			return f
-		}
-	}
-	return nil
-}
-
-// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
-// t must be comparable.
-func EqCanPanic(t *types.Type) bool {
-	switch t.Etype {
-	default:
-		return false
-	case TINTER:
-		return true
-	case TARRAY:
-		return EqCanPanic(t.Elem())
-	case TSTRUCT:
-		for _, f := range t.FieldSlice() {
-			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
-				return true
-			}
-		}
-		return false
-	}
-}
-
-// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
-// instead of the general AMEM kind when possible.
-func algtype(t *types.Type) AlgKind {
-	a, _ := algtype1(t)
-	if a == AMEM {
-		switch t.Width {
-		case 0:
-			return AMEM0
-		case 1:
-			return AMEM8
-		case 2:
-			return AMEM16
-		case 4:
-			return AMEM32
-		case 8:
-			return AMEM64
-		case 16:
-			return AMEM128
-		}
-	}
-
-	return a
-}
-
-// algtype1 returns the AlgKind used for comparing and hashing Type t.
-// If it returns ANOEQ, it also returns the component type of t that
-// makes it incomparable.
-func algtype1(t *types.Type) (AlgKind, *types.Type) {
-	if t.Broke() {
-		return AMEM, nil
-	}
-	if t.Noalg() {
-		return ANOEQ, t
-	}
-
-	switch t.Etype {
-	case TANY, TFORW:
-		// will be defined later.
-		return ANOEQ, t
-
-	case TINT8, TUINT8, TINT16, TUINT16,
-		TINT32, TUINT32, TINT64, TUINT64,
-		TINT, TUINT, TUINTPTR,
-		TBOOL, TPTR,
-		TCHAN, TUNSAFEPTR:
-		return AMEM, nil
-
-	case TFUNC, TMAP:
-		return ANOEQ, t
-
-	case TFLOAT32:
-		return AFLOAT32, nil
-
-	case TFLOAT64:
-		return AFLOAT64, nil
-
-	case TCOMPLEX64:
-		return ACPLX64, nil
-
-	case TCOMPLEX128:
-		return ACPLX128, nil
-
-	case TSTRING:
-		return ASTRING, nil
-
-	case TINTER:
-		if t.IsEmptyInterface() {
-			return ANILINTER, nil
-		}
-		return AINTER, nil
-
-	case TSLICE:
-		return ANOEQ, t
-
-	case TARRAY:
-		a, bad := algtype1(t.Elem())
-		switch a {
-		case AMEM:
-			return AMEM, nil
-		case ANOEQ:
-			return ANOEQ, bad
-		}
-
-		switch t.NumElem() {
-		case 0:
-			// We checked above that the element type is comparable.
-			return AMEM, nil
-		case 1:
-			// Single-element array is same as its lone element.
-			return a, nil
-		}
-
-		return ASPECIAL, nil
-
-	case TSTRUCT:
-		fields := t.FieldSlice()
-
-		// One-field struct is same as that one field alone.
-		if len(fields) == 1 && !fields[0].Sym.IsBlank() {
-			return algtype1(fields[0].Type)
-		}
-
-		ret := AMEM
-		for i, f := range fields {
-			// All fields must be comparable.
-			a, bad := algtype1(f.Type)
-			if a == ANOEQ {
-				return ANOEQ, bad
-			}
-
-			// Blank fields, padded fields, fields with non-memory
-			// equality need special compare.
-			if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
-				ret = ASPECIAL
-			}
-		}
-
-		return ret, nil
-	}
-
-	Fatalf("algtype1: unexpected type %v", t)
-	return 0, nil
-}
-
-// genhash returns a symbol which is the closure used to compute
-// the hash of a value of type t.
-// Note: the generated function must match runtime.typehash exactly.
-func genhash(t *types.Type) *obj.LSym {
-	switch algtype(t) {
-	default:
-		// genhash is only called for types that have equality
-		Fatalf("genhash %v", t)
-	case AMEM0:
-		return sysClosure("memhash0")
-	case AMEM8:
-		return sysClosure("memhash8")
-	case AMEM16:
-		return sysClosure("memhash16")
-	case AMEM32:
-		return sysClosure("memhash32")
-	case AMEM64:
-		return sysClosure("memhash64")
-	case AMEM128:
-		return sysClosure("memhash128")
-	case ASTRING:
-		return sysClosure("strhash")
-	case AINTER:
-		return sysClosure("interhash")
-	case ANILINTER:
-		return sysClosure("nilinterhash")
-	case AFLOAT32:
-		return sysClosure("f32hash")
-	case AFLOAT64:
-		return sysClosure("f64hash")
-	case ACPLX64:
-		return sysClosure("c64hash")
-	case ACPLX128:
-		return sysClosure("c128hash")
-	case AMEM:
-		// For other sizes of plain memory, we build a closure
-		// that calls memhash_varlen. The size of the memory is
-		// encoded in the first slot of the closure.
-		closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
-		if len(closure.P) > 0 { // already generated
-			return closure
-		}
-		if memhashvarlen == nil {
-			memhashvarlen = sysfunc("memhash_varlen")
-		}
-		ot := 0
-		ot = dsymptr(closure, ot, memhashvarlen, 0)
-		ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
-		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
-		return closure
-	case ASPECIAL:
-		break
-	}
-
-	closure := typesymprefix(".hashfunc", t).Linksym()
-	if len(closure.P) > 0 { // already generated
-		return closure
-	}
-
-	// Generate hash functions for subtypes.
-	// There are cases where we might not use these hashes,
-	// but in that case they will get dead-code eliminated.
-	// (And the closure generated by genhash will also get
-	// dead-code eliminated, as we call the subtype hashers
-	// directly.)
-	switch t.Etype {
-	case types.TARRAY:
-		genhash(t.Elem())
-	case types.TSTRUCT:
-		for _, f := range t.FieldSlice() {
-			genhash(f.Type)
-		}
-	}
-
-	sym := typesymprefix(".hash", t)
-	if Debug.r != 0 {
-		fmt.Printf("genhash %v %v %v\n", closure, sym, t)
-	}
-
-	lineno = autogeneratedPos // less confusing than end of input
-	dclcontext = PEXTERN
-
-	// func sym(p *T, h uintptr) uintptr
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Set2(
-		namedfield("p", types.NewPtr(t)),
-		namedfield("h", types.Types[TUINTPTR]),
-	)
-	tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
-
-	fn := dclfunc(sym, tfn)
-	np := asNode(tfn.Type.Params().Field(0).Nname)
-	nh := asNode(tfn.Type.Params().Field(1).Nname)
-
-	switch t.Etype {
-	case types.TARRAY:
-		// An array of pure memory would be handled by the
-		// standard algorithm, so the element type must not be
-		// pure memory.
-		hashel := hashfor(t.Elem())
-
-		n := nod(ORANGE, nil, nod(ODEREF, np, nil))
-		ni := newname(lookup("i"))
-		ni.Type = types.Types[TINT]
-		n.List.Set1(ni)
-		n.SetColas(true)
-		colasdefn(n.List.Slice(), n)
-		ni = n.List.First()
-
-		// h = hashel(&p[i], h)
-		call := nod(OCALL, hashel, nil)
-
-		nx := nod(OINDEX, np, ni)
-		nx.SetBounded(true)
-		na := nod(OADDR, nx, nil)
-		call.List.Append(na)
-		call.List.Append(nh)
-		n.Nbody.Append(nod(OAS, nh, call))
-
-		fn.Nbody.Append(n)
-
-	case types.TSTRUCT:
-		// Walk the struct using memhash for runs of AMEM
-		// and calling specific hash functions for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
-			f := fields[i]
-
-			// Skip blank fields.
-			if f.Sym.IsBlank() {
-				i++
-				continue
-			}
-
-			// Hash non-memory fields with appropriate hash function.
-			if !IsRegularMemory(f.Type) {
-				hashel := hashfor(f.Type)
-				call := nod(OCALL, hashel, nil)
-				nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
-				na := nod(OADDR, nx, nil)
-				call.List.Append(na)
-				call.List.Append(nh)
-				fn.Nbody.Append(nod(OAS, nh, call))
-				i++
-				continue
-			}
-
-			// Otherwise, hash a maximal length run of raw memory.
-			size, next := memrun(t, i)
-
-			// h = hashel(&p.first, size, h)
-			hashel := hashmem(f.Type)
-			call := nod(OCALL, hashel, nil)
-			nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
-			na := nod(OADDR, nx, nil)
-			call.List.Append(na)
-			call.List.Append(nh)
-			call.List.Append(nodintconst(size))
-			fn.Nbody.Append(nod(OAS, nh, call))
-
-			i = next
-		}
-	}
-
-	r := nod(ORETURN, nil, nil)
-	r.List.Append(nh)
-	fn.Nbody.Append(r)
-
-	if Debug.r != 0 {
-		dumplist("genhash body", fn.Nbody)
-	}
-
-	funcbody()
-
-	fn.Func.SetDupok(true)
-	fn = typecheck(fn, ctxStmt)
-
-	Curfn = fn
-	typecheckslice(fn.Nbody.Slice(), ctxStmt)
-	Curfn = nil
-
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	fn.Func.SetNilCheckDisabled(true)
-	xtop = append(xtop, fn)
-
-	// Build closure. It doesn't close over any variables, so
-	// it contains just the function pointer.
-	dsymptr(closure, 0, sym.Linksym(), 0)
-	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
-
-	return closure
-}
-
-func hashfor(t *types.Type) *Node {
-	var sym *types.Sym
-
-	switch a, _ := algtype1(t); a {
-	case AMEM:
-		Fatalf("hashfor with AMEM type")
-	case AINTER:
-		sym = Runtimepkg.Lookup("interhash")
-	case ANILINTER:
-		sym = Runtimepkg.Lookup("nilinterhash")
-	case ASTRING:
-		sym = Runtimepkg.Lookup("strhash")
-	case AFLOAT32:
-		sym = Runtimepkg.Lookup("f32hash")
-	case AFLOAT64:
-		sym = Runtimepkg.Lookup("f64hash")
-	case ACPLX64:
-		sym = Runtimepkg.Lookup("c64hash")
-	case ACPLX128:
-		sym = Runtimepkg.Lookup("c128hash")
-	default:
-		// Note: the caller of hashfor ensured that this symbol
-		// exists and has a body by calling genhash for t.
-		sym = typesymprefix(".hash", t)
-	}
-
-	n := newname(sym)
-	setNodeNameFunc(n)
-	n.Type = functype(nil, []*Node{
-		anonfield(types.NewPtr(t)),
-		anonfield(types.Types[TUINTPTR]),
-	}, []*Node{
-		anonfield(types.Types[TUINTPTR]),
-	})
-	return n
-}
-
-// sysClosure returns a closure which will call the
-// given runtime function (with no closed-over variables).
-func sysClosure(name string) *obj.LSym {
-	s := sysvar(name + "·f")
-	if len(s.P) == 0 {
-		f := sysfunc(name)
-		dsymptr(s, 0, f, 0)
-		ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	}
-	return s
-}
-
-// geneq returns a symbol which is the closure used to compute
-// equality for two objects of type t.
-func geneq(t *types.Type) *obj.LSym {
-	switch algtype(t) {
-	case ANOEQ:
-		// The runtime will panic if it tries to compare
-		// a type with a nil equality function.
-		return nil
-	case AMEM0:
-		return sysClosure("memequal0")
-	case AMEM8:
-		return sysClosure("memequal8")
-	case AMEM16:
-		return sysClosure("memequal16")
-	case AMEM32:
-		return sysClosure("memequal32")
-	case AMEM64:
-		return sysClosure("memequal64")
-	case AMEM128:
-		return sysClosure("memequal128")
-	case ASTRING:
-		return sysClosure("strequal")
-	case AINTER:
-		return sysClosure("interequal")
-	case ANILINTER:
-		return sysClosure("nilinterequal")
-	case AFLOAT32:
-		return sysClosure("f32equal")
-	case AFLOAT64:
-		return sysClosure("f64equal")
-	case ACPLX64:
-		return sysClosure("c64equal")
-	case ACPLX128:
-		return sysClosure("c128equal")
-	case AMEM:
-		// make equality closure. The size of the type
-		// is encoded in the closure.
-		closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
-		if len(closure.P) != 0 {
-			return closure
-		}
-		if memequalvarlen == nil {
-			memequalvarlen = sysvar("memequal_varlen") // asm func
-		}
-		ot := 0
-		ot = dsymptr(closure, ot, memequalvarlen, 0)
-		ot = duintptr(closure, ot, uint64(t.Width))
-		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
-		return closure
-	case ASPECIAL:
-		break
-	}
-
-	closure := typesymprefix(".eqfunc", t).Linksym()
-	if len(closure.P) > 0 { // already generated
-		return closure
-	}
-	sym := typesymprefix(".eq", t)
-	if Debug.r != 0 {
-		fmt.Printf("geneq %v\n", t)
-	}
-
-	// Autogenerate code for equality of structs and arrays.
-
-	lineno = autogeneratedPos // less confusing than end of input
-	dclcontext = PEXTERN
-
-	// func sym(p, q *T) bool
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Set2(
-		namedfield("p", types.NewPtr(t)),
-		namedfield("q", types.NewPtr(t)),
-	)
-	tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
-
-	fn := dclfunc(sym, tfn)
-	np := asNode(tfn.Type.Params().Field(0).Nname)
-	nq := asNode(tfn.Type.Params().Field(1).Nname)
-	nr := asNode(tfn.Type.Results().Field(0).Nname)
-
-	// Label to jump to if an equality test fails.
-	neq := autolabel(".neq")
-
-	// We reach here only for types that have equality but
-	// cannot be handled by the standard algorithms,
-	// so t must be either an array or a struct.
-	switch t.Etype {
-	default:
-		Fatalf("geneq %v", t)
-
-	case TARRAY:
-		nelem := t.NumElem()
-
-		// checkAll generates code to check the equality of all array elements.
-		// If unroll is greater than nelem, checkAll generates:
-		//
-		// if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
-		// } else {
-		//   return
-		// }
-		//
-		// And so on.
-		//
-		// Otherwise it generates:
-		//
-		// for i := 0; i < nelem; i++ {
-		//   if eq(p[i], q[i]) {
-		//   } else {
-		//     goto neq
-		//   }
-		// }
-		//
-		// TODO(josharian): consider doing some loop unrolling
-		// for larger nelem as well, processing a few elements at a time in a loop.
-		checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
-			// checkIdx generates a node to check for equality at index i.
-			checkIdx := func(i *Node) *Node {
-				// pi := p[i]
-				pi := nod(OINDEX, np, i)
-				pi.SetBounded(true)
-				pi.Type = t.Elem()
-				// qi := q[i]
-				qi := nod(OINDEX, nq, i)
-				qi.SetBounded(true)
-				qi.Type = t.Elem()
-				return eq(pi, qi)
-			}
-
-			if nelem <= unroll {
-				if last {
-					// Do last comparison in a different manner.
-					nelem--
-				}
-				// Generate a series of checks.
-				for i := int64(0); i < nelem; i++ {
-					// if check {} else { goto neq }
-					nif := nod(OIF, checkIdx(nodintconst(i)), nil)
-					nif.Rlist.Append(nodSym(OGOTO, nil, neq))
-					fn.Nbody.Append(nif)
-				}
-				if last {
-					fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
-				}
-			} else {
-				// Generate a for loop.
-				// for i := 0; i < nelem; i++
-				i := temp(types.Types[TINT])
-				init := nod(OAS, i, nodintconst(0))
-				cond := nod(OLT, i, nodintconst(nelem))
-				post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
-				loop := nod(OFOR, cond, post)
-				loop.Ninit.Append(init)
-				// if eq(pi, qi) {} else { goto neq }
-				nif := nod(OIF, checkIdx(i), nil)
-				nif.Rlist.Append(nodSym(OGOTO, nil, neq))
-				loop.Nbody.Append(nif)
-				fn.Nbody.Append(loop)
-				if last {
-					fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
-				}
-			}
-		}
-
-		switch t.Elem().Etype {
-		case TSTRING:
-			// Do two loops. First, check that all the lengths match (cheap).
-			// Second, check that all the contents match (expensive).
-			// TODO: when the array size is small, unroll the length match checks.
-			checkAll(3, false, func(pi, qi *Node) *Node {
-				// Compare lengths.
-				eqlen, _ := eqstring(pi, qi)
-				return eqlen
-			})
-			checkAll(1, true, func(pi, qi *Node) *Node {
-				// Compare contents.
-				_, eqmem := eqstring(pi, qi)
-				return eqmem
-			})
-		case TFLOAT32, TFLOAT64:
-			checkAll(2, true, func(pi, qi *Node) *Node {
-				// p[i] == q[i]
-				return nod(OEQ, pi, qi)
-			})
-		// TODO: pick apart structs, do them piecemeal too
-		default:
-			checkAll(1, true, func(pi, qi *Node) *Node {
-				// p[i] == q[i]
-				return nod(OEQ, pi, qi)
-			})
-		}
-
-	case TSTRUCT:
-		// Build a list of conditions to satisfy.
-		// The conditions are a list-of-lists. Conditions are reorderable
-		// within each inner list. The outer lists must be evaluated in order.
-		var conds [][]*Node
-		conds = append(conds, []*Node{})
-		and := func(n *Node) {
-			i := len(conds) - 1
-			conds[i] = append(conds[i], n)
-		}
-
-		// Walk the struct using memequal for runs of AMEM
-		// and calling specific equality tests for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
-			f := fields[i]
-
-			// Skip blank-named fields.
-			if f.Sym.IsBlank() {
-				i++
-				continue
-			}
-
-			// Compare non-memory fields with field equality.
-			if !IsRegularMemory(f.Type) {
-				if EqCanPanic(f.Type) {
-					// Enforce ordering by starting a new set of reorderable conditions.
-					conds = append(conds, []*Node{})
-				}
-				p := nodSym(OXDOT, np, f.Sym)
-				q := nodSym(OXDOT, nq, f.Sym)
-				switch {
-				case f.Type.IsString():
-					eqlen, eqmem := eqstring(p, q)
-					and(eqlen)
-					and(eqmem)
-				default:
-					and(nod(OEQ, p, q))
-				}
-				if EqCanPanic(f.Type) {
-					// Also enforce ordering after something that can panic.
-					conds = append(conds, []*Node{})
-				}
-				i++
-				continue
-			}
-
-			// Find maximal length run of memory-only fields.
-			size, next := memrun(t, i)
-
-			// TODO(rsc): All the calls to newname are wrong for
-			// cross-package unexported fields.
-			if s := fields[i:next]; len(s) <= 2 {
-				// Two or fewer fields: use plain field equality.
-				for _, f := range s {
-					and(eqfield(np, nq, f.Sym))
-				}
-			} else {
-				// More than two fields: use memequal.
-				and(eqmem(np, nq, f.Sym, size))
-			}
-			i = next
-		}
-
-		// Sort conditions to put runtime calls last.
-		// Preserve the rest of the ordering.
-		var flatConds []*Node
-		for _, c := range conds {
-			isCall := func(n *Node) bool {
-				return n.Op == OCALL || n.Op == OCALLFUNC
-			}
-			sort.SliceStable(c, func(i, j int) bool {
-				return !isCall(c[i]) && isCall(c[j])
-			})
-			flatConds = append(flatConds, c...)
-		}
-
-		if len(flatConds) == 0 {
-			fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
-		} else {
-			for _, c := range flatConds[:len(flatConds)-1] {
-				// if cond {} else { goto neq }
-				n := nod(OIF, c, nil)
-				n.Rlist.Append(nodSym(OGOTO, nil, neq))
-				fn.Nbody.Append(n)
-			}
-			fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
-		}
-	}
-
-	// ret:
-	//   return
-	ret := autolabel(".ret")
-	fn.Nbody.Append(nodSym(OLABEL, nil, ret))
-	fn.Nbody.Append(nod(ORETURN, nil, nil))
-
-	// neq:
-	//   r = false
-	//   return (or goto ret)
-	fn.Nbody.Append(nodSym(OLABEL, nil, neq))
-	fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
-	if EqCanPanic(t) || hasCall(fn) {
-		// Epilogue is large, so share it with the equal case.
-		fn.Nbody.Append(nodSym(OGOTO, nil, ret))
-	} else {
-		// Epilogue is small, so don't bother sharing.
-		fn.Nbody.Append(nod(ORETURN, nil, nil))
-	}
-	// TODO(khr): the epilogue size detection condition above isn't perfect.
-	// We should really do a generic CL that shares epilogues across
-	// the board. See #24936.
-
-	if Debug.r != 0 {
-		dumplist("geneq body", fn.Nbody)
-	}
-
-	funcbody()
-
-	fn.Func.SetDupok(true)
-	fn = typecheck(fn, ctxStmt)
-
-	Curfn = fn
-	typecheckslice(fn.Nbody.Slice(), ctxStmt)
-	Curfn = nil
-
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	// Disable checknils while compiling this code.
-	// We are comparing a struct or an array,
-	// neither of which can be nil, and our comparisons
-	// are shallow.
-	fn.Func.SetNilCheckDisabled(true)
-	xtop = append(xtop, fn)
-
-	// Generate a closure which points at the function we just generated.
-	dsymptr(closure, 0, sym.Linksym(), 0)
-	ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	return closure
-}
-
-func hasCall(n *Node) bool {
-	if n.Op == OCALL || n.Op == OCALLFUNC {
-		return true
-	}
-	if n.Left != nil && hasCall(n.Left) {
-		return true
-	}
-	if n.Right != nil && hasCall(n.Right) {
-		return true
-	}
-	for _, x := range n.Ninit.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	for _, x := range n.Nbody.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	for _, x := range n.List.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	for _, x := range n.Rlist.Slice() {
-		if hasCall(x) {
-			return true
-		}
-	}
-	return false
-}
-
-// eqfield returns the node
-// 	p.field == q.field
-func eqfield(p *Node, q *Node, field *types.Sym) *Node {
-	nx := nodSym(OXDOT, p, field)
-	ny := nodSym(OXDOT, q, field)
-	ne := nod(OEQ, nx, ny)
-	return ne
-}
-
-// eqstring returns the nodes
-//   len(s) == len(t)
-// and
-//   memequal(s.ptr, t.ptr, len(s))
-// which can be used to construct string equality comparison.
-// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t *Node) (eqlen, eqmem *Node) {
-	s = conv(s, types.Types[TSTRING])
-	t = conv(t, types.Types[TSTRING])
-	sptr := nod(OSPTR, s, nil)
-	tptr := nod(OSPTR, t, nil)
-	slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
-	tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
-
-	fn := syslook("memequal")
-	fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
-	call := nod(OCALL, fn, nil)
-	call.List.Append(sptr, tptr, slen.copy())
-	call = typecheck(call, ctxExpr|ctxMultiOK)
-
-	cmp := nod(OEQ, slen, tlen)
-	cmp = typecheck(cmp, ctxExpr)
-	cmp.Type = types.Types[TBOOL]
-	return cmp, call
-}
-
-// eqinterface returns the nodes
-//   s.tab == t.tab (or s.typ == t.typ, as appropriate)
-// and
-//   ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
-// which can be used to construct interface equality comparison.
-// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
-	if !types.Identical(s.Type, t.Type) {
-		Fatalf("eqinterface %v %v", s.Type, t.Type)
-	}
-	// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
-	// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
-	var fn *Node
-	if s.Type.IsEmptyInterface() {
-		fn = syslook("efaceeq")
-	} else {
-		fn = syslook("ifaceeq")
-	}
-
-	stab := nod(OITAB, s, nil)
-	ttab := nod(OITAB, t, nil)
-	sdata := nod(OIDATA, s, nil)
-	tdata := nod(OIDATA, t, nil)
-	sdata.Type = types.Types[TUNSAFEPTR]
-	tdata.Type = types.Types[TUNSAFEPTR]
-	sdata.SetTypecheck(1)
-	tdata.SetTypecheck(1)
-
-	call := nod(OCALL, fn, nil)
-	call.List.Append(stab, sdata, tdata)
-	call = typecheck(call, ctxExpr|ctxMultiOK)
-
-	cmp := nod(OEQ, stab, ttab)
-	cmp = typecheck(cmp, ctxExpr)
-	cmp.Type = types.Types[TBOOL]
-	return cmp, call
-}
-
-// eqmem returns the node
-// 	memequal(&p.field, &q.field [, size])
-func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
-	nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
-	ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
-	nx = typecheck(nx, ctxExpr)
-	ny = typecheck(ny, ctxExpr)
-
-	fn, needsize := eqmemfunc(size, nx.Type.Elem())
-	call := nod(OCALL, fn, nil)
-	call.List.Append(nx)
-	call.List.Append(ny)
-	if needsize {
-		call.List.Append(nodintconst(size))
-	}
-
-	return call
-}
-
-func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
-	switch size {
-	default:
-		fn = syslook("memequal")
-		needsize = true
-	case 1, 2, 4, 8, 16:
-		buf := fmt.Sprintf("memequal%d", int(size)*8)
-		fn = syslook(buf)
-	}
-
-	fn = substArgTypes(fn, t, t)
-	return fn, needsize
-}
-
-// memrun finds runs of struct fields for which memory-only algs are appropriate.
-// t is the parent struct type, and start is the field index at which to start the run.
-// size is the length in bytes of the memory included in the run.
-// next is the index just after the end of the memory run.
-func memrun(t *types.Type, start int) (size int64, next int) {
-	next = start
-	for {
-		next++
-		if next == t.NumFields() {
-			break
-		}
-		// Stop run after a padded field.
-		if ispaddedfield(t, next-1) {
-			break
-		}
-		// Also, stop before a blank or non-memory field.
-		if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
-			break
-		}
-	}
-	return t.Field(next-1).End() - t.Field(start).Offset, next
-}
-
-// ispaddedfield reports whether the i'th field of struct type t is followed
-// by padding.
-func ispaddedfield(t *types.Type, i int) bool {
-	if !t.IsStruct() {
-		Fatalf("ispaddedfield called non-struct %v", t)
-	}
-	end := t.Width
-	if i+1 < t.NumFields() {
-		end = t.Field(i + 1).Offset
-	}
-	return t.Field(i).End() != end
-}
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
deleted file mode 100644
index a3a0c8f..0000000
--- a/src/cmd/compile/internal/gc/align.go
+++ /dev/null
@@ -1,531 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/compile/internal/types"
-	"fmt"
-	"sort"
-)
-
-// sizeCalculationDisabled indicates whether it is safe
-// to calculate Types' widths and alignments. See dowidth.
-var sizeCalculationDisabled bool
-
-// machine size and rounding alignment is dictated around
-// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
-var defercalc int
-
-func Rnd(o int64, r int64) int64 {
-	if r < 1 || r > 8 || r&(r-1) != 0 {
-		Fatalf("rnd %d", r)
-	}
-	return (o + r - 1) &^ (r - 1)
-}
-
-// expandiface computes the method set for interface type t by
-// expanding embedded interfaces.
-func expandiface(t *types.Type) {
-	seen := make(map[*types.Sym]*types.Field)
-	var methods []*types.Field
-
-	addMethod := func(m *types.Field, explicit bool) {
-		switch prev := seen[m.Sym]; {
-		case prev == nil:
-			seen[m.Sym] = m
-		case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
-			return
-		default:
-			yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
-		}
-		methods = append(methods, m)
-	}
-
-	for _, m := range t.Methods().Slice() {
-		if m.Sym == nil {
-			continue
-		}
-
-		checkwidth(m.Type)
-		addMethod(m, true)
-	}
-
-	for _, m := range t.Methods().Slice() {
-		if m.Sym != nil {
-			continue
-		}
-
-		if !m.Type.IsInterface() {
-			yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
-			m.SetBroke(true)
-			t.SetBroke(true)
-			// Add to fields so that error messages
-			// include the broken embedded type when
-			// printing t.
-			// TODO(mdempsky): Revisit this.
-			methods = append(methods, m)
-			continue
-		}
-
-		// Embedded interface: duplicate all methods
-		// (including broken ones, if any) and add to t's
-		// method set.
-		for _, t1 := range m.Type.Fields().Slice() {
-			f := types.NewField()
-			f.Pos = m.Pos // preserve embedding position
-			f.Sym = t1.Sym
-			f.Type = t1.Type
-			f.SetBroke(t1.Broke())
-			addMethod(f, false)
-		}
-	}
-
-	sort.Sort(methcmp(methods))
-
-	if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
-		yyerrorl(typePos(t), "interface too large")
-	}
-	for i, m := range methods {
-		m.Offset = int64(i) * int64(Widthptr)
-	}
-
-	// Access fields directly to avoid recursively calling dowidth
-	// within Type.Fields().
-	t.Extra.(*types.Interface).Fields.Set(methods)
-}
-
-func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
-	starto := o
-	maxalign := int32(flag)
-	if maxalign < 1 {
-		maxalign = 1
-	}
-	lastzero := int64(0)
-	for _, f := range t.Fields().Slice() {
-		if f.Type == nil {
-			// broken field, just skip it so that other valid fields
-			// get a width.
-			continue
-		}
-
-		dowidth(f.Type)
-		if int32(f.Type.Align) > maxalign {
-			maxalign = int32(f.Type.Align)
-		}
-		if f.Type.Align > 0 {
-			o = Rnd(o, int64(f.Type.Align))
-		}
-		f.Offset = o
-		if n := asNode(f.Nname); n != nil {
-			// addrescapes has similar code to update these offsets.
-			// Usually addrescapes runs after widstruct,
-			// in which case we could drop this,
-			// but function closure functions are the exception.
-			// NOTE(rsc): This comment may be stale.
-			// It's possible the ordering has changed and this is
-			// now the common case. I'm not sure.
-			if n.Name.Param.Stackcopy != nil {
-				n.Name.Param.Stackcopy.Xoffset = o
-				n.Xoffset = 0
-			} else {
-				n.Xoffset = o
-			}
-		}
-
-		w := f.Type.Width
-		if w < 0 {
-			Fatalf("invalid width %d", f.Type.Width)
-		}
-		if w == 0 {
-			lastzero = o
-		}
-		o += w
-		maxwidth := thearch.MAXWIDTH
-		// On 32-bit systems, reflect tables impose an additional constraint
-		// that each field start offset must fit in 31 bits.
-		if maxwidth < 1<<32 {
-			maxwidth = 1<<31 - 1
-		}
-		if o >= maxwidth {
-			yyerrorl(typePos(errtype), "type %L too large", errtype)
-			o = 8 // small but nonzero
-		}
-	}
-
-	// For nonzero-sized structs which end in a zero-sized thing, we add
-	// an extra byte of padding to the type. This padding ensures that
-	// taking the address of the zero-sized thing can't manufacture a
-	// pointer to the next object in the heap. See issue 9401.
-	if flag == 1 && o > starto && o == lastzero {
-		o++
-	}
-
-	// final width is rounded
-	if flag != 0 {
-		o = Rnd(o, int64(maxalign))
-	}
-	t.Align = uint8(maxalign)
-
-	// type width only includes back to first field's offset
-	t.Width = o - starto
-
-	return o
-}
-
-// findTypeLoop searches for an invalid type declaration loop involving
-// type t and reports whether one is found. If so, path contains the
-// loop.
-//
-// path points to a slice used for tracking the sequence of types
-// visited. Using a pointer to a slice allows the slice capacity to
-// grow and limit reallocations.
-func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
-	// We implement a simple DFS loop-finding algorithm. This
-	// could be faster, but type cycles are rare.
-
-	if t.Sym != nil {
-		// Declared type. Check for loops and otherwise
-		// recurse on the type expression used in the type
-		// declaration.
-
-		for i, x := range *path {
-			if x == t {
-				*path = (*path)[i:]
-				return true
-			}
-		}
-
-		*path = append(*path, t)
-		if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
-			return true
-		}
-		*path = (*path)[:len(*path)-1]
-	} else {
-		// Anonymous type. Recurse on contained types.
-
-		switch t.Etype {
-		case TARRAY:
-			if findTypeLoop(t.Elem(), path) {
-				return true
-			}
-		case TSTRUCT:
-			for _, f := range t.Fields().Slice() {
-				if findTypeLoop(f.Type, path) {
-					return true
-				}
-			}
-		case TINTER:
-			for _, m := range t.Methods().Slice() {
-				if m.Type.IsInterface() { // embedded interface
-					if findTypeLoop(m.Type, path) {
-						return true
-					}
-				}
-			}
-		}
-	}
-
-	return false
-}
-
-func reportTypeLoop(t *types.Type) {
-	if t.Broke() {
-		return
-	}
-
-	var l []*types.Type
-	if !findTypeLoop(t, &l) {
-		Fatalf("failed to find type loop for: %v", t)
-	}
-
-	// Rotate loop so that the earliest type declaration is first.
-	i := 0
-	for j, t := range l[1:] {
-		if typePos(t).Before(typePos(l[i])) {
-			i = j + 1
-		}
-	}
-	l = append(l[i:], l[:i]...)
-
-	var msg bytes.Buffer
-	fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
-	for _, t := range l {
-		fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
-		t.SetBroke(true)
-	}
-	fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
-	yyerrorl(typePos(l[0]), msg.String())
-}
-
-// dowidth calculates and stores the size and alignment for t.
-// If sizeCalculationDisabled is set, and the size/alignment
-// have not already been calculated, it calls Fatal.
-// This is used to prevent data races in the back end.
-func dowidth(t *types.Type) {
-	// Calling dowidth when typecheck tracing enabled is not safe.
-	// See issue #33658.
-	if enableTrace && skipDowidthForTracing {
-		return
-	}
-	if Widthptr == 0 {
-		Fatalf("dowidth without betypeinit")
-	}
-
-	if t == nil {
-		return
-	}
-
-	if t.Width == -2 {
-		reportTypeLoop(t)
-		t.Width = 0
-		t.Align = 1
-		return
-	}
-
-	if t.WidthCalculated() {
-		return
-	}
-
-	if sizeCalculationDisabled {
-		if t.Broke() {
-			// break infinite recursion from Fatal call below
-			return
-		}
-		t.SetBroke(true)
-		Fatalf("width not calculated: %v", t)
-	}
-
-	// break infinite recursion if the broken recursive type
-	// is referenced again
-	if t.Broke() && t.Width == 0 {
-		return
-	}
-
-	// defer checkwidth calls until after we're done
-	defercheckwidth()
-
-	lno := lineno
-	if asNode(t.Nod) != nil {
-		lineno = asNode(t.Nod).Pos
-	}
-
-	t.Width = -2
-	t.Align = 0 // 0 means use t.Width, below
-
-	et := t.Etype
-	switch et {
-	case TFUNC, TCHAN, TMAP, TSTRING:
-		break
-
-	// simtype == 0 during bootstrap
-	default:
-		if simtype[t.Etype] != 0 {
-			et = simtype[t.Etype]
-		}
-	}
-
-	var w int64
-	switch et {
-	default:
-		Fatalf("dowidth: unknown type: %v", t)
-
-	// compiler-specific stuff
-	case TINT8, TUINT8, TBOOL:
-		// bool is int8
-		w = 1
-
-	case TINT16, TUINT16:
-		w = 2
-
-	case TINT32, TUINT32, TFLOAT32:
-		w = 4
-
-	case TINT64, TUINT64, TFLOAT64:
-		w = 8
-		t.Align = uint8(Widthreg)
-
-	case TCOMPLEX64:
-		w = 8
-		t.Align = 4
-
-	case TCOMPLEX128:
-		w = 16
-		t.Align = uint8(Widthreg)
-
-	case TPTR:
-		w = int64(Widthptr)
-		checkwidth(t.Elem())
-
-	case TUNSAFEPTR:
-		w = int64(Widthptr)
-
-	case TINTER: // implemented as 2 pointers
-		w = 2 * int64(Widthptr)
-		t.Align = uint8(Widthptr)
-		expandiface(t)
-
-	case TCHAN: // implemented as pointer
-		w = int64(Widthptr)
-
-		checkwidth(t.Elem())
-
-		// make fake type to check later to
-		// trigger channel argument check.
-		t1 := types.NewChanArgs(t)
-		checkwidth(t1)
-
-	case TCHANARGS:
-		t1 := t.ChanArgs()
-		dowidth(t1) // just in case
-		if t1.Elem().Width >= 1<<16 {
-			yyerrorl(typePos(t1), "channel element type too large (>64kB)")
-		}
-		w = 1 // anything will do
-
-	case TMAP: // implemented as pointer
-		w = int64(Widthptr)
-		checkwidth(t.Elem())
-		checkwidth(t.Key())
-
-	case TFORW: // should have been filled in
-		reportTypeLoop(t)
-		w = 1 // anything will do
-
-	case TANY:
-		// dummy type; should be replaced before use.
-		Fatalf("dowidth any")
-
-	case TSTRING:
-		if sizeofString == 0 {
-			Fatalf("early dowidth string")
-		}
-		w = sizeofString
-		t.Align = uint8(Widthptr)
-
-	case TARRAY:
-		if t.Elem() == nil {
-			break
-		}
-
-		dowidth(t.Elem())
-		if t.Elem().Width != 0 {
-			cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
-			if uint64(t.NumElem()) > cap {
-				yyerrorl(typePos(t), "type %L larger than address space", t)
-			}
-		}
-		w = t.NumElem() * t.Elem().Width
-		t.Align = t.Elem().Align
-
-	case TSLICE:
-		if t.Elem() == nil {
-			break
-		}
-		w = sizeofSlice
-		checkwidth(t.Elem())
-		t.Align = uint8(Widthptr)
-
-	case TSTRUCT:
-		if t.IsFuncArgStruct() {
-			Fatalf("dowidth fn struct %v", t)
-		}
-		w = widstruct(t, t, 0, 1)
-
-	// make fake type to check later to
-	// trigger function argument computation.
-	case TFUNC:
-		t1 := types.NewFuncArgs(t)
-		checkwidth(t1)
-		w = int64(Widthptr) // width of func type is pointer
-
-	// a function is 3 concatenated structures;
-	// compute their widths as a side effect.
-	case TFUNCARGS:
-		t1 := t.FuncArgs()
-		w = widstruct(t1, t1.Recvs(), 0, 0)
-		w = widstruct(t1, t1.Params(), w, Widthreg)
-		w = widstruct(t1, t1.Results(), w, Widthreg)
-		t1.Extra.(*types.Func).Argwid = w
-		if w%int64(Widthreg) != 0 {
-			Warn("bad type %v %d\n", t1, w)
-		}
-		t.Align = 1
-	}
-
-	if Widthptr == 4 && w != int64(int32(w)) {
-		yyerrorl(typePos(t), "type %v too large", t)
-	}
-
-	t.Width = w
-	if t.Align == 0 {
-		if w == 0 || w > 8 || w&(w-1) != 0 {
-			Fatalf("invalid alignment for %v", t)
-		}
-		t.Align = uint8(w)
-	}
-
-	lineno = lno
-
-	resumecheckwidth()
-}
-
-// when a type's width should be known, we call checkwidth
-// to compute it.  during a declaration like
-//
-//	type T *struct { next T }
-//
-// it is necessary to defer the calculation of the struct width
-// until after T has been initialized to be a pointer to that struct.
-// similarly, during import processing structs may be used
-// before their definition.  in those situations, calling
-// defercheckwidth() stops width calculations until
-// resumecheckwidth() is called, at which point all the
-// checkwidths that were deferred are executed.
-// dowidth should only be called when the type's size
-// is needed immediately.  checkwidth makes sure the
-// size is evaluated eventually.
-
-var deferredTypeStack []*types.Type
-
-func checkwidth(t *types.Type) {
-	if t == nil {
-		return
-	}
-
-	// function arg structs should not be checked
-	// outside of the enclosing function.
-	if t.IsFuncArgStruct() {
-		Fatalf("checkwidth %v", t)
-	}
-
-	if defercalc == 0 {
-		dowidth(t)
-		return
-	}
-
-	// if type has not yet been pushed on deferredTypeStack, do it now
-	if !t.Deferwidth() {
-		t.SetDeferwidth(true)
-		deferredTypeStack = append(deferredTypeStack, t)
-	}
-}
-
-func defercheckwidth() {
-	defercalc++
-}
-
-func resumecheckwidth() {
-	if defercalc == 1 {
-		for len(deferredTypeStack) > 0 {
-			t := deferredTypeStack[len(deferredTypeStack)-1]
-			deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
-			t.SetDeferwidth(false)
-			dowidth(t)
-		}
-	}
-
-	defercalc--
-}
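
The defercheckwidth/resumecheckwidth pair above is a nesting counter plus a queue that is drained only when the outermost resume runs. A minimal standalone sketch of that pattern, with illustrative names that are not part of this patch:

	package main

	import "fmt"

	var (
		deferring int      // nesting depth, analogous to defercalc
		deferred  []string // work queued while deferring > 0
	)

	// request performs work immediately unless requests are being deferred.
	func request(work string) {
		if deferring == 0 {
			fmt.Println("size", work)
			return
		}
		deferred = append(deferred, work)
	}

	func deferRequests() { deferring++ }

	// resumeRequests drains the queue only when the outermost defer ends.
	func resumeRequests() {
		if deferring == 1 {
			for len(deferred) > 0 {
				w := deferred[len(deferred)-1]
				deferred = deferred[:len(deferred)-1]
				fmt.Println("size (deferred)", w)
			}
		}
		deferring--
	}

	func main() {
		deferRequests()
		request("T")     // queued
		request("*T")    // queued
		resumeRequests() // drains: *T, then T
	}
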
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
deleted file mode 100644
index 10f21f8..0000000
--- a/src/cmd/compile/internal/gc/bexport.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-)
-
-type exporter struct {
-	marked map[*types.Type]bool // types already seen by markType
-}
-
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
-func (p *exporter) markType(t *types.Type) {
-	if p.marked[t] {
-		return
-	}
-	p.marked[t] = true
-
-	// If this is a named type, mark all of its associated
-	// methods. Skip interface types because t.Methods contains
-	// only their unexpanded method set (i.e., exclusive of
-	// interface embeddings), and the switch statement below
-	// handles their full method set.
-	if t.Sym != nil && t.Etype != TINTER {
-		for _, m := range t.Methods().Slice() {
-			if types.IsExported(m.Sym.Name) {
-				p.markType(m.Type)
-			}
-		}
-	}
-
-	// Recursively mark any types that can be produced given a
-	// value of type t: dereferencing a pointer; indexing or
-	// iterating over an array, slice, or map; receiving from a
-	// channel; accessing a struct field or interface method; or
-	// calling a function.
-	//
-	// Notably, we don't mark function parameter types, because
-	// the user already needs some way to construct values of
-	// those types.
-	switch t.Etype {
-	case TPTR, TARRAY, TSLICE:
-		p.markType(t.Elem())
-
-	case TCHAN:
-		if t.ChanDir().CanRecv() {
-			p.markType(t.Elem())
-		}
-
-	case TMAP:
-		p.markType(t.Key())
-		p.markType(t.Elem())
-
-	case TSTRUCT:
-		for _, f := range t.FieldSlice() {
-			if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
-				p.markType(f.Type)
-			}
-		}
-
-	case TFUNC:
-		// If t is the type of a function or method, then
-		// t.Nname() is its ONAME. Mark its inline body and
-		// any recursively called functions for export.
-		inlFlood(asNode(t.Nname()))
-
-		for _, f := range t.Results().FieldSlice() {
-			p.markType(f.Type)
-		}
-
-	case TINTER:
-		for _, f := range t.FieldSlice() {
-			if types.IsExported(f.Sym.Name) {
-				p.markType(f.Type)
-			}
-		}
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
-	// Objects
-	packageTag = -(iota + 1)
-	constTag
-	typeTag
-	varTag
-	funcTag
-	endTag
-
-	// Types
-	namedTag
-	arrayTag
-	sliceTag
-	dddTag
-	structTag
-	pointerTag
-	signatureTag
-	interfaceTag
-	mapTag
-	chanTag
-
-	// Values
-	falseTag
-	trueTag
-	int64Tag
-	floatTag
-	fractionTag // not used by gc
-	complexTag
-	stringTag
-	nilTag
-	unknownTag // not used by gc (only appears in packages with errors)
-
-	// Type aliases
-	aliasTag
-)
-
-var predecl []*types.Type // initialized lazily
-
-func predeclared() []*types.Type {
-	if predecl == nil {
-		// initialize lazily to be sure that all
-		// elements have been initialized before
-		predecl = []*types.Type{
-			// basic types
-			types.Types[TBOOL],
-			types.Types[TINT],
-			types.Types[TINT8],
-			types.Types[TINT16],
-			types.Types[TINT32],
-			types.Types[TINT64],
-			types.Types[TUINT],
-			types.Types[TUINT8],
-			types.Types[TUINT16],
-			types.Types[TUINT32],
-			types.Types[TUINT64],
-			types.Types[TUINTPTR],
-			types.Types[TFLOAT32],
-			types.Types[TFLOAT64],
-			types.Types[TCOMPLEX64],
-			types.Types[TCOMPLEX128],
-			types.Types[TSTRING],
-
-			// basic type aliases
-			types.Bytetype,
-			types.Runetype,
-
-			// error
-			types.Errortype,
-
-			// untyped types
-			types.UntypedBool,
-			types.UntypedInt,
-			types.UntypedRune,
-			types.UntypedFloat,
-			types.UntypedComplex,
-			types.UntypedString,
-			types.Types[TNIL],
-
-			// package unsafe
-			types.Types[TUNSAFEPTR],
-
-			// invalid type (package contains errors)
-			types.Types[Txxx],
-
-			// any type, for builtin export data
-			types.Types[TANY],
-		}
-	}
-	return predecl
-}
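
markType above is a depth-first walk over the type graph with a visited set. A small self-contained sketch of the same marking scheme over a toy graph (the node type and names are illustrative, not from this patch):

	package main

	import "fmt"

	// node stands in for a type; edges point at types reachable from a value of it.
	type node struct {
		name  string
		edges []*node
	}

	// mark visits each reachable node exactly once, like exporter.markType.
	func mark(n *node, marked map[*node]bool) {
		if marked[n] {
			return
		}
		marked[n] = true
		fmt.Println("marked", n.name)
		for _, e := range n.edges {
			mark(e, marked)
		}
	}

	func main() {
		str := &node{name: "string"}
		slice := &node{name: "[]string", edges: []*node{str}}
		ptr := &node{name: "*[]string", edges: []*node{slice}}
		slice.edges = append(slice.edges, ptr) // a cycle is fine; the map stops recursion
		mark(ptr, map[*node]bool{})
	}
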
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
deleted file mode 100644
index 911ac4c0..0000000
--- a/src/cmd/compile/internal/gc/bimport.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/internal/src"
-)
-
-// numImport tracks how often a package with a given name is imported.
-// It is used to provide a better error message (by using the package
-// path to disambiguate) when a package name that is imported multiple
-// times appears in an error message.
-var numImport = make(map[string]int)
-
-func npos(pos src.XPos, n *Node) *Node {
-	n.Pos = pos
-	return n
-}
-
-func builtinCall(op Op) *Node {
-	return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
-}
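
numImport above only counts how often each package name is imported; the decision to print the full path happens when the error is reported. A minimal sketch of that disambiguation idea (the import paths below are illustrative):

	package main

	import (
		"fmt"
		"path"
	)

	func main() {
		numImport := map[string]int{}
		imports := []string{"math/rand", "crypto/rand", "fmt"}
		for _, p := range imports {
			numImport[path.Base(p)]++
		}
		for _, p := range imports {
			name := path.Base(p)
			if numImport[name] > 1 {
				fmt.Printf("%s (%q)\n", name, p) // ambiguous name: include the import path
			} else {
				fmt.Println(name)
			}
		}
	}
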
diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go
index 967f75a..2e13d6b 100644
--- a/src/cmd/compile/internal/gc/bootstrap.go
+++ b/src/cmd/compile/internal/gc/bootstrap.go
@@ -6,8 +6,11 @@
 
 package gc
 
-import "runtime"
+import (
+	"cmd/compile/internal/base"
+	"runtime"
+)
 
 func startMutexProfiling() {
-	Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+	base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
 }
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
deleted file mode 100644
index e04f23e..0000000
--- a/src/cmd/compile/internal/gc/builtin.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Code generated by mkbuiltin.go. DO NOT EDIT.
-
-package gc
-
-import "cmd/compile/internal/types"
-
-var runtimeDecls = [...]struct {
-	name string
-	tag  int
-	typ  int
-}{
-	{"newobject", funcTag, 4},
-	{"mallocgc", funcTag, 8},
-	{"panicdivide", funcTag, 9},
-	{"panicshift", funcTag, 9},
-	{"panicmakeslicelen", funcTag, 9},
-	{"panicmakeslicecap", funcTag, 9},
-	{"throwinit", funcTag, 9},
-	{"panicwrap", funcTag, 9},
-	{"gopanic", funcTag, 11},
-	{"gorecover", funcTag, 14},
-	{"goschedguarded", funcTag, 9},
-	{"goPanicIndex", funcTag, 16},
-	{"goPanicIndexU", funcTag, 18},
-	{"goPanicSliceAlen", funcTag, 16},
-	{"goPanicSliceAlenU", funcTag, 18},
-	{"goPanicSliceAcap", funcTag, 16},
-	{"goPanicSliceAcapU", funcTag, 18},
-	{"goPanicSliceB", funcTag, 16},
-	{"goPanicSliceBU", funcTag, 18},
-	{"goPanicSlice3Alen", funcTag, 16},
-	{"goPanicSlice3AlenU", funcTag, 18},
-	{"goPanicSlice3Acap", funcTag, 16},
-	{"goPanicSlice3AcapU", funcTag, 18},
-	{"goPanicSlice3B", funcTag, 16},
-	{"goPanicSlice3BU", funcTag, 18},
-	{"goPanicSlice3C", funcTag, 16},
-	{"goPanicSlice3CU", funcTag, 18},
-	{"printbool", funcTag, 19},
-	{"printfloat", funcTag, 21},
-	{"printint", funcTag, 23},
-	{"printhex", funcTag, 25},
-	{"printuint", funcTag, 25},
-	{"printcomplex", funcTag, 27},
-	{"printstring", funcTag, 29},
-	{"printpointer", funcTag, 30},
-	{"printuintptr", funcTag, 31},
-	{"printiface", funcTag, 30},
-	{"printeface", funcTag, 30},
-	{"printslice", funcTag, 30},
-	{"printnl", funcTag, 9},
-	{"printsp", funcTag, 9},
-	{"printlock", funcTag, 9},
-	{"printunlock", funcTag, 9},
-	{"concatstring2", funcTag, 34},
-	{"concatstring3", funcTag, 35},
-	{"concatstring4", funcTag, 36},
-	{"concatstring5", funcTag, 37},
-	{"concatstrings", funcTag, 39},
-	{"cmpstring", funcTag, 40},
-	{"intstring", funcTag, 43},
-	{"slicebytetostring", funcTag, 44},
-	{"slicebytetostringtmp", funcTag, 45},
-	{"slicerunetostring", funcTag, 48},
-	{"stringtoslicebyte", funcTag, 50},
-	{"stringtoslicerune", funcTag, 53},
-	{"slicecopy", funcTag, 54},
-	{"decoderune", funcTag, 55},
-	{"countrunes", funcTag, 56},
-	{"convI2I", funcTag, 57},
-	{"convT16", funcTag, 58},
-	{"convT32", funcTag, 58},
-	{"convT64", funcTag, 58},
-	{"convTstring", funcTag, 58},
-	{"convTslice", funcTag, 58},
-	{"convT2E", funcTag, 59},
-	{"convT2Enoptr", funcTag, 59},
-	{"convT2I", funcTag, 59},
-	{"convT2Inoptr", funcTag, 59},
-	{"assertE2I", funcTag, 57},
-	{"assertE2I2", funcTag, 60},
-	{"assertI2I", funcTag, 57},
-	{"assertI2I2", funcTag, 60},
-	{"panicdottypeE", funcTag, 61},
-	{"panicdottypeI", funcTag, 61},
-	{"panicnildottype", funcTag, 62},
-	{"ifaceeq", funcTag, 64},
-	{"efaceeq", funcTag, 64},
-	{"fastrand", funcTag, 66},
-	{"makemap64", funcTag, 68},
-	{"makemap", funcTag, 69},
-	{"makemap_small", funcTag, 70},
-	{"mapaccess1", funcTag, 71},
-	{"mapaccess1_fast32", funcTag, 72},
-	{"mapaccess1_fast64", funcTag, 72},
-	{"mapaccess1_faststr", funcTag, 72},
-	{"mapaccess1_fat", funcTag, 73},
-	{"mapaccess2", funcTag, 74},
-	{"mapaccess2_fast32", funcTag, 75},
-	{"mapaccess2_fast64", funcTag, 75},
-	{"mapaccess2_faststr", funcTag, 75},
-	{"mapaccess2_fat", funcTag, 76},
-	{"mapassign", funcTag, 71},
-	{"mapassign_fast32", funcTag, 72},
-	{"mapassign_fast32ptr", funcTag, 72},
-	{"mapassign_fast64", funcTag, 72},
-	{"mapassign_fast64ptr", funcTag, 72},
-	{"mapassign_faststr", funcTag, 72},
-	{"mapiterinit", funcTag, 77},
-	{"mapdelete", funcTag, 77},
-	{"mapdelete_fast32", funcTag, 78},
-	{"mapdelete_fast64", funcTag, 78},
-	{"mapdelete_faststr", funcTag, 78},
-	{"mapiternext", funcTag, 79},
-	{"mapclear", funcTag, 80},
-	{"makechan64", funcTag, 82},
-	{"makechan", funcTag, 83},
-	{"chanrecv1", funcTag, 85},
-	{"chanrecv2", funcTag, 86},
-	{"chansend1", funcTag, 88},
-	{"closechan", funcTag, 30},
-	{"writeBarrier", varTag, 90},
-	{"typedmemmove", funcTag, 91},
-	{"typedmemclr", funcTag, 92},
-	{"typedslicecopy", funcTag, 93},
-	{"selectnbsend", funcTag, 94},
-	{"selectnbrecv", funcTag, 95},
-	{"selectnbrecv2", funcTag, 97},
-	{"selectsetpc", funcTag, 98},
-	{"selectgo", funcTag, 99},
-	{"block", funcTag, 9},
-	{"makeslice", funcTag, 100},
-	{"makeslice64", funcTag, 101},
-	{"makeslicecopy", funcTag, 102},
-	{"growslice", funcTag, 104},
-	{"memmove", funcTag, 105},
-	{"memclrNoHeapPointers", funcTag, 106},
-	{"memclrHasPointers", funcTag, 106},
-	{"memequal", funcTag, 107},
-	{"memequal0", funcTag, 108},
-	{"memequal8", funcTag, 108},
-	{"memequal16", funcTag, 108},
-	{"memequal32", funcTag, 108},
-	{"memequal64", funcTag, 108},
-	{"memequal128", funcTag, 108},
-	{"f32equal", funcTag, 109},
-	{"f64equal", funcTag, 109},
-	{"c64equal", funcTag, 109},
-	{"c128equal", funcTag, 109},
-	{"strequal", funcTag, 109},
-	{"interequal", funcTag, 109},
-	{"nilinterequal", funcTag, 109},
-	{"memhash", funcTag, 110},
-	{"memhash0", funcTag, 111},
-	{"memhash8", funcTag, 111},
-	{"memhash16", funcTag, 111},
-	{"memhash32", funcTag, 111},
-	{"memhash64", funcTag, 111},
-	{"memhash128", funcTag, 111},
-	{"f32hash", funcTag, 111},
-	{"f64hash", funcTag, 111},
-	{"c64hash", funcTag, 111},
-	{"c128hash", funcTag, 111},
-	{"strhash", funcTag, 111},
-	{"interhash", funcTag, 111},
-	{"nilinterhash", funcTag, 111},
-	{"int64div", funcTag, 112},
-	{"uint64div", funcTag, 113},
-	{"int64mod", funcTag, 112},
-	{"uint64mod", funcTag, 113},
-	{"float64toint64", funcTag, 114},
-	{"float64touint64", funcTag, 115},
-	{"float64touint32", funcTag, 116},
-	{"int64tofloat64", funcTag, 117},
-	{"uint64tofloat64", funcTag, 118},
-	{"uint32tofloat64", funcTag, 119},
-	{"complex128div", funcTag, 120},
-	{"racefuncenter", funcTag, 31},
-	{"racefuncenterfp", funcTag, 9},
-	{"racefuncexit", funcTag, 9},
-	{"raceread", funcTag, 31},
-	{"racewrite", funcTag, 31},
-	{"racereadrange", funcTag, 121},
-	{"racewriterange", funcTag, 121},
-	{"msanread", funcTag, 121},
-	{"msanwrite", funcTag, 121},
-	{"msanmove", funcTag, 122},
-	{"checkptrAlignment", funcTag, 123},
-	{"checkptrArithmetic", funcTag, 125},
-	{"libfuzzerTraceCmp1", funcTag, 127},
-	{"libfuzzerTraceCmp2", funcTag, 129},
-	{"libfuzzerTraceCmp4", funcTag, 130},
-	{"libfuzzerTraceCmp8", funcTag, 131},
-	{"libfuzzerTraceConstCmp1", funcTag, 127},
-	{"libfuzzerTraceConstCmp2", funcTag, 129},
-	{"libfuzzerTraceConstCmp4", funcTag, 130},
-	{"libfuzzerTraceConstCmp8", funcTag, 131},
-	{"x86HasPOPCNT", varTag, 6},
-	{"x86HasSSE41", varTag, 6},
-	{"x86HasFMA", varTag, 6},
-	{"armHasVFPv4", varTag, 6},
-	{"arm64HasATOMICS", varTag, 6},
-}
-
-func runtimeTypes() []*types.Type {
-	var typs [132]*types.Type
-	typs[0] = types.Bytetype
-	typs[1] = types.NewPtr(typs[0])
-	typs[2] = types.Types[TANY]
-	typs[3] = types.NewPtr(typs[2])
-	typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
-	typs[5] = types.Types[TUINTPTR]
-	typs[6] = types.Types[TBOOL]
-	typs[7] = types.Types[TUNSAFEPTR]
-	typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
-	typs[9] = functype(nil, nil, nil)
-	typs[10] = types.Types[TINTER]
-	typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
-	typs[12] = types.Types[TINT32]
-	typs[13] = types.NewPtr(typs[12])
-	typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
-	typs[15] = types.Types[TINT]
-	typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
-	typs[17] = types.Types[TUINT]
-	typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
-	typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
-	typs[20] = types.Types[TFLOAT64]
-	typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
-	typs[22] = types.Types[TINT64]
-	typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
-	typs[24] = types.Types[TUINT64]
-	typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
-	typs[26] = types.Types[TCOMPLEX128]
-	typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
-	typs[28] = types.Types[TSTRING]
-	typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
-	typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
-	typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
-	typs[32] = types.NewArray(typs[0], 32)
-	typs[33] = types.NewPtr(typs[32])
-	typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
-	typs[38] = types.NewSlice(typs[28])
-	typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
-	typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
-	typs[41] = types.NewArray(typs[0], 4)
-	typs[42] = types.NewPtr(typs[41])
-	typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
-	typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
-	typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
-	typs[46] = types.Runetype
-	typs[47] = types.NewSlice(typs[46])
-	typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
-	typs[49] = types.NewSlice(typs[0])
-	typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
-	typs[51] = types.NewArray(typs[46], 32)
-	typs[52] = types.NewPtr(typs[51])
-	typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
-	typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
-	typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
-	typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
-	typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
-	typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
-	typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
-	typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
-	typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
-	typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
-	typs[63] = types.NewPtr(typs[5])
-	typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
-	typs[65] = types.Types[TUINT32]
-	typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
-	typs[67] = types.NewMap(typs[2], typs[2])
-	typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
-	typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
-	typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
-	typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
-	typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
-	typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
-	typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
-	typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
-	typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
-	typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
-	typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
-	typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
-	typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
-	typs[81] = types.NewChan(typs[2], types.Cboth)
-	typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
-	typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
-	typs[84] = types.NewChan(typs[2], types.Crecv)
-	typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
-	typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
-	typs[87] = types.NewChan(typs[2], types.Csend)
-	typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
-	typs[89] = types.NewArray(typs[0], 3)
-	typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
-	typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
-	typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
-	typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
-	typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
-	typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
-	typs[96] = types.NewPtr(typs[6])
-	typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
-	typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
-	typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
-	typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
-	typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
-	typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
-	typs[103] = types.NewSlice(typs[2])
-	typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
-	typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
-	typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
-	typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
-	typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
-	typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
-	typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
-	typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
-	typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
-	typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
-	typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
-	typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
-	typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
-	typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
-	typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
-	typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
-	typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
-	typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
-	typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
-	typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
-	typs[124] = types.NewSlice(typs[7])
-	typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
-	typs[126] = types.Types[TUINT8]
-	typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
-	typs[128] = types.Types[TUINT16]
-	typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
-	typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
-	typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
-	return typs[:]
-}
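
The generated runtimeDecls/runtimeTypes pair above keeps each declaration small by storing only an index into a shared type slice, so many declarations reuse one signature entry. A toy sketch of that layout (types are plain strings here, purely for illustration):

	package main

	import "fmt"

	func main() {
		typs := []string{"func()", "func(int, int)", "bool"}
		decls := []struct {
			name string
			typ  int
		}{
			{"printnl", 0},
			{"goPanicIndex", 1},
			{"writeBarrier", 2},
			{"printlock", 0}, // reuses the func() entry
		}
		for _, d := range decls {
			fmt.Printf("%-14s %s\n", d.name, typs[d.typ])
		}
	}
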
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
deleted file mode 100644
index e32ab97..0000000
--- a/src/cmd/compile/internal/gc/bv.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"math/bits"
-)
-
-const (
-	wordBits  = 32
-	wordMask  = wordBits - 1
-	wordShift = 5
-)
-
-// A bvec is a bit vector.
-type bvec struct {
-	n int32    // number of bits in vector
-	b []uint32 // words holding bits
-}
-
-func bvalloc(n int32) bvec {
-	nword := (n + wordBits - 1) / wordBits
-	return bvec{n, make([]uint32, nword)}
-}
-
-type bulkBvec struct {
-	words []uint32
-	nbit  int32
-	nword int32
-}
-
-func bvbulkalloc(nbit int32, count int32) bulkBvec {
-	nword := (nbit + wordBits - 1) / wordBits
-	size := int64(nword) * int64(count)
-	if int64(int32(size*4)) != size*4 {
-		Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
-	}
-	return bulkBvec{
-		words: make([]uint32, size),
-		nbit:  nbit,
-		nword: nword,
-	}
-}
-
-func (b *bulkBvec) next() bvec {
-	out := bvec{b.nbit, b.words[:b.nword]}
-	b.words = b.words[b.nword:]
-	return out
-}
-
-func (bv1 bvec) Eq(bv2 bvec) bool {
-	if bv1.n != bv2.n {
-		Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
-	}
-	for i, x := range bv1.b {
-		if x != bv2.b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func (dst bvec) Copy(src bvec) {
-	copy(dst.b, src.b)
-}
-
-func (bv bvec) Get(i int32) bool {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%wordBits))
-	return bv.b[i>>wordShift]&mask != 0
-}
-
-func (bv bvec) Set(i int32) {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%wordBits))
-	bv.b[i/wordBits] |= mask
-}
-
-func (bv bvec) Unset(i int32) {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%wordBits))
-	bv.b[i/wordBits] &^= mask
-}
-
-// Next returns the smallest index >= i for which bv.Get(i) reports true.
-// If there is no such index, Next returns -1.
-func (bv bvec) Next(i int32) int32 {
-	if i >= bv.n {
-		return -1
-	}
-
-	// Jump i ahead to next word with bits.
-	if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
-		i &^= wordMask
-		i += wordBits
-		for i < bv.n && bv.b[i>>wordShift] == 0 {
-			i += wordBits
-		}
-	}
-
-	if i >= bv.n {
-		return -1
-	}
-
-	// Find 1 bit.
-	w := bv.b[i>>wordShift] >> uint(i&wordMask)
-	i += int32(bits.TrailingZeros32(w))
-
-	return i
-}
-
-func (bv bvec) IsEmpty() bool {
-	for _, x := range bv.b {
-		if x != 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (bv bvec) Not() {
-	for i, x := range bv.b {
-		bv.b[i] = ^x
-	}
-}
-
-// union
-func (dst bvec) Or(src1, src2 bvec) {
-	if len(src1.b) == 0 {
-		return
-	}
-	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
-	for i, x := range src1.b {
-		dst.b[i] = x | src2.b[i]
-	}
-}
-
-// intersection
-func (dst bvec) And(src1, src2 bvec) {
-	if len(src1.b) == 0 {
-		return
-	}
-	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
-	for i, x := range src1.b {
-		dst.b[i] = x & src2.b[i]
-	}
-}
-
-// difference
-func (dst bvec) AndNot(src1, src2 bvec) {
-	if len(src1.b) == 0 {
-		return
-	}
-	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
-
-	for i, x := range src1.b {
-		dst.b[i] = x &^ src2.b[i]
-	}
-}
-
-func (bv bvec) String() string {
-	s := make([]byte, 2+bv.n)
-	copy(s, "#*")
-	for i := int32(0); i < bv.n; i++ {
-		ch := byte('0')
-		if bv.Get(i) {
-			ch = '1'
-		}
-		s[2+i] = ch
-	}
-	return string(s)
-}
-
-func (bv bvec) Clear() {
-	for i := range bv.b {
-		bv.b[i] = 0
-	}
-}
-
-// FNV-1 hash function constants.
-const (
-	H0 = 2166136261
-	Hp = 16777619
-)
-
-func hashbitmap(h uint32, bv bvec) uint32 {
-	n := int((bv.n + 31) / 32)
-	for i := 0; i < n; i++ {
-		w := bv.b[i]
-		h = (h * Hp) ^ (w & 0xff)
-		h = (h * Hp) ^ ((w >> 8) & 0xff)
-		h = (h * Hp) ^ ((w >> 16) & 0xff)
-		h = (h * Hp) ^ ((w >> 24) & 0xff)
-	}
-
-	return h
-}
-
-// bvecSet is a set of bvecs, in initial insertion order.
-type bvecSet struct {
-	index []int  // hash -> uniq index. -1 indicates empty slot.
-	uniq  []bvec // unique bvecs, in insertion order
-}
-
-func (m *bvecSet) grow() {
-	// Allocate new index.
-	n := len(m.index) * 2
-	if n == 0 {
-		n = 32
-	}
-	newIndex := make([]int, n)
-	for i := range newIndex {
-		newIndex[i] = -1
-	}
-
-	// Rehash into newIndex.
-	for i, bv := range m.uniq {
-		h := hashbitmap(H0, bv) % uint32(len(newIndex))
-		for {
-			j := newIndex[h]
-			if j < 0 {
-				newIndex[h] = i
-				break
-			}
-			h++
-			if h == uint32(len(newIndex)) {
-				h = 0
-			}
-		}
-	}
-	m.index = newIndex
-}
-
-// add adds bv to the set and returns its index in m.extractUniqe.
-// The caller must not modify bv after this.
-func (m *bvecSet) add(bv bvec) int {
-	if len(m.uniq)*4 >= len(m.index) {
-		m.grow()
-	}
-
-	index := m.index
-	h := hashbitmap(H0, bv) % uint32(len(index))
-	for {
-		j := index[h]
-		if j < 0 {
-			// New bvec.
-			index[h] = len(m.uniq)
-			m.uniq = append(m.uniq, bv)
-			return len(m.uniq) - 1
-		}
-		jlive := m.uniq[j]
-		if bv.Eq(jlive) {
-			// Existing bvec.
-			return j
-		}
-
-		h++
-		if h == uint32(len(index)) {
-			h = 0
-		}
-	}
-}
-
-// extractUniqe returns the slice of unique bit vectors in m, as
-// indexed by the result of bvecSet.add.
-func (m *bvecSet) extractUniqe() []bvec {
-	return m.uniq
-}
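
The bvec code above addresses bit i as word i>>wordShift, bit i&wordMask within that 32-bit word. A self-contained sketch of just that arithmetic (a simplification for illustration, not the bvec type itself):

	package main

	import "fmt"

	const (
		wordBits  = 32
		wordMask  = wordBits - 1 // 31
		wordShift = 5            // log2(wordBits)
	)

	func main() {
		words := make([]uint32, 2) // room for 64 bits

		set := func(i int) { words[i>>wordShift] |= 1 << uint(i&wordMask) }
		get := func(i int) bool { return words[i>>wordShift]&(1<<uint(i&wordMask)) != 0 }

		set(3)
		set(40)                               // lands in words[1], bit 8
		fmt.Println(get(3), get(39), get(40)) // true false true
	}
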
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
deleted file mode 100644
index bd350f6..0000000
--- a/src/cmd/compile/internal/gc/closure.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/syntax"
-	"cmd/compile/internal/types"
-	"fmt"
-)
-
-func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
-	xtype := p.typeExpr(expr.Type)
-	ntype := p.typeExpr(expr.Type)
-
-	xfunc := p.nod(expr, ODCLFUNC, nil, nil)
-	xfunc.Func.SetIsHiddenClosure(Curfn != nil)
-	xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
-	xfunc.Func.Nname.Name.Param.Ntype = xtype
-	xfunc.Func.Nname.Name.Defn = xfunc
-
-	clo := p.nod(expr, OCLOSURE, nil, nil)
-	clo.Func.Ntype = ntype
-
-	xfunc.Func.Closure = clo
-	clo.Func.Closure = xfunc
-
-	p.funcBody(xfunc, expr.Body)
-
-	// closure-specific variables are hanging off the
-	// ordinary ones in the symbol table; see oldname.
-	// unhook them.
-	// make the list of pointers for the closure call.
-	for _, v := range xfunc.Func.Cvars.Slice() {
-		// Unlink from v1; see comment in syntax.go type Param for these fields.
-		v1 := v.Name.Defn
-		v1.Name.Param.Innermost = v.Name.Param.Outer
-
-		// If the closure usage of v is not dense,
-		// we need to make it dense; now that we're out
-		// of the function in which v appeared,
-		// look up v.Sym in the enclosing function
-		// and keep it around for use in the compiled code.
-		//
-		// That is, suppose we just finished parsing the innermost
-		// closure f4 in this code:
-		//
-		//	func f() {
-		//		v := 1
-		//		func() { // f2
-		//			use(v)
-		//			func() { // f3
-		//				func() { // f4
-		//					use(v)
-		//				}()
-		//			}()
-		//		}()
-		//	}
-		//
-		// At this point v.Outer is f2's v; there is no f3's v.
-		// To construct the closure f4 from within f3,
-		// we need to use f3's v and in this case we need to create f3's v.
-		// We are now in the context of f3, so calling oldname(v.Sym)
-		// obtains f3's v, creating it if necessary (as it is in the example).
-		//
-		// capturevars will decide whether to use v directly or &v.
-		v.Name.Param.Outer = oldname(v.Sym)
-	}
-
-	return clo
-}
-
-// typecheckclosure typechecks an OCLOSURE node. It also creates the named
-// function associated with the closure.
-// TODO: This creation of the named function should probably really be done in a
-// separate pass from type-checking.
-func typecheckclosure(clo *Node, top int) {
-	xfunc := clo.Func.Closure
-	// Set current associated iota value, so iota can be used inside
-	// function in ConstSpec, see issue #22344
-	if x := getIotaValue(); x >= 0 {
-		xfunc.SetIota(x)
-	}
-
-	clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
-	clo.Type = clo.Func.Ntype.Type
-	clo.Func.Top = top
-
-	// Do not typecheck xfunc twice, otherwise we will end up pushing
-	// xfunc to xtop multiple times, causing initLSym to be called twice.
-	// See #30709
-	if xfunc.Typecheck() == 1 {
-		return
-	}
-
-	for _, ln := range xfunc.Func.Cvars.Slice() {
-		n := ln.Name.Defn
-		if !n.Name.Captured() {
-			n.Name.SetCaptured(true)
-			if n.Name.Decldepth == 0 {
-				Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
-			}
-
-			// Ignore assignments to the variable in straightline code
-			// preceding the first capturing by a closure.
-			if n.Name.Decldepth == decldepth {
-				n.Name.SetAssigned(false)
-			}
-		}
-	}
-
-	xfunc.Func.Nname.Sym = closurename(Curfn)
-	setNodeNameFunc(xfunc.Func.Nname)
-	xfunc = typecheck(xfunc, ctxStmt)
-
-	// Type check the body now, but only if we're inside a function.
-	// At top level (in a variable initialization: curfn==nil) we're not
-	// ready to type check code yet; we'll check it later, because the
-	// underlying closure function we create is added to xtop.
-	if Curfn != nil && clo.Type != nil {
-		oldfn := Curfn
-		Curfn = xfunc
-		olddd := decldepth
-		decldepth = 1
-		typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
-		decldepth = olddd
-		Curfn = oldfn
-	}
-
-	xtop = append(xtop, xfunc)
-}
-
-// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int
-
-// closurename generates a new unique name for a closure within
-// outerfunc.
-func closurename(outerfunc *Node) *types.Sym {
-	outer := "glob."
-	prefix := "func"
-	gen := &globClosgen
-
-	if outerfunc != nil {
-		if outerfunc.Func.Closure != nil {
-			prefix = ""
-		}
-
-		outer = outerfunc.funcname()
-
-		// There may be multiple functions named "_". In those
-		// cases, we can't use their individual Closgens as it
-		// would lead to name clashes.
-		if !outerfunc.Func.Nname.isBlank() {
-			gen = &outerfunc.Func.Closgen
-		}
-	}
-
-	*gen++
-	return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
-}
-
-// capturevarscomplete is set to true when the capturevars phase is done.
-var capturevarscomplete bool
-
-// capturevars is called in a separate phase after all typechecking is done.
-// It decides whether each variable captured by a closure should be captured
-// by value or by reference.
-// We use value capturing for values <= 128 bytes that are never reassigned
-// after capturing (effectively constant).
-func capturevars(xfunc *Node) {
-	lno := lineno
-	lineno = xfunc.Pos
-
-	clo := xfunc.Func.Closure
-	cvars := xfunc.Func.Cvars.Slice()
-	out := cvars[:0]
-	for _, v := range cvars {
-		if v.Type == nil {
-			// If v.Type is nil, it means v looked like it
-			// was going to be used in the closure, but
-			// isn't. This happens in struct literals like
-			// s{f: x} where we can't distinguish whether
-			// f is a field identifier or expression until
-			// resolving s.
-			continue
-		}
-		out = append(out, v)
-
-		// type check the & of closed variables outside the closure,
-		// so that the outer frame also grabs them and knows they escape.
-		dowidth(v.Type)
-
-		outer := v.Name.Param.Outer
-		outermost := v.Name.Defn
-
-		// out parameters will be assigned to implicitly upon return.
-		if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
-			v.Name.SetByval(true)
-		} else {
-			outermost.Name.SetAddrtaken(true)
-			outer = nod(OADDR, outer, nil)
-		}
-
-		if Debug.m > 1 {
-			var name *types.Sym
-			if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
-				name = v.Name.Curfn.Func.Nname.Sym
-			}
-			how := "ref"
-			if v.Name.Byval() {
-				how = "value"
-			}
-			Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
-		}
-
-		outer = typecheck(outer, ctxExpr)
-		clo.Func.Enter.Append(outer)
-	}
-
-	xfunc.Func.Cvars.Set(out)
-	lineno = lno
-}
-
-// transformclosure is called in a separate phase after escape analysis.
-// It transforms closure bodies to properly reference captured variables.
-func transformclosure(xfunc *Node) {
-	lno := lineno
-	lineno = xfunc.Pos
-	clo := xfunc.Func.Closure
-
-	if clo.Func.Top&ctxCallee != 0 {
-		// If the closure is directly called, we transform it to a plain function call
-		// with variables passed as args. This avoids allocation of a closure object.
-		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
-		// will complete the transformation later.
-		// For illustration, the following closure:
-		//	func(a int) {
-		//		println(byval)
-		//		byref++
-		//	}(42)
-		// becomes:
-		//	func(byval int, &byref *int, a int) {
-		//		println(byval)
-		//		(*&byref)++
-		//	}(byval, &byref, 42)
-
-		// f is ONAME of the actual function.
-		f := xfunc.Func.Nname
-
-		// We are going to insert captured variables before input args.
-		var params []*types.Field
-		var decls []*Node
-		for _, v := range xfunc.Func.Cvars.Slice() {
-			if !v.Name.Byval() {
-				// If v of type T is captured by reference,
-				// we introduce function param &v *T
-				// and v remains PAUTOHEAP with &v heapaddr
-				// (accesses will implicitly deref &v).
-				addr := newname(lookup("&" + v.Sym.Name))
-				addr.Type = types.NewPtr(v.Type)
-				v.Name.Param.Heapaddr = addr
-				v = addr
-			}
-
-			v.SetClass(PPARAM)
-			decls = append(decls, v)
-
-			fld := types.NewField()
-			fld.Nname = asTypesNode(v)
-			fld.Type = v.Type
-			fld.Sym = v.Sym
-			params = append(params, fld)
-		}
-
-		if len(params) > 0 {
-			// Prepend params and decls.
-			f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
-			xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
-		}
-
-		dowidth(f.Type)
-		xfunc.Type = f.Type // update type of ODCLFUNC
-	} else {
-		// The closure is not called, so it is going to stay as a closure.
-		var body []*Node
-		offset := int64(Widthptr)
-		for _, v := range xfunc.Func.Cvars.Slice() {
-			// cv refers to the field inside of closure OSTRUCTLIT.
-			cv := nod(OCLOSUREVAR, nil, nil)
-
-			cv.Type = v.Type
-			if !v.Name.Byval() {
-				cv.Type = types.NewPtr(v.Type)
-			}
-			offset = Rnd(offset, int64(cv.Type.Align))
-			cv.Xoffset = offset
-			offset += cv.Type.Width
-
-			if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
-				// If it is a small variable captured by value, downgrade it to PAUTO.
-				v.SetClass(PAUTO)
-				xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
-				body = append(body, nod(OAS, v, cv))
-			} else {
-				// Declare variable holding addresses taken from closure
-				// and initialize in entry prologue.
-				addr := newname(lookup("&" + v.Sym.Name))
-				addr.Type = types.NewPtr(v.Type)
-				addr.SetClass(PAUTO)
-				addr.Name.SetUsed(true)
-				addr.Name.Curfn = xfunc
-				xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
-				v.Name.Param.Heapaddr = addr
-				if v.Name.Byval() {
-					cv = nod(OADDR, cv, nil)
-				}
-				body = append(body, nod(OAS, addr, cv))
-			}
-		}
-
-		if len(body) > 0 {
-			typecheckslice(body, ctxStmt)
-			xfunc.Func.Enter.Set(body)
-			xfunc.Func.SetNeedctxt(true)
-		}
-	}
-
-	lineno = lno
-}
-
-// hasemptycvars reports whether closure clo has an
-// empty list of captured vars.
-func hasemptycvars(clo *Node) bool {
-	xfunc := clo.Func.Closure
-	return xfunc.Func.Cvars.Len() == 0
-}
-
-// closuredebugruntimecheck applies boilerplate checks for debug flags
-// and for compiling the runtime.
-func closuredebugruntimecheck(clo *Node) {
-	if Debug_closure > 0 {
-		xfunc := clo.Func.Closure
-		if clo.Esc == EscHeap {
-			Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
-		} else {
-			Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
-		}
-	}
-	if compiling_runtime && clo.Esc == EscHeap {
-		yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
-	}
-}
-
-// closureType returns the struct type used to hold all the information
-// needed in the closure for clo (clo must be an OCLOSURE node).
-// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *Node) *types.Type {
-	// Create closure in the form of a composite literal.
-	// supposing the closure captures an int i and a string s
-	// and has one float64 argument and no results,
-	// the generated code looks like:
-	//
-	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
-	//
-	// The use of the struct provides type information to the garbage
-	// collector so that it can walk the closure. We could use (in this case)
-	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
-	// The information appears in the binary in the form of type descriptors;
-	// the struct is unnamed so that closures in multiple packages with the
-	// same struct type can share the descriptor.
-	fields := []*Node{
-		namedfield(".F", types.Types[TUINTPTR]),
-	}
-	for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
-		typ := v.Type
-		if !v.Name.Byval() {
-			typ = types.NewPtr(typ)
-		}
-		fields = append(fields, symfield(v.Sym, typ))
-	}
-	typ := tostruct(fields)
-	typ.SetNoalg(true)
-	return typ
-}
-
-func walkclosure(clo *Node, init *Nodes) *Node {
-	xfunc := clo.Func.Closure
-
-	// If no closure vars, don't bother wrapping.
-	if hasemptycvars(clo) {
-		if Debug_closure > 0 {
-			Warnl(clo.Pos, "closure converted to global")
-		}
-		return xfunc.Func.Nname
-	}
-	closuredebugruntimecheck(clo)
-
-	typ := closureType(clo)
-
-	clos := nod(OCOMPLIT, nil, typenod(typ))
-	clos.Esc = clo.Esc
-	clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
-
-	clos = nod(OADDR, clos, nil)
-	clos.Esc = clo.Esc
-
-	// Force type conversion from *struct to the func type.
-	clos = convnop(clos, clo.Type)
-
-	// non-escaping temp to use, if any.
-	if x := prealloc[clo]; x != nil {
-		if !types.Identical(typ, x.Type) {
-			panic("closure type does not match order's assigned type")
-		}
-		clos.Left.Right = x
-		delete(prealloc, clo)
-	}
-
-	return walkexpr(clos, init)
-}
-
-func typecheckpartialcall(fn *Node, sym *types.Sym) {
-	switch fn.Op {
-	case ODOTINTER, ODOTMETH:
-		break
-
-	default:
-		Fatalf("invalid typecheckpartialcall")
-	}
-
-	// Create top-level function.
-	xfunc := makepartialcall(fn, fn.Type, sym)
-	fn.Func = xfunc.Func
-	fn.Func.SetWrapper(true)
-	fn.Right = newname(sym)
-	fn.Op = OCALLPART
-	fn.Type = xfunc.Type
-}
-
-// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
-// for partial calls.
-func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
-	rcvrtype := fn.Left.Type
-	sym := methodSymSuffix(rcvrtype, meth, "-fm")
-
-	if sym.Uniq() {
-		return asNode(sym.Def)
-	}
-	sym.SetUniq(true)
-
-	savecurfn := Curfn
-	saveLineNo := lineno
-	Curfn = nil
-
-	// Set line number equal to the line number where the method is declared.
-	var m *types.Field
-	if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
-		lineno = m.Pos
-	}
-	// Note: !m.Pos.IsKnown() happens for method expressions where
-	// the method is implicitly declared. The Error method of the
-	// built-in error type is one such method.  We leave the line
-	// number at the use of the method expression in this
-	// case. See issue 29389.
-
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Set(structargs(t0.Params(), true))
-	tfn.Rlist.Set(structargs(t0.Results(), false))
-
-	xfunc := dclfunc(sym, tfn)
-	xfunc.Func.SetDupok(true)
-	xfunc.Func.SetNeedctxt(true)
-
-	tfn.Type.SetPkg(t0.Pkg())
-
-	// Declare and initialize variable holding receiver.
-
-	cv := nod(OCLOSUREVAR, nil, nil)
-	cv.Type = rcvrtype
-	cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
-
-	ptr := newname(lookup(".this"))
-	declare(ptr, PAUTO)
-	ptr.Name.SetUsed(true)
-	var body []*Node
-	if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
-		ptr.Type = rcvrtype
-		body = append(body, nod(OAS, ptr, cv))
-	} else {
-		ptr.Type = types.NewPtr(rcvrtype)
-		body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
-	}
-
-	call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
-	call.List.Set(paramNnames(tfn.Type))
-	call.SetIsDDD(tfn.Type.IsVariadic())
-	if t0.NumResults() != 0 {
-		n := nod(ORETURN, nil, nil)
-		n.List.Set1(call)
-		call = n
-	}
-	body = append(body, call)
-
-	xfunc.Nbody.Set(body)
-	funcbody()
-
-	xfunc = typecheck(xfunc, ctxStmt)
-	// Need to typecheck the body of the just-generated wrapper.
-	// typecheckslice() requires that Curfn is set when processing an ORETURN.
-	Curfn = xfunc
-	typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
-	sym.Def = asTypesNode(xfunc)
-	xtop = append(xtop, xfunc)
-	Curfn = savecurfn
-	lineno = saveLineNo
-
-	return xfunc
-}
-
-// partialCallType returns the struct type used to hold all the information
-// needed in the closure for n (n must be an OCALLPART node).
-// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *Node) *types.Type {
-	t := tostruct([]*Node{
-		namedfield("F", types.Types[TUINTPTR]),
-		namedfield("R", n.Left.Type),
-	})
-	t.SetNoalg(true)
-	return t
-}
-
-func walkpartialcall(n *Node, init *Nodes) *Node {
-	// Create closure in the form of a composite literal.
-	// For x.M with receiver (x) type T, the generated code looks like:
-	//
-	//	clos = &struct{F uintptr; R T}{T.M·f, x}
-	//
-	// Like walkclosure above.
-
-	if n.Left.Type.IsInterface() {
-		// Trigger panic for method on nil interface now.
-		// Otherwise it happens in the wrapper and is confusing.
-		n.Left = cheapexpr(n.Left, init)
-		n.Left = walkexpr(n.Left, nil)
-
-		tab := nod(OITAB, n.Left, nil)
-		tab = typecheck(tab, ctxExpr)
-
-		c := nod(OCHECKNIL, tab, nil)
-		c.SetTypecheck(1)
-		init.Append(c)
-	}
-
-	typ := partialCallType(n)
-
-	clos := nod(OCOMPLIT, nil, typenod(typ))
-	clos.Esc = n.Esc
-	clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
-
-	clos = nod(OADDR, clos, nil)
-	clos.Esc = n.Esc
-
-	// Force type conversion from *struct to the func type.
-	clos = convnop(clos, n.Type)
-
-	// non-escaping temp to use, if any.
-	if x := prealloc[n]; x != nil {
-		if !types.Identical(typ, x.Type) {
-			panic("partial call type does not match order's assigned type")
-		}
-		clos.Left.Right = x
-		delete(prealloc, n)
-	}
-
-	return walkexpr(clos, init)
-}
-
-// callpartMethod returns the *types.Field representing the method
-// referenced by method value n.
-func callpartMethod(n *Node) *types.Field {
-	if n.Op != OCALLPART {
-		Fatalf("expected OCALLPART, got %v", n)
-	}
-
-	// TODO(mdempsky): Optimize this. If necessary,
-	// makepartialcall could save m for us somewhere.
-	var m *types.Field
-	if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
-		Fatalf("failed to find field for OCALLPART")
-	}
-
-	return m
-}
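
closureType and walkclosure above lower a closure to a struct whose first field is the code pointer and whose remaining fields are the captured variables (by value, or by pointer when the variable is reassigned or large, per capturevars). A hand-written sketch of that shape, with illustrative names only:

	package main

	import "fmt"

	// counterClosure plays the role of struct{.F uintptr; n *int}:
	// a code pointer plus the captured variables.
	type counterClosure struct {
		code func(c *counterClosure) int // stands in for the .F field
		n    *int                        // captured by reference: it is reassigned
	}

	func main() {
		n := 0
		clo := &counterClosure{
			code: func(c *counterClosure) int { (*c.n)++; return *c.n },
			n:    &n,
		}
		fmt.Println(clo.code(clo), clo.code(clo), n) // 1 2 2
	}
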
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
new file mode 100644
index 0000000..ba67c58
--- /dev/null
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -0,0 +1,148 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+	"internal/race"
+	"math/rand"
+	"sort"
+	"sync"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/liveness"
+	"cmd/compile/internal/ssagen"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/compile/internal/walk"
+)
+
+// "Portable" code generation.
+
+var (
+	compilequeue []*ir.Func // functions waiting to be compiled
+)
+
+func enqueueFunc(fn *ir.Func) {
+	if ir.CurFunc != nil {
+		base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
+	}
+
+	if ir.FuncName(fn) == "_" {
+		// Skip compiling blank functions.
+		// Frontend already reported any spec-mandated errors (#29870).
+		return
+	}
+
+	if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
+		return // we'll get this as part of its enclosing function
+	}
+
+	if len(fn.Body) == 0 {
+		// Initialize ABI wrappers if necessary.
+		ssagen.InitLSym(fn, false)
+		liveness.WriteFuncMap(fn)
+		return
+	}
+
+	errorsBefore := base.Errors()
+
+	todo := []*ir.Func{fn}
+	for len(todo) > 0 {
+		next := todo[len(todo)-1]
+		todo = todo[:len(todo)-1]
+
+		prepareFunc(next)
+		todo = append(todo, next.Closures...)
+	}
+
+	if base.Errors() > errorsBefore {
+		return
+	}
+
+	// Enqueue just fn itself. compileFunctions will handle
+	// scheduling compilation of its closures after it's done.
+	compilequeue = append(compilequeue, fn)
+}
+
+// prepareFunc handles any remaining frontend compilation tasks that
+// aren't yet safe to perform concurrently.
+func prepareFunc(fn *ir.Func) {
+	// Set up the function's LSym early to avoid data races with the assemblers.
+	// Do this before walk, as walk needs the LSym to set attributes/relocations
+	// (e.g. in MarkTypeUsedInInterface).
+	ssagen.InitLSym(fn, true)
+
+	// Calculate parameter offsets.
+	types.CalcSize(fn.Type())
+
+	typecheck.DeclContext = ir.PAUTO
+	ir.CurFunc = fn
+	walk.Walk(fn)
+	ir.CurFunc = nil // enforce no further uses of CurFunc
+	typecheck.DeclContext = ir.PEXTERN
+}
+
+// compileFunctions compiles all functions in compilequeue.
+// It fans the work out across base.Flag.LowerC backend workers
+// and waits for them to complete.
+func compileFunctions() {
+	if len(compilequeue) == 0 {
+		return
+	}
+
+	if race.Enabled {
+		// Randomize compilation order to try to shake out races.
+		tmp := make([]*ir.Func, len(compilequeue))
+		perm := rand.Perm(len(compilequeue))
+		for i, v := range perm {
+			tmp[v] = compilequeue[i]
+		}
+		copy(compilequeue, tmp)
+	} else {
+		// Compile the longest functions first,
+		// since they're most likely to be the slowest.
+		// This helps avoid stragglers.
+		sort.Slice(compilequeue, func(i, j int) bool {
+			return len(compilequeue[i].Body) > len(compilequeue[j].Body)
+		})
+	}
+
+	// We queue up a goroutine per function that needs to be
+	// compiled, but require them to grab an available worker ID
+	// before doing any substantial work to limit parallelism.
+	workerIDs := make(chan int, base.Flag.LowerC)
+	for i := 0; i < base.Flag.LowerC; i++ {
+		workerIDs <- i
+	}
+
+	var wg sync.WaitGroup
+	var asyncCompile func(*ir.Func)
+	asyncCompile = func(fn *ir.Func) {
+		wg.Add(1)
+		go func() {
+			worker := <-workerIDs
+			ssagen.Compile(fn, worker)
+			workerIDs <- worker
+
+			// Done compiling fn. Schedule its closures for compilation.
+			for _, closure := range fn.Closures {
+				asyncCompile(closure)
+			}
+			wg.Done()
+		}()
+	}
+
+	types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
+	base.Ctxt.InParallel = true
+	for _, fn := range compilequeue {
+		asyncCompile(fn)
+	}
+	compilequeue = nil
+	wg.Wait()
+	base.Ctxt.InParallel = false
+	types.CalcSizeDisabled = false
+}
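
The new compileFunctions bounds backend parallelism with a buffered channel of worker IDs: one goroutine is started per enqueued function, but each must receive an ID before doing real work, so at most base.Flag.LowerC compilations run at once, and a function's closures are only scheduled once the function itself is done. A self-contained sketch of that pattern, with an assumed task list and worker count standing in for the compile queue:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		const numWorkers = 4 // stands in for base.Flag.LowerC
		workerIDs := make(chan int, numWorkers)
		for i := 0; i < numWorkers; i++ {
			workerIDs <- i
		}

		tasks := []string{"a", "b", "c", "d", "e", "f"}

		var wg sync.WaitGroup
		for _, t := range tasks {
			wg.Add(1)
			go func(t string) {
				defer wg.Done()
				id := <-workerIDs                  // block until a worker slot is free
				defer func() { workerIDs <- id }() // release the slot when done
				fmt.Printf("worker %d compiled %s\n", id, t)
			}(t)
		}
		wg.Wait()
	}
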
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
deleted file mode 100644
index b92c8d6..0000000
--- a/src/cmd/compile/internal/gc/const.go
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-	"math/big"
-	"strings"
-)
-
-// Ctype describes the constant kind of an "ideal" (untyped) constant.
-type Ctype uint8
-
-const (
-	CTxxx Ctype = iota
-
-	CTINT
-	CTRUNE
-	CTFLT
-	CTCPLX
-	CTSTR
-	CTBOOL
-	CTNIL
-)
-
-type Val struct {
-	// U contains one of:
-	// bool     bool when Ctype() == CTBOOL
-	// *Mpint   int when Ctype() == CTINT, rune when Ctype() == CTRUNE
-	// *Mpflt   float when Ctype() == CTFLT
-	// *Mpcplx  pair of floats when Ctype() == CTCPLX
-	// string   string when Ctype() == CTSTR
-	// *Nilval  when Ctype() == CTNIL
-	U interface{}
-}
-
-func (v Val) Ctype() Ctype {
-	switch x := v.U.(type) {
-	default:
-		Fatalf("unexpected Ctype for %T", v.U)
-		panic("unreachable")
-	case nil:
-		return CTxxx
-	case *NilVal:
-		return CTNIL
-	case bool:
-		return CTBOOL
-	case *Mpint:
-		if x.Rune {
-			return CTRUNE
-		}
-		return CTINT
-	case *Mpflt:
-		return CTFLT
-	case *Mpcplx:
-		return CTCPLX
-	case string:
-		return CTSTR
-	}
-}
-
-func eqval(a, b Val) bool {
-	if a.Ctype() != b.Ctype() {
-		return false
-	}
-	switch x := a.U.(type) {
-	default:
-		Fatalf("unexpected Ctype for %T", a.U)
-		panic("unreachable")
-	case *NilVal:
-		return true
-	case bool:
-		y := b.U.(bool)
-		return x == y
-	case *Mpint:
-		y := b.U.(*Mpint)
-		return x.Cmp(y) == 0
-	case *Mpflt:
-		y := b.U.(*Mpflt)
-		return x.Cmp(y) == 0
-	case *Mpcplx:
-		y := b.U.(*Mpcplx)
-		return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
-	case string:
-		y := b.U.(string)
-		return x == y
-	}
-}
-
-// Interface returns the constant value stored in v as an interface{}.
-// It returns int64s for ints and runes, float64s for floats,
-// complex128s for complex values, and nil for constant nils.
-func (v Val) Interface() interface{} {
-	switch x := v.U.(type) {
-	default:
-		Fatalf("unexpected Interface for %T", v.U)
-		panic("unreachable")
-	case *NilVal:
-		return nil
-	case bool, string:
-		return x
-	case *Mpint:
-		return x.Int64()
-	case *Mpflt:
-		return x.Float64()
-	case *Mpcplx:
-		return complex(x.Real.Float64(), x.Imag.Float64())
-	}
-}
-
-type NilVal struct{}
-
-// Int64Val returns n as an int64.
-// n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
-	if !Isconst(n, CTINT) {
-		Fatalf("Int64Val(%v)", n)
-	}
-	return n.Val().U.(*Mpint).Int64()
-}
-
-// CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
-	if !Isconst(n, CTINT) {
-		return false
-	}
-
-	// if the value inside n cannot be represented as an int64, the
-	// return value of Int64 is undefined
-	return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
-}
-
-// BoolVal returns n as a bool.
-// n must be a boolean constant.
-func (n *Node) BoolVal() bool {
-	if !Isconst(n, CTBOOL) {
-		Fatalf("BoolVal(%v)", n)
-	}
-	return n.Val().U.(bool)
-}
-
-// StringVal returns the value of a literal string Node as a string.
-// n must be a string constant.
-func (n *Node) StringVal() string {
-	if !Isconst(n, CTSTR) {
-		Fatalf("StringVal(%v)", n)
-	}
-	return n.Val().U.(string)
-}
-
-// truncate float literal fv to 32-bit or 64-bit precision
-// according to type; return truncated value.
-func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
-	if t == nil {
-		return oldv
-	}
-
-	if overflow(Val{oldv}, t) {
-		// If there was overflow, simply continuing would set the
-		// value to Inf which in turn would lead to spurious follow-on
-		// errors. Avoid this by returning the existing value.
-		return oldv
-	}
-
-	fv := newMpflt()
-
-	// convert large precision literal floating
-	// into limited precision (float64 or float32)
-	switch t.Etype {
-	case types.TFLOAT32:
-		fv.SetFloat64(oldv.Float32())
-	case types.TFLOAT64:
-		fv.SetFloat64(oldv.Float64())
-	default:
-		Fatalf("truncfltlit: unexpected Etype %v", t.Etype)
-	}
-
-	return fv
-}
-
-// truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit
-// precision, according to type; return truncated value. In case of
-// overflow, calls yyerror but does not truncate the input value.
-func trunccmplxlit(oldv *Mpcplx, t *types.Type) *Mpcplx {
-	if t == nil {
-		return oldv
-	}
-
-	if overflow(Val{oldv}, t) {
-		// If there was overflow, simply continuing would set the
-		// value to Inf which in turn would lead to spurious follow-on
-		// errors. Avoid this by returning the existing value.
-		return oldv
-	}
-
-	cv := newMpcmplx()
-
-	switch t.Etype {
-	case types.TCOMPLEX64:
-		cv.Real.SetFloat64(oldv.Real.Float32())
-		cv.Imag.SetFloat64(oldv.Imag.Float32())
-	case types.TCOMPLEX128:
-		cv.Real.SetFloat64(oldv.Real.Float64())
-		cv.Imag.SetFloat64(oldv.Imag.Float64())
-	default:
-		Fatalf("trunccplxlit: unexpected Etype %v", t.Etype)
-	}
-
-	return cv
-}
-
-// TODO(mdempsky): Replace these with better APIs.
-func convlit(n *Node, t *types.Type) *Node    { return convlit1(n, t, false, nil) }
-func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
-
-// convlit1 converts an untyped expression n to type t. If n already
-// has a type, convlit1 has no effect.
-//
-// For explicit conversions, t must be non-nil, and integer-to-string
-// conversions are allowed.
-//
-// For implicit conversions (e.g., assignments), t may be nil; if so,
-// n is converted to its default type.
-//
-// If there's an error converting n to t, context is used in the error
-// message.
-func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node {
-	if explicit && t == nil {
-		Fatalf("explicit conversion missing type")
-	}
-	if t != nil && t.IsUntyped() {
-		Fatalf("bad conversion to untyped: %v", t)
-	}
-
-	if n == nil || n.Type == nil {
-		// Allow sloppy callers.
-		return n
-	}
-	if !n.Type.IsUntyped() {
-		// Already typed; nothing to do.
-		return n
-	}
-
-	if n.Op == OLITERAL {
-		// Can't always set n.Type directly on OLITERAL nodes.
-		// See discussion on CL 20813.
-		n = n.rawcopy()
-	}
-
-	// Nil is technically not a constant, so handle it specially.
-	if n.Type.Etype == TNIL {
-		if t == nil {
-			yyerror("use of untyped nil")
-			n.SetDiag(true)
-			n.Type = nil
-			return n
-		}
-
-		if !t.HasNil() {
-			// Leave for caller to handle.
-			return n
-		}
-
-		n.Type = t
-		return n
-	}
-
-	if t == nil || !okforconst[t.Etype] {
-		t = defaultType(n.Type)
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected untyped expression: %v", n)
-
-	case OLITERAL:
-		v := convertVal(n.Val(), t, explicit)
-		if v.U == nil {
-			break
-		}
-		n.SetVal(v)
-		n.Type = t
-		return n
-
-	case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG:
-		ot := operandType(n.Op, t)
-		if ot == nil {
-			n = defaultlit(n, nil)
-			break
-		}
-
-		n.Left = convlit(n.Left, ot)
-		if n.Left.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Type = t
-		return n
-
-	case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND, OCOMPLEX:
-		ot := operandType(n.Op, t)
-		if ot == nil {
-			n = defaultlit(n, nil)
-			break
-		}
-
-		n.Left = convlit(n.Left, ot)
-		n.Right = convlit(n.Right, ot)
-		if n.Left.Type == nil || n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if !types.Identical(n.Left.Type, n.Right.Type) {
-			yyerror("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type)
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-		return n
-
-	case OEQ, ONE, OLT, OLE, OGT, OGE:
-		if !t.IsBoolean() {
-			break
-		}
-		n.Type = t
-		return n
-
-	case OLSH, ORSH:
-		n.Left = convlit1(n.Left, t, explicit, nil)
-		n.Type = n.Left.Type
-		if n.Type != nil && !n.Type.IsInteger() {
-			yyerror("invalid operation: %v (shift of type %v)", n, n.Type)
-			n.Type = nil
-		}
-		return n
-	}
-
-	if !n.Diag() {
-		if !t.Broke() {
-			if explicit {
-				yyerror("cannot convert %L to type %v", n, t)
-			} else if context != nil {
-				yyerror("cannot use %L as type %v in %s", n, t, context())
-			} else {
-				yyerror("cannot use %L as type %v", n, t)
-			}
-		}
-		n.SetDiag(true)
-	}
-	n.Type = nil
-	return n
-}
-
-func operandType(op Op, t *types.Type) *types.Type {
-	switch op {
-	case OCOMPLEX:
-		if t.IsComplex() {
-			return floatForComplex(t)
-		}
-	case OREAL, OIMAG:
-		if t.IsFloat() {
-			return complexForFloat(t)
-		}
-	default:
-		if okfor[op][t.Etype] {
-			return t
-		}
-	}
-	return nil
-}
-
-// convertVal converts v into a representation appropriate for t. If
-// no such representation exists, it returns Val{} instead.
-//
-// If explicit is true, then conversions from integer to string are
-// also allowed.
-func convertVal(v Val, t *types.Type, explicit bool) Val {
-	switch ct := v.Ctype(); ct {
-	case CTBOOL:
-		if t.IsBoolean() {
-			return v
-		}
-
-	case CTSTR:
-		if t.IsString() {
-			return v
-		}
-
-	case CTINT, CTRUNE:
-		if explicit && t.IsString() {
-			return tostr(v)
-		}
-		fallthrough
-	case CTFLT, CTCPLX:
-		switch {
-		case t.IsInteger():
-			v = toint(v)
-			overflow(v, t)
-			return v
-		case t.IsFloat():
-			v = toflt(v)
-			v = Val{truncfltlit(v.U.(*Mpflt), t)}
-			return v
-		case t.IsComplex():
-			v = tocplx(v)
-			v = Val{trunccmplxlit(v.U.(*Mpcplx), t)}
-			return v
-		}
-	}
-
-	return Val{}
-}
-
-func tocplx(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		c := newMpcmplx()
-		c.Real.SetInt(u)
-		c.Imag.SetFloat64(0.0)
-		v.U = c
-
-	case *Mpflt:
-		c := newMpcmplx()
-		c.Real.Set(u)
-		c.Imag.SetFloat64(0.0)
-		v.U = c
-	}
-
-	return v
-}
-
-func toflt(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		f := newMpflt()
-		f.SetInt(u)
-		v.U = f
-
-	case *Mpcplx:
-		f := newMpflt()
-		f.Set(&u.Real)
-		if u.Imag.CmpFloat64(0) != 0 {
-			yyerror("constant %v truncated to real", u.GoString())
-		}
-		v.U = f
-	}
-
-	return v
-}
-
-func toint(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if u.Rune {
-			i := new(Mpint)
-			i.Set(u)
-			v.U = i
-		}
-
-	case *Mpflt:
-		i := new(Mpint)
-		if !i.SetFloat(u) {
-			if i.checkOverflow(0) {
-				yyerror("integer too large")
-			} else {
-				// The value of u cannot be represented as an integer;
-				// so we need to print an error message.
-				// Unfortunately some float values cannot be
-				// reasonably formatted for inclusion in an error
-				// message (example: 1 + 1e-100), so first we try to
-				// format the float; if the truncation resulted in
-				// something that looks like an integer we omit the
-				// value from the error message.
-				// (See issue #11371).
-				var t big.Float
-				t.Parse(u.GoString(), 10)
-				if t.IsInt() {
-					yyerror("constant truncated to integer")
-				} else {
-					yyerror("constant %v truncated to integer", u.GoString())
-				}
-			}
-		}
-		v.U = i
-
-	case *Mpcplx:
-		i := new(Mpint)
-		if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
-			yyerror("constant %v truncated to integer", u.GoString())
-		}
-
-		v.U = i
-	}
-
-	return v
-}
-
-func doesoverflow(v Val, t *types.Type) bool {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if !t.IsInteger() {
-			Fatalf("overflow: %v integer constant", t)
-		}
-		return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0
-
-	case *Mpflt:
-		if !t.IsFloat() {
-			Fatalf("overflow: %v floating-point constant", t)
-		}
-		return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0
-
-	case *Mpcplx:
-		if !t.IsComplex() {
-			Fatalf("overflow: %v complex constant", t)
-		}
-		return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 ||
-			u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0
-	}
-
-	return false
-}
-
-func overflow(v Val, t *types.Type) bool {
-	// v has already been converted
-	// to appropriate form for t.
-	if t == nil || t.Etype == TIDEAL {
-		return false
-	}
-
-	// Only uintptrs may be converted to pointers, which cannot overflow.
-	if t.IsPtr() || t.IsUnsafePtr() {
-		return false
-	}
-
-	if doesoverflow(v, t) {
-		yyerror("constant %v overflows %v", v, t)
-		return true
-	}
-
-	return false
-
-}
-
-func tostr(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		var r rune = 0xFFFD
-		if u.Cmp(minintval[TINT32]) >= 0 && u.Cmp(maxintval[TINT32]) <= 0 {
-			r = rune(u.Int64())
-		}
-		v.U = string(r)
-	}
-
-	return v
-}
-
-func consttype(n *Node) Ctype {
-	if n == nil || n.Op != OLITERAL {
-		return CTxxx
-	}
-	return n.Val().Ctype()
-}
-
-func Isconst(n *Node, ct Ctype) bool {
-	t := consttype(n)
-
-	// If the caller is asking for CTINT, allow CTRUNE too.
-	// Makes life easier for back ends.
-	return t == ct || (ct == CTINT && t == CTRUNE)
-}
-
-// evconst rewrites constant expressions into OLITERAL nodes.
-func evconst(n *Node) {
-	nl, nr := n.Left, n.Right
-
-	// Pick off just the opcodes that can be constant evaluated.
-	switch op := n.Op; op {
-	case OPLUS, ONEG, OBITNOT, ONOT:
-		if nl.Op == OLITERAL {
-			setconst(n, unaryOp(op, nl.Val(), n.Type))
-		}
-
-	case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			setconst(n, binaryOp(nl.Val(), op, nr.Val()))
-		}
-
-	case OEQ, ONE, OLT, OLE, OGT, OGE:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			setboolconst(n, compareOp(nl.Val(), op, nr.Val()))
-		}
-
-	case OLSH, ORSH:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			setconst(n, shiftOp(nl.Val(), op, nr.Val()))
-		}
-
-	case OCONV, ORUNESTR:
-		if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
-			setconst(n, convertVal(nl.Val(), n.Type, true))
-		}
-
-	case OCONVNOP:
-		if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
-			// set so n.Orig gets OCONV instead of OCONVNOP
-			n.Op = OCONV
-			setconst(n, nl.Val())
-		}
-
-	case OADDSTR:
-		// Merge adjacent constants in the argument list.
-		s := n.List.Slice()
-		for i1 := 0; i1 < len(s); i1++ {
-			if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
-				// merge from i1 up to but not including i2
-				var strs []string
-				i2 := i1
-				for i2 < len(s) && Isconst(s[i2], CTSTR) {
-					strs = append(strs, s[i2].StringVal())
-					i2++
-				}
-
-				nl := *s[i1]
-				nl.Orig = &nl
-				nl.SetVal(Val{strings.Join(strs, "")})
-				s[i1] = &nl
-				s = append(s[:i1+1], s[i2:]...)
-			}
-		}
-
-		if len(s) == 1 && Isconst(s[0], CTSTR) {
-			n.Op = OLITERAL
-			n.SetVal(s[0].Val())
-		} else {
-			n.List.Set(s)
-		}
-
-	case OCAP, OLEN:
-		switch nl.Type.Etype {
-		case TSTRING:
-			if Isconst(nl, CTSTR) {
-				setintconst(n, int64(len(nl.StringVal())))
-			}
-		case TARRAY:
-			if !hascallchan(nl) {
-				setintconst(n, nl.Type.NumElem())
-			}
-		}
-
-	case OALIGNOF, OOFFSETOF, OSIZEOF:
-		setintconst(n, evalunsafe(n))
-
-	case OREAL, OIMAG:
-		if nl.Op == OLITERAL {
-			var re, im *Mpflt
-			switch u := nl.Val().U.(type) {
-			case *Mpint:
-				re = newMpflt()
-				re.SetInt(u)
-				// im = 0
-			case *Mpflt:
-				re = u
-				// im = 0
-			case *Mpcplx:
-				re = &u.Real
-				im = &u.Imag
-			default:
-				Fatalf("impossible")
-			}
-			if n.Op == OIMAG {
-				if im == nil {
-					im = newMpflt()
-				}
-				re = im
-			}
-			setconst(n, Val{re})
-		}
-
-	case OCOMPLEX:
-		if nl.Op == OLITERAL && nr.Op == OLITERAL {
-			// make it a complex literal
-			c := newMpcmplx()
-			c.Real.Set(toflt(nl.Val()).U.(*Mpflt))
-			c.Imag.Set(toflt(nr.Val()).U.(*Mpflt))
-			setconst(n, Val{c})
-		}
-	}
-}
-
-func match(x, y Val) (Val, Val) {
-	switch {
-	case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX:
-		return tocplx(x), tocplx(y)
-	case x.Ctype() == CTFLT || y.Ctype() == CTFLT:
-		return toflt(x), toflt(y)
-	}
-
-	// Mixed int/rune are fine.
-	return x, y
-}
-
-func compareOp(x Val, op Op, y Val) bool {
-	x, y = match(x, y)
-
-	switch x.Ctype() {
-	case CTBOOL:
-		x, y := x.U.(bool), y.U.(bool)
-		switch op {
-		case OEQ:
-			return x == y
-		case ONE:
-			return x != y
-		}
-
-	case CTINT, CTRUNE:
-		x, y := x.U.(*Mpint), y.U.(*Mpint)
-		return cmpZero(x.Cmp(y), op)
-
-	case CTFLT:
-		x, y := x.U.(*Mpflt), y.U.(*Mpflt)
-		return cmpZero(x.Cmp(y), op)
-
-	case CTCPLX:
-		x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
-		eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
-		switch op {
-		case OEQ:
-			return eq
-		case ONE:
-			return !eq
-		}
-
-	case CTSTR:
-		x, y := x.U.(string), y.U.(string)
-		switch op {
-		case OEQ:
-			return x == y
-		case ONE:
-			return x != y
-		case OLT:
-			return x < y
-		case OLE:
-			return x <= y
-		case OGT:
-			return x > y
-		case OGE:
-			return x >= y
-		}
-	}
-
-	Fatalf("compareOp: bad comparison: %v %v %v", x, op, y)
-	panic("unreachable")
-}
-
-func cmpZero(x int, op Op) bool {
-	switch op {
-	case OEQ:
-		return x == 0
-	case ONE:
-		return x != 0
-	case OLT:
-		return x < 0
-	case OLE:
-		return x <= 0
-	case OGT:
-		return x > 0
-	case OGE:
-		return x >= 0
-	}
-
-	Fatalf("cmpZero: want comparison operator, got %v", op)
-	panic("unreachable")
-}
-
-func binaryOp(x Val, op Op, y Val) Val {
-	x, y = match(x, y)
-
-Outer:
-	switch x.Ctype() {
-	case CTBOOL:
-		x, y := x.U.(bool), y.U.(bool)
-		switch op {
-		case OANDAND:
-			return Val{U: x && y}
-		case OOROR:
-			return Val{U: x || y}
-		}
-
-	case CTINT, CTRUNE:
-		x, y := x.U.(*Mpint), y.U.(*Mpint)
-
-		u := new(Mpint)
-		u.Rune = x.Rune || y.Rune
-		u.Set(x)
-		switch op {
-		case OADD:
-			u.Add(y)
-		case OSUB:
-			u.Sub(y)
-		case OMUL:
-			u.Mul(y)
-		case ODIV:
-			if y.CmpInt64(0) == 0 {
-				yyerror("division by zero")
-				return Val{}
-			}
-			u.Quo(y)
-		case OMOD:
-			if y.CmpInt64(0) == 0 {
-				yyerror("division by zero")
-				return Val{}
-			}
-			u.Rem(y)
-		case OOR:
-			u.Or(y)
-		case OAND:
-			u.And(y)
-		case OANDNOT:
-			u.AndNot(y)
-		case OXOR:
-			u.Xor(y)
-		default:
-			break Outer
-		}
-		return Val{U: u}
-
-	case CTFLT:
-		x, y := x.U.(*Mpflt), y.U.(*Mpflt)
-
-		u := newMpflt()
-		u.Set(x)
-		switch op {
-		case OADD:
-			u.Add(y)
-		case OSUB:
-			u.Sub(y)
-		case OMUL:
-			u.Mul(y)
-		case ODIV:
-			if y.CmpFloat64(0) == 0 {
-				yyerror("division by zero")
-				return Val{}
-			}
-			u.Quo(y)
-		default:
-			break Outer
-		}
-		return Val{U: u}
-
-	case CTCPLX:
-		x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
-
-		u := newMpcmplx()
-		u.Real.Set(&x.Real)
-		u.Imag.Set(&x.Imag)
-		switch op {
-		case OADD:
-			u.Real.Add(&y.Real)
-			u.Imag.Add(&y.Imag)
-		case OSUB:
-			u.Real.Sub(&y.Real)
-			u.Imag.Sub(&y.Imag)
-		case OMUL:
-			u.Mul(y)
-		case ODIV:
-			if !u.Div(y) {
-				yyerror("complex division by zero")
-				return Val{}
-			}
-		default:
-			break Outer
-		}
-		return Val{U: u}
-	}
-
-	Fatalf("binaryOp: bad operation: %v %v %v", x, op, y)
-	panic("unreachable")
-}
-
-func unaryOp(op Op, x Val, t *types.Type) Val {
-	switch op {
-	case OPLUS:
-		switch x.Ctype() {
-		case CTINT, CTRUNE, CTFLT, CTCPLX:
-			return x
-		}
-
-	case ONEG:
-		switch x.Ctype() {
-		case CTINT, CTRUNE:
-			x := x.U.(*Mpint)
-			u := new(Mpint)
-			u.Rune = x.Rune
-			u.Set(x)
-			u.Neg()
-			return Val{U: u}
-
-		case CTFLT:
-			x := x.U.(*Mpflt)
-			u := newMpflt()
-			u.Set(x)
-			u.Neg()
-			return Val{U: u}
-
-		case CTCPLX:
-			x := x.U.(*Mpcplx)
-			u := newMpcmplx()
-			u.Real.Set(&x.Real)
-			u.Imag.Set(&x.Imag)
-			u.Real.Neg()
-			u.Imag.Neg()
-			return Val{U: u}
-		}
-
-	case OBITNOT:
-		switch x.Ctype() {
-		case CTINT, CTRUNE:
-			x := x.U.(*Mpint)
-
-			u := new(Mpint)
-			u.Rune = x.Rune
-			if t.IsSigned() || t.IsUntyped() {
-				// Signed values change sign.
-				u.SetInt64(-1)
-			} else {
-				// Unsigned values invert their bits.
-				u.Set(maxintval[t.Etype])
-			}
-			u.Xor(x)
-			return Val{U: u}
-		}
-
-	case ONOT:
-		return Val{U: !x.U.(bool)}
-	}
-
-	Fatalf("unaryOp: bad operation: %v %v", op, x)
-	panic("unreachable")
-}
-
-func shiftOp(x Val, op Op, y Val) Val {
-	if x.Ctype() != CTRUNE {
-		x = toint(x)
-	}
-	y = toint(y)
-
-	u := new(Mpint)
-	u.Set(x.U.(*Mpint))
-	u.Rune = x.U.(*Mpint).Rune
-	switch op {
-	case OLSH:
-		u.Lsh(y.U.(*Mpint))
-	case ORSH:
-		u.Rsh(y.U.(*Mpint))
-	default:
-		Fatalf("shiftOp: bad operator: %v", op)
-		panic("unreachable")
-	}
-	return Val{U: u}
-}
-
-// setconst rewrites n as an OLITERAL with value v.
-func setconst(n *Node, v Val) {
-	// If constant folding failed, mark n as broken and give up.
-	if v.U == nil {
-		n.Type = nil
-		return
-	}
-
-	// Ensure n.Orig still points to a semantically-equivalent
-	// expression after we rewrite n into a constant.
-	if n.Orig == n {
-		n.Orig = n.sepcopy()
-	}
-
-	*n = Node{
-		Op:      OLITERAL,
-		Pos:     n.Pos,
-		Orig:    n.Orig,
-		Type:    n.Type,
-		Xoffset: BADWIDTH,
-	}
-	n.SetVal(v)
-	if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt {
-		Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt)
-	}
-
-	// Check range.
-	lno := setlineno(n)
-	overflow(v, n.Type)
-	lineno = lno
-
-	if !n.Type.IsUntyped() {
-		switch v.Ctype() {
-		// Truncate precision for non-ideal float.
-		case CTFLT:
-			n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)})
-		// Truncate precision for non-ideal complex.
-		case CTCPLX:
-			n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)})
-		}
-	}
-}
-
-func setboolconst(n *Node, v bool) {
-	setconst(n, Val{U: v})
-}
-
-func setintconst(n *Node, v int64) {
-	u := new(Mpint)
-	u.SetInt64(v)
-	setconst(n, Val{u})
-}
-
-// nodlit returns a new untyped constant with value v.
-func nodlit(v Val) *Node {
-	n := nod(OLITERAL, nil, nil)
-	n.SetVal(v)
-	n.Type = idealType(v.Ctype())
-	return n
-}
-
-func idealType(ct Ctype) *types.Type {
-	switch ct {
-	case CTSTR:
-		return types.UntypedString
-	case CTBOOL:
-		return types.UntypedBool
-	case CTINT:
-		return types.UntypedInt
-	case CTRUNE:
-		return types.UntypedRune
-	case CTFLT:
-		return types.UntypedFloat
-	case CTCPLX:
-		return types.UntypedComplex
-	case CTNIL:
-		return types.Types[TNIL]
-	}
-	Fatalf("unexpected Ctype: %v", ct)
-	return nil
-}
-
-// defaultlit on both nodes simultaneously;
-// if they're both ideal going in they better
-// get the same type going out.
-// force means must assign concrete (non-ideal) type.
-// The results of defaultlit2 MUST be assigned back to l and r, e.g.
-// 	n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
-	if l.Type == nil || r.Type == nil {
-		return l, r
-	}
-	if !l.Type.IsUntyped() {
-		r = convlit(r, l.Type)
-		return l, r
-	}
-
-	if !r.Type.IsUntyped() {
-		l = convlit(l, r.Type)
-		return l, r
-	}
-
-	if !force {
-		return l, r
-	}
-
-	// Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
-	if l.Type.IsBoolean() != r.Type.IsBoolean() {
-		return l, r
-	}
-	if l.Type.IsString() != r.Type.IsString() {
-		return l, r
-	}
-	if l.isNil() || r.isNil() {
-		return l, r
-	}
-
-	t := defaultType(mixUntyped(l.Type, r.Type))
-	l = convlit(l, t)
-	r = convlit(r, t)
-	return l, r
-}
-
-func ctype(t *types.Type) Ctype {
-	switch t {
-	case types.UntypedBool:
-		return CTBOOL
-	case types.UntypedString:
-		return CTSTR
-	case types.UntypedInt:
-		return CTINT
-	case types.UntypedRune:
-		return CTRUNE
-	case types.UntypedFloat:
-		return CTFLT
-	case types.UntypedComplex:
-		return CTCPLX
-	}
-	Fatalf("bad type %v", t)
-	panic("unreachable")
-}
-
-func mixUntyped(t1, t2 *types.Type) *types.Type {
-	t := t1
-	if ctype(t2) > ctype(t1) {
-		t = t2
-	}
-	return t
-}
-
-func defaultType(t *types.Type) *types.Type {
-	if !t.IsUntyped() || t.Etype == TNIL {
-		return t
-	}
-
-	switch t {
-	case types.UntypedBool:
-		return types.Types[TBOOL]
-	case types.UntypedString:
-		return types.Types[TSTRING]
-	case types.UntypedInt:
-		return types.Types[TINT]
-	case types.UntypedRune:
-		return types.Runetype
-	case types.UntypedFloat:
-		return types.Types[TFLOAT64]
-	case types.UntypedComplex:
-		return types.Types[TCOMPLEX128]
-	}
-
-	Fatalf("bad type %v", t)
-	return nil
-}
-
-func smallintconst(n *Node) bool {
-	if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
-		switch simtype[n.Type.Etype] {
-		case TINT8,
-			TUINT8,
-			TINT16,
-			TUINT16,
-			TINT32,
-			TUINT32,
-			TBOOL:
-			return true
-
-		case TIDEAL, TINT64, TUINT64, TPTR:
-			v, ok := n.Val().U.(*Mpint)
-			if ok && v.Cmp(minintval[TINT32]) >= 0 && v.Cmp(maxintval[TINT32]) <= 0 {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// indexconst checks if Node n contains a constant expression
-// representable as a non-negative int and returns its value.
-// If n is not a constant expression, not representable as an
-// integer, or negative, it returns -1. If n is too large, it
-// returns -2.
-func indexconst(n *Node) int64 {
-	if n.Op != OLITERAL {
-		return -1
-	}
-
-	v := toint(n.Val()) // toint returns argument unchanged if not representable as an *Mpint
-	vi, ok := v.U.(*Mpint)
-	if !ok || vi.CmpInt64(0) < 0 {
-		return -1
-	}
-	if vi.Cmp(maxintval[TINT]) > 0 {
-		return -2
-	}
-
-	return vi.Int64()
-}
-
-// isGoConst reports whether n is a Go language constant (as opposed to a
-// compile-time constant).
-//
-// Expressions derived from nil, like string([]byte(nil)), while they
-// may be known at compile time, are not Go language constants.
-func (n *Node) isGoConst() bool {
-	return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
-}
-
-func hascallchan(n *Node) bool {
-	if n == nil {
-		return false
-	}
-	switch n.Op {
-	case OAPPEND,
-		OCALL,
-		OCALLFUNC,
-		OCALLINTER,
-		OCALLMETH,
-		OCAP,
-		OCLOSE,
-		OCOMPLEX,
-		OCOPY,
-		ODELETE,
-		OIMAG,
-		OLEN,
-		OMAKE,
-		ONEW,
-		OPANIC,
-		OPRINT,
-		OPRINTN,
-		OREAL,
-		ORECOVER,
-		ORECV:
-		return true
-	}
-
-	if hascallchan(n.Left) || hascallchan(n.Right) {
-		return true
-	}
-	for _, n1 := range n.List.Slice() {
-		if hascallchan(n1) {
-			return true
-		}
-	}
-	for _, n2 := range n.Rlist.Slice() {
-		if hascallchan(n2) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// A constSet represents a set of Go constant expressions.
-type constSet struct {
-	m map[constSetKey]src.XPos
-}
-
-type constSetKey struct {
-	typ *types.Type
-	val interface{}
-}
-
-// add adds constant expression n to s. If a constant expression of
-// equal value and identical type has already been added, then add
-// reports an error about the duplicate value.
-//
-// pos provides position information for where expression n occurred
-// (in case n does not have its own position information). what and
-// where are used in the error message.
-//
-// n must not be an untyped constant.
-func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
-	if n.Op == OCONVIFACE && n.Implicit() {
-		n = n.Left
-	}
-
-	if !n.isGoConst() {
-		return
-	}
-	if n.Type.IsUntyped() {
-		Fatalf("%v is untyped", n)
-	}
-
-	// Consts are only duplicates if they have the same value and
-	// identical types.
-	//
-	// In general, we have to use types.Identical to test type
-	// identity, because == gives false negatives for anonymous
-	// types and the byte/uint8 and rune/int32 builtin type
-	// aliases.  However, this is not a problem here, because
-	// constant expressions are always untyped or have a named
-	// type, and we explicitly handle the builtin type aliases
-	// below.
-	//
-	// This approach may need to be revisited though if we fix
-	// #21866 by treating all type aliases like byte/uint8 and
-	// rune/int32.
-
-	typ := n.Type
-	switch typ {
-	case types.Bytetype:
-		typ = types.Types[TUINT8]
-	case types.Runetype:
-		typ = types.Types[TINT32]
-	}
-	k := constSetKey{typ, n.Val().Interface()}
-
-	if hasUniquePos(n) {
-		pos = n.Pos
-	}
-
-	if s.m == nil {
-		s.m = make(map[constSetKey]src.XPos)
-	}
-
-	if prevPos, isDup := s.m[k]; isDup {
-		yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
-			what, nodeAndVal(n), where,
-			what, linestr(prevPos))
-	} else {
-		s.m[k] = pos
-	}
-}
-
-// nodeAndVal reports both an expression and its constant value, if
-// the latter is non-obvious.
-//
-// TODO(mdempsky): This could probably be a fmt.go flag.
-func nodeAndVal(n *Node) string {
-	show := n.String()
-	val := n.Val().Interface()
-	if s := fmt.Sprintf("%#v", val); show != s {
-		show += " (value " + s + ")"
-	}
-	return show
-}
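
The deleted const.go folded and converted untyped constants with the compiler's arbitrary-precision Mpint/Mpflt/Mpcplx values (binaryOp, convertVal, truncfltlit), reporting overflow only when a constant is forced into a sized type. A rough sketch of the same idea using the standard go/constant package, which models untyped constants with the same exact-arithmetic semantics (an illustration only, not the code that replaces this file):

	package main

	import (
		"fmt"
		"go/constant"
		"go/token"
	)

	func main() {
		// Arbitrary-precision untyped constants, folded the way
		// binaryOp/convertVal handled Mpint/Mpflt values.
		x := constant.MakeFromLiteral("1", token.INT, 0)
		y := constant.MakeFromLiteral("3", token.INT, 0)

		sum := constant.BinaryOp(x, token.ADD, y) // 1 + 3 = 4, exact
		quo := constant.BinaryOp(x, token.QUO, y) // exact rational 1/3, not truncated
		fmt.Println(sum, constant.ToFloat(quo))

		// Truncation to a concrete width happens only at conversion time,
		// analogous to truncfltlit above.
		f, _ := constant.Float64Val(quo)
		fmt.Printf("%.17g\n", f)
	}
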
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
deleted file mode 100644
index 6e90eb4..0000000
--- a/src/cmd/compile/internal/gc/dcl.go
+++ /dev/null
@@ -1,1185 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"cmd/internal/src"
-	"fmt"
-	"strings"
-)
-
-// Declaration stack & operations
-
-var externdcl []*Node
-
-func testdclstack() {
-	if !types.IsDclstackValid() {
-		if nerrors != 0 {
-			errorexit()
-		}
-		Fatalf("mark left on the dclstack")
-	}
-}
-
-// redeclare emits a diagnostic about symbol s being redeclared at pos.
-func redeclare(pos src.XPos, s *types.Sym, where string) {
-	if !s.Lastlineno.IsKnown() {
-		pkg := s.Origpkg
-		if pkg == nil {
-			pkg = s.Pkg
-		}
-		yyerrorl(pos, "%v redeclared %s\n"+
-			"\tprevious declaration during import %q", s, where, pkg.Path)
-	} else {
-		prevPos := s.Lastlineno
-
-		// When an import and a declaration collide in separate files,
-		// present the import as the "redeclared", because the declaration
-		// is visible where the import is, but not vice versa.
-		// See issue 4510.
-		if s.Def == nil {
-			pos, prevPos = prevPos, pos
-		}
-
-		yyerrorl(pos, "%v redeclared %s\n"+
-			"\tprevious declaration at %v", s, where, linestr(prevPos))
-	}
-}
-
-var vargen int
-
-// declare individual names - var, typ, const
-
-var declare_typegen int
-
-// declare records that Node n declares symbol n.Sym in the specified
-// declaration context.
-func declare(n *Node, ctxt Class) {
-	if n.isBlank() {
-		return
-	}
-
-	if n.Name == nil {
-		// named OLITERAL needs Name; most OLITERALs don't.
-		n.Name = new(Name)
-	}
-
-	s := n.Sym
-
-	// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
-	if !inimport && !typecheckok && s.Pkg != localpkg {
-		yyerrorl(n.Pos, "cannot declare name %v", s)
-	}
-
-	gen := 0
-	if ctxt == PEXTERN {
-		if s.Name == "init" {
-			yyerrorl(n.Pos, "cannot declare init - must be func")
-		}
-		if s.Name == "main" && s.Pkg.Name == "main" {
-			yyerrorl(n.Pos, "cannot declare main - must be func")
-		}
-		externdcl = append(externdcl, n)
-	} else {
-		if Curfn == nil && ctxt == PAUTO {
-			lineno = n.Pos
-			Fatalf("automatic outside function")
-		}
-		if Curfn != nil && ctxt != PFUNC {
-			Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-		}
-		if n.Op == OTYPE {
-			declare_typegen++
-			gen = declare_typegen
-		} else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
-			vargen++
-			gen = vargen
-		}
-		types.Pushdcl(s)
-		n.Name.Curfn = Curfn
-	}
-
-	if ctxt == PAUTO {
-		n.Xoffset = 0
-	}
-
-	if s.Block == types.Block {
-		// functype will print errors about duplicate function arguments.
-		// Don't repeat the error here.
-		if ctxt != PPARAM && ctxt != PPARAMOUT {
-			redeclare(n.Pos, s, "in this block")
-		}
-	}
-
-	s.Block = types.Block
-	s.Lastlineno = lineno
-	s.Def = asTypesNode(n)
-	n.Name.Vargen = int32(gen)
-	n.SetClass(ctxt)
-	if ctxt == PFUNC {
-		n.Sym.SetFunc(true)
-	}
-
-	autoexport(n, ctxt)
-}
-
-func addvar(n *Node, t *types.Type, ctxt Class) {
-	if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
-		Fatalf("addvar: n=%v t=%v nil", n, t)
-	}
-
-	n.Op = ONAME
-	declare(n, ctxt)
-	n.Type = t
-}
-
-// declare variables from grammar
-// new_name_list (type | [type] = expr_list)
-func variter(vl []*Node, t *Node, el []*Node) []*Node {
-	var init []*Node
-	doexpr := len(el) > 0
-
-	if len(el) == 1 && len(vl) > 1 {
-		e := el[0]
-		as2 := nod(OAS2, nil, nil)
-		as2.List.Set(vl)
-		as2.Rlist.Set1(e)
-		for _, v := range vl {
-			v.Op = ONAME
-			declare(v, dclcontext)
-			v.Name.Param.Ntype = t
-			v.Name.Defn = as2
-			if Curfn != nil {
-				init = append(init, nod(ODCL, v, nil))
-			}
-		}
-
-		return append(init, as2)
-	}
-
-	nel := len(el)
-	for _, v := range vl {
-		var e *Node
-		if doexpr {
-			if len(el) == 0 {
-				yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
-				break
-			}
-			e = el[0]
-			el = el[1:]
-		}
-
-		v.Op = ONAME
-		declare(v, dclcontext)
-		v.Name.Param.Ntype = t
-
-		if e != nil || Curfn != nil || v.isBlank() {
-			if Curfn != nil {
-				init = append(init, nod(ODCL, v, nil))
-			}
-			e = nod(OAS, v, e)
-			init = append(init, e)
-			if e.Right != nil {
-				v.Name.Defn = e
-			}
-		}
-	}
-
-	if len(el) != 0 {
-		yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
-	}
-	return init
-}
-
-// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *types.Sym) *Node {
-	if s == nil {
-		Fatalf("newnoname nil")
-	}
-	n := nod(ONONAME, nil, nil)
-	n.Sym = s
-	n.Xoffset = 0
-	return n
-}
-
-// newfuncnamel generates a new name node for a function or method.
-// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
-func newfuncnamel(pos src.XPos, s *types.Sym) *Node {
-	n := newnamel(pos, s)
-	n.Func = new(Func)
-	n.Func.SetIsHiddenClosure(Curfn != nil)
-	return n
-}
-
-// this generates a new name node for a name
-// being declared.
-func dclname(s *types.Sym) *Node {
-	n := newname(s)
-	n.Op = ONONAME // caller will correct it
-	return n
-}
-
-func typenod(t *types.Type) *Node {
-	return typenodl(src.NoXPos, t)
-}
-
-func typenodl(pos src.XPos, t *types.Type) *Node {
-	// if we copied another type with *t = *u
-	// then t->nod might be out of date, so
-	// check t->nod->type too
-	if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
-		t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil))
-		asNode(t.Nod).Type = t
-		asNode(t.Nod).Sym = t.Sym
-	}
-
-	return asNode(t.Nod)
-}
-
-func anonfield(typ *types.Type) *Node {
-	return symfield(nil, typ)
-}
-
-func namedfield(s string, typ *types.Type) *Node {
-	return symfield(lookup(s), typ)
-}
-
-func symfield(s *types.Sym, typ *types.Type) *Node {
-	n := nodSym(ODCLFIELD, nil, s)
-	n.Type = typ
-	return n
-}
-
-// oldname returns the Node that declares symbol s in the current scope.
-// If no such Node currently exists, an ONONAME Node is returned instead.
-// Automatically creates a new closure variable if the referenced symbol was
-// declared in a different (containing) function.
-func oldname(s *types.Sym) *Node {
-	n := asNode(s.Def)
-	if n == nil {
-		// Maybe a top-level declaration will come along later to
-		// define s. resolve will check s.Def again once all input
-		// source has been processed.
-		return newnoname(s)
-	}
-
-	if Curfn != nil && n.Op == ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn {
-		// Inner func is referring to var in outer func.
-		//
-		// TODO(rsc): If there is an outer variable x and we
-		// are parsing x := 5 inside the closure, until we get to
-		// the := it looks like a reference to the outer x so we'll
-		// make x a closure variable unnecessarily.
-		c := n.Name.Param.Innermost
-		if c == nil || c.Name.Curfn != Curfn {
-			// Do not have a closure var for the active closure yet; make one.
-			c = newname(s)
-			c.SetClass(PAUTOHEAP)
-			c.Name.SetIsClosureVar(true)
-			c.SetIsDDD(n.IsDDD())
-			c.Name.Defn = n
-
-			// Link into list of active closure variables.
-			// Popped from list in func funcLit.
-			c.Name.Param.Outer = n.Name.Param.Innermost
-			n.Name.Param.Innermost = c
-
-			Curfn.Func.Cvars.Append(c)
-		}
-
-		// return ref to closure var, not original
-		return c
-	}
-
-	return n
-}
-
-// importName is like oldname, but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) *Node {
-	n := oldname(sym)
-	if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
-		n.SetDiag(true)
-		yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
-	}
-	return n
-}
-
-// := declarations
-func colasname(n *Node) bool {
-	switch n.Op {
-	case ONAME,
-		ONONAME,
-		OPACK,
-		OTYPE,
-		OLITERAL:
-		return n.Sym != nil
-	}
-
-	return false
-}
-
-func colasdefn(left []*Node, defn *Node) {
-	for _, n := range left {
-		if n.Sym != nil {
-			n.Sym.SetUniq(true)
-		}
-	}
-
-	var nnew, nerr int
-	for i, n := range left {
-		if n.isBlank() {
-			continue
-		}
-		if !colasname(n) {
-			yyerrorl(defn.Pos, "non-name %v on left side of :=", n)
-			nerr++
-			continue
-		}
-
-		if !n.Sym.Uniq() {
-			yyerrorl(defn.Pos, "%v repeated on left side of :=", n.Sym)
-			n.SetDiag(true)
-			nerr++
-			continue
-		}
-
-		n.Sym.SetUniq(false)
-		if n.Sym.Block == types.Block {
-			continue
-		}
-
-		nnew++
-		n = newname(n.Sym)
-		declare(n, dclcontext)
-		n.Name.Defn = defn
-		defn.Ninit.Append(nod(ODCL, n, nil))
-		left[i] = n
-	}
-
-	if nnew == 0 && nerr == 0 {
-		yyerrorl(defn.Pos, "no new variables on left side of :=")
-	}
-}
-
-// declare the arguments in an
-// interface field declaration.
-func ifacedcl(n *Node) {
-	if n.Op != ODCLFIELD || n.Left == nil {
-		Fatalf("ifacedcl")
-	}
-
-	if n.Sym.IsBlank() {
-		yyerror("methods must have a unique non-blank name")
-	}
-}
-
-// declare the function proper
-// and declare the arguments.
-// called in extern-declaration context
-// returns in auto-declaration context.
-func funchdr(n *Node) {
-	// change the declaration context from extern to auto
-	funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
-	Curfn = n
-	dclcontext = PAUTO
-
-	types.Markdcl()
-
-	if n.Func.Nname != nil {
-		funcargs(n.Func.Nname.Name.Param.Ntype)
-	} else if n.Func.Ntype != nil {
-		funcargs(n.Func.Ntype)
-	} else {
-		funcargs2(n.Type)
-	}
-}
-
-func funcargs(nt *Node) {
-	if nt.Op != OTFUNC {
-		Fatalf("funcargs %v", nt.Op)
-	}
-
-	// re-start the variable generation number
-	// we want to use small numbers for the return variables,
-	// so let them have the chunk starting at 1.
-	//
-	// TODO(mdempsky): This is ugly, and only necessary because
-	// esc.go uses Vargen to figure out result parameters' index
-	// within the result tuple.
-	vargen = nt.Rlist.Len()
-
-	// declare the receiver and in arguments.
-	if nt.Left != nil {
-		funcarg(nt.Left, PPARAM)
-	}
-	for _, n := range nt.List.Slice() {
-		funcarg(n, PPARAM)
-	}
-
-	oldvargen := vargen
-	vargen = 0
-
-	// declare the out arguments.
-	gen := nt.List.Len()
-	for _, n := range nt.Rlist.Slice() {
-		if n.Sym == nil {
-			// Name so that escape analysis can track it. ~r stands for 'result'.
-			n.Sym = lookupN("~r", gen)
-			gen++
-		}
-		if n.Sym.IsBlank() {
-			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
-			// The name must be different from ~r above because if you have
-			//	func f() (_ int)
-			//	func g() int
-			// f is allowed to use a plain 'return' with no arguments, while g is not.
-			// So the two cases must be distinguished.
-			n.Sym = lookupN("~b", gen)
-			gen++
-		}
-
-		funcarg(n, PPARAMOUT)
-	}
-
-	vargen = oldvargen
-}
-
-func funcarg(n *Node, ctxt Class) {
-	if n.Op != ODCLFIELD {
-		Fatalf("funcarg %v", n.Op)
-	}
-	if n.Sym == nil {
-		return
-	}
-
-	n.Right = newnamel(n.Pos, n.Sym)
-	n.Right.Name.Param.Ntype = n.Left
-	n.Right.SetIsDDD(n.IsDDD())
-	declare(n.Right, ctxt)
-
-	vargen++
-	n.Right.Name.Vargen = int32(vargen)
-}
-
-// Same as funcargs, except run over an already constructed TFUNC.
-// This happens during import, where the hidden_fndcl rule has
-// used functype directly to parse the function's type.
-func funcargs2(t *types.Type) {
-	if t.Etype != TFUNC {
-		Fatalf("funcargs2 %v", t)
-	}
-
-	for _, f := range t.Recvs().Fields().Slice() {
-		funcarg2(f, PPARAM)
-	}
-	for _, f := range t.Params().Fields().Slice() {
-		funcarg2(f, PPARAM)
-	}
-	for _, f := range t.Results().Fields().Slice() {
-		funcarg2(f, PPARAMOUT)
-	}
-}
-
-func funcarg2(f *types.Field, ctxt Class) {
-	if f.Sym == nil {
-		return
-	}
-	n := newnamel(f.Pos, f.Sym)
-	f.Nname = asTypesNode(n)
-	n.Type = f.Type
-	n.SetIsDDD(f.IsDDD())
-	declare(n, ctxt)
-}
-
-var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
-
-type funcStackEnt struct {
-	curfn      *Node
-	dclcontext Class
-}
-
-// finish the body.
-// called in auto-declaration context.
-// returns in extern-declaration context.
-func funcbody() {
-	// change the declaration context from auto to previous context
-	types.Popdcl()
-	var e funcStackEnt
-	funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
-	Curfn, dclcontext = e.curfn, e.dclcontext
-}
-
-// structs, functions, and methods.
-// they don't belong here, but where do they belong?
-func checkembeddedtype(t *types.Type) {
-	if t == nil {
-		return
-	}
-
-	if t.Sym == nil && t.IsPtr() {
-		t = t.Elem()
-		if t.IsInterface() {
-			yyerror("embedded type cannot be a pointer to interface")
-		}
-	}
-
-	if t.IsPtr() || t.IsUnsafePtr() {
-		yyerror("embedded type cannot be a pointer")
-	} else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() {
-		t.ForwardType().Embedlineno = lineno
-	}
-}
-
-func structfield(n *Node) *types.Field {
-	lno := lineno
-	lineno = n.Pos
-
-	if n.Op != ODCLFIELD {
-		Fatalf("structfield: oops %v\n", n)
-	}
-
-	f := types.NewField()
-	f.Pos = n.Pos
-	f.Sym = n.Sym
-
-	if n.Left != nil {
-		n.Left = typecheck(n.Left, ctxType)
-		n.Type = n.Left.Type
-		n.Left = nil
-	}
-
-	f.Type = n.Type
-	if f.Type == nil {
-		f.SetBroke(true)
-	}
-
-	if n.Embedded() {
-		checkembeddedtype(n.Type)
-		f.Embedded = 1
-	} else {
-		f.Embedded = 0
-	}
-
-	switch u := n.Val().U.(type) {
-	case string:
-		f.Note = u
-	default:
-		yyerror("field tag must be a string")
-	case nil:
-		// no-op
-	}
-
-	lineno = lno
-	return f
-}
-
-// checkdupfields emits errors for duplicately named fields or methods in
-// a list of struct or interface types.
-func checkdupfields(what string, fss ...[]*types.Field) {
-	seen := make(map[*types.Sym]bool)
-	for _, fs := range fss {
-		for _, f := range fs {
-			if f.Sym == nil || f.Sym.IsBlank() {
-				continue
-			}
-			if seen[f.Sym] {
-				yyerrorl(f.Pos, "duplicate %s %s", what, f.Sym.Name)
-				continue
-			}
-			seen[f.Sym] = true
-		}
-	}
-}
-
-// convert a parsed id/type list into
-// a type for struct/interface/arglist
-func tostruct(l []*Node) *types.Type {
-	t := types.New(TSTRUCT)
-
-	fields := make([]*types.Field, len(l))
-	for i, n := range l {
-		f := structfield(n)
-		if f.Broke() {
-			t.SetBroke(true)
-		}
-		fields[i] = f
-	}
-	t.SetFields(fields)
-
-	checkdupfields("field", t.FieldSlice())
-
-	if !t.Broke() {
-		checkwidth(t)
-	}
-
-	return t
-}
-
-func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
-	t := types.New(TSTRUCT)
-	t.StructType().Funarg = funarg
-
-	fields := make([]*types.Field, len(l))
-	for i, n := range l {
-		f := structfield(n)
-		f.SetIsDDD(n.IsDDD())
-		if n.Right != nil {
-			n.Right.Type = f.Type
-			f.Nname = asTypesNode(n.Right)
-		}
-		if f.Broke() {
-			t.SetBroke(true)
-		}
-		fields[i] = f
-	}
-	t.SetFields(fields)
-	return t
-}
-
-func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
-	t := types.New(TSTRUCT)
-	t.StructType().Funarg = funarg
-	t.SetFields(fields)
-	return t
-}
-
-func interfacefield(n *Node) *types.Field {
-	lno := lineno
-	lineno = n.Pos
-
-	if n.Op != ODCLFIELD {
-		Fatalf("interfacefield: oops %v\n", n)
-	}
-
-	if n.Val().Ctype() != CTxxx {
-		yyerror("interface method cannot have annotation")
-	}
-
-	// MethodSpec = MethodName Signature | InterfaceTypeName .
-	//
-	// If Sym != nil, then Sym is MethodName and Left is Signature.
-	// Otherwise, Left is InterfaceTypeName.
-
-	if n.Left != nil {
-		n.Left = typecheck(n.Left, ctxType)
-		n.Type = n.Left.Type
-		n.Left = nil
-	}
-
-	f := types.NewField()
-	f.Pos = n.Pos
-	f.Sym = n.Sym
-	f.Type = n.Type
-	if f.Type == nil {
-		f.SetBroke(true)
-	}
-
-	lineno = lno
-	return f
-}
-
-func tointerface(l []*Node) *types.Type {
-	if len(l) == 0 {
-		return types.Types[TINTER]
-	}
-	t := types.New(TINTER)
-	var fields []*types.Field
-	for _, n := range l {
-		f := interfacefield(n)
-		if f.Broke() {
-			t.SetBroke(true)
-		}
-		fields = append(fields, f)
-	}
-	t.SetInterface(fields)
-	return t
-}
-
-func fakeRecv() *Node {
-	return anonfield(types.FakeRecvType())
-}
-
-func fakeRecvField() *types.Field {
-	f := types.NewField()
-	f.Type = types.FakeRecvType()
-	return f
-}
-
-// isifacemethod reports whether (field) m is
-// an interface method. Such methods have the
-// special receiver type types.FakeRecvType().
-func isifacemethod(f *types.Type) bool {
-	return f.Recv().Type == types.FakeRecvType()
-}
-
-// turn a parsed function declaration into a type
-func functype(this *Node, in, out []*Node) *types.Type {
-	t := types.New(TFUNC)
-
-	var rcvr []*Node
-	if this != nil {
-		rcvr = []*Node{this}
-	}
-	t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
-	t.FuncType().Params = tofunargs(in, types.FunargParams)
-	t.FuncType().Results = tofunargs(out, types.FunargResults)
-
-	checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
-
-	if t.Recvs().Broke() || t.Results().Broke() || t.Params().Broke() {
-		t.SetBroke(true)
-	}
-
-	t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
-
-	return t
-}
-
-func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
-	t := types.New(TFUNC)
-
-	var rcvr []*types.Field
-	if this != nil {
-		rcvr = []*types.Field{this}
-	}
-	t.FuncType().Receiver = tofunargsfield(rcvr, types.FunargRcvr)
-	t.FuncType().Params = tofunargsfield(in, types.FunargParams)
-	t.FuncType().Results = tofunargsfield(out, types.FunargResults)
-
-	t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
-
-	return t
-}
-
-// origSym returns the original symbol written by the user.
-func origSym(s *types.Sym) *types.Sym {
-	if s == nil {
-		return nil
-	}
-
-	if len(s.Name) > 1 && s.Name[0] == '~' {
-		switch s.Name[1] {
-		case 'r': // originally an unnamed result
-			return nil
-		case 'b': // originally the blank identifier _
-			// TODO(mdempsky): Does s.Pkg matter here?
-			return nblank.Sym
-		}
-		return s
-	}
-
-	if strings.HasPrefix(s.Name, ".anon") {
-		// originally an unnamed or _ name (see subr.go: structargs)
-		return nil
-	}
-
-	return s
-}
-
-// methodSym returns the method symbol representing a method name
-// associated with a specific receiver type.
-//
-// Method symbols can be used to distinguish the same method appearing
-// in different method sets. For example, T.M and (*T).M have distinct
-// method symbols.
-//
-// The returned symbol will be marked as a function.
-func methodSym(recv *types.Type, msym *types.Sym) *types.Sym {
-	sym := methodSymSuffix(recv, msym, "")
-	sym.SetFunc(true)
-	return sym
-}
-
-// methodSymSuffix is like methodsym, but allows attaching a
-// distinguisher suffix. To avoid collisions, the suffix must not
-// start with a letter, number, or period.
-func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
-	if msym.IsBlank() {
-		Fatalf("blank method name")
-	}
-
-	rsym := recv.Sym
-	if recv.IsPtr() {
-		if rsym != nil {
-			Fatalf("declared pointer receiver type: %v", recv)
-		}
-		rsym = recv.Elem().Sym
-	}
-
-	// Find the package the receiver type appeared in. For
-	// anonymous receiver types (i.e., anonymous structs with
-	// embedded fields), use the "go" pseudo-package instead.
-	rpkg := gopkg
-	if rsym != nil {
-		rpkg = rsym.Pkg
-	}
-
-	var b bytes.Buffer
-	if recv.IsPtr() {
-		// The parentheses aren't really necessary, but
-		// they're pretty traditional at this point.
-		fmt.Fprintf(&b, "(%-S)", recv)
-	} else {
-		fmt.Fprintf(&b, "%-S", recv)
-	}
-
-	// A particular receiver type may have multiple non-exported
-	// methods with the same name. To disambiguate them, include a
-	// package qualifier for names that came from a different
-	// package than the receiver type.
-	if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
-		b.WriteString(".")
-		b.WriteString(msym.Pkg.Prefix)
-	}
-
-	b.WriteString(".")
-	b.WriteString(msym.Name)
-	b.WriteString(suffix)
-
-	return rpkg.LookupBytes(b.Bytes())
-}
-
-// Add a method, declared as a function.
-// - msym is the method symbol
-// - t is function type (with receiver)
-// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
-	if msym == nil {
-		Fatalf("no method symbol")
-	}
-
-	// get parent type sym
-	rf := t.Recv() // ptr to this structure
-	if rf == nil {
-		yyerror("missing receiver")
-		return nil
-	}
-
-	mt := methtype(rf.Type)
-	if mt == nil || mt.Sym == nil {
-		pa := rf.Type
-		t := pa
-		if t != nil && t.IsPtr() {
-			if t.Sym != nil {
-				yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
-				return nil
-			}
-			t = t.Elem()
-		}
-
-		switch {
-		case t == nil || t.Broke():
-			// rely on typecheck having complained before
-		case t.Sym == nil:
-			yyerror("invalid receiver type %v (%v is not a defined type)", pa, t)
-		case t.IsPtr():
-			yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
-		case t.IsInterface():
-			yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
-		default:
-			// Should have picked off all the reasons above,
-			// but just in case, fall back to generic error.
-			yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
-		}
-		return nil
-	}
-
-	if local && mt.Sym.Pkg != localpkg {
-		yyerror("cannot define new methods on non-local type %v", mt)
-		return nil
-	}
-
-	if msym.IsBlank() {
-		return nil
-	}
-
-	if mt.IsStruct() {
-		for _, f := range mt.Fields().Slice() {
-			if f.Sym == msym {
-				yyerror("type %v has both field and method named %v", mt, msym)
-				f.SetBroke(true)
-				return nil
-			}
-		}
-	}
-
-	for _, f := range mt.Methods().Slice() {
-		if msym.Name != f.Sym.Name {
-			continue
-		}
-		// types.Identical only checks that incoming and result parameters match,
-		// so explicitly check that the receiver parameters match too.
-		if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
-			yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
-		}
-		return f
-	}
-
-	f := types.NewField()
-	f.Pos = lineno
-	f.Sym = msym
-	f.Type = t
-	f.SetNointerface(nointerface)
-
-	mt.Methods().Append(f)
-	return f
-}
-
-func funcsymname(s *types.Sym) string {
-	return s.Name + "·f"
-}
-
-// funcsym returns s·f.
-func funcsym(s *types.Sym) *types.Sym {
-	// funcsymsmu here serves to protect not just mutations of funcsyms (below),
-	// but also the package lookup of the func sym name,
-	// since this function gets called concurrently from the backend.
-	// There are no other concurrent package lookups in the backend,
-	// except for the types package, which is protected separately.
-	// Reusing funcsymsmu to also cover this package lookup
-	// avoids a general, broader, expensive package lookup mutex.
-	// Note makefuncsym also does package look-up of func sym names,
-	// but that it is only called serially, from the front end.
-	funcsymsmu.Lock()
-	sf, existed := s.Pkg.LookupOK(funcsymname(s))
-	// Don't export s·f when compiling for dynamic linking.
-	// When dynamically linking, the necessary function
-	// symbols will be created explicitly with makefuncsym.
-	// See the makefuncsym comment for details.
-	if !Ctxt.Flag_dynlink && !existed {
-		funcsyms = append(funcsyms, s)
-	}
-	funcsymsmu.Unlock()
-	return sf
-}
-
-// makefuncsym ensures that s·f is exported.
-// It is only used with -dynlink.
-// When not compiling for dynamic linking,
-// the funcsyms are created as needed by
-// the packages that use them.
-// Normally we emit the s·f stubs as DUPOK syms,
-// but DUPOK doesn't work across shared library boundaries.
-// So instead, when dynamic linking, we only create
-// the s·f stubs in s's package.
-func makefuncsym(s *types.Sym) {
-	if !Ctxt.Flag_dynlink {
-		Fatalf("makefuncsym dynlink")
-	}
-	if s.IsBlank() {
-		return
-	}
-	if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
-		// runtime.getg(), getclosureptr(), getcallerpc(), and
-		// getcallersp() are not real functions and so do not
-		// get funcsyms.
-		return
-	}
-	if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
-		funcsyms = append(funcsyms, s)
-	}
-}
-
-// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *Node) {
-	if n.Op != ONAME || n.Class() != Pxxx {
-		Fatalf("expected ONAME/Pxxx node, got %v", n)
-	}
-
-	n.SetClass(PFUNC)
-	n.Sym.SetFunc(true)
-}
-
-func dclfunc(sym *types.Sym, tfn *Node) *Node {
-	if tfn.Op != OTFUNC {
-		Fatalf("expected OTFUNC node, got %v", tfn)
-	}
-
-	fn := nod(ODCLFUNC, nil, nil)
-	fn.Func.Nname = newfuncnamel(lineno, sym)
-	fn.Func.Nname.Name.Defn = fn
-	fn.Func.Nname.Name.Param.Ntype = tfn
-	setNodeNameFunc(fn.Func.Nname)
-	funchdr(fn)
-	fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
-	return fn
-}
-
-type nowritebarrierrecChecker struct {
-	// extraCalls contains extra function calls that may not be
-	// visible during later analysis. It maps from the ODCLFUNC of
-	// the caller to a list of callees.
-	extraCalls map[*Node][]nowritebarrierrecCall
-
-	// curfn is the current function during AST walks.
-	curfn *Node
-}
-
-type nowritebarrierrecCall struct {
-	target *Node    // ODCLFUNC of caller or callee
-	lineno src.XPos // line of call
-}
-
-type nowritebarrierrecCallSym struct {
-	target *obj.LSym // LSym of callee
-	lineno src.XPos  // line of call
-}
-
-// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
-// must be called before transformclosure and walk.
-func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
-	c := &nowritebarrierrecChecker{
-		extraCalls: make(map[*Node][]nowritebarrierrecCall),
-	}
-
-	// Find all systemstack calls and record their targets. In
-	// general, flow analysis can't see into systemstack, but it's
-	// important to handle it for this check, so we model it
-	// directly. This has to happen before transformclosure since
-	// it's a lot harder to work out the argument after.
-	for _, n := range xtop {
-		if n.Op != ODCLFUNC {
-			continue
-		}
-		c.curfn = n
-		inspect(n, c.findExtraCalls)
-	}
-	c.curfn = nil
-	return c
-}
-
-func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
-	if n.Op != OCALLFUNC {
-		return true
-	}
-	fn := n.Left
-	if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil {
-		return true
-	}
-	if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
-		return true
-	}
-
-	var callee *Node
-	arg := n.List.First()
-	switch arg.Op {
-	case ONAME:
-		callee = arg.Name.Defn
-	case OCLOSURE:
-		callee = arg.Func.Closure
-	default:
-		Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
-	}
-	if callee.Op != ODCLFUNC {
-		Fatalf("expected ODCLFUNC node, got %+v", callee)
-	}
-	c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
-	return true
-}
-
-// recordCall records a call from ODCLFUNC node "from", to function
-// symbol "to" at position pos.
-//
-// This should be done as late as possible during compilation to
-// capture precise call graphs. The target of the call is an LSym
-// because that's all we know after we start SSA.
-//
-// This can be called concurrently for different "from" Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) {
-	if from.Op != ODCLFUNC {
-		Fatalf("expected ODCLFUNC, got %v", from)
-	}
-	// We record this information on the *Func so this is
-	// concurrent-safe.
-	fn := from.Func
-	if fn.nwbrCalls == nil {
-		fn.nwbrCalls = new([]nowritebarrierrecCallSym)
-	}
-	*fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos})
-}
-
-func (c *nowritebarrierrecChecker) check() {
-	// We walk the call graph as late as possible so we can
-	// capture all calls created by lowering, but this means we
-	// only get to see the obj.LSyms of calls. symToFunc lets us
-	// get back to the ODCLFUNCs.
-	symToFunc := make(map[*obj.LSym]*Node)
-	// funcs records the back-edges of the BFS call graph walk. It
-	// maps from the ODCLFUNC of each function that must not have
-	// write barriers to the call that inhibits them. Functions
-	// that are directly marked go:nowritebarrierrec are in this
-	// map with a zero-valued nowritebarrierrecCall. This also
-	// acts as the set of marks for the BFS of the call graph.
-	funcs := make(map[*Node]nowritebarrierrecCall)
-	// q is the queue of ODCLFUNC Nodes to visit in BFS order.
-	var q nodeQueue
-
-	for _, n := range xtop {
-		if n.Op != ODCLFUNC {
-			continue
-		}
-
-		symToFunc[n.Func.lsym] = n
-
-		// Make nowritebarrierrec functions BFS roots.
-		if n.Func.Pragma&Nowritebarrierrec != 0 {
-			funcs[n] = nowritebarrierrecCall{}
-			q.pushRight(n)
-		}
-		// Check go:nowritebarrier functions.
-		if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
-			yyerrorl(n.Func.WBPos, "write barrier prohibited")
-		}
-	}
-
-	// Perform a BFS of the call graph from all
-	// go:nowritebarrierrec functions.
-	enqueue := func(src, target *Node, pos src.XPos) {
-		if target.Func.Pragma&Yeswritebarrierrec != 0 {
-			// Don't flow into this function.
-			return
-		}
-		if _, ok := funcs[target]; ok {
-			// Already found a path to target.
-			return
-		}
-
-		// Record the path.
-		funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
-		q.pushRight(target)
-	}
-	for !q.empty() {
-		fn := q.popLeft()
-
-		// Check fn.
-		if fn.Func.WBPos.IsKnown() {
-			var err bytes.Buffer
-			call := funcs[fn]
-			for call.target != nil {
-				fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname)
-				call = funcs[call.target]
-			}
-			yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
-			continue
-		}
-
-		// Enqueue fn's calls.
-		for _, callee := range c.extraCalls[fn] {
-			enqueue(fn, callee.target, callee.lineno)
-		}
-		if fn.Func.nwbrCalls == nil {
-			continue
-		}
-		for _, callee := range *fn.Func.nwbrCalls {
-			target := symToFunc[callee.target]
-			if target != nil {
-				enqueue(fn, target, callee.lineno)
-			}
-		}
-	}
-}
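The check above is a breadth-first walk of the call graph from every go:nowritebarrierrec root, with a back-edge map used to reconstruct the offending call chain for the error message. As a hedged illustration only (a hypothetical fn type, no Yeswritebarrierrec handling, not the compiler's own types), the core idea can be sketched like this:

package main

import "fmt"

// fn is a hypothetical stand-in for an ODCLFUNC: a name, its callees,
// and optionally a position where a write barrier was emitted.
type fn struct {
	name    string
	callees []*fn
	wbPos   string // "" if no write barrier was emitted
}

// checkNoWB walks the call graph breadth-first from the given roots and
// reports the first reachable function that emitted a write barrier,
// using a back-edge map (which doubles as the visited set) to rebuild
// the call chain, as in the check above.
func checkNoWB(roots []*fn) error {
	from := map[*fn]*fn{}
	var queue []*fn
	for _, r := range roots {
		from[r] = nil
		queue = append(queue, r)
	}
	for len(queue) > 0 {
		f := queue[0]
		queue = queue[1:]
		if f.wbPos != "" {
			msg := fmt.Sprintf("write barrier prohibited at %s in %s", f.wbPos, f.name)
			for caller := from[f]; caller != nil; caller = from[caller] {
				msg += fmt.Sprintf("\n\tcalled by %s", caller.name)
			}
			return fmt.Errorf("%s", msg)
		}
		for _, callee := range f.callees {
			if _, seen := from[callee]; !seen {
				from[callee] = f
				queue = append(queue, callee)
			}
		}
	}
	return nil
}

func main() {
	barrier := &fn{name: "hasBarrier", wbPos: "x.go:10"} // hypothetical names
	root := &fn{name: "root", callees: []*fn{barrier}}
	fmt.Println(checkNoWB([]*fn{root}))
}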
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
deleted file mode 100644
index f45796c..0000000
--- a/src/cmd/compile/internal/gc/embed.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/syntax"
-	"cmd/compile/internal/types"
-	"cmd/internal/obj"
-	"encoding/json"
-	"io/ioutil"
-	"log"
-	"path"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-var embedlist []*Node
-
-var embedCfg struct {
-	Patterns map[string][]string
-	Files    map[string]string
-}
-
-func readEmbedCfg(file string) {
-	data, err := ioutil.ReadFile(file)
-	if err != nil {
-		log.Fatalf("-embedcfg: %v", err)
-	}
-	if err := json.Unmarshal(data, &embedCfg); err != nil {
-		log.Fatalf("%s: %v", file, err)
-	}
-	if embedCfg.Patterns == nil {
-		log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
-	}
-	if embedCfg.Files == nil {
-		log.Fatalf("%s: invalid embedcfg: missing Files", file)
-	}
-}
-
-const (
-	embedUnknown = iota
-	embedBytes
-	embedString
-	embedFiles
-)
-
-func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) {
-	haveEmbed := false
-	for _, decl := range p.file.DeclList {
-		imp, ok := decl.(*syntax.ImportDecl)
-		if !ok {
-			// imports always come first
-			break
-		}
-		path, _ := strconv.Unquote(imp.Path.Value)
-		if path == "embed" {
-			haveEmbed = true
-			break
-		}
-	}
-
-	pos := embeds[0].Pos
-	if !haveEmbed {
-		p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
-		return
-	}
-	if len(names) > 1 {
-		p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
-		return
-	}
-	if len(exprs) > 0 {
-		p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
-		return
-	}
-	if typ == nil {
-		// Should not happen, since len(exprs) == 0 now.
-		p.yyerrorpos(pos, "go:embed cannot apply to var without type")
-		return
-	}
-	if dclcontext != PEXTERN {
-		p.yyerrorpos(pos, "go:embed cannot apply to var inside func")
-		return
-	}
-
-	var list []irEmbed
-	for _, e := range embeds {
-		list = append(list, irEmbed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
-	}
-	v := names[0]
-	v.Name.Param.SetEmbedList(list)
-	embedlist = append(embedlist, v)
-}
-
-func embedFileList(v *Node, kind int) []string {
-	// Build list of files to store.
-	have := make(map[string]bool)
-	var list []string
-	for _, e := range v.Name.Param.EmbedList() {
-		for _, pattern := range e.Patterns {
-			files, ok := embedCfg.Patterns[pattern]
-			if !ok {
-				yyerrorl(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
-			}
-			for _, file := range files {
-				if embedCfg.Files[file] == "" {
-					yyerrorl(e.Pos, "invalid go:embed: build system did not map file: %s", file)
-					continue
-				}
-				if !have[file] {
-					have[file] = true
-					list = append(list, file)
-				}
-				if kind == embedFiles {
-					for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
-						have[dir] = true
-						list = append(list, dir+"/")
-					}
-				}
-			}
-		}
-	}
-	sort.Slice(list, func(i, j int) bool {
-		return embedFileLess(list[i], list[j])
-	})
-
-	if kind == embedString || kind == embedBytes {
-		if len(list) > 1 {
-			yyerrorl(v.Pos, "invalid go:embed: multiple files for type %v", v.Type)
-			return nil
-		}
-	}
-
-	return list
-}
-
-// embedKind determines the kind of embedding variable.
-func embedKind(typ *types.Type) int {
-	if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
-		return embedFiles
-	}
-	if typ.Etype == types.TSTRING {
-		return embedString
-	}
-	if typ.Etype == types.TSLICE && typ.Elem().Etype == types.TUINT8 {
-		return embedBytes
-	}
-	return embedUnknown
-}
-
-func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
-	if name[len(name)-1] == '/' {
-		isDir = true
-		name = name[:len(name)-1]
-	}
-	i := len(name) - 1
-	for i >= 0 && name[i] != '/' {
-		i--
-	}
-	if i < 0 {
-		return ".", name, isDir
-	}
-	return name[:i], name[i+1:], isDir
-}
-
-// embedFileLess implements the sort order for a list of embedded files.
-// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
-func embedFileLess(x, y string) bool {
-	xdir, xelem, _ := embedFileNameSplit(x)
-	ydir, yelem, _ := embedFileNameSplit(y)
-	return xdir < ydir || xdir == ydir && xelem < yelem
-}
-
-func dumpembeds() {
-	for _, v := range embedlist {
-		initEmbed(v)
-	}
-}
-
-// initEmbed emits the init data for a //go:embed variable,
-// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *Node) {
-	commentPos := v.Name.Param.EmbedList()[0].Pos
-	if !langSupported(1, 16, localpkg) {
-		lno := lineno
-		lineno = commentPos
-		yyerrorv("go1.16", "go:embed")
-		lineno = lno
-		return
-	}
-	if embedCfg.Patterns == nil {
-		yyerrorl(commentPos, "invalid go:embed: build system did not supply embed configuration")
-		return
-	}
-	kind := embedKind(v.Type)
-	if kind == embedUnknown {
-		yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
-		return
-	}
-
-	files := embedFileList(v, kind)
-	switch kind {
-	case embedString, embedBytes:
-		file := files[0]
-		fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
-		if err != nil {
-			yyerrorl(v.Pos, "embed %s: %v", file, err)
-		}
-		sym := v.Sym.Linksym()
-		off := 0
-		off = dsymptr(sym, off, fsym, 0)       // data string
-		off = duintptr(sym, off, uint64(size)) // len
-		if kind == embedBytes {
-			duintptr(sym, off, uint64(size)) // cap for slice
-		}
-
-	case embedFiles:
-		slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
-		off := 0
-		// []files pointed at by Files
-		off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
-		off = duintptr(slicedata, off, uint64(len(files)))
-		off = duintptr(slicedata, off, uint64(len(files)))
-
-		// embed/embed.go type file is:
-		//	name string
-		//	data string
-		//	hash [16]byte
-		// Emit one of these per file in the set.
-		const hashSize = 16
-		hash := make([]byte, hashSize)
-		for _, file := range files {
-			off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
-			off = duintptr(slicedata, off, uint64(len(file)))
-			if strings.HasSuffix(file, "/") {
-				// entry for directory - no data
-				off = duintptr(slicedata, off, 0)
-				off = duintptr(slicedata, off, 0)
-				off += hashSize
-			} else {
-				fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
-				if err != nil {
-					yyerrorl(v.Pos, "embed %s: %v", file, err)
-				}
-				off = dsymptr(slicedata, off, fsym, 0) // data string
-				off = duintptr(slicedata, off, uint64(size))
-				off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
-			}
-		}
-		ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
-		sym := v.Sym.Linksym()
-		dsymptr(sym, 0, slicedata, 0)
-	}
-}
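readEmbedCfg above expects the -embedcfg file to be a JSON object with Patterns and Files maps. As an illustration only (the paths are made up), a configuration of that shape and a standalone decoder for it might look like:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// embedConfig mirrors the shape of the embedCfg struct above.
type embedConfig struct {
	Patterns map[string][]string
	Files    map[string]string
}

func main() {
	// Hypothetical configuration: one pattern mapping to one file,
	// and that file's location on disk.
	data := []byte(`{
		"Patterns": {"static/*": ["static/index.html"]},
		"Files":    {"static/index.html": "/tmp/build/static/index.html"}
	}`)

	var cfg embedConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		log.Fatalf("embedcfg: %v", err)
	}
	if cfg.Patterns == nil || cfg.Files == nil {
		log.Fatal("embedcfg: missing Patterns or Files")
	}
	fmt.Println(cfg.Patterns["static/*"], "->", cfg.Files["static/index.html"])
}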
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
deleted file mode 100644
index 6f328ab..0000000
--- a/src/cmd/compile/internal/gc/esc.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/types"
-	"fmt"
-)
-
-func escapes(all []*Node) {
-	visitBottomUp(all, escapeFuncs)
-}
-
-const (
-	EscFuncUnknown = 0 + iota
-	EscFuncPlanned
-	EscFuncStarted
-	EscFuncTagged
-)
-
-func min8(a, b int8) int8 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func max8(a, b int8) int8 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-const (
-	EscUnknown = iota
-	EscNone    // Does not escape to heap, result, or parameters.
-	EscHeap    // Reachable from the heap
-	EscNever   // By construction will not escape.
-)
-
-// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *Node) *types.Sym {
-	if fn == nil || fn.Func.Nname == nil {
-		return nil
-	}
-	return fn.Func.Nname.Sym
-}
-
-// Mark labels that have no backjumps to them as not increasing e.loopdepth.
-// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
-// and set it to one of the following two. Then in esc we'll clear it again.
-var (
-	looping    Node
-	nonlooping Node
-)
-
-func isSliceSelfAssign(dst, src *Node) bool {
-	// Detect the following special case.
-	//
-	//	func (b *Buffer) Foo() {
-	//		n, m := ...
-	//		b.buf = b.buf[n:m]
-	//	}
-	//
-	// This assignment is a no-op for escape analysis,
-	// it does not store any new pointers into b that were not already there.
-	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
-	// Here we assume that the statement will not contain calls,
-	// that is, that order will move any calls to init.
-	// Otherwise the base ONAME value could change between the moments
-	// when we evaluate it for dst and for src.
-
-	// dst is ONAME dereference.
-	if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
-		return false
-	}
-	// src is a slice operation.
-	switch src.Op {
-	case OSLICE, OSLICE3, OSLICESTR:
-		// OK.
-	case OSLICEARR, OSLICE3ARR:
-		// Since arrays are embedded into containing object,
-		// a slice of a non-pointer array will introduce a new pointer into b that was not already there
-		// (pointer to b itself). After such an assignment, if b's contents escape,
-		// b escapes as well. If we ignore such OSLICEARR, we will conclude
-		// that b does not escape when b's contents do.
-		//
-		// Pointer to an array is OK since it's not stored inside b directly.
-		// For slicing an array (not pointer to array), there is an implicit OADDR.
-		// We check that to determine non-pointer array slicing.
-		if src.Left.Op == OADDR {
-			return false
-		}
-	default:
-		return false
-	}
-	// slice is applied to ONAME dereference.
-	if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
-		return false
-	}
-	// dst and src reference the same base ONAME.
-	return dst.Left == src.Left.Left
-}
-
-// isSelfAssign reports whether assignment from src to dst can
-// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src *Node) bool {
-	if isSliceSelfAssign(dst, src) {
-		return true
-	}
-
-	// Detect trivial assignments that assign back to the same object.
-	//
-	// It covers these cases:
-	//	val.x = val.y
-	//	val.x[i] = val.y[j]
-	//	val.x1.x2 = val.x1.y2
-	//	... etc
-	//
-	// These assignments do not change assigned object lifetime.
-
-	if dst == nil || src == nil || dst.Op != src.Op {
-		return false
-	}
-
-	switch dst.Op {
-	case ODOT, ODOTPTR:
-		// Safe trailing accessors that are permitted to differ.
-	case OINDEX:
-		if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
-			return false
-		}
-	default:
-		return false
-	}
-
-	// The expression prefix must be both "safe" and identical.
-	return samesafeexpr(dst.Left, src.Left)
-}
-
-// mayAffectMemory reports whether evaluation of n may affect the program's
-// memory state. If the expression can't affect memory state, then it can be
-// safely ignored by the escape analysis.
-func mayAffectMemory(n *Node) bool {
-	// We may want to use a list of "memory safe" ops instead of generally
-	// "side-effect free", which would include all calls and other ops that can
-	// allocate or change global state. For now, it's safer to start with the latter.
-	//
-	// We're ignoring things like division by zero, index out of range,
-	// and nil pointer dereference here.
-	switch n.Op {
-	case ONAME, OCLOSUREVAR, OLITERAL:
-		return false
-
-	// Left+Right group.
-	case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
-		return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
-
-	// Left group.
-	case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
-		ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
-		return mayAffectMemory(n.Left)
-
-	default:
-		return true
-	}
-}
-
-// heapAllocReason returns the reason the given Node must be heap
-// allocated, or the empty string if it does not need to be.
-func heapAllocReason(n *Node) string {
-	if n.Type == nil {
-		return ""
-	}
-
-	// Parameters are always passed via the stack.
-	if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
-		return ""
-	}
-
-	if n.Type.Width > maxStackVarSize {
-		return "too large for stack"
-	}
-
-	if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
-		return "too large for stack"
-	}
-
-	if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
-		return "too large for stack"
-	}
-	if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
-		return "too large for stack"
-	}
-
-	if n.Op == OMAKESLICE {
-		r := n.Right
-		if r == nil {
-			r = n.Left
-		}
-		if !smallintconst(r) {
-			return "non-constant size"
-		}
-		if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
-			return "too large for stack"
-		}
-	}
-
-	return ""
-}
-
-// addrescapes tags node n as having had its address taken
-// by "increasing" the "value" of n.Esc to EscHeap.
-// Storage is allocated as necessary to allow the address
-// to be taken.
-func addrescapes(n *Node) {
-	switch n.Op {
-	default:
-		// Unexpected Op, probably due to a previous type error. Ignore.
-
-	case ODEREF, ODOTPTR:
-		// Nothing to do.
-
-	case ONAME:
-		if n == nodfp {
-			break
-		}
-
-		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
-		// on PPARAM it means something different.
-		if n.Class() == PAUTO && n.Esc == EscNever {
-			break
-		}
-
-		// If a closure reference escapes, mark the outer variable as escaping.
-		if n.Name.IsClosureVar() {
-			addrescapes(n.Name.Defn)
-			break
-		}
-
-		if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
-			break
-		}
-
-		// This is a plain parameter or local variable that needs to move to the heap,
-		// but possibly for the function outside the one we're compiling.
-		// That is, if we have:
-		//
-		//	func f(x int) {
-		//		func() {
-		//			global = &x
-		//		}
-		//	}
-		//
-		// then we're analyzing the inner closure but we need to move x to the
-		// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
-		oldfn := Curfn
-		Curfn = n.Name.Curfn
-		if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
-			Curfn = Curfn.Func.Closure
-		}
-		ln := lineno
-		lineno = Curfn.Pos
-		moveToHeap(n)
-		Curfn = oldfn
-		lineno = ln
-
-	// ODOTPTR has already been introduced,
-	// so these are the non-pointer ODOT and OINDEX.
-	// In &x[0], if x is a slice, then x does not
-	// escape--the pointer inside x does, but that
-	// is always a heap pointer anyway.
-	case ODOT, OINDEX, OPAREN, OCONVNOP:
-		if !n.Left.Type.IsSlice() {
-			addrescapes(n.Left)
-		}
-	}
-}
-
-// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *Node) {
-	if Debug.r != 0 {
-		Dump("MOVE", n)
-	}
-	if compiling_runtime {
-		yyerror("%v escapes to heap, not allowed in runtime", n)
-	}
-	if n.Class() == PAUTOHEAP {
-		Dump("n", n)
-		Fatalf("double move to heap")
-	}
-
-	// Allocate a local stack variable to hold the pointer to the heap copy.
-	// temp will add it to the function declaration list automatically.
-	heapaddr := temp(types.NewPtr(n.Type))
-	heapaddr.Sym = lookup("&" + n.Sym.Name)
-	heapaddr.Orig.Sym = heapaddr.Sym
-	heapaddr.Pos = n.Pos
-
-	// Unset AutoTemp to persist the &foo variable name through SSA to
-	// liveness analysis.
-	// TODO(mdempsky/drchase): Cleaner solution?
-	heapaddr.Name.SetAutoTemp(false)
-
-	// Parameters have a local stack copy used at function start/end
-	// in addition to the copy in the heap that may live longer than
-	// the function.
-	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
-		if n.Xoffset == BADWIDTH {
-			Fatalf("addrescapes before param assignment")
-		}
-
-		// We rewrite n below to be a heap variable (indirection of heapaddr).
-		// Preserve a copy so we can still write code referring to the original,
-		// and substitute that copy into the function declaration list
-		// so that analyses of the local (on-stack) variables use it.
-		stackcopy := newname(n.Sym)
-		stackcopy.Type = n.Type
-		stackcopy.Xoffset = n.Xoffset
-		stackcopy.SetClass(n.Class())
-		stackcopy.Name.Param.Heapaddr = heapaddr
-		if n.Class() == PPARAMOUT {
-			// Make sure the pointer to the heap copy is kept live throughout the function.
-			// The function could panic at any point, and then a defer could recover.
-			// Thus, we need the pointer to the heap copy always available so the
-			// post-deferreturn code can copy the return value back to the stack.
-			// See issue 16095.
-			heapaddr.Name.SetIsOutputParamHeapAddr(true)
-		}
-		n.Name.Param.Stackcopy = stackcopy
-
-		// Substitute the stackcopy into the function variable list so that
-		// liveness and other analyses use the underlying stack slot
-		// and not the now-pseudo-variable n.
-		found := false
-		for i, d := range Curfn.Func.Dcl {
-			if d == n {
-				Curfn.Func.Dcl[i] = stackcopy
-				found = true
-				break
-			}
-			// Parameters are before locals, so can stop early.
-			// This limits the search even in functions with many local variables.
-			if d.Class() == PAUTO {
-				break
-			}
-		}
-		if !found {
-			Fatalf("cannot find %v in local variable list", n)
-		}
-		Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	}
-
-	// Modify n in place so that uses of n now mean indirection of the heapaddr.
-	n.SetClass(PAUTOHEAP)
-	n.Xoffset = 0
-	n.Name.Param.Heapaddr = heapaddr
-	n.Esc = EscHeap
-	if Debug.m != 0 {
-		Warnl(n.Pos, "moved to heap: %v", n)
-	}
-}
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-const unsafeUintptrTag = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const uintptrEscapesTag = "uintptr-escapes"
-
-func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
-	name := func() string {
-		if f.Sym != nil {
-			return f.Sym.Name
-		}
-		return fmt.Sprintf("arg#%d", narg)
-	}
-
-	if fn.Nbody.Len() == 0 {
-		// Assume that uintptr arguments must be held live across the call.
-		// This is most important for syscall.Syscall.
-		// See golang.org/issue/13372.
-		// This really doesn't have much to do with escape analysis per se,
-		// but we are reusing the ability to annotate an individual function
-		// argument and pass those annotations along to importing code.
-		if f.Type.IsUintptr() {
-			if Debug.m != 0 {
-				Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
-			}
-			return unsafeUintptrTag
-		}
-
-		if !f.Type.HasPointers() { // don't bother tagging for scalars
-			return ""
-		}
-
-		var esc EscLeaks
-
-		// External functions are assumed unsafe, unless
-		// //go:noescape is given before the declaration.
-		if fn.Func.Pragma&Noescape != 0 {
-			if Debug.m != 0 && f.Sym != nil {
-				Warnl(f.Pos, "%v does not escape", name())
-			}
-		} else {
-			if Debug.m != 0 && f.Sym != nil {
-				Warnl(f.Pos, "leaking param: %v", name())
-			}
-			esc.AddHeap(0)
-		}
-
-		return esc.Encode()
-	}
-
-	if fn.Func.Pragma&UintptrEscapes != 0 {
-		if f.Type.IsUintptr() {
-			if Debug.m != 0 {
-				Warnl(f.Pos, "marking %v as escaping uintptr", name())
-			}
-			return uintptrEscapesTag
-		}
-		if f.IsDDD() && f.Type.Elem().IsUintptr() {
-			// final argument is ...uintptr.
-			if Debug.m != 0 {
-				Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
-			}
-			return uintptrEscapesTag
-		}
-	}
-
-	if !f.Type.HasPointers() { // don't bother tagging for scalars
-		return ""
-	}
-
-	// Unnamed parameters are unused and therefore do not escape.
-	if f.Sym == nil || f.Sym.IsBlank() {
-		var esc EscLeaks
-		return esc.Encode()
-	}
-
-	n := asNode(f.Nname)
-	loc := e.oldLoc(n)
-	esc := loc.paramEsc
-	esc.Optimize()
-
-	if Debug.m != 0 && !loc.escapes {
-		if esc.Empty() {
-			Warnl(f.Pos, "%v does not escape", name())
-		}
-		if x := esc.Heap(); x >= 0 {
-			if x == 0 {
-				Warnl(f.Pos, "leaking param: %v", name())
-			} else {
-				// TODO(mdempsky): Mention level=x like below?
-				Warnl(f.Pos, "leaking param content: %v", name())
-			}
-		}
-		for i := 0; i < numEscResults; i++ {
-			if x := esc.Result(i); x >= 0 {
-				res := fn.Type.Results().Field(i).Sym
-				Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
-			}
-		}
-	}
-
-	return esc.Encode()
-}
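The diagnostics emitted above ("leaking param", "does not escape", "moved to heap") are what users see when building with -gcflags=-m. A small example program exercising both outcomes, assuming a recent gc toolchain (the file name and reported line numbers are arbitrary), could be:

package main

// Build with: go build -gcflags=-m escdemo.go

var sink *int

// leak's parameter flows to the heap through the global sink, so the
// compiler reports a message of the form "leaking param: p".
func leak(p *int) {
	sink = p
}

// noLeak only reads through its parameter, so the compiler reports
// "p does not escape".
func noLeak(p *int) int {
	return *p + 1
}

func main() {
	x := 1 // reported as "moved to heap: x", since its address is stored in sink
	leak(&x)
	y := 2 // stays on the stack
	_ = noLeak(&y)
}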
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
deleted file mode 100644
index 618bdf7..0000000
--- a/src/cmd/compile/internal/gc/escape.go
+++ /dev/null
@@ -1,1538 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"cmd/compile/internal/logopt"
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-	"math"
-	"strings"
-)
-
-// Escape analysis.
-//
-// Here we analyze functions to determine which Go variables
-// (including implicit allocations such as calls to "new" or "make",
-// composite literals, etc.) can be allocated on the stack. The two
-// key invariants we have to ensure are: (1) pointers to stack objects
-// cannot be stored in the heap, and (2) pointers to a stack object
-// cannot outlive that object (e.g., because the declaring function
-// returned and destroyed the object's stack frame, or its space is
-// reused across loop iterations for logically distinct variables).
-//
-// We implement this with a static data-flow analysis of the AST.
-// First, we construct a directed weighted graph where vertices
-// (termed "locations") represent variables allocated by statements
-// and expressions, and edges represent assignments between variables
-// (with weights representing addressing/dereference counts).
-//
-// Next we walk the graph looking for assignment paths that might
-// violate the invariants stated above. If a variable v's address is
-// stored in the heap or elsewhere that may outlive it, then v is
-// marked as requiring heap allocation.
-//
-// To support interprocedural analysis, we also record data-flow from
-// each function's parameters to the heap and to its result
-// parameters. This information is summarized as "parameter tags",
-// which are used at static call sites to improve escape analysis of
-// function arguments.
-
-// Constructing the location graph.
-//
-// Every allocating statement (e.g., variable declaration) or
-// expression (e.g., "new" or "make") is first mapped to a unique
-// "location."
-//
-// We also model every Go assignment as a directed edge between
-// locations. The number of dereference operations minus the number of
-// addressing operations is recorded as the edge's weight (termed
-// "derefs"). For example:
-//
-//     p = &q    // -1
-//     p = q     //  0
-//     p = *q    //  1
-//     p = **q   //  2
-//
-//     p = **&**&q  // 2
-//
-// Note that the & operator can only be applied to addressable
-// expressions, and the expression &x itself is not addressable, so
-// derefs cannot go below -1.
-//
-// Every Go language construct is lowered into this representation,
-// generally without sensitivity to flow, path, or context; and
-// without distinguishing elements within a compound variable. For
-// example:
-//
-//     var x struct { f, g *int }
-//     var u []*int
-//
-//     x.f = u[0]
-//
-// is modeled simply as
-//
-//     x = *u
-//
-// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
-// u[2], etc. However, we do record the implicit dereference involved
-// in indexing a slice.
-
-type Escape struct {
-	allLocs []*EscLocation
-
-	curfn *Node
-
-	// loopDepth counts the current loop nesting depth within
-	// curfn. It increments within each "for" loop and at each
-	// label with a corresponding backwards "goto" (i.e.,
-	// unstructured loop).
-	loopDepth int
-
-	heapLoc  EscLocation
-	blankLoc EscLocation
-}
-
-// An EscLocation represents an abstract location that stores a Go
-// variable.
-type EscLocation struct {
-	n         *Node     // represented variable or expression, if any
-	curfn     *Node     // enclosing function
-	edges     []EscEdge // incoming edges
-	loopDepth int       // loopDepth at declaration
-
-	// derefs and walkgen are used during walkOne to track the
-	// minimal dereferences from the walk root.
-	derefs  int // >= -1
-	walkgen uint32
-
-	// dst and dstEdgeIdx track the next immediate assignment
-	// destination location during walkOne, along with the index
-	// of the edge pointing back to this location.
-	dst        *EscLocation
-	dstEdgeIdx int
-
-	// queued is used by walkAll to track whether this location is
-	// in the walk queue.
-	queued bool
-
-	// escapes reports whether the represented variable's address
-	// escapes; that is, whether the variable must be heap
-	// allocated.
-	escapes bool
-
-	// transient reports whether the represented expression's
-	// address does not outlive the statement; that is, whether
-	// its storage can be immediately reused.
-	transient bool
-
-	// paramEsc records the represented parameter's leak set.
-	paramEsc EscLeaks
-}
-
-// An EscEdge represents an assignment edge between two Go variables.
-type EscEdge struct {
-	src    *EscLocation
-	derefs int // >= -1
-	notes  *EscNote
-}
-
-// escapeFuncs performs escape analysis on a minimal batch of
-// functions.
-func escapeFuncs(fns []*Node, recursive bool) {
-	for _, fn := range fns {
-		if fn.Op != ODCLFUNC {
-			Fatalf("unexpected node: %v", fn)
-		}
-	}
-
-	var e Escape
-	e.heapLoc.escapes = true
-
-	// Construct data-flow graph from syntax trees.
-	for _, fn := range fns {
-		e.initFunc(fn)
-	}
-	for _, fn := range fns {
-		e.walkFunc(fn)
-	}
-	e.curfn = nil
-
-	e.walkAll()
-	e.finish(fns)
-}
-
-func (e *Escape) initFunc(fn *Node) {
-	if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
-		Fatalf("unexpected node: %v", fn)
-	}
-	fn.Esc = EscFuncPlanned
-	if Debug.m > 3 {
-		Dump("escAnalyze", fn)
-	}
-
-	e.curfn = fn
-	e.loopDepth = 1
-
-	// Allocate locations for local variables.
-	for _, dcl := range fn.Func.Dcl {
-		if dcl.Op == ONAME {
-			e.newLoc(dcl, false)
-		}
-	}
-}
-
-func (e *Escape) walkFunc(fn *Node) {
-	fn.Esc = EscFuncStarted
-
-	// Identify labels that mark the head of an unstructured loop.
-	inspectList(fn.Nbody, func(n *Node) bool {
-		switch n.Op {
-		case OLABEL:
-			n.Sym.Label = asTypesNode(&nonlooping)
-
-		case OGOTO:
-			// If we visited the label before the goto,
-			// then this is a looping label.
-			if n.Sym.Label == asTypesNode(&nonlooping) {
-				n.Sym.Label = asTypesNode(&looping)
-			}
-		}
-
-		return true
-	})
-
-	e.curfn = fn
-	e.loopDepth = 1
-	e.block(fn.Nbody)
-}
-
-// Below we implement the methods for walking the AST and recording
-// data flow edges. Note that because a sub-expression might have
-// side-effects, it's important to always visit the entire AST.
-//
-// For example, write either:
-//
-//     if x {
-//         e.discard(n.Left)
-//     } else {
-//         e.value(k, n.Left)
-//     }
-//
-// or
-//
-//     if x {
-//         k = e.discardHole()
-//     }
-//     e.value(k, n.Left)
-//
-// Do NOT write:
-//
-//    // BAD: possibly loses side-effects within n.Left
-//    if !x {
-//        e.value(k, n.Left)
-//    }
-
-// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n *Node) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-	defer func() {
-		lineno = lno
-	}()
-
-	if Debug.m > 2 {
-		fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
-	}
-
-	e.stmts(n.Ninit)
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected stmt: %v", n)
-
-	case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
-		// nop
-
-	case OBREAK, OCONTINUE, OGOTO:
-		// TODO(mdempsky): Handle dead code?
-
-	case OBLOCK:
-		e.stmts(n.List)
-
-	case ODCL:
-		// Record loop depth at declaration.
-		if !n.Left.isBlank() {
-			e.dcl(n.Left)
-		}
-
-	case OLABEL:
-		switch asNode(n.Sym.Label) {
-		case &nonlooping:
-			if Debug.m > 2 {
-				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
-			}
-		case &looping:
-			if Debug.m > 2 {
-				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
-			}
-			e.loopDepth++
-		default:
-			Fatalf("label missing tag")
-		}
-		n.Sym.Label = nil
-
-	case OIF:
-		e.discard(n.Left)
-		e.block(n.Nbody)
-		e.block(n.Rlist)
-
-	case OFOR, OFORUNTIL:
-		e.loopDepth++
-		e.discard(n.Left)
-		e.stmt(n.Right)
-		e.block(n.Nbody)
-		e.loopDepth--
-
-	case ORANGE:
-		// for List = range Right { Nbody }
-		e.loopDepth++
-		ks := e.addrs(n.List)
-		e.block(n.Nbody)
-		e.loopDepth--
-
-		// Right is evaluated outside the loop.
-		k := e.discardHole()
-		if len(ks) >= 2 {
-			if n.Right.Type.IsArray() {
-				k = ks[1].note(n, "range")
-			} else {
-				k = ks[1].deref(n, "range-deref")
-			}
-		}
-		e.expr(e.later(k), n.Right)
-
-	case OSWITCH:
-		typesw := n.Left != nil && n.Left.Op == OTYPESW
-
-		var ks []EscHole
-		for _, cas := range n.List.Slice() { // cases
-			if typesw && n.Left.Left != nil {
-				cv := cas.Rlist.First()
-				k := e.dcl(cv) // type switch variables have no ODCL.
-				if cv.Type.HasPointers() {
-					ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
-				}
-			}
-
-			e.discards(cas.List)
-			e.block(cas.Nbody)
-		}
-
-		if typesw {
-			e.expr(e.teeHole(ks...), n.Left.Right)
-		} else {
-			e.discard(n.Left)
-		}
-
-	case OSELECT:
-		for _, cas := range n.List.Slice() {
-			e.stmt(cas.Left)
-			e.block(cas.Nbody)
-		}
-	case OSELRECV:
-		e.assign(n.Left, n.Right, "selrecv", n)
-	case OSELRECV2:
-		e.assign(n.Left, n.Right, "selrecv", n)
-		e.assign(n.List.First(), nil, "selrecv", n)
-	case ORECV:
-		// TODO(mdempsky): Consider e.discard(n.Left).
-		e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
-	case OSEND:
-		e.discard(n.Left)
-		e.assignHeap(n.Right, "send", n)
-
-	case OAS, OASOP:
-		e.assign(n.Left, n.Right, "assign", n)
-
-	case OAS2:
-		for i, nl := range n.List.Slice() {
-			e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
-		}
-
-	case OAS2DOTTYPE: // v, ok = x.(type)
-		e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
-		e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
-	case OAS2MAPR: // v, ok = m[k]
-		e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
-		e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
-	case OAS2RECV: // v, ok = <-ch
-		e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
-		e.assign(n.List.Second(), nil, "assign-pair-receive", n)
-
-	case OAS2FUNC:
-		e.stmts(n.Right.Ninit)
-		e.call(e.addrs(n.List), n.Right, nil)
-	case ORETURN:
-		results := e.curfn.Type.Results().FieldSlice()
-		for i, v := range n.List.Slice() {
-			e.assign(asNode(results[i].Nname), v, "return", n)
-		}
-	case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
-		e.call(nil, n, nil)
-	case OGO, ODEFER:
-		e.stmts(n.Left.Ninit)
-		e.call(nil, n.Left, n)
-
-	case ORETJMP:
-		// TODO(mdempsky): What do? esc.go just ignores it.
-	}
-}
-
-func (e *Escape) stmts(l Nodes) {
-	for _, n := range l.Slice() {
-		e.stmt(n)
-	}
-}
-
-// block is like stmts, but preserves loopDepth.
-func (e *Escape) block(l Nodes) {
-	old := e.loopDepth
-	e.stmts(l)
-	e.loopDepth = old
-}
-
-// expr models evaluating an expression n and flowing the result into
-// hole k.
-func (e *Escape) expr(k EscHole, n *Node) {
-	if n == nil {
-		return
-	}
-	e.stmts(n.Ninit)
-	e.exprSkipInit(k, n)
-}
-
-func (e *Escape) exprSkipInit(k EscHole, n *Node) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-	defer func() {
-		lineno = lno
-	}()
-
-	uintptrEscapesHack := k.uintptrEscapesHack
-	k.uintptrEscapesHack = false
-
-	if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
-		// nop
-	} else if k.derefs >= 0 && !n.Type.HasPointers() {
-		k = e.discardHole()
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected expr: %v", n)
-
-	case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
-		// nop
-
-	case ONAME:
-		if n.Class() == PFUNC || n.Class() == PEXTERN {
-			return
-		}
-		e.flow(k, e.oldLoc(n))
-
-	case OPLUS, ONEG, OBITNOT, ONOT:
-		e.discard(n.Left)
-	case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
-		e.discard(n.Left)
-		e.discard(n.Right)
-
-	case OADDR:
-		e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
-	case ODEREF:
-		e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
-	case ODOT, ODOTMETH, ODOTINTER:
-		e.expr(k.note(n, "dot"), n.Left)
-	case ODOTPTR:
-		e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
-	case ODOTTYPE, ODOTTYPE2:
-		e.expr(k.dotType(n.Type, n, "dot"), n.Left)
-	case OINDEX:
-		if n.Left.Type.IsArray() {
-			e.expr(k.note(n, "fixed-array-index-of"), n.Left)
-		} else {
-			// TODO(mdempsky): Fix why reason text.
-			e.expr(k.deref(n, "dot of pointer"), n.Left)
-		}
-		e.discard(n.Right)
-	case OINDEXMAP:
-		e.discard(n.Left)
-		e.discard(n.Right)
-	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
-		e.expr(k.note(n, "slice"), n.Left)
-		low, high, max := n.SliceBounds()
-		e.discard(low)
-		e.discard(high)
-		e.discard(max)
-
-	case OCONV, OCONVNOP:
-		if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
-			// When -d=checkptr=2 is enabled, treat
-			// conversions to unsafe.Pointer as an
-			// escaping operation. This allows better
-			// runtime instrumentation, since we can more
-			// easily detect object boundaries on the heap
-			// than the stack.
-			e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
-		} else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
-			e.unsafeValue(k, n.Left)
-		} else {
-			e.expr(k, n.Left)
-		}
-	case OCONVIFACE:
-		if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
-			k = e.spill(k, n)
-		}
-		e.expr(k.note(n, "interface-converted"), n.Left)
-
-	case ORECV:
-		e.discard(n.Left)
-
-	case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
-		e.call([]EscHole{k}, n, nil)
-
-	case ONEW:
-		e.spill(k, n)
-
-	case OMAKESLICE:
-		e.spill(k, n)
-		e.discard(n.Left)
-		e.discard(n.Right)
-	case OMAKECHAN:
-		e.discard(n.Left)
-	case OMAKEMAP:
-		e.spill(k, n)
-		e.discard(n.Left)
-
-	case ORECOVER:
-		// nop
-
-	case OCALLPART:
-		// Flow the receiver argument to both the closure and
-		// to the receiver parameter.
-
-		closureK := e.spill(k, n)
-
-		m := callpartMethod(n)
-
-		// We don't know how the method value will be called
-		// later, so conservatively assume the result
-		// parameters all flow to the heap.
-		//
-		// TODO(mdempsky): Change ks into a callback, so that
-		// we don't have to create this dummy slice?
-		var ks []EscHole
-		for i := m.Type.NumResults(); i > 0; i-- {
-			ks = append(ks, e.heapHole())
-		}
-		paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv())
-
-		e.expr(e.teeHole(paramK, closureK), n.Left)
-
-	case OPTRLIT:
-		e.expr(e.spill(k, n), n.Left)
-
-	case OARRAYLIT:
-		for _, elt := range n.List.Slice() {
-			if elt.Op == OKEY {
-				elt = elt.Right
-			}
-			e.expr(k.note(n, "array literal element"), elt)
-		}
-
-	case OSLICELIT:
-		k = e.spill(k, n)
-		k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
-
-		for _, elt := range n.List.Slice() {
-			if elt.Op == OKEY {
-				elt = elt.Right
-			}
-			e.expr(k.note(n, "slice-literal-element"), elt)
-		}
-
-	case OSTRUCTLIT:
-		for _, elt := range n.List.Slice() {
-			e.expr(k.note(n, "struct literal element"), elt.Left)
-		}
-
-	case OMAPLIT:
-		e.spill(k, n)
-
-		// Map keys and values are always stored in the heap.
-		for _, elt := range n.List.Slice() {
-			e.assignHeap(elt.Left, "map literal key", n)
-			e.assignHeap(elt.Right, "map literal value", n)
-		}
-
-	case OCLOSURE:
-		k = e.spill(k, n)
-
-		// Link addresses of captured variables to closure.
-		for _, v := range n.Func.Closure.Func.Cvars.Slice() {
-			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
-				continue
-			}
-
-			k := k
-			if !v.Name.Byval() {
-				k = k.addr(v, "reference")
-			}
-
-			e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
-		}
-
-	case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
-		e.spill(k, n)
-		e.discard(n.Left)
-
-	case OADDSTR:
-		e.spill(k, n)
-
-		// Arguments of OADDSTR never escape;
-		// runtime.concatstrings makes sure of that.
-		e.discards(n.List)
-	}
-}
-
-// unsafeValue evaluates a uintptr-typed arithmetic expression looking
-// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n *Node) {
-	if n.Type.Etype != TUINTPTR {
-		Fatalf("unexpected type %v for %v", n.Type, n)
-	}
-
-	e.stmts(n.Ninit)
-
-	switch n.Op {
-	case OCONV, OCONVNOP:
-		if n.Left.Type.IsUnsafePtr() {
-			e.expr(k, n.Left)
-		} else {
-			e.discard(n.Left)
-		}
-	case ODOTPTR:
-		if isReflectHeaderDataField(n) {
-			e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
-		} else {
-			e.discard(n.Left)
-		}
-	case OPLUS, ONEG, OBITNOT:
-		e.unsafeValue(k, n.Left)
-	case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
-		e.unsafeValue(k, n.Left)
-		e.unsafeValue(k, n.Right)
-	case OLSH, ORSH:
-		e.unsafeValue(k, n.Left)
-		// RHS need not be uintptr-typed (#32959) and can't meaningfully
-		// flow pointers anyway.
-		e.discard(n.Right)
-	default:
-		e.exprSkipInit(e.discardHole(), n)
-	}
-}
-
-// discard evaluates an expression n for side-effects, but discards
-// its value.
-func (e *Escape) discard(n *Node) {
-	e.expr(e.discardHole(), n)
-}
-
-func (e *Escape) discards(l Nodes) {
-	for _, n := range l.Slice() {
-		e.discard(n)
-	}
-}
-
-// addr evaluates an addressable expression n and returns an EscHole
-// that represents storing into the represented location.
-func (e *Escape) addr(n *Node) EscHole {
-	if n == nil || n.isBlank() {
-		// Can happen at least in OSELRECV.
-		// TODO(mdempsky): Anywhere else?
-		return e.discardHole()
-	}
-
-	k := e.heapHole()
-
-	switch n.Op {
-	default:
-		Fatalf("unexpected addr: %v", n)
-	case ONAME:
-		if n.Class() == PEXTERN {
-			break
-		}
-		k = e.oldLoc(n).asHole()
-	case ODOT:
-		k = e.addr(n.Left)
-	case OINDEX:
-		e.discard(n.Right)
-		if n.Left.Type.IsArray() {
-			k = e.addr(n.Left)
-		} else {
-			e.discard(n.Left)
-		}
-	case ODEREF, ODOTPTR:
-		e.discard(n)
-	case OINDEXMAP:
-		e.discard(n.Left)
-		e.assignHeap(n.Right, "key of map put", n)
-	}
-
-	if !n.Type.HasPointers() {
-		k = e.discardHole()
-	}
-
-	return k
-}
-
-func (e *Escape) addrs(l Nodes) []EscHole {
-	var ks []EscHole
-	for _, n := range l.Slice() {
-		ks = append(ks, e.addr(n))
-	}
-	return ks
-}
-
-// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *Node, why string, where *Node) {
-	// Filter out some no-op assignments for escape analysis.
-	ignore := dst != nil && src != nil && isSelfAssign(dst, src)
-	if ignore && Debug.m != 0 {
-		Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
-	}
-
-	k := e.addr(dst)
-	if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
-		e.unsafeValue(e.heapHole().note(where, why), src)
-	} else {
-		if ignore {
-			k = e.discardHole()
-		}
-		e.expr(k.note(where, why), src)
-	}
-}
-
-func (e *Escape) assignHeap(src *Node, why string, where *Node) {
-	e.expr(e.heapHole().note(where, why), src)
-}
-
-// call evaluates a call expression, including builtin calls. ks
-// should contain the holes representing where the function callee's
-// results flow; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where *Node) {
-	topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1
-	if topLevelDefer {
-		// force stack allocation of defer record, unless
-		// open-coded defers are used (see ssa.go)
-		where.Esc = EscNever
-	}
-
-	argument := func(k EscHole, arg *Node) {
-		if topLevelDefer {
-			// Top-level defer arguments don't escape to the
-			// heap, but they do need to last until the end of
-			// the function.
-			k = e.later(k)
-		} else if where != nil {
-			k = e.heapHole()
-		}
-
-		e.expr(k.note(call, "call parameter"), arg)
-	}
-
-	switch call.Op {
-	default:
-		Fatalf("unexpected call op: %v", call.Op)
-
-	case OCALLFUNC, OCALLMETH, OCALLINTER:
-		fixVariadicCall(call)
-
-		// Pick out the function callee, if statically known.
-		var fn *Node
-		switch call.Op {
-		case OCALLFUNC:
-			switch v := staticValue(call.Left); {
-			case v.Op == ONAME && v.Class() == PFUNC:
-				fn = v
-			case v.Op == OCLOSURE:
-				fn = v.Func.Closure.Func.Nname
-			}
-		case OCALLMETH:
-			fn = asNode(call.Left.Type.FuncType().Nname)
-		}
-
-		fntype := call.Left.Type
-		if fn != nil {
-			fntype = fn.Type
-		}
-
-		if ks != nil && fn != nil && e.inMutualBatch(fn) {
-			for i, result := range fn.Type.Results().FieldSlice() {
-				e.expr(ks[i], asNode(result.Nname))
-			}
-		}
-
-		if r := fntype.Recv(); r != nil {
-			argument(e.tagHole(ks, fn, r), call.Left.Left)
-		} else {
-			// Evaluate callee function expression.
-			argument(e.discardHole(), call.Left)
-		}
-
-		args := call.List.Slice()
-		for i, param := range fntype.Params().FieldSlice() {
-			argument(e.tagHole(ks, fn, param), args[i])
-		}
-
-	case OAPPEND:
-		args := call.List.Slice()
-
-		// Appendee slice may flow directly to the result, if
-		// it has enough capacity. Alternatively, a new heap
-		// slice might be allocated, and all slice elements
-		// might flow to heap.
-		appendeeK := ks[0]
-		if args[0].Type.Elem().HasPointers() {
-			appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
-		}
-		argument(appendeeK, args[0])
-
-		if call.IsDDD() {
-			appendedK := e.discardHole()
-			if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
-				appendedK = e.heapHole().deref(call, "appended slice...")
-			}
-			argument(appendedK, args[1])
-		} else {
-			for _, arg := range args[1:] {
-				argument(e.heapHole(), arg)
-			}
-		}
-
-	case OCOPY:
-		argument(e.discardHole(), call.Left)
-
-		copiedK := e.discardHole()
-		if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
-			copiedK = e.heapHole().deref(call, "copied slice")
-		}
-		argument(copiedK, call.Right)
-
-	case OPANIC:
-		argument(e.heapHole(), call.Left)
-
-	case OCOMPLEX:
-		argument(e.discardHole(), call.Left)
-		argument(e.discardHole(), call.Right)
-	case ODELETE, OPRINT, OPRINTN, ORECOVER:
-		for _, arg := range call.List.Slice() {
-			argument(e.discardHole(), arg)
-		}
-	case OLEN, OCAP, OREAL, OIMAG, OCLOSE:
-		argument(e.discardHole(), call.Left)
-	}
-}
-
-// tagHole returns a hole for evaluating an argument passed to param.
-// ks should contain the holes representing where the function
-// callee's results flow. fn is the statically-known callee function,
-// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
-	// If this is a dynamic call, we can't rely on param.Note.
-	if fn == nil {
-		return e.heapHole()
-	}
-
-	if e.inMutualBatch(fn) {
-		return e.addr(asNode(param.Nname))
-	}
-
-	// Call to previously tagged function.
-
-	if param.Note == uintptrEscapesTag {
-		k := e.heapHole()
-		k.uintptrEscapesHack = true
-		return k
-	}
-
-	var tagKs []EscHole
-
-	esc := ParseLeaks(param.Note)
-	if x := esc.Heap(); x >= 0 {
-		tagKs = append(tagKs, e.heapHole().shift(x))
-	}
-
-	if ks != nil {
-		for i := 0; i < numEscResults; i++ {
-			if x := esc.Result(i); x >= 0 {
-				tagKs = append(tagKs, ks[i].shift(x))
-			}
-		}
-	}
-
-	return e.teeHole(tagKs...)
-}
-
-// inMutualBatch reports whether function fn is in the batch of
-// mutually recursive functions being analyzed. When this is true,
-// fn has not yet been analyzed, so its parameters and results
-// should be incorporated directly into the flow graph instead of
-// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *Node) bool {
-	if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
-		if fn.Name.Defn.Esc == EscFuncUnknown {
-			Fatalf("graph inconsistency")
-		}
-		return true
-	}
-	return false
-}
-
-// An EscHole represents a context for evaluating a Go
-// expression. E.g., when evaluating p in "x = **p", we'd have a hole
-// with dst==x and derefs==2.
-type EscHole struct {
-	dst    *EscLocation
-	derefs int // >= -1
-	notes  *EscNote
-
-	// uintptrEscapesHack indicates this context is evaluating an
-	// argument for a //go:uintptrescapes function.
-	uintptrEscapesHack bool
-}
-
-type EscNote struct {
-	next  *EscNote
-	where *Node
-	why   string
-}
-
-func (k EscHole) note(where *Node, why string) EscHole {
-	if where == nil || why == "" {
-		Fatalf("note: missing where/why")
-	}
-	if Debug.m >= 2 || logopt.Enabled() {
-		k.notes = &EscNote{
-			next:  k.notes,
-			where: where,
-			why:   why,
-		}
-	}
-	return k
-}
-
-func (k EscHole) shift(delta int) EscHole {
-	k.derefs += delta
-	if k.derefs < -1 {
-		Fatalf("derefs underflow: %v", k.derefs)
-	}
-	return k
-}
-
-func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where *Node, why string) EscHole  { return k.shift(-1).note(where, why) }
-
-func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
-	if !t.IsInterface() && !isdirectiface(t) {
-		k = k.shift(1)
-	}
-	return k.note(where, why)
-}
-
-// teeHole returns a new hole that flows into each hole of ks,
-// similar to the Unix tee(1) command.
-func (e *Escape) teeHole(ks ...EscHole) EscHole {
-	if len(ks) == 0 {
-		return e.discardHole()
-	}
-	if len(ks) == 1 {
-		return ks[0]
-	}
-	// TODO(mdempsky): Optimize if there's only one non-discard hole?
-
-	// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
-	// new temporary location ltmp, wire it into place, and return
-	// a hole for "ltmp = _".
-	loc := e.newLoc(nil, true)
-	for _, k := range ks {
-		// N.B., "p = &q" and "p = &tmp; tmp = q" are not
-		// semantically equivalent. To combine holes like "l1
-		// = _" and "l2 = &_", we'd need to wire them as "l1 =
-		// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
-		// instead.
-		if k.derefs < 0 {
-			Fatalf("teeHole: negative derefs")
-		}
-
-		e.flow(k, loc)
-	}
-	return loc.asHole()
-}
-
-func (e *Escape) dcl(n *Node) EscHole {
-	loc := e.oldLoc(n)
-	loc.loopDepth = e.loopDepth
-	return loc.asHole()
-}
-
-// spill allocates a new location associated with expression n, flows
-// its address to k, and returns a hole that flows values to it. It's
-// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n *Node) EscHole {
-	loc := e.newLoc(n, true)
-	e.flow(k.addr(n, "spill"), loc)
-	return loc.asHole()
-}
-
-// later returns a new hole that flows into k, but some time later.
-// Its main effect is to prevent immediate reuse of temporary
-// variables introduced during Order.
-func (e *Escape) later(k EscHole) EscHole {
-	loc := e.newLoc(nil, false)
-	e.flow(k, loc)
-	return loc.asHole()
-}
-
-// canonicalNode returns the canonical *Node that n logically
-// represents.
-func canonicalNode(n *Node) *Node {
-	if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
-		n = n.Name.Defn
-		if n.Name.IsClosureVar() {
-			Fatalf("still closure var")
-		}
-	}
-
-	return n
-}
-
-func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
-	if e.curfn == nil {
-		Fatalf("e.curfn isn't set")
-	}
-	if n != nil && n.Type != nil && n.Type.NotInHeap() {
-		yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
-	}
-
-	n = canonicalNode(n)
-	loc := &EscLocation{
-		n:         n,
-		curfn:     e.curfn,
-		loopDepth: e.loopDepth,
-		transient: transient,
-	}
-	e.allLocs = append(e.allLocs, loc)
-	if n != nil {
-		if n.Op == ONAME && n.Name.Curfn != e.curfn {
-			Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
-		}
-
-		if n.HasOpt() {
-			Fatalf("%v already has a location", n)
-		}
-		n.SetOpt(loc)
-
-		if why := heapAllocReason(n); why != "" {
-			e.flow(e.heapHole().addr(n, why), loc)
-		}
-	}
-	return loc
-}
-
-func (e *Escape) oldLoc(n *Node) *EscLocation {
-	n = canonicalNode(n)
-	return n.Opt().(*EscLocation)
-}
-
-func (l *EscLocation) asHole() EscHole {
-	return EscHole{dst: l}
-}
-
-func (e *Escape) flow(k EscHole, src *EscLocation) {
-	dst := k.dst
-	if dst == &e.blankLoc {
-		return
-	}
-	if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
-		return
-	}
-	if dst.escapes && k.derefs < 0 { // dst = &src
-		if Debug.m >= 2 || logopt.Enabled() {
-			pos := linestr(src.n.Pos)
-			if Debug.m >= 2 {
-				fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
-			}
-			explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
-			if logopt.Enabled() {
-				logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation)
-			}
-
-		}
-		src.escapes = true
-		return
-	}
-
-	// TODO(mdempsky): Deduplicate edges?
-	dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes})
-}
-
-func (e *Escape) heapHole() EscHole    { return e.heapLoc.asHole() }
-func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
-
-// walkAll computes the minimal dereferences between all pairs of
-// locations.
-func (e *Escape) walkAll() {
-	// We use a work queue to keep track of locations that we need
-	// to visit, and repeatedly walk until we reach a fixed point.
-	//
-	// We walk once from each location (including the heap), and
-	// then re-enqueue each location on its transition from
-	// transient->!transient and !escapes->escapes, which can each
-	// happen at most once. So we take Θ(len(e.allLocs)) walks.
-
-	// LIFO queue, has enough room for e.allLocs and e.heapLoc.
-	todo := make([]*EscLocation, 0, len(e.allLocs)+1)
-	enqueue := func(loc *EscLocation) {
-		if !loc.queued {
-			todo = append(todo, loc)
-			loc.queued = true
-		}
-	}
-
-	for _, loc := range e.allLocs {
-		enqueue(loc)
-	}
-	enqueue(&e.heapLoc)
-
-	var walkgen uint32
-	for len(todo) > 0 {
-		root := todo[len(todo)-1]
-		todo = todo[:len(todo)-1]
-		root.queued = false
-
-		walkgen++
-		e.walkOne(root, walkgen, enqueue)
-	}
-}
-
-// walkOne computes the minimal number of dereferences from root to
-// all other locations.
-func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) {
-	// The data flow graph has negative edges (from addressing
-	// operations), so we use the Bellman-Ford algorithm. However,
-	// we don't have to worry about infinite negative cycles since
-	// we bound intermediate dereference counts to 0.
-
-	root.walkgen = walkgen
-	root.derefs = 0
-	root.dst = nil
-
-	todo := []*EscLocation{root} // LIFO queue
-	for len(todo) > 0 {
-		l := todo[len(todo)-1]
-		todo = todo[:len(todo)-1]
-
-		base := l.derefs
-
-		// If l.derefs < 0, then l's address flows to root.
-		addressOf := base < 0
-		if addressOf {
-			// For a flow path like "root = &l; l = x",
-			// l's address flows to root, but x's does
-			// not. We recognize this by lower bounding
-			// base at 0.
-			base = 0
-
-			// If l's address flows to a non-transient
-			// location, then l can't be transiently
-			// allocated.
-			if !root.transient && l.transient {
-				l.transient = false
-				enqueue(l)
-			}
-		}
-
-		if e.outlives(root, l) {
-			// l's value flows to root. If l is a function
-			// parameter and root is the heap or a
-			// corresponding result parameter, then record
-			// that value flow for tagging the function
-			// later.
-			if l.isName(PPARAM) {
-				if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
-					if Debug.m >= 2 {
-						fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
-					}
-					explanation := e.explainPath(root, l)
-					if logopt.Enabled() {
-						logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(),
-							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation)
-					}
-				}
-				l.leakTo(root, base)
-			}
-
-			// If l's address flows somewhere that
-			// outlives it, then l needs to be heap
-			// allocated.
-			if addressOf && !l.escapes {
-				if logopt.Enabled() || Debug.m >= 2 {
-					if Debug.m >= 2 {
-						fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
-					}
-					explanation := e.explainPath(root, l)
-					if logopt.Enabled() {
-						logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation)
-					}
-				}
-				l.escapes = true
-				enqueue(l)
-				continue
-			}
-		}
-
-		for i, edge := range l.edges {
-			if edge.src.escapes {
-				continue
-			}
-			derefs := base + edge.derefs
-			if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
-				edge.src.walkgen = walkgen
-				edge.src.derefs = derefs
-				edge.src.dst = l
-				edge.src.dstEdgeIdx = i
-				todo = append(todo, edge.src)
-			}
-		}
-	}
-}
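
Stripped of the compiler's types, the relaxation above is ordinary Bellman-Ford with the intermediate count clamped at zero. The sketch below uses illustrative loc/edge stand-ins with the same deref convention (-1 for address-of, 0 for a copy, positive for dereferences) and reproduces the "root = &l; l = x" example from the comment above.

package main

import "fmt"

type loc struct {
	name    string
	derefs  int
	visited bool
	edges   []edge
}

type edge struct {
	src    *loc
	derefs int // -1 for "dst = &src", 0 for "dst = src", 1 for "dst = *src", ...
}

// walkOne relaxes deref counts from root to every reachable location,
// lower-bounding intermediate counts at 0 so negative edges cannot
// compound along a path.
func walkOne(root *loc) {
	root.derefs = 0
	root.visited = true
	todo := []*loc{root} // LIFO queue
	for len(todo) > 0 {
		l := todo[len(todo)-1]
		todo = todo[:len(todo)-1]

		base := l.derefs
		if base < 0 {
			base = 0 // l's address flows to root; clamp before continuing
		}
		for _, e := range l.edges {
			d := base + e.derefs
			if !e.src.visited || d < e.src.derefs {
				e.src.visited = true
				e.src.derefs = d
				todo = append(todo, e.src)
			}
		}
	}
}

func main() {
	x := &loc{name: "x"}
	l := &loc{name: "l", edges: []edge{{src: x, derefs: 0}}}        // l = x
	root := &loc{name: "root", edges: []edge{{src: l, derefs: -1}}} // root = &l
	walkOne(root)
	fmt.Println(l.name, l.derefs) // l -1: l's address flows to root
	fmt.Println(x.name, x.derefs) // x 0:  x's value flows, its address does not
}
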
-
-// explainPath prints an explanation of how src flows to the walk root.
-func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
-	visited := make(map[*EscLocation]bool)
-	pos := linestr(src.n.Pos)
-	var explanation []*logopt.LoggedOpt
-	for {
-		// Prevent infinite loop.
-		if visited[src] {
-			if Debug.m >= 2 {
-				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
-			}
-			break
-		}
-		visited[src] = true
-		dst := src.dst
-		edge := &dst.edges[src.dstEdgeIdx]
-		if edge.src != src {
-			Fatalf("path inconsistency: %v != %v", edge.src, src)
-		}
-
-		explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
-
-		if dst == root {
-			break
-		}
-		src = dst
-	}
-
-	return explanation
-}
-
-func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, notes *EscNote, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
-	ops := "&"
-	if derefs >= 0 {
-		ops = strings.Repeat("*", derefs)
-	}
-	print := Debug.m >= 2
-
-	flow := fmt.Sprintf("   flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
-	if print {
-		fmt.Printf("%s:%s\n", pos, flow)
-	}
-	if logopt.Enabled() {
-		var epos src.XPos
-		if notes != nil {
-			epos = notes.where.Pos
-		} else if srcloc != nil && srcloc.n != nil {
-			epos = srcloc.n.Pos
-		}
-		explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow))
-	}
-
-	for note := notes; note != nil; note = note.next {
-		if print {
-			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
-		}
-		if logopt.Enabled() {
-			explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(),
-				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
-		}
-	}
-	return explanation
-}
-
-func (e *Escape) explainLoc(l *EscLocation) string {
-	if l == &e.heapLoc {
-		return "{heap}"
-	}
-	if l.n == nil {
-		// TODO(mdempsky): Omit entirely.
-		return "{temp}"
-	}
-	if l.n.Op == ONAME {
-		return fmt.Sprintf("%v", l.n)
-	}
-	return fmt.Sprintf("{storage for %v}", l.n)
-}
-
-// outlives reports whether values stored in l may survive beyond
-// other's lifetime if stack allocated.
-func (e *Escape) outlives(l, other *EscLocation) bool {
-	// The heap outlives everything.
-	if l.escapes {
-		return true
-	}
-
-	// We don't know what callers do with returned values, so
-	// pessimistically we need to assume they flow to the heap and
-	// outlive everything too.
-	if l.isName(PPARAMOUT) {
-		// Exception: Directly called closures can return
-		// locations allocated outside of them without forcing
-		// them to the heap. For example:
-		//
-		//    var u int  // okay to stack allocate
-		//    *(func() *int { return &u }()) = 42
-		if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
-			return false
-		}
-
-		return true
-	}
-
-	// If l and other are within the same function, then l
-	// outlives other if it was declared outside other's loop
-	// scope. For example:
-	//
-	//    var l *int
-	//    for {
-	//        l = new(int)
-	//    }
-	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
-		return true
-	}
-
-	// If other is declared within a child closure of where l is
-	// declared, then l outlives it. For example:
-	//
-	//    var l *int
-	//    func() {
-	//        l = new(int)
-	//    }
-	if containsClosure(l.curfn, other.curfn) {
-		return true
-	}
-
-	return false
-}
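
The loop-depth rule above is easy to observe: compiling a program like the sketch below with go build -gcflags=-m should report the new(int) inside the loop as escaping to the heap, because l is declared outside the loop and so outlives each iteration's allocation. (The file name and exact diagnostic wording here are illustrative.)

// escape_loop.go — build with: go build -gcflags=-m escape_loop.go
package main

func main() {
	var l *int // declared outside the loop; outlives each iteration
	for i := 0; i < 3; i++ {
		l = new(int) // expected to be reported as escaping to the heap
		*l = i
	}
	_ = l
}
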
-
-// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *Node) bool {
-	if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
-		Fatalf("bad containsClosure: %v, %v", f, c)
-	}
-
-	// Common case.
-	if f == c {
-		return false
-	}
-
-	// Closures within function Foo are named like "Foo.funcN..."
-	// TODO(mdempsky): Better way to recognize this.
-	fn := f.Func.Nname.Sym.Name
-	cn := c.Func.Nname.Sym.Name
-	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
-}
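
The containment test above is purely a symbol-name prefix check. In isolation, with hypothetical function names:

package main

import "fmt"

// containsClosure reports whether a function named cn is a closure declared
// inside a function named fn, relying on the "Foo.funcN..." naming scheme.
func containsClosure(fn, cn string) bool {
	return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}

func main() {
	fmt.Println(containsClosure("Foo", "Foo.func1"))    // true
	fmt.Println(containsClosure("Foo", "Foobar.func1")) // false: no '.' boundary
	fmt.Println(containsClosure("Foo", "Foo"))          // false: a function is not inside itself
}
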
-
-// leak records that parameter l leaks to sink.
-func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
-	// If sink is a result parameter and we can fit return bits
-	// into the escape analysis tag, then record a return leak.
-	if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
-		// TODO(mdempsky): Eliminate dependency on Vargen here.
-		ri := int(sink.n.Name.Vargen) - 1
-		if ri < numEscResults {
-			// Leak to result parameter.
-			l.paramEsc.AddResult(ri, derefs)
-			return
-		}
-	}
-
-	// Otherwise, record as heap leak.
-	l.paramEsc.AddHeap(derefs)
-}
-
-func (e *Escape) finish(fns []*Node) {
-	// Record parameter tags for package export data.
-	for _, fn := range fns {
-		fn.Esc = EscFuncTagged
-
-		narg := 0
-		for _, fs := range &types.RecvsParams {
-			for _, f := range fs(fn.Type).Fields().Slice() {
-				narg++
-				f.Note = e.paramTag(fn, narg, f)
-			}
-		}
-	}
-
-	for _, loc := range e.allLocs {
-		n := loc.n
-		if n == nil {
-			continue
-		}
-		n.SetOpt(nil)
-
-		// Update n.Esc based on escape analysis results.
-
-		if loc.escapes {
-			if n.Op != ONAME {
-				if Debug.m != 0 {
-					Warnl(n.Pos, "%S escapes to heap", n)
-				}
-				if logopt.Enabled() {
-					logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
-				}
-			}
-			n.Esc = EscHeap
-			addrescapes(n)
-		} else {
-			if Debug.m != 0 && n.Op != ONAME {
-				Warnl(n.Pos, "%S does not escape", n)
-			}
-			n.Esc = EscNone
-			if loc.transient {
-				n.SetTransient(true)
-			}
-		}
-	}
-}
-
-func (l *EscLocation) isName(c Class) bool {
-	return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
-}
-
-const numEscResults = 7
-
-// An EscLeaks represents a set of assignment flows from a parameter
-// to the heap or to any of its function's (first numEscResults)
-// result parameters.
-type EscLeaks [1 + numEscResults]uint8
-
-// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l EscLeaks) Empty() bool { return l == EscLeaks{} }
-
-// Heap returns the minimum deref count of any assignment flow from l
-// to the heap. If no such flows exist, Heap returns -1.
-func (l EscLeaks) Heap() int { return l.get(0) }
-
-// Result returns the minimum deref count of any assignment flow from
-// l to its function's i'th result parameter. If no such flows exist,
-// Result returns -1.
-func (l EscLeaks) Result(i int) int { return l.get(1 + i) }
-
-// AddHeap adds an assignment flow from l to the heap.
-func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }
-
-// AddResult adds an assignment flow from l to its function's i'th
-// result parameter.
-func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-
-func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }
-
-func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
-
-func (l *EscLeaks) add(i, derefs int) {
-	if old := l.get(i); old < 0 || derefs < old {
-		l.set(i, derefs)
-	}
-}
-
-func (l *EscLeaks) set(i, derefs int) {
-	v := derefs + 1
-	if v < 0 {
-		Fatalf("invalid derefs count: %v", derefs)
-	}
-	if v > math.MaxUint8 {
-		v = math.MaxUint8
-	}
-
-	l[i] = uint8(v)
-}
-
-// Optimize removes result flow paths that are equal in length or
-// longer than the shortest heap flow path.
-func (l *EscLeaks) Optimize() {
-	// If we have a path to the heap, then there's no use in
-	// keeping equal or longer paths elsewhere.
-	if x := l.Heap(); x >= 0 {
-		for i := 0; i < numEscResults; i++ {
-			if l.Result(i) >= x {
-				l.setResult(i, -1)
-			}
-		}
-	}
-}
-
-var leakTagCache = map[EscLeaks]string{}
-
-// Encode converts l into a binary string for export data.
-func (l EscLeaks) Encode() string {
-	if l.Heap() == 0 {
-		// Space optimization: empty string encodes more
-		// efficiently in export data.
-		return ""
-	}
-	if s, ok := leakTagCache[l]; ok {
-		return s
-	}
-
-	n := len(l)
-	for n > 0 && l[n-1] == 0 {
-		n--
-	}
-	s := "esc:" + string(l[:n])
-	leakTagCache[l] = s
-	return s
-}
-
-// ParseLeaks parses a binary string representing an EscLeaks.
-func ParseLeaks(s string) EscLeaks {
-	var l EscLeaks
-	if !strings.HasPrefix(s, "esc:") {
-		l.AddHeap(0)
-		return l
-	}
-	copy(l[:], s[4:])
-	return l
-}
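
The derefs+1 byte encoding behind EscLeaks round-trips as in the standalone sketch below; it keeps the same slot layout and "esc:" prefix but omits the real code's empty-tag space optimization and MaxUint8 clamp.

package main

import (
	"fmt"
	"strings"
)

const numEscResults = 7

// leaks mirrors EscLeaks: slot 0 is the heap, slots 1..numEscResults are
// result parameters, and each byte stores derefs+1 (0 means "no flow").
type leaks [1 + numEscResults]uint8

func (l *leaks) add(i, derefs int) {
	if v := derefs + 1; l[i] == 0 || v < int(l[i]) {
		l[i] = uint8(v)
	}
}

func (l leaks) encode() string {
	n := len(l)
	for n > 0 && l[n-1] == 0 { // trim trailing "no flow" bytes
		n--
	}
	return "esc:" + string(l[:n])
}

func parse(s string) leaks {
	var l leaks
	if !strings.HasPrefix(s, "esc:") {
		l.add(0, 0) // untagged parameters are assumed to leak to the heap
		return l
	}
	copy(l[:], s[4:])
	return l
}

func main() {
	var l leaks
	l.add(0, 2)   // leak to the heap through two dereferences
	l.add(1+0, 0) // direct leak to result parameter 0
	s := l.encode()
	fmt.Printf("%q\n", s)      // "esc:\x03\x01"
	fmt.Println(parse(s) == l) // true: the tag round-trips through export data
}
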
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index c6917e0..356fcfa 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -5,225 +5,68 @@
 package gc
 
 import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/inline"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/bio"
-	"cmd/internal/src"
 	"fmt"
-)
-
-var (
-	Debug_export int // if set, print debugging information about export data
+	"go/constant"
 )
 
 func exportf(bout *bio.Writer, format string, args ...interface{}) {
 	fmt.Fprintf(bout, format, args...)
-	if Debug_export != 0 {
+	if base.Debug.Export != 0 {
 		fmt.Printf(format, args...)
 	}
 }
 
-var asmlist []*Node
-
-// exportsym marks n for export (or reexport).
-func exportsym(n *Node) {
-	if n.Sym.OnExportList() {
-		return
-	}
-	n.Sym.SetOnExportList(true)
-
-	if Debug.E != 0 {
-		fmt.Printf("export symbol %v\n", n.Sym)
-	}
-
-	exportlist = append(exportlist, n)
-}
-
-func initname(s string) bool {
-	return s == "init"
-}
-
-func autoexport(n *Node, ctxt Class) {
-	if n.Sym.Pkg != localpkg {
-		return
-	}
-	if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
-		return
-	}
-	if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
-		return
-	}
-
-	if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
-		exportsym(n)
-	}
-	if asmhdr != "" && !n.Sym.Asm() {
-		n.Sym.SetAsm(true)
-		asmlist = append(asmlist, n)
-	}
-}
-
 func dumpexport(bout *bio.Writer) {
+	p := &exporter{marked: make(map[*types.Type]bool)}
+	for _, n := range typecheck.Target.Exports {
+		p.markObject(n)
+	}
+
 	// The linker also looks for the $$ marker - use char after $$ to distinguish format.
 	exportf(bout, "\n$$B\n") // indicate binary export format
 	off := bout.Offset()
-	iexport(bout.Writer)
+	typecheck.WriteExports(bout.Writer)
 	size := bout.Offset() - off
 	exportf(bout, "\n$$\n")
 
-	if Debug_export != 0 {
-		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
-	}
-}
-
-func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
-	n := asNode(s.PkgDef())
-	if n == nil {
-		// iimport should have created a stub ONONAME
-		// declaration for all imported symbols. The exception
-		// is declarations for Runtimepkg, which are populated
-		// by loadsys instead.
-		if s.Pkg != Runtimepkg {
-			Fatalf("missing ONONAME for %v\n", s)
-		}
-
-		n = dclname(s)
-		s.SetPkgDef(asTypesNode(n))
-		s.Importdef = ipkg
-	}
-	if n.Op != ONONAME && n.Op != op {
-		redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
-	}
-	return n
-}
-
-// importtype returns the named type declared by symbol s.
-// If no such type has been declared yet, a forward declaration is returned.
-// ipkg is the package being imported
-func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
-	n := importsym(ipkg, s, OTYPE)
-	if n.Op != OTYPE {
-		t := types.New(TFORW)
-		t.Sym = s
-		t.Nod = asTypesNode(n)
-
-		n.Op = OTYPE
-		n.Pos = pos
-		n.Type = t
-		n.SetClass(PEXTERN)
-	}
-
-	t := n.Type
-	if t == nil {
-		Fatalf("importtype %v", s)
-	}
-	return t
-}
-
-// importobj declares symbol s as an imported object representable by op.
-// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
-	n := importsym(ipkg, s, op)
-	if n.Op != ONONAME {
-		if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
-			redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
-		}
-		return nil
-	}
-
-	n.Op = op
-	n.Pos = pos
-	n.SetClass(ctxt)
-	if ctxt == PFUNC {
-		n.Sym.SetFunc(true)
-	}
-	n.Type = t
-	return n
-}
-
-// importconst declares symbol s as an imported constant with type t and value val.
-// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
-	n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
-	if n == nil { // TODO: Check that value matches.
-		return
-	}
-
-	n.SetVal(val)
-
-	if Debug.E != 0 {
-		fmt.Printf("import const %v %L = %v\n", s, t, val)
-	}
-}
-
-// importfunc declares symbol s as an imported function with type t.
-// ipkg is the package being imported
-func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
-	n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
-	if n == nil {
-		return
-	}
-
-	n.Func = new(Func)
-	t.SetNname(asTypesNode(n))
-
-	if Debug.E != 0 {
-		fmt.Printf("import func %v%S\n", s, t)
-	}
-}
-
-// importvar declares symbol s as an imported variable with type t.
-// ipkg is the package being imported
-func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
-	n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
-	if n == nil {
-		return
-	}
-
-	if Debug.E != 0 {
-		fmt.Printf("import var %v %L\n", s, t)
-	}
-}
-
-// importalias declares symbol s as an imported type alias with type t.
-// ipkg is the package being imported
-func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
-	n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
-	if n == nil {
-		return
-	}
-
-	if Debug.E != 0 {
-		fmt.Printf("import type %v = %L\n", s, t)
+	if base.Debug.Export != 0 {
+		fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
 	}
 }
 
 func dumpasmhdr() {
-	b, err := bio.Create(asmhdr)
+	b, err := bio.Create(base.Flag.AsmHdr)
 	if err != nil {
-		Fatalf("%v", err)
+		base.Fatalf("%v", err)
 	}
-	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
-	for _, n := range asmlist {
-		if n.Sym.IsBlank() {
+	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
+	for _, n := range typecheck.Target.Asms {
+		if n.Sym().IsBlank() {
 			continue
 		}
-		switch n.Op {
-		case OLITERAL:
-			t := n.Val().Ctype()
-			if t == CTFLT || t == CTCPLX {
+		switch n.Op() {
+		case ir.OLITERAL:
+			t := n.Val().Kind()
+			if t == constant.Float || t == constant.Complex {
 				break
 			}
-			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
+			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
 
-		case OTYPE:
-			t := n.Type
+		case ir.OTYPE:
+			t := n.Type()
 			if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
 				break
 			}
-			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+			fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
 			for _, f := range t.Fields().Slice() {
 				if !f.Sym.IsBlank() {
-					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
+					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
 				}
 			}
 		}
@@ -231,3 +74,83 @@
 
 	b.Close()
 }
+
+type exporter struct {
+	marked map[*types.Type]bool // types already seen by markType
+}
+
+// markObject visits a reachable object.
+func (p *exporter) markObject(n ir.Node) {
+	if n.Op() == ir.ONAME {
+		n := n.(*ir.Name)
+		if n.Class == ir.PFUNC {
+			inline.Inline_Flood(n, typecheck.Export)
+		}
+	}
+
+	p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *exporter) markType(t *types.Type) {
+	if p.marked[t] {
+		return
+	}
+	p.marked[t] = true
+
+	// If this is a named type, mark all of its associated
+	// methods. Skip interface types because t.Methods contains
+	// only their unexpanded method set (i.e., exclusive of
+	// interface embeddings), and the switch statement below
+	// handles their full method set.
+	if t.Sym() != nil && t.Kind() != types.TINTER {
+		for _, m := range t.Methods().Slice() {
+			if types.IsExported(m.Sym.Name) {
+				p.markObject(ir.AsNode(m.Nname))
+			}
+		}
+	}
+
+	// Recursively mark any types that can be produced given a
+	// value of type t: dereferencing a pointer; indexing or
+	// iterating over an array, slice, or map; receiving from a
+	// channel; accessing a struct field or interface method; or
+	// calling a function.
+	//
+	// Notably, we don't mark function parameter types, because
+	// the user already needs some way to construct values of
+	// those types.
+	switch t.Kind() {
+	case types.TPTR, types.TARRAY, types.TSLICE:
+		p.markType(t.Elem())
+
+	case types.TCHAN:
+		if t.ChanDir().CanRecv() {
+			p.markType(t.Elem())
+		}
+
+	case types.TMAP:
+		p.markType(t.Key())
+		p.markType(t.Elem())
+
+	case types.TSTRUCT:
+		for _, f := range t.FieldSlice() {
+			if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+				p.markType(f.Type)
+			}
+		}
+
+	case types.TFUNC:
+		for _, f := range t.Results().FieldSlice() {
+			p.markType(f.Type)
+		}
+
+	case types.TINTER:
+		for _, f := range t.FieldSlice() {
+			if types.IsExported(f.Sym.Name) {
+				p.markType(f.Type)
+			}
+		}
+	}
+}
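
The marking walk above follows a familiar reachability pattern: a marked set makes cyclic type graphs terminate, and only components obtainable from a value of the type are visited. The sketch below uses a deliberately tiny stand-in type graph (kind, typ, and exporter here are illustrative, not the compiler's types.Type).

package main

import "fmt"

type kind int

const (
	kBasic kind = iota
	kPtr
	kStruct
	kFunc
)

type typ struct {
	name    string
	kind    kind
	elem    *typ   // for kPtr
	fields  []*typ // for kStruct (assume all exported)
	results []*typ // for kFunc
}

type exporter struct {
	marked map[*typ]bool
}

// markType marks every type reachable from a value of t, terminating on
// cycles via the marked set.
func (p *exporter) markType(t *typ) {
	if t == nil || p.marked[t] {
		return
	}
	p.marked[t] = true
	switch t.kind {
	case kPtr:
		p.markType(t.elem)
	case kStruct:
		for _, f := range t.fields {
			p.markType(f)
		}
	case kFunc:
		// Results only: parameter types must already be constructible
		// by the caller, so they need no marking here.
		for _, r := range t.results {
			p.markType(r)
		}
	}
}

func main() {
	node := &typ{name: "Node", kind: kStruct}
	node.fields = []*typ{{name: "*Node", kind: kPtr, elem: node}} // cyclic
	p := &exporter{marked: map[*typ]bool{}}
	p.markType(node)
	fmt.Println(len(p.marked)) // 2: the cycle is visited exactly once
}
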
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
deleted file mode 100644
index f92f5d0..0000000
--- a/src/cmd/compile/internal/gc/fmt.go
+++ /dev/null
@@ -1,1986 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"cmd/compile/internal/types"
-	"cmd/internal/src"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode/utf8"
-)
-
-// A FmtFlag value is a set of flags (or 0).
-// They control how the Xconv functions format their values.
-// See the respective function's documentation for details.
-type FmtFlag int
-
-const ( //                                 fmt.Format flag/prec or verb
-	FmtLeft     FmtFlag = 1 << iota // '-'
-	FmtSharp                        // '#'
-	FmtSign                         // '+'
-	FmtUnsigned                     // internal use only (historic: u flag)
-	FmtShort                        // verb == 'S'       (historic: h flag)
-	FmtLong                         // verb == 'L'       (historic: l flag)
-	FmtComma                        // '.' (== hasPrec)  (historic: , flag)
-	FmtByte                         // '0'               (historic: hh flag)
-)
-
-// fmtFlag computes the (internal) FmtFlag
-// value given the fmt.State and format verb.
-func fmtFlag(s fmt.State, verb rune) FmtFlag {
-	var flag FmtFlag
-	if s.Flag('-') {
-		flag |= FmtLeft
-	}
-	if s.Flag('#') {
-		flag |= FmtSharp
-	}
-	if s.Flag('+') {
-		flag |= FmtSign
-	}
-	if s.Flag(' ') {
-		Fatalf("FmtUnsigned in format string")
-	}
-	if _, ok := s.Precision(); ok {
-		flag |= FmtComma
-	}
-	if s.Flag('0') {
-		flag |= FmtByte
-	}
-	switch verb {
-	case 'S':
-		flag |= FmtShort
-	case 'L':
-		flag |= FmtLong
-	}
-	return flag
-}
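
As an isolated illustration of the fmt.State-to-flag mapping above, the toy fmt.Formatter below (an illustrative type, not part of the compiler) simply reports which of the flag names a given verb carries:

package main

import "fmt"

type flagProbe struct{}

// Format inspects the same flags, precision, and verbs that fmtFlag does,
// and prints the corresponding flag names.
func (flagProbe) Format(s fmt.State, verb rune) {
	var set []string
	for _, f := range []struct {
		c    int
		name string
	}{
		{'-', "FmtLeft"},
		{'#', "FmtSharp"},
		{'+', "FmtSign"},
		{'0', "FmtByte"},
	} {
		if s.Flag(f.c) {
			set = append(set, f.name)
		}
	}
	if _, ok := s.Precision(); ok {
		set = append(set, "FmtComma")
	}
	switch verb {
	case 'S':
		set = append(set, "FmtShort")
	case 'L':
		set = append(set, "FmtLong")
	}
	fmt.Fprintf(s, "%v", set)
}

func main() {
	fmt.Printf("%+v\n", flagProbe{})  // [FmtSign]
	fmt.Printf("%#S\n", flagProbe{})  // [FmtSharp FmtShort]
	fmt.Printf("%.2L\n", flagProbe{}) // [FmtComma FmtLong]
}
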
-
-// Format conversions:
-// TODO(gri) verify these; eliminate those not used anymore
-//
-//	%v Op		Node opcodes
-//		Flags:  #: print Go syntax (automatic unless mode == FDbg)
-//
-//	%j *Node	Node details
-//		Flags:  0: suppresses things not relevant until walk
-//
-//	%v *Val		Constant values
-//
-//	%v *types.Sym		Symbols
-//	%S              unqualified identifier in any mode
-//		Flags:  +,- #: mode (see below)
-//			0: in export mode: unqualified identifier if exported, qualified if not
-//
-//	%v *types.Type	Types
-//	%S              omit "func" and receiver in function types
-//	%L              definition instead of name.
-//		Flags:  +,- #: mode (see below)
-//			' ' (only in -/Sym mode) print type identifiers with package name instead of prefix.
-//
-//	%v *Node	Nodes
-//	%S              (only in +/debug mode) suppress recursion
-//	%L              (only in Error mode) print "foo (type Bar)"
-//		Flags:  +,- #: mode (see below)
-//
-//	%v Nodes	Node lists
-//		Flags:  those of *Node
-//			.: separate items with ',' instead of ';'
-
-// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode
-const (
-	FErr fmtMode = iota
-	FDbg
-	FTypeId
-	FTypeIdName // same as FTypeId, but use package name instead of prefix
-)
-
-// The mode flags '+', '-', and '#' are sticky; they persist through
-// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is
-// sticky only on *types.Type recursions and only used in %-/*types.Sym mode.
-//
-// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
-
-// Useful format combinations:
-// TODO(gri): verify these
-//
-// *Node, Nodes:
-//   %+v    multiline recursive debug dump of *Node/Nodes
-//   %+S    non-recursive debug dump
-//
-// *Node:
-//   %#v    Go format
-//   %L     "foo (type Bar)" for error messages
-//
-// *types.Type:
-//   %#v    Go format
-//   %#L    type definition instead of name
-//   %#S    omit "func" and receiver in function signature
-//
-//   %-v    type identifiers
-//   %-S    type identifiers without "func" and arg names in type signatures (methodsym)
-//   %- v   type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
-
-// update returns the results of applying f to mode.
-func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) {
-	switch {
-	case f&FmtSign != 0:
-		mode = FDbg
-	case f&FmtSharp != 0:
-		// ignore (textual export format no longer supported)
-	case f&FmtUnsigned != 0:
-		mode = FTypeIdName
-	case f&FmtLeft != 0:
-		mode = FTypeId
-	}
-
-	f &^= FmtSharp | FmtLeft | FmtSign