all: handle Go 1.18 code on Go 1.16 AppEngine using new internal/backport/go/*

The immediate need was go/build, because the Go 1.16 go/build rejects
code that has //go:build without // +build, as Go 1.18 code now does.

But displaying code using generics would also have failed, for inability
to parse that code. Add the full go/ast, go/doc, go/format, go/parser,
go/printer, go/scanner, and go/token to fix those failures, which
would become more frequent as generics are used more inside the
standard library.

The code is all from the go1.18 tag of the Go repo, post-processed to
rewrite imports and apply gofmt -w -r 'any -> interface{}' to keep it
building with the other backports and with Go 1.16 syntax.

For golang/go#51686.

Change-Id: I1e14f4634d8bc09bdaa04c014eadb1be97ea5047
Reviewed-on: https://go-review.googlesource.com/c/website/+/393194
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
diff --git a/cmd/golangorg/server.go b/cmd/golangorg/server.go
index 41d2c6b..88c8d32 100644
--- a/cmd/golangorg/server.go
+++ b/cmd/golangorg/server.go
@@ -14,7 +14,6 @@
 	"errors"
 	"flag"
 	"fmt"
-	"go/format"
 	"io/fs"
 	"io/ioutil"
 	"log"
@@ -32,6 +31,7 @@
 	"cloud.google.com/go/datastore"
 	"golang.org/x/build/repos"
 	"golang.org/x/website"
+	"golang.org/x/website/internal/backport/go/format"
 	"golang.org/x/website/internal/backport/html/template"
 	"golang.org/x/website/internal/blog"
 	"golang.org/x/website/internal/codewalk"
diff --git a/internal/api/api_test.go b/internal/api/api_test.go
index 98b1dd4..7928627 100644
--- a/internal/api/api_test.go
+++ b/internal/api/api_test.go
@@ -5,10 +5,11 @@
 package api
 
 import (
-	"go/build"
 	"os"
 	"runtime"
 	"testing"
+
+	"golang.org/x/website/internal/backport/go/build"
 )
 
 func TestParseVersionRow(t *testing.T) {
diff --git a/internal/backport/go/ast/ast.go b/internal/backport/go/ast/ast.go
new file mode 100644
index 0000000..279e58e
--- /dev/null
+++ b/internal/backport/go/ast/ast.go
@@ -0,0 +1,1063 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ast declares the types used to represent syntax trees for Go
+// packages.
+package ast
+
+import (
+	"golang.org/x/website/internal/backport/go/token"
+	"strings"
+)
+
+// ----------------------------------------------------------------------------
+// Interfaces
+//
+// There are 3 main classes of nodes: Expressions and type nodes,
+// statement nodes, and declaration nodes. The node names usually
+// match the corresponding Go spec production names to which they
+// correspond. The node fields correspond to the individual parts
+// of the respective productions.
+//
+// All nodes contain position information marking the beginning of
+// the corresponding source text segment; it is accessible via the
+// Pos accessor method. Nodes may contain additional position info
+// for language constructs where comments may be found between parts
+// of the construct (typically any larger, parenthesized subpart).
+// That position information is needed to properly position comments
+// when printing the construct.
+
+// All node types implement the Node interface.
+type Node interface {
+	Pos() token.Pos // position of first character belonging to the node
+	End() token.Pos // position of first character immediately after the node
+}
+
+// All expression nodes implement the Expr interface.
+type Expr interface {
+	Node
+	exprNode()
+}
+
+// All statement nodes implement the Stmt interface.
+type Stmt interface {
+	Node
+	stmtNode()
+}
+
+// All declaration nodes implement the Decl interface.
+type Decl interface {
+	Node
+	declNode()
+}
+
+// ----------------------------------------------------------------------------
+// Comments
+
+// A Comment node represents a single //-style or /*-style comment.
+//
+// The Text field contains the comment text without carriage returns (\r) that
+// may have been present in the source. Because a comment's end position is
+// computed using len(Text), the position reported by End() does not match the
+// true source end position for comments containing carriage returns.
+type Comment struct {
+	Slash token.Pos // position of "/" starting the comment
+	Text  string    // comment text (excluding '\n' for //-style comments)
+}
+
+func (c *Comment) Pos() token.Pos { return c.Slash }
+func (c *Comment) End() token.Pos { return token.Pos(int(c.Slash) + len(c.Text)) }
+
+// A CommentGroup represents a sequence of comments
+// with no other tokens and no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (g *CommentGroup) Pos() token.Pos { return g.List[0].Pos() }
+func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() }
+
+func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
+
+func stripTrailingWhitespace(s string) string {
+	i := len(s)
+	for i > 0 && isWhitespace(s[i-1]) {
+		i--
+	}
+	return s[0:i]
+}
+
+// Text returns the text of the comment.
+// Comment markers (//, /*, and */), the first space of a line comment, and
+// leading and trailing empty lines are removed.
+// Comment directives like "//line" and "//go:noinline" are also removed.
+// Multiple empty lines are reduced to one, and trailing space on lines is trimmed.
+// Unless the result is empty, it is newline-terminated.
+func (g *CommentGroup) Text() string {
+	if g == nil {
+		return ""
+	}
+	comments := make([]string, len(g.List))
+	for i, c := range g.List {
+		comments[i] = c.Text
+	}
+
+	lines := make([]string, 0, 10) // most comments are less than 10 lines
+	for _, c := range comments {
+		// Remove comment markers.
+		// The parser has given us exactly the comment text.
+		switch c[1] {
+		case '/':
+			//-style comment (no newline at the end)
+			c = c[2:]
+			if len(c) == 0 {
+				// empty line
+				break
+			}
+			if c[0] == ' ' {
+				// strip first space - required for Example tests
+				c = c[1:]
+				break
+			}
+			if isDirective(c) {
+				// Ignore //go:noinline, //line, and so on.
+				continue
+			}
+		case '*':
+			/*-style comment */
+			c = c[2 : len(c)-2]
+		}
+
+		// Split on newlines.
+		cl := strings.Split(c, "\n")
+
+		// Walk lines, stripping trailing white space and adding to list.
+		for _, l := range cl {
+			lines = append(lines, stripTrailingWhitespace(l))
+		}
+	}
+
+	// Remove leading blank lines; convert runs of
+	// interior blank lines to a single blank line.
+	n := 0
+	for _, line := range lines {
+		if line != "" || n > 0 && lines[n-1] != "" {
+			lines[n] = line
+			n++
+		}
+	}
+	lines = lines[0:n]
+
+	// Add final "" entry to get trailing newline from Join.
+	if n > 0 && lines[n-1] != "" {
+		lines = append(lines, "")
+	}
+
+	return strings.Join(lines, "\n")
+}
+
+// isDirective reports whether c is a comment directive.
+func isDirective(c string) bool {
+	// "//line " is a line directive.
+	// (The // has been removed.)
+	if strings.HasPrefix(c, "line ") {
+		return true
+	}
+
+	// "//[a-z0-9]+:[a-z0-9]"
+	// (The // has been removed.)
+	colon := strings.Index(c, ":")
+	if colon <= 0 || colon+1 >= len(c) {
+		return false
+	}
+	for i := 0; i <= colon+1; i++ {
+		if i == colon {
+			continue
+		}
+		b := c[i]
+		if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') {
+			return false
+		}
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Expressions and types
+
+// A Field represents a Field declaration list in a struct type,
+// a method list in an interface type, or a parameter/result declaration
+// in a signature.
+// Field.Names is nil for unnamed parameters (parameter lists which only contain types)
+// and embedded struct fields. In the latter case, the field name is the type name.
+type Field struct {
+	Doc     *CommentGroup // associated documentation; or nil
+	Names   []*Ident      // field/method/(type) parameter names; or nil
+	Type    Expr          // field/method/parameter type; or nil
+	Tag     *BasicLit     // field tag; or nil
+	Comment *CommentGroup // line comments; or nil
+}
+
+func (f *Field) Pos() token.Pos {
+	if len(f.Names) > 0 {
+		return f.Names[0].Pos()
+	}
+	if f.Type != nil {
+		return f.Type.Pos()
+	}
+	return token.NoPos
+}
+
+func (f *Field) End() token.Pos {
+	if f.Tag != nil {
+		return f.Tag.End()
+	}
+	if f.Type != nil {
+		return f.Type.End()
+	}
+	if len(f.Names) > 0 {
+		return f.Names[len(f.Names)-1].End()
+	}
+	return token.NoPos
+}
+
+// A FieldList represents a list of Fields, enclosed by parentheses,
+// curly braces, or square brackets.
+type FieldList struct {
+	Opening token.Pos // position of opening parenthesis/brace/bracket, if any
+	List    []*Field  // field list; or nil
+	Closing token.Pos // position of closing parenthesis/brace/bracket, if any
+}
+
+func (f *FieldList) Pos() token.Pos {
+	if f.Opening.IsValid() {
+		return f.Opening
+	}
+	// the list should not be empty in this case;
+	// be conservative and guard against bad ASTs
+	if len(f.List) > 0 {
+		return f.List[0].Pos()
+	}
+	return token.NoPos
+}
+
+func (f *FieldList) End() token.Pos {
+	if f.Closing.IsValid() {
+		return f.Closing + 1
+	}
+	// the list should not be empty in this case;
+	// be conservative and guard against bad ASTs
+	if n := len(f.List); n > 0 {
+		return f.List[n-1].End()
+	}
+	return token.NoPos
+}
+
+// NumFields returns the number of parameters or struct fields represented by a FieldList.
+func (f *FieldList) NumFields() int {
+	n := 0
+	if f != nil {
+		for _, g := range f.List {
+			m := len(g.Names)
+			if m == 0 {
+				m = 1
+			}
+			n += m
+		}
+	}
+	return n
+}
+
+// An expression is represented by a tree consisting of one
+// or more of the following concrete expression nodes.
+type (
+	// A BadExpr node is a placeholder for an expression containing
+	// syntax errors for which a correct expression node cannot be
+	// created.
+	//
+	BadExpr struct {
+		From, To token.Pos // position range of bad expression
+	}
+
+	// An Ident node represents an identifier.
+	Ident struct {
+		NamePos token.Pos // identifier position
+		Name    string    // identifier name
+		Obj     *Object   // denoted object; or nil
+	}
+
+	// An Ellipsis node stands for the "..." type in a
+	// parameter list or the "..." length in an array type.
+	//
+	Ellipsis struct {
+		Ellipsis token.Pos // position of "..."
+		Elt      Expr      // ellipsis element type (parameter lists only); or nil
+	}
+
+	// A BasicLit node represents a literal of basic type.
+	BasicLit struct {
+		ValuePos token.Pos   // literal position
+		Kind     token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING
+		Value    string      // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
+	}
+
+	// A FuncLit node represents a function literal.
+	FuncLit struct {
+		Type *FuncType  // function type
+		Body *BlockStmt // function body
+	}
+
+	// A CompositeLit node represents a composite literal.
+	CompositeLit struct {
+		Type       Expr      // literal type; or nil
+		Lbrace     token.Pos // position of "{"
+		Elts       []Expr    // list of composite elements; or nil
+		Rbrace     token.Pos // position of "}"
+		Incomplete bool      // true if (source) expressions are missing in the Elts list
+	}
+
+	// A ParenExpr node represents a parenthesized expression.
+	ParenExpr struct {
+		Lparen token.Pos // position of "("
+		X      Expr      // parenthesized expression
+		Rparen token.Pos // position of ")"
+	}
+
+	// A SelectorExpr node represents an expression followed by a selector.
+	SelectorExpr struct {
+		X   Expr   // expression
+		Sel *Ident // field selector
+	}
+
+	// An IndexExpr node represents an expression followed by an index.
+	IndexExpr struct {
+		X      Expr      // expression
+		Lbrack token.Pos // position of "["
+		Index  Expr      // index expression
+		Rbrack token.Pos // position of "]"
+	}
+
+	// An IndexListExpr node represents an expression followed by multiple
+	// indices.
+	IndexListExpr struct {
+		X       Expr      // expression
+		Lbrack  token.Pos // position of "["
+		Indices []Expr    // index expressions
+		Rbrack  token.Pos // position of "]"
+	}
+
+	// A SliceExpr node represents an expression followed by slice indices.
+	SliceExpr struct {
+		X      Expr      // expression
+		Lbrack token.Pos // position of "["
+		Low    Expr      // begin of slice range; or nil
+		High   Expr      // end of slice range; or nil
+		Max    Expr      // maximum capacity of slice; or nil
+		Slice3 bool      // true if 3-index slice (2 colons present)
+		Rbrack token.Pos // position of "]"
+	}
+
+	// A TypeAssertExpr node represents an expression followed by a
+	// type assertion.
+	//
+	TypeAssertExpr struct {
+		X      Expr      // expression
+		Lparen token.Pos // position of "("
+		Type   Expr      // asserted type; nil means type switch X.(type)
+		Rparen token.Pos // position of ")"
+	}
+
+	// A CallExpr node represents an expression followed by an argument list.
+	CallExpr struct {
+		Fun      Expr      // function expression
+		Lparen   token.Pos // position of "("
+		Args     []Expr    // function arguments; or nil
+		Ellipsis token.Pos // position of "..." (token.NoPos if there is no "...")
+		Rparen   token.Pos // position of ")"
+	}
+
+	// A StarExpr node represents an expression of the form "*" Expression.
+	// Semantically it could be a unary "*" expression, or a pointer type.
+	//
+	StarExpr struct {
+		Star token.Pos // position of "*"
+		X    Expr      // operand
+	}
+
+	// A UnaryExpr node represents a unary expression.
+	// Unary "*" expressions are represented via StarExpr nodes.
+	//
+	UnaryExpr struct {
+		OpPos token.Pos   // position of Op
+		Op    token.Token // operator
+		X     Expr        // operand
+	}
+
+	// A BinaryExpr node represents a binary expression.
+	BinaryExpr struct {
+		X     Expr        // left operand
+		OpPos token.Pos   // position of Op
+		Op    token.Token // operator
+		Y     Expr        // right operand
+	}
+
+	// A KeyValueExpr node represents (key : value) pairs
+	// in composite literals.
+	//
+	KeyValueExpr struct {
+		Key   Expr
+		Colon token.Pos // position of ":"
+		Value Expr
+	}
+)
+
+// The direction of a channel type is indicated by a bit
+// mask including one or both of the following constants.
+type ChanDir int
+
+const (
+	SEND ChanDir = 1 << iota
+	RECV
+)
+
+// A type is represented by a tree consisting of one
+// or more of the following type-specific expression
+// nodes.
+type (
+	// An ArrayType node represents an array or slice type.
+	ArrayType struct {
+		Lbrack token.Pos // position of "["
+		Len    Expr      // Ellipsis node for [...]T array types, nil for slice types
+		Elt    Expr      // element type
+	}
+
+	// A StructType node represents a struct type.
+	StructType struct {
+		Struct     token.Pos  // position of "struct" keyword
+		Fields     *FieldList // list of field declarations
+		Incomplete bool       // true if (source) fields are missing in the Fields list
+	}
+
+	// Pointer types are represented via StarExpr nodes.
+
+	// A FuncType node represents a function type.
+	FuncType struct {
+		Func       token.Pos  // position of "func" keyword (token.NoPos if there is no "func")
+		TypeParams *FieldList // type parameters; or nil
+		Params     *FieldList // (incoming) parameters; non-nil
+		Results    *FieldList // (outgoing) results; or nil
+	}
+
+	// An InterfaceType node represents an interface type.
+	InterfaceType struct {
+		Interface  token.Pos  // position of "interface" keyword
+		Methods    *FieldList // list of embedded interfaces, methods, or types
+		Incomplete bool       // true if (source) methods or types are missing in the Methods list
+	}
+
+	// A MapType node represents a map type.
+	MapType struct {
+		Map   token.Pos // position of "map" keyword
+		Key   Expr
+		Value Expr
+	}
+
+	// A ChanType node represents a channel type.
+	ChanType struct {
+		Begin token.Pos // position of "chan" keyword or "<-" (whichever comes first)
+		Arrow token.Pos // position of "<-" (token.NoPos if there is no "<-")
+		Dir   ChanDir   // channel direction
+		Value Expr      // value type
+	}
+)
+
+// Pos and End implementations for expression/type nodes.
+
+func (x *BadExpr) Pos() token.Pos  { return x.From }
+func (x *Ident) Pos() token.Pos    { return x.NamePos }
+func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis }
+func (x *BasicLit) Pos() token.Pos { return x.ValuePos }
+func (x *FuncLit) Pos() token.Pos  { return x.Type.Pos() }
+func (x *CompositeLit) Pos() token.Pos {
+	if x.Type != nil {
+		return x.Type.Pos()
+	}
+	return x.Lbrace
+}
+func (x *ParenExpr) Pos() token.Pos      { return x.Lparen }
+func (x *SelectorExpr) Pos() token.Pos   { return x.X.Pos() }
+func (x *IndexExpr) Pos() token.Pos      { return x.X.Pos() }
+func (x *IndexListExpr) Pos() token.Pos  { return x.X.Pos() }
+func (x *SliceExpr) Pos() token.Pos      { return x.X.Pos() }
+func (x *TypeAssertExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *CallExpr) Pos() token.Pos       { return x.Fun.Pos() }
+func (x *StarExpr) Pos() token.Pos       { return x.Star }
+func (x *UnaryExpr) Pos() token.Pos      { return x.OpPos }
+func (x *BinaryExpr) Pos() token.Pos     { return x.X.Pos() }
+func (x *KeyValueExpr) Pos() token.Pos   { return x.Key.Pos() }
+func (x *ArrayType) Pos() token.Pos      { return x.Lbrack }
+func (x *StructType) Pos() token.Pos     { return x.Struct }
+func (x *FuncType) Pos() token.Pos {
+	if x.Func.IsValid() || x.Params == nil { // see issue 3870
+		return x.Func
+	}
+	return x.Params.Pos() // interface method declarations have no "func" keyword
+}
+func (x *InterfaceType) Pos() token.Pos { return x.Interface }
+func (x *MapType) Pos() token.Pos       { return x.Map }
+func (x *ChanType) Pos() token.Pos      { return x.Begin }
+
+func (x *BadExpr) End() token.Pos { return x.To }
+func (x *Ident) End() token.Pos   { return token.Pos(int(x.NamePos) + len(x.Name)) }
+func (x *Ellipsis) End() token.Pos {
+	if x.Elt != nil {
+		return x.Elt.End()
+	}
+	return x.Ellipsis + 3 // len("...")
+}
+func (x *BasicLit) End() token.Pos       { return token.Pos(int(x.ValuePos) + len(x.Value)) }
+func (x *FuncLit) End() token.Pos        { return x.Body.End() }
+func (x *CompositeLit) End() token.Pos   { return x.Rbrace + 1 }
+func (x *ParenExpr) End() token.Pos      { return x.Rparen + 1 }
+func (x *SelectorExpr) End() token.Pos   { return x.Sel.End() }
+func (x *IndexExpr) End() token.Pos      { return x.Rbrack + 1 }
+func (x *IndexListExpr) End() token.Pos  { return x.Rbrack + 1 }
+func (x *SliceExpr) End() token.Pos      { return x.Rbrack + 1 }
+func (x *TypeAssertExpr) End() token.Pos { return x.Rparen + 1 }
+func (x *CallExpr) End() token.Pos       { return x.Rparen + 1 }
+func (x *StarExpr) End() token.Pos       { return x.X.End() }
+func (x *UnaryExpr) End() token.Pos      { return x.X.End() }
+func (x *BinaryExpr) End() token.Pos     { return x.Y.End() }
+func (x *KeyValueExpr) End() token.Pos   { return x.Value.End() }
+func (x *ArrayType) End() token.Pos      { return x.Elt.End() }
+func (x *StructType) End() token.Pos     { return x.Fields.End() }
+func (x *FuncType) End() token.Pos {
+	if x.Results != nil {
+		return x.Results.End()
+	}
+	return x.Params.End()
+}
+func (x *InterfaceType) End() token.Pos { return x.Methods.End() }
+func (x *MapType) End() token.Pos       { return x.Value.End() }
+func (x *ChanType) End() token.Pos      { return x.Value.End() }
+
+// exprNode() ensures that only expression/type nodes can be
+// assigned to an Expr.
+func (*BadExpr) exprNode()        {}
+func (*Ident) exprNode()          {}
+func (*Ellipsis) exprNode()       {}
+func (*BasicLit) exprNode()       {}
+func (*FuncLit) exprNode()        {}
+func (*CompositeLit) exprNode()   {}
+func (*ParenExpr) exprNode()      {}
+func (*SelectorExpr) exprNode()   {}
+func (*IndexExpr) exprNode()      {}
+func (*IndexListExpr) exprNode()  {}
+func (*SliceExpr) exprNode()      {}
+func (*TypeAssertExpr) exprNode() {}
+func (*CallExpr) exprNode()       {}
+func (*StarExpr) exprNode()       {}
+func (*UnaryExpr) exprNode()      {}
+func (*BinaryExpr) exprNode()     {}
+func (*KeyValueExpr) exprNode()   {}
+
+func (*ArrayType) exprNode()     {}
+func (*StructType) exprNode()    {}
+func (*FuncType) exprNode()      {}
+func (*InterfaceType) exprNode() {}
+func (*MapType) exprNode()       {}
+func (*ChanType) exprNode()      {}
+
+// ----------------------------------------------------------------------------
+// Convenience functions for Idents
+
+// NewIdent creates a new Ident without position.
+// Useful for ASTs generated by code other than the Go parser.
+func NewIdent(name string) *Ident { return &Ident{token.NoPos, name, nil} }
+
+// IsExported reports whether name starts with an upper-case letter.
+func IsExported(name string) bool { return token.IsExported(name) }
+
+// IsExported reports whether id starts with an upper-case letter.
+func (id *Ident) IsExported() bool { return token.IsExported(id.Name) }
+
+func (id *Ident) String() string {
+	if id != nil {
+		return id.Name
+	}
+	return "<nil>"
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// A statement is represented by a tree consisting of one
+// or more of the following concrete statement nodes.
+type (
+	// A BadStmt node is a placeholder for statements containing
+	// syntax errors for which no correct statement nodes can be
+	// created.
+	//
+	BadStmt struct {
+		From, To token.Pos // position range of bad statement
+	}
+
+	// A DeclStmt node represents a declaration in a statement list.
+	DeclStmt struct {
+		Decl Decl // *GenDecl with CONST, TYPE, or VAR token
+	}
+
+	// An EmptyStmt node represents an empty statement.
+	// The "position" of the empty statement is the position
+	// of the immediately following (explicit or implicit) semicolon.
+	//
+	EmptyStmt struct {
+		Semicolon token.Pos // position of following ";"
+		Implicit  bool      // if set, ";" was omitted in the source
+	}
+
+	// A LabeledStmt node represents a labeled statement.
+	LabeledStmt struct {
+		Label *Ident
+		Colon token.Pos // position of ":"
+		Stmt  Stmt
+	}
+
+	// An ExprStmt node represents a (stand-alone) expression
+	// in a statement list.
+	//
+	ExprStmt struct {
+		X Expr // expression
+	}
+
+	// A SendStmt node represents a send statement.
+	SendStmt struct {
+		Chan  Expr
+		Arrow token.Pos // position of "<-"
+		Value Expr
+	}
+
+	// An IncDecStmt node represents an increment or decrement statement.
+	IncDecStmt struct {
+		X      Expr
+		TokPos token.Pos   // position of Tok
+		Tok    token.Token // INC or DEC
+	}
+
+	// An AssignStmt node represents an assignment or
+	// a short variable declaration.
+	//
+	AssignStmt struct {
+		Lhs    []Expr
+		TokPos token.Pos   // position of Tok
+		Tok    token.Token // assignment token, DEFINE
+		Rhs    []Expr
+	}
+
+	// A GoStmt node represents a go statement.
+	GoStmt struct {
+		Go   token.Pos // position of "go" keyword
+		Call *CallExpr
+	}
+
+	// A DeferStmt node represents a defer statement.
+	DeferStmt struct {
+		Defer token.Pos // position of "defer" keyword
+		Call  *CallExpr
+	}
+
+	// A ReturnStmt node represents a return statement.
+	ReturnStmt struct {
+		Return  token.Pos // position of "return" keyword
+		Results []Expr    // result expressions; or nil
+	}
+
+	// A BranchStmt node represents a break, continue, goto,
+	// or fallthrough statement.
+	//
+	BranchStmt struct {
+		TokPos token.Pos   // position of Tok
+		Tok    token.Token // keyword token (BREAK, CONTINUE, GOTO, FALLTHROUGH)
+		Label  *Ident      // label name; or nil
+	}
+
+	// A BlockStmt node represents a braced statement list.
+	BlockStmt struct {
+		Lbrace token.Pos // position of "{"
+		List   []Stmt
+		Rbrace token.Pos // position of "}", if any (may be absent due to syntax error)
+	}
+
+	// An IfStmt node represents an if statement.
+	IfStmt struct {
+		If   token.Pos // position of "if" keyword
+		Init Stmt      // initialization statement; or nil
+		Cond Expr      // condition
+		Body *BlockStmt
+		Else Stmt // else branch; or nil
+	}
+
+	// A CaseClause represents a case of an expression or type switch statement.
+	CaseClause struct {
+		Case  token.Pos // position of "case" or "default" keyword
+		List  []Expr    // list of expressions or types; nil means default case
+		Colon token.Pos // position of ":"
+		Body  []Stmt    // statement list; or nil
+	}
+
+	// A SwitchStmt node represents an expression switch statement.
+	SwitchStmt struct {
+		Switch token.Pos  // position of "switch" keyword
+		Init   Stmt       // initialization statement; or nil
+		Tag    Expr       // tag expression; or nil
+		Body   *BlockStmt // CaseClauses only
+	}
+
+	// A TypeSwitchStmt node represents a type switch statement.
+	TypeSwitchStmt struct {
+		Switch token.Pos  // position of "switch" keyword
+		Init   Stmt       // initialization statement; or nil
+		Assign Stmt       // x := y.(type) or y.(type)
+		Body   *BlockStmt // CaseClauses only
+	}
+
+	// A CommClause node represents a case of a select statement.
+	CommClause struct {
+		Case  token.Pos // position of "case" or "default" keyword
+		Comm  Stmt      // send or receive statement; nil means default case
+		Colon token.Pos // position of ":"
+		Body  []Stmt    // statement list; or nil
+	}
+
+	// A SelectStmt node represents a select statement.
+	SelectStmt struct {
+		Select token.Pos  // position of "select" keyword
+		Body   *BlockStmt // CommClauses only
+	}
+
+	// A ForStmt represents a for statement.
+	ForStmt struct {
+		For  token.Pos // position of "for" keyword
+		Init Stmt      // initialization statement; or nil
+		Cond Expr      // condition; or nil
+		Post Stmt      // post iteration statement; or nil
+		Body *BlockStmt
+	}
+
+	// A RangeStmt represents a for statement with a range clause.
+	RangeStmt struct {
+		For        token.Pos   // position of "for" keyword
+		Key, Value Expr        // Key, Value may be nil
+		TokPos     token.Pos   // position of Tok; invalid if Key == nil
+		Tok        token.Token // ILLEGAL if Key == nil, ASSIGN, DEFINE
+		X          Expr        // value to range over
+		Body       *BlockStmt
+	}
+)
+
+// Pos and End implementations for statement nodes.
+
+func (s *BadStmt) Pos() token.Pos        { return s.From }
+func (s *DeclStmt) Pos() token.Pos       { return s.Decl.Pos() }
+func (s *EmptyStmt) Pos() token.Pos      { return s.Semicolon }
+func (s *LabeledStmt) Pos() token.Pos    { return s.Label.Pos() }
+func (s *ExprStmt) Pos() token.Pos       { return s.X.Pos() }
+func (s *SendStmt) Pos() token.Pos       { return s.Chan.Pos() }
+func (s *IncDecStmt) Pos() token.Pos     { return s.X.Pos() }
+func (s *AssignStmt) Pos() token.Pos     { return s.Lhs[0].Pos() }
+func (s *GoStmt) Pos() token.Pos         { return s.Go }
+func (s *DeferStmt) Pos() token.Pos      { return s.Defer }
+func (s *ReturnStmt) Pos() token.Pos     { return s.Return }
+func (s *BranchStmt) Pos() token.Pos     { return s.TokPos }
+func (s *BlockStmt) Pos() token.Pos      { return s.Lbrace }
+func (s *IfStmt) Pos() token.Pos         { return s.If }
+func (s *CaseClause) Pos() token.Pos     { return s.Case }
+func (s *SwitchStmt) Pos() token.Pos     { return s.Switch }
+func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch }
+func (s *CommClause) Pos() token.Pos     { return s.Case }
+func (s *SelectStmt) Pos() token.Pos     { return s.Select }
+func (s *ForStmt) Pos() token.Pos        { return s.For }
+func (s *RangeStmt) Pos() token.Pos      { return s.For }
+
+func (s *BadStmt) End() token.Pos  { return s.To }
+func (s *DeclStmt) End() token.Pos { return s.Decl.End() }
+func (s *EmptyStmt) End() token.Pos {
+	if s.Implicit {
+		return s.Semicolon
+	}
+	return s.Semicolon + 1 /* len(";") */
+}
+func (s *LabeledStmt) End() token.Pos { return s.Stmt.End() }
+func (s *ExprStmt) End() token.Pos    { return s.X.End() }
+func (s *SendStmt) End() token.Pos    { return s.Value.End() }
+func (s *IncDecStmt) End() token.Pos {
+	return s.TokPos + 2 /* len("++") */
+}
+func (s *AssignStmt) End() token.Pos { return s.Rhs[len(s.Rhs)-1].End() }
+func (s *GoStmt) End() token.Pos     { return s.Call.End() }
+func (s *DeferStmt) End() token.Pos  { return s.Call.End() }
+func (s *ReturnStmt) End() token.Pos {
+	if n := len(s.Results); n > 0 {
+		return s.Results[n-1].End()
+	}
+	return s.Return + 6 // len("return")
+}
+func (s *BranchStmt) End() token.Pos {
+	if s.Label != nil {
+		return s.Label.End()
+	}
+	return token.Pos(int(s.TokPos) + len(s.Tok.String()))
+}
+func (s *BlockStmt) End() token.Pos {
+	if s.Rbrace.IsValid() {
+		return s.Rbrace + 1
+	}
+	if n := len(s.List); n > 0 {
+		return s.List[n-1].End()
+	}
+	return s.Lbrace + 1
+}
+func (s *IfStmt) End() token.Pos {
+	if s.Else != nil {
+		return s.Else.End()
+	}
+	return s.Body.End()
+}
+func (s *CaseClause) End() token.Pos {
+	if n := len(s.Body); n > 0 {
+		return s.Body[n-1].End()
+	}
+	return s.Colon + 1
+}
+func (s *SwitchStmt) End() token.Pos     { return s.Body.End() }
+func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() }
+func (s *CommClause) End() token.Pos {
+	if n := len(s.Body); n > 0 {
+		return s.Body[n-1].End()
+	}
+	return s.Colon + 1
+}
+func (s *SelectStmt) End() token.Pos { return s.Body.End() }
+func (s *ForStmt) End() token.Pos    { return s.Body.End() }
+func (s *RangeStmt) End() token.Pos  { return s.Body.End() }
+
+// stmtNode() ensures that only statement nodes can be
+// assigned to a Stmt.
+func (*BadStmt) stmtNode()        {}
+func (*DeclStmt) stmtNode()       {}
+func (*EmptyStmt) stmtNode()      {}
+func (*LabeledStmt) stmtNode()    {}
+func (*ExprStmt) stmtNode()       {}
+func (*SendStmt) stmtNode()       {}
+func (*IncDecStmt) stmtNode()     {}
+func (*AssignStmt) stmtNode()     {}
+func (*GoStmt) stmtNode()         {}
+func (*DeferStmt) stmtNode()      {}
+func (*ReturnStmt) stmtNode()     {}
+func (*BranchStmt) stmtNode()     {}
+func (*BlockStmt) stmtNode()      {}
+func (*IfStmt) stmtNode()         {}
+func (*CaseClause) stmtNode()     {}
+func (*SwitchStmt) stmtNode()     {}
+func (*TypeSwitchStmt) stmtNode() {}
+func (*CommClause) stmtNode()     {}
+func (*SelectStmt) stmtNode()     {}
+func (*ForStmt) stmtNode()        {}
+func (*RangeStmt) stmtNode()      {}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// A Spec node represents a single (non-parenthesized) import,
+// constant, type, or variable declaration.
+type (
+	// The Spec type stands for any of *ImportSpec, *ValueSpec, and *TypeSpec.
+	Spec interface {
+		Node
+		specNode()
+	}
+
+	// An ImportSpec node represents a single package import.
+	ImportSpec struct {
+		Doc     *CommentGroup // associated documentation; or nil
+		Name    *Ident        // local package name (including "."); or nil
+		Path    *BasicLit     // import path
+		Comment *CommentGroup // line comments; or nil
+		EndPos  token.Pos     // end of spec (overrides Path.Pos if nonzero)
+	}
+
+	// A ValueSpec node represents a constant or variable declaration
+	// (ConstSpec or VarSpec production).
+	//
+	ValueSpec struct {
+		Doc     *CommentGroup // associated documentation; or nil
+		Names   []*Ident      // value names (len(Names) > 0)
+		Type    Expr          // value type; or nil
+		Values  []Expr        // initial values; or nil
+		Comment *CommentGroup // line comments; or nil
+	}
+
+	// A TypeSpec node represents a type declaration (TypeSpec production).
+	TypeSpec struct {
+		Doc        *CommentGroup // associated documentation; or nil
+		Name       *Ident        // type name
+		TypeParams *FieldList    // type parameters; or nil
+		// A valid Assign position indicates an alias declaration
+		// (type Name = Type) rather than a type definition.
+		Assign     token.Pos     // position of '=', if any
+		Type       Expr          // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
+		Comment    *CommentGroup // line comments; or nil
+	}
+)
+
+// Pos and End implementations for spec nodes.
+
+// Pos returns the position of the local package name if present,
+// and the position of the import path otherwise.
+func (s *ImportSpec) Pos() token.Pos {
+	if s.Name != nil {
+		return s.Name.Pos()
+	}
+	return s.Path.Pos()
+}
+func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() }
+func (s *TypeSpec) Pos() token.Pos  { return s.Name.Pos() }
+
+// End returns EndPos if it is set (nonzero), and the end of the
+// import path otherwise.
+func (s *ImportSpec) End() token.Pos {
+	if s.EndPos != 0 {
+		return s.EndPos
+	}
+	return s.Path.End()
+}
+
+// End returns the end of the last value, the type, or the last name,
+// whichever is present, in that order of preference.
+func (s *ValueSpec) End() token.Pos {
+	if n := len(s.Values); n > 0 {
+		return s.Values[n-1].End()
+	}
+	if s.Type != nil {
+		return s.Type.End()
+	}
+	return s.Names[len(s.Names)-1].End()
+}
+func (s *TypeSpec) End() token.Pos { return s.Type.End() }
+
+// specNode() ensures that only spec nodes can be
+// assigned to a Spec.
+func (*ImportSpec) specNode() {}
+func (*ValueSpec) specNode()  {}
+func (*TypeSpec) specNode()   {}
+
+// A declaration is represented by one of the following declaration nodes.
+type (
+	// A BadDecl node is a placeholder for a declaration containing
+	// syntax errors for which a correct declaration node cannot be
+	// created.
+	//
+	BadDecl struct {
+		From, To token.Pos // position range of bad declaration
+	}
+
+	// A GenDecl node (generic declaration node) represents an import,
+	// constant, type or variable declaration. A valid Lparen position
+	// (Lparen.IsValid()) indicates a parenthesized declaration.
+	//
+	// Relationship between Tok value and Specs element type:
+	//
+	//	token.IMPORT  *ImportSpec
+	//	token.CONST   *ValueSpec
+	//	token.TYPE    *TypeSpec
+	//	token.VAR     *ValueSpec
+	//
+	GenDecl struct {
+		Doc    *CommentGroup // associated documentation; or nil
+		TokPos token.Pos     // position of Tok
+		Tok    token.Token   // IMPORT, CONST, TYPE, or VAR
+		Lparen token.Pos     // position of '(', if any
+		// Specs holds the individual specs; the dynamic element
+		// type corresponds to Tok as described in the table above.
+		Specs  []Spec
+		Rparen token.Pos // position of ')', if any
+	}
+
+	// A FuncDecl node represents a function declaration.
+	FuncDecl struct {
+		Doc  *CommentGroup // associated documentation; or nil
+		Recv *FieldList    // receiver (methods); or nil (functions)
+		Name *Ident        // function/method name
+		Type *FuncType     // function signature: type and value parameters, results, and position of "func" keyword
+		Body *BlockStmt    // function body; or nil for external (non-Go) function
+	}
+)
+
+// Pos and End implementations for declaration nodes.
+
+func (d *BadDecl) Pos() token.Pos  { return d.From }
+func (d *GenDecl) Pos() token.Pos  { return d.TokPos }
+func (d *FuncDecl) Pos() token.Pos { return d.Type.Pos() }
+
+func (d *BadDecl) End() token.Pos { return d.To }
+// End returns the position just past ')' for a parenthesized
+// declaration, and the end of the single spec otherwise.
+func (d *GenDecl) End() token.Pos {
+	if d.Rparen.IsValid() {
+		return d.Rparen + 1
+	}
+	return d.Specs[0].End()
+}
+// End returns the end of the function body if present, and the end
+// of the signature for a declaration without a body.
+func (d *FuncDecl) End() token.Pos {
+	if d.Body != nil {
+		return d.Body.End()
+	}
+	return d.Type.End()
+}
+
+// declNode() ensures that only declaration nodes can be
+// assigned to a Decl.
+func (*BadDecl) declNode()  {}
+func (*GenDecl) declNode()  {}
+func (*FuncDecl) declNode() {}
+
+// ----------------------------------------------------------------------------
+// Files and packages
+
+// A File node represents a Go source file.
+//
+// The Comments list contains all comments in the source file in order of
+// appearance, including the comments that are pointed to from other nodes
+// via Doc and Comment fields.
+//
+// For correct printing of source code containing comments (using packages
+// go/format and go/printer), special care must be taken to update comments
+// when a File's syntax tree is modified: For printing, comments are interspersed
+// between tokens based on their position. If syntax tree nodes are
+// removed or moved, relevant comments in their vicinity must also be removed
+// (from the File.Comments list) or moved accordingly (by updating their
+// positions). A CommentMap may be used to facilitate some of these operations.
+//
+// Whether and how a comment is associated with a node depends on the
+// interpretation of the syntax tree by the manipulating program: Except for Doc
+// and Comment comments directly associated with nodes, the remaining comments
+// are "free-floating" (see also issues #18593, #20744).
+type File struct {
+	Doc        *CommentGroup   // associated documentation; or nil
+	Package    token.Pos       // position of "package" keyword
+	Name       *Ident          // package name
+	Decls      []Decl          // top-level declarations; or nil
+	Scope      *Scope          // package scope (this file only)
+	Imports    []*ImportSpec   // imports in this file
+	Unresolved []*Ident        // unresolved identifiers in this file
+	Comments   []*CommentGroup // list of all comments in the source file
+}
+
+func (f *File) Pos() token.Pos { return f.Package } // position of the "package" keyword
+// End returns the end of the last declaration, or the end of the
+// package name for a file without declarations.
+func (f *File) End() token.Pos {
+	if n := len(f.Decls); n > 0 {
+		return f.Decls[n-1].End()
+	}
+	return f.Name.End()
+}
+
+// A Package node represents a set of source files
+// collectively building a Go package.
+type Package struct {
+	Name    string             // package name
+	Scope   *Scope             // package scope across all files
+	Imports map[string]*Object // map of package id -> package object
+	Files   map[string]*File   // Go source files by filename
+}
+
+// A Package does not track position information;
+// Pos and End always return token.NoPos.
+func (p *Package) Pos() token.Pos { return token.NoPos }
+func (p *Package) End() token.Pos { return token.NoPos }
diff --git a/internal/backport/go/ast/ast_test.go b/internal/backport/go/ast/ast_test.go
new file mode 100644
index 0000000..71b2d6c
--- /dev/null
+++ b/internal/backport/go/ast/ast_test.go
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"testing"
+)
+
+// comments pairs lists of raw comment texts with the result
+// expected from CommentGroup.Text.
+var comments = []struct {
+	list []string
+	text string
+}{
+	{[]string{"//"}, ""},
+	{[]string{"//   "}, ""},
+	{[]string{"//", "//", "//   "}, ""},
+	{[]string{"// foo   "}, "foo\n"},
+	{[]string{"//", "//", "// foo"}, "foo\n"},
+	{[]string{"// foo  bar  "}, "foo  bar\n"},
+	{[]string{"// foo", "// bar"}, "foo\nbar\n"},
+	{[]string{"// foo", "//", "//", "//", "// bar"}, "foo\n\nbar\n"},
+	{[]string{"// foo", "/* bar */"}, "foo\n bar\n"},
+	{[]string{"//", "//", "//", "// foo", "//", "//", "//"}, "foo\n"},
+
+	{[]string{"/**/"}, ""},
+	{[]string{"/*   */"}, ""},
+	{[]string{"/**/", "/**/", "/*   */"}, ""},
+	{[]string{"/* Foo   */"}, " Foo\n"},
+	{[]string{"/* Foo  Bar  */"}, " Foo  Bar\n"},
+	{[]string{"/* Foo*/", "/* Bar*/"}, " Foo\n Bar\n"},
+	{[]string{"/* Foo*/", "/**/", "/**/", "/**/", "// Bar"}, " Foo\n\nBar\n"},
+	{[]string{"/* Foo*/", "/*\n*/", "//", "/*\n*/", "// Bar"}, " Foo\n\nBar\n"},
+	{[]string{"/* Foo*/", "// Bar"}, " Foo\nBar\n"},
+	{[]string{"/* Foo\n Bar*/"}, " Foo\n Bar\n"},
+
+	// directive comments are dropped from the text unless malformed
+	{[]string{"// foo", "//go:noinline", "// bar", "//:baz"}, "foo\nbar\n:baz\n"},
+	{[]string{"// foo", "//lint123:ignore", "// bar"}, "foo\nbar\n"},
+}
+
+// TestCommentText checks CommentGroup.Text against the comments table.
+func TestCommentText(t *testing.T) {
+	for i, c := range comments {
+		list := make([]*Comment, len(c.list))
+		for i, s := range c.list {
+			list[i] = &Comment{Text: s}
+		}
+
+		text := (&CommentGroup{list}).Text()
+		if text != c.text {
+			t.Errorf("case %d: got %q; expected %q", i, text, c.text)
+		}
+	}
+}
+
+// isDirectiveTests lists comment bodies (with the leading "//" removed)
+// and whether isDirective should classify them as directives.
+var isDirectiveTests = []struct {
+	in string
+	ok bool
+}{
+	{"abc", false},
+	{"go:inline", true},
+	{"Go:inline", false},
+	{"go:Inline", false},
+	{":inline", false},
+	{"lint:ignore", true},
+	{"lint:1234", true},
+	{"1234:lint", true},
+	{"go: inline", false},
+	{"go:", false},
+	{"go:*", false},
+	{"go:x*", true},
+}
+
+// TestIsDirective checks isDirective against the isDirectiveTests table.
+func TestIsDirective(t *testing.T) {
+	for _, tt := range isDirectiveTests {
+		if ok := isDirective(tt.in); ok != tt.ok {
+			t.Errorf("isDirective(%q) = %v, want %v", tt.in, ok, tt.ok)
+		}
+	}
+}
diff --git a/internal/backport/go/ast/commentmap.go b/internal/backport/go/ast/commentmap.go
new file mode 100644
index 0000000..5e63605
--- /dev/null
+++ b/internal/backport/go/ast/commentmap.go
@@ -0,0 +1,329 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/token"
+	"sort"
+)
+
+// byPos implements sort.Interface, ordering comment groups by
+// start position.
+type byPos []*CommentGroup
+
+func (a byPos) Len() int           { return len(a) }
+func (a byPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() }
+func (a byPos) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// sortComments sorts the list of comment groups in source order.
+func sortComments(list []*CommentGroup) {
+	// TODO(gri): Does it make sense to check for sorted-ness
+	//            first (because we know that sorted-ness is
+	//            very likely)?
+	if orderedList := byPos(list); !sort.IsSorted(orderedList) {
+		sort.Sort(orderedList)
+	}
+}
+
+// A CommentMap maps an AST node to a list of comment groups
+// associated with it. See NewCommentMap for a description of
+// the association.
+type CommentMap map[Node][]*CommentGroup
+
+// addComment appends c to the list of comment groups associated with n.
+func (cmap CommentMap) addComment(n Node, c *CommentGroup) {
+	list := cmap[n]
+	if len(list) == 0 {
+		list = []*CommentGroup{c}
+	} else {
+		list = append(list, c)
+	}
+	cmap[n] = list
+}
+
+// byInterval implements sort.Interface, ordering nodes by start
+// position and, for equal starts, by decreasing extent (so that an
+// enclosing node sorts before the nodes it contains).
+type byInterval []Node
+
+func (a byInterval) Len() int { return len(a) }
+func (a byInterval) Less(i, j int) bool {
+	pi, pj := a[i].Pos(), a[j].Pos()
+	return pi < pj || pi == pj && a[i].End() > a[j].End()
+}
+func (a byInterval) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// nodeList returns the list of nodes of the AST n in source order.
+func nodeList(n Node) []Node {
+	var list []Node
+	Inspect(n, func(n Node) bool {
+		// don't collect comments
+		switch n.(type) {
+		case nil, *CommentGroup, *Comment:
+			return false
+		}
+		list = append(list, n)
+		return true
+	})
+	// Note: The current implementation assumes that Inspect traverses the
+	//       AST in depth-first and thus _source_ order. If AST traversal
+	//       does not follow source order, the sorting call below will be
+	//       required.
+	// sort.Sort(byInterval(list))
+	return list
+}
+
+// A commentListReader helps iterating through a list of comment groups.
+type commentListReader struct {
+	fset     *token.FileSet
+	list     []*CommentGroup
+	index    int
+	comment  *CommentGroup  // comment group at current index
+	pos, end token.Position // source interval of comment group at current index
+}
+
+// eol reports whether the reader has consumed all comment groups.
+func (r *commentListReader) eol() bool {
+	return r.index >= len(r.list)
+}
+
+// next advances to the next comment group and caches its resolved
+// source positions; it is a no-op at the end of the list.
+func (r *commentListReader) next() {
+	if !r.eol() {
+		r.comment = r.list[r.index]
+		r.pos = r.fset.Position(r.comment.Pos())
+		r.end = r.fset.Position(r.comment.End())
+		r.index++
+	}
+}
+
+// A nodeStack keeps track of nested nodes.
+// A node lower on the stack lexically contains the nodes higher on the stack.
+type nodeStack []Node
+
+// push pops all nodes that appear lexically before n
+// and then pushes n on the stack.
+func (s *nodeStack) push(n Node) {
+	s.pop(n.Pos())
+	*s = append((*s), n)
+}
+
+// pop pops all nodes that appear lexically before pos
+// (i.e., whose lexical extent has ended before or at pos).
+// It returns the last node popped, or nil if no node was popped.
+func (s *nodeStack) pop(pos token.Pos) (top Node) {
+	i := len(*s)
+	for i > 0 && (*s)[i-1].End() <= pos {
+		top = (*s)[i-1]
+		i--
+	}
+	*s = (*s)[0:i]
+	return top
+}
+
+// NewCommentMap creates a new comment map by associating comment groups
+// of the comments list with the nodes of the AST specified by node.
+// The result is nil if comments is empty.
+//
+// A comment group g is associated with a node n if:
+//
+//   - g starts on the same line as n ends
+//   - g starts on the line immediately following n, and there is
+//     at least one empty line after g and before the next node
+//   - g starts before n and is not associated to the node before n
+//     via the previous rules
+//
+// NewCommentMap tries to associate a comment group to the "largest"
+// node possible: For instance, if the comment is a line comment
+// trailing an assignment, the comment is associated with the entire
+// assignment rather than just the last operand in the assignment.
+func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
+	if len(comments) == 0 {
+		return nil // no comments to map
+	}
+
+	cmap := make(CommentMap)
+
+	// set up comment reader r
+	tmp := make([]*CommentGroup, len(comments))
+	copy(tmp, comments) // don't change incoming comments
+	sortComments(tmp)
+	r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
+	r.next()
+
+	// create node list in lexical order
+	nodes := nodeList(node)
+	nodes = append(nodes, nil) // append sentinel
+
+	// set up iteration variables
+	var (
+		p     Node           // previous node
+		pend  token.Position // end of p
+		pg    Node           // previous node group (enclosing nodes of "importance")
+		pgend token.Position // end of pg
+		stack nodeStack      // stack of node groups
+	)
+
+	for _, q := range nodes {
+		var qpos token.Position
+		if q != nil {
+			qpos = fset.Position(q.Pos()) // current node position
+		} else {
+			// set fake sentinel position to infinity so that
+			// all comments get processed before the sentinel
+			const infinity = 1 << 30
+			qpos.Offset = infinity
+			qpos.Line = infinity
+		}
+
+		// process comments before current node
+		for r.end.Offset <= qpos.Offset {
+			// determine recent node group
+			if top := stack.pop(r.comment.Pos()); top != nil {
+				pg = top
+				pgend = fset.Position(pg.End())
+			}
+			// Try to associate a comment first with a node group
+			// (i.e., a node of "importance" such as a declaration);
+			// if that fails, try to associate it with the most recent
+			// node.
+			// TODO(gri) try to simplify the logic below
+			var assoc Node
+			switch {
+			case pg != nil &&
+				(pgend.Line == r.pos.Line ||
+					pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
+				// 1) comment starts on same line as previous node group ends, or
+				// 2) comment starts on the line immediately after the
+				//    previous node group and there is an empty line before
+				//    the current node
+				// => associate comment with previous node group
+				assoc = pg
+			case p != nil &&
+				(pend.Line == r.pos.Line ||
+					pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line ||
+					q == nil):
+				// same rules apply as above for p rather than pg,
+				// but also associate with p if we are at the end (q == nil)
+				assoc = p
+			default:
+				// otherwise, associate comment with current node
+				if q == nil {
+					// we can only reach here if there was no p
+					// which would imply that there were no nodes
+					panic("internal error: no comments should be associated with sentinel")
+				}
+				assoc = q
+			}
+			cmap.addComment(assoc, r.comment)
+			if r.eol() {
+				// all comments mapped; remaining nodes carry none
+				return cmap
+			}
+			r.next()
+		}
+
+		// update previous node
+		p = q
+		pend = fset.Position(p.End())
+
+		// update previous node group if we see an "important" node
+		switch q.(type) {
+		case *File, *Field, Decl, Spec, Stmt:
+			stack.push(q)
+		}
+	}
+
+	return cmap
+}
+
+// Update replaces an old node in the comment map with the new node
+// and returns the new node. Comments that were associated with the
+// old node are associated with the new node. If no comments are
+// associated with old, the map is left unchanged.
+func (cmap CommentMap) Update(old, new Node) Node {
+	if list := cmap[old]; len(list) > 0 {
+		delete(cmap, old)
+		cmap[new] = append(cmap[new], list...)
+	}
+	return new
+}
+
+// Filter returns a new comment map consisting of only those
+// entries of cmap for which a corresponding node exists in
+// the AST specified by node. The receiver map is not modified.
+func (cmap CommentMap) Filter(node Node) CommentMap {
+	umap := make(CommentMap)
+	Inspect(node, func(n Node) bool {
+		if g := cmap[n]; len(g) > 0 {
+			umap[n] = g
+		}
+		return true
+	})
+	return umap
+}
+
+// Comments returns the list of comment groups in the comment map.
+// The result is a newly allocated slice, sorted in source order.
+func (cmap CommentMap) Comments() []*CommentGroup {
+	list := make([]*CommentGroup, 0, len(cmap))
+	for _, e := range cmap {
+		list = append(list, e...)
+	}
+	sortComments(list)
+	return list
+}
+
+// summary returns a single-line summary of at most maxLen bytes of
+// the text of the given comment groups, with tabs and newlines
+// replaced by blanks and an ellipsis appended when truncated.
+func summary(list []*CommentGroup) string {
+	const maxLen = 40
+	var buf bytes.Buffer
+
+	// collect comments text
+loop:
+	for _, group := range list {
+		// Note: CommentGroup.Text() does too much work for what we
+		//       need and would only replace this innermost loop.
+		//       Just do it explicitly.
+		for _, comment := range group.List {
+			if buf.Len() >= maxLen {
+				break loop
+			}
+			buf.WriteString(comment.Text)
+		}
+	}
+
+	// truncate if too long
+	if buf.Len() > maxLen {
+		buf.Truncate(maxLen - 3)
+		buf.WriteString("...")
+	}
+
+	// replace any invisibles with blanks
+	bytes := buf.Bytes()
+	for i, b := range bytes {
+		switch b {
+		case '\t', '\n', '\r':
+			bytes[i] = ' '
+		}
+	}
+
+	return string(bytes)
+}
+
+// String returns a multi-line listing of the map entries, sorted by
+// node position, for debugging.
+func (cmap CommentMap) String() string {
+	// print map entries in sorted order
+	var nodes []Node
+	for node := range cmap {
+		nodes = append(nodes, node)
+	}
+	sort.Sort(byInterval(nodes))
+
+	var buf bytes.Buffer
+	fmt.Fprintln(&buf, "CommentMap {")
+	for _, node := range nodes {
+		comment := cmap[node]
+		// print name of identifiers; print node type for other nodes
+		var s string
+		if ident, ok := node.(*Ident); ok {
+			s = ident.Name
+		} else {
+			s = fmt.Sprintf("%T", node)
+		}
+		fmt.Fprintf(&buf, "\t%p  %20s:  %s\n", node, s, summary(comment))
+	}
+	fmt.Fprintln(&buf, "}")
+	return buf.String()
+}
diff --git a/internal/backport/go/ast/commentmap_test.go b/internal/backport/go/ast/commentmap_test.go
new file mode 100644
index 0000000..67c2daf
--- /dev/null
+++ b/internal/backport/go/ast/commentmap_test.go
@@ -0,0 +1,170 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To avoid a cyclic dependency with go/parser, this file is in a separate package.
+
+package ast_test
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"testing"
+
+	. "golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// src is the test input: a small Go source file exercising the
+// comment association rules implemented by NewCommentMap.
+const src = `
+// the very first comment
+
+// package p
+package p /* the name is p */
+
+// imports
+import (
+	"bytes"     // bytes
+	"fmt"       // fmt
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+)
+
+// T
+type T struct {
+	a, b, c int // associated with a, b, c
+	// associated with x, y
+	x, y float64    // float values
+	z    complex128 // complex value
+}
+// also associated with T
+
+// x
+var x = 0 // x = 0
+// also associated with x
+
+// f1
+func f1() {
+	/* associated with s1 */
+	s1()
+	// also associated with s1
+
+	// associated with s2
+
+	// also associated with s2
+	s2() // line comment for s2
+}
+// associated with f1
+// also associated with f1
+
+// associated with f2
+
+// f2
+func f2() {
+}
+
+func f3() {
+	i := 1 /* 1 */ + 2 // addition
+	_ = i
+}
+
+// the very last comment
+`
+
+// res maps a key of the form "line number: node type"
+// to the associated comments' text (as concatenated by ctext).
+var res = map[string]string{
+	" 5: *ast.File":       "the very first comment\npackage p\n",
+	" 5: *ast.Ident":      " the name is p\n",
+	" 8: *ast.GenDecl":    "imports\n",
+	" 9: *ast.ImportSpec": "bytes\n",
+	"10: *ast.ImportSpec": "fmt\n",
+	"16: *ast.GenDecl":    "T\nalso associated with T\n",
+	"17: *ast.Field":      "associated with a, b, c\n",
+	"19: *ast.Field":      "associated with x, y\nfloat values\n",
+	"20: *ast.Field":      "complex value\n",
+	"25: *ast.GenDecl":    "x\nx = 0\nalso associated with x\n",
+	"29: *ast.FuncDecl":   "f1\nassociated with f1\nalso associated with f1\n",
+	"31: *ast.ExprStmt":   " associated with s1\nalso associated with s1\n",
+	"37: *ast.ExprStmt":   "associated with s2\nalso associated with s2\nline comment for s2\n",
+	"45: *ast.FuncDecl":   "associated with f2\nf2\n",
+	"49: *ast.AssignStmt": "addition\n",
+	"49: *ast.BasicLit":   " 1\n",
+	"50: *ast.Ident":      "the very last comment\n",
+}
+
+// ctext concatenates the text of the given comment groups.
+func ctext(list []*CommentGroup) string {
+	var buf bytes.Buffer
+	for _, g := range list {
+		buf.WriteString(g.Text())
+	}
+	return buf.String()
+}
+
+// TestCommentMap checks that NewCommentMap associates each comment
+// group of src with the node recorded in res, without losing any.
+func TestCommentMap(t *testing.T) {
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cmap := NewCommentMap(fset, f, f.Comments)
+
+	// verify correct association of comments
+	for n, list := range cmap {
+		key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n)
+		got := ctext(list)
+		want := res[key]
+		if got != want {
+			t.Errorf("%s: got %q; want %q", key, got, want)
+		}
+	}
+
+	// verify that no comments got lost
+	if n := len(cmap.Comments()); n != len(f.Comments) {
+		t.Errorf("got %d comment groups in map; want %d", n, len(f.Comments))
+	}
+
+	// support code to update test:
+	// set genMap to true to generate res map
+	const genMap = false
+	if genMap {
+		out := make([]string, 0, len(cmap))
+		for n, list := range cmap {
+			out = append(out, fmt.Sprintf("\t\"%2d: %T\":\t%q,", fset.Position(n.Pos()).Line, n, ctext(list)))
+		}
+		sort.Strings(out)
+		for _, s := range out {
+			fmt.Println(s)
+		}
+	}
+}
+
+// TestFilter checks that CommentMap.Filter drops the comments
+// associated with a declaration removed from the AST.
+func TestFilter(t *testing.T) {
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cmap := NewCommentMap(fset, f, f.Comments)
+
+	// delete variable declaration
+	for i, decl := range f.Decls {
+		if gen, ok := decl.(*GenDecl); ok && gen.Tok == token.VAR {
+			copy(f.Decls[i:], f.Decls[i+1:])
+			f.Decls = f.Decls[:len(f.Decls)-1]
+			break
+		}
+	}
+
+	// check if comments are filtered correctly
+	cc := cmap.Filter(f)
+	for n, list := range cc {
+		key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n)
+		got := ctext(list)
+		want := res[key]
+		// the entry for the deleted var declaration must be gone
+		if key == "25: *ast.GenDecl" || got != want {
+			t.Errorf("%s: got %q; want %q", key, got, want)
+		}
+	}
+}
diff --git a/internal/backport/go/ast/example_test.go b/internal/backport/go/ast/example_test.go
new file mode 100644
index 0000000..ae92618
--- /dev/null
+++ b/internal/backport/go/ast/example_test.go
@@ -0,0 +1,206 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast_test
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/format"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// This example demonstrates how to inspect the AST of a Go program.
+func ExampleInspect() {
+	// src is the input for which we want to inspect the AST.
+	src := `
+package p
+const c = 1.0
+var X = f(3.14)*2 + c
+`
+
+	// Create the AST by parsing src.
+	fset := token.NewFileSet() // positions are relative to fset
+	f, err := parser.ParseFile(fset, "src.go", src, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	// Inspect the AST and print all identifiers and literals.
+	ast.Inspect(f, func(n ast.Node) bool {
+		var s string
+		switch x := n.(type) {
+		case *ast.BasicLit:
+			s = x.Value
+		case *ast.Ident:
+			s = x.Name
+		}
+		// s remains empty for all other node types, which are not printed
+		if s != "" {
+			fmt.Printf("%s:\t%s\n", fset.Position(n.Pos()), s)
+		}
+		return true
+	})
+
+	// Output:
+	// src.go:2:9:	p
+	// src.go:3:7:	c
+	// src.go:3:11:	1.0
+	// src.go:4:5:	X
+	// src.go:4:9:	f
+	// src.go:4:11:	3.14
+	// src.go:4:17:	2
+	// src.go:4:21:	c
+}
+
+// This example shows what an AST looks like when printed for debugging.
+func ExamplePrint() {
+	// src is the input for which we want to print the AST.
+	src := `
+package main
+func main() {
+	println("Hello, World!")
+}
+`
+
+	// Create the AST by parsing src.
+	fset := token.NewFileSet() // positions are relative to fset
+	f, err := parser.ParseFile(fset, "", src, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	// Print the AST to standard output.
+	ast.Print(fset, f)
+
+	// Output:
+	//      0  *ast.File {
+	//      1  .  Package: 2:1
+	//      2  .  Name: *ast.Ident {
+	//      3  .  .  NamePos: 2:9
+	//      4  .  .  Name: "main"
+	//      5  .  }
+	//      6  .  Decls: []ast.Decl (len = 1) {
+	//      7  .  .  0: *ast.FuncDecl {
+	//      8  .  .  .  Name: *ast.Ident {
+	//      9  .  .  .  .  NamePos: 3:6
+	//     10  .  .  .  .  Name: "main"
+	//     11  .  .  .  .  Obj: *ast.Object {
+	//     12  .  .  .  .  .  Kind: func
+	//     13  .  .  .  .  .  Name: "main"
+	//     14  .  .  .  .  .  Decl: *(obj @ 7)
+	//     15  .  .  .  .  }
+	//     16  .  .  .  }
+	//     17  .  .  .  Type: *ast.FuncType {
+	//     18  .  .  .  .  Func: 3:1
+	//     19  .  .  .  .  Params: *ast.FieldList {
+	//     20  .  .  .  .  .  Opening: 3:10
+	//     21  .  .  .  .  .  Closing: 3:11
+	//     22  .  .  .  .  }
+	//     23  .  .  .  }
+	//     24  .  .  .  Body: *ast.BlockStmt {
+	//     25  .  .  .  .  Lbrace: 3:13
+	//     26  .  .  .  .  List: []ast.Stmt (len = 1) {
+	//     27  .  .  .  .  .  0: *ast.ExprStmt {
+	//     28  .  .  .  .  .  .  X: *ast.CallExpr {
+	//     29  .  .  .  .  .  .  .  Fun: *ast.Ident {
+	//     30  .  .  .  .  .  .  .  .  NamePos: 4:2
+	//     31  .  .  .  .  .  .  .  .  Name: "println"
+	//     32  .  .  .  .  .  .  .  }
+	//     33  .  .  .  .  .  .  .  Lparen: 4:9
+	//     34  .  .  .  .  .  .  .  Args: []ast.Expr (len = 1) {
+	//     35  .  .  .  .  .  .  .  .  0: *ast.BasicLit {
+	//     36  .  .  .  .  .  .  .  .  .  ValuePos: 4:10
+	//     37  .  .  .  .  .  .  .  .  .  Kind: STRING
+	//     38  .  .  .  .  .  .  .  .  .  Value: "\"Hello, World!\""
+	//     39  .  .  .  .  .  .  .  .  }
+	//     40  .  .  .  .  .  .  .  }
+	//     41  .  .  .  .  .  .  .  Ellipsis: -
+	//     42  .  .  .  .  .  .  .  Rparen: 4:25
+	//     43  .  .  .  .  .  .  }
+	//     44  .  .  .  .  .  }
+	//     45  .  .  .  .  }
+	//     46  .  .  .  .  Rbrace: 5:1
+	//     47  .  .  .  }
+	//     48  .  .  }
+	//     49  .  }
+	//     50  .  Scope: *ast.Scope {
+	//     51  .  .  Objects: map[string]*ast.Object (len = 1) {
+	//     52  .  .  .  "main": *(obj @ 11)
+	//     53  .  .  }
+	//     54  .  }
+	//     55  .  Unresolved: []*ast.Ident (len = 1) {
+	//     56  .  .  0: *(obj @ 29)
+	//     57  .  }
+	//     58  }
+}
+
+// This example illustrates how to remove a variable declaration
+// in a Go program while maintaining correct comment association
+// using an ast.CommentMap.
+func ExampleCommentMap() {
+	// src is the input for which we create the AST that we
+	// are going to manipulate.
+	src := `
+// This is the package comment.
+package main
+
+// This comment is associated with the hello constant.
+const hello = "Hello, World!" // line comment 1
+
+// This comment is associated with the foo variable.
+var foo = hello // line comment 2
+
+// This comment is associated with the main function.
+func main() {
+	fmt.Println(hello) // line comment 3
+}
+`
+
+	// Create the AST by parsing src.
+	fset := token.NewFileSet() // positions are relative to fset
+	f, err := parser.ParseFile(fset, "src.go", src, parser.ParseComments)
+	if err != nil {
+		panic(err)
+	}
+
+	// Create an ast.CommentMap from the ast.File's comments.
+	// This helps keep the association between comments
+	// and AST nodes.
+	cmap := ast.NewCommentMap(fset, f, f.Comments)
+
+	// Remove the first variable declaration from the list of declarations.
+	for i, decl := range f.Decls {
+		if gen, ok := decl.(*ast.GenDecl); ok && gen.Tok == token.VAR {
+			copy(f.Decls[i:], f.Decls[i+1:])
+			f.Decls = f.Decls[:len(f.Decls)-1]
+			break
+		}
+	}
+
+	// Use the comment map to filter comments that don't belong anymore
+	// (the comments associated with the variable declaration), and create
+	// the new comments list.
+	f.Comments = cmap.Filter(f).Comments()
+
+	// Print the modified AST; note that the comments associated with
+	// the removed declaration no longer appear in the output.
+	var buf bytes.Buffer
+	if err := format.Node(&buf, fset, f); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s", buf.Bytes())
+
+	// Output:
+	// // This is the package comment.
+	// package main
+	//
+	// // This comment is associated with the hello constant.
+	// const hello = "Hello, World!" // line comment 1
+	//
+	// // This comment is associated with the main function.
+	// func main() {
+	// 	fmt.Println(hello) // line comment 3
+	// }
+}
diff --git a/internal/backport/go/ast/filter.go b/internal/backport/go/ast/filter.go
new file mode 100644
index 0000000..8c2ed9d
--- /dev/null
+++ b/internal/backport/go/ast/filter.go
@@ -0,0 +1,488 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"golang.org/x/website/internal/backport/go/token"
+	"sort"
+)
+
+// ----------------------------------------------------------------------------
+// Export filtering
+
+// exportFilter is a special filter function to extract exported nodes.
+func exportFilter(name string) bool {
+	return IsExported(name)
+}
+
+// FileExports trims the AST for a Go source file in place such that
+// only exported nodes remain: all top-level identifiers which are not exported
+// and their associated information (such as type, initial value, or function
+// body) are removed. Non-exported fields and methods of exported types are
+// stripped. The File.Comments list is not changed.
+//
+// FileExports reports whether there are exported declarations.
+func FileExports(src *File) bool {
+	return filterFile(src, exportFilter, true)
+}
+
+// PackageExports trims the AST for a Go package in place such that
+// only exported nodes remain. The pkg.Files list is not changed, so that
+// file names and top-level package comments don't get lost.
+//
+// PackageExports reports whether there are exported declarations;
+// it returns false otherwise.
+func PackageExports(pkg *Package) bool {
+	return filterPackage(pkg, exportFilter, true)
+}
+
+// ----------------------------------------------------------------------------
+// General filtering
+
+type Filter func(string) bool
+
+func filterIdentList(list []*Ident, f Filter) []*Ident {
+	j := 0
+	for _, x := range list {
+		if f(x.Name) {
+			list[j] = x
+			j++
+		}
+	}
+	return list[0:j]
+}
+
+// fieldName assumes that x is the type of an anonymous field and
+// returns the corresponding field name. If x is not an acceptable
+// anonymous field, the result is nil.
+func fieldName(x Expr) *Ident {
+	switch t := x.(type) {
+	case *Ident:
+		return t
+	case *SelectorExpr:
+		if _, ok := t.X.(*Ident); ok {
+			return t.Sel
+		}
+	case *StarExpr:
+		return fieldName(t.X)
+	}
+	return nil
+}
+
+func filterFieldList(fields *FieldList, filter Filter, export bool) (removedFields bool) {
+	if fields == nil {
+		return false
+	}
+	list := fields.List
+	j := 0
+	for _, f := range list {
+		keepField := false
+		if len(f.Names) == 0 {
+			// anonymous field
+			name := fieldName(f.Type)
+			keepField = name != nil && filter(name.Name)
+		} else {
+			n := len(f.Names)
+			f.Names = filterIdentList(f.Names, filter)
+			if len(f.Names) < n {
+				removedFields = true
+			}
+			keepField = len(f.Names) > 0
+		}
+		if keepField {
+			if export {
+				filterType(f.Type, filter, export)
+			}
+			list[j] = f
+			j++
+		}
+	}
+	if j < len(list) {
+		removedFields = true
+	}
+	fields.List = list[0:j]
+	return
+}
+
+func filterCompositeLit(lit *CompositeLit, filter Filter, export bool) {
+	n := len(lit.Elts)
+	lit.Elts = filterExprList(lit.Elts, filter, export)
+	if len(lit.Elts) < n {
+		lit.Incomplete = true
+	}
+}
+
+func filterExprList(list []Expr, filter Filter, export bool) []Expr {
+	j := 0
+	for _, exp := range list {
+		switch x := exp.(type) {
+		case *CompositeLit:
+			filterCompositeLit(x, filter, export)
+		case *KeyValueExpr:
+			if x, ok := x.Key.(*Ident); ok && !filter(x.Name) {
+				continue
+			}
+			if x, ok := x.Value.(*CompositeLit); ok {
+				filterCompositeLit(x, filter, export)
+			}
+		}
+		list[j] = exp
+		j++
+	}
+	return list[0:j]
+}
+
+func filterParamList(fields *FieldList, filter Filter, export bool) bool {
+	if fields == nil {
+		return false
+	}
+	var b bool
+	for _, f := range fields.List {
+		if filterType(f.Type, filter, export) {
+			b = true
+		}
+	}
+	return b
+}
+
+func filterType(typ Expr, f Filter, export bool) bool {
+	switch t := typ.(type) {
+	case *Ident:
+		return f(t.Name)
+	case *ParenExpr:
+		return filterType(t.X, f, export)
+	case *ArrayType:
+		return filterType(t.Elt, f, export)
+	case *StructType:
+		if filterFieldList(t.Fields, f, export) {
+			t.Incomplete = true
+		}
+		return len(t.Fields.List) > 0
+	case *FuncType:
+		b1 := filterParamList(t.Params, f, export)
+		b2 := filterParamList(t.Results, f, export)
+		return b1 || b2
+	case *InterfaceType:
+		if filterFieldList(t.Methods, f, export) {
+			t.Incomplete = true
+		}
+		return len(t.Methods.List) > 0
+	case *MapType:
+		b1 := filterType(t.Key, f, export)
+		b2 := filterType(t.Value, f, export)
+		return b1 || b2
+	case *ChanType:
+		return filterType(t.Value, f, export)
+	}
+	return false
+}
+
+func filterSpec(spec Spec, f Filter, export bool) bool {
+	switch s := spec.(type) {
+	case *ValueSpec:
+		s.Names = filterIdentList(s.Names, f)
+		s.Values = filterExprList(s.Values, f, export)
+		if len(s.Names) > 0 {
+			if export {
+				filterType(s.Type, f, export)
+			}
+			return true
+		}
+	case *TypeSpec:
+		if f(s.Name.Name) {
+			if export {
+				filterType(s.Type, f, export)
+			}
+			return true
+		}
+		if !export {
+			// For general filtering (not just exports),
+			// filter type even if name is not filtered
+			// out.
+			// If the type contains filtered elements,
+			// keep the declaration.
+			return filterType(s.Type, f, export)
+		}
+	}
+	return false
+}
+
+func filterSpecList(list []Spec, f Filter, export bool) []Spec {
+	j := 0
+	for _, s := range list {
+		if filterSpec(s, f, export) {
+			list[j] = s
+			j++
+		}
+	}
+	return list[0:j]
+}
+
+// FilterDecl trims the AST for a Go declaration in place by removing
+// all names (including struct field and interface method names, but
+// not from parameter lists) that don't pass through the filter f.
+//
+// FilterDecl reports whether there are any declared names left after
+// filtering.
+func FilterDecl(decl Decl, f Filter) bool {
+	return filterDecl(decl, f, false)
+}
+
+func filterDecl(decl Decl, f Filter, export bool) bool {
+	switch d := decl.(type) {
+	case *GenDecl:
+		d.Specs = filterSpecList(d.Specs, f, export)
+		return len(d.Specs) > 0
+	case *FuncDecl:
+		return f(d.Name.Name)
+	}
+	return false
+}
+
+// FilterFile trims the AST for a Go file in place by removing all
+// names from top-level declarations (including struct field and
+// interface method names, but not from parameter lists) that don't
+// pass through the filter f. If the declaration is empty afterwards,
+// the declaration is removed from the AST. Import declarations are
+// always removed. The File.Comments list is not changed.
+//
+// FilterFile reports whether there are any top-level declarations
+// left after filtering.
+func FilterFile(src *File, f Filter) bool {
+	return filterFile(src, f, false)
+}
+
+func filterFile(src *File, f Filter, export bool) bool {
+	j := 0
+	for _, d := range src.Decls {
+		if filterDecl(d, f, export) {
+			src.Decls[j] = d
+			j++
+		}
+	}
+	src.Decls = src.Decls[0:j]
+	return j > 0
+}
+
+// FilterPackage trims the AST for a Go package in place by removing
+// all names from top-level declarations (including struct field and
+// interface method names, but not from parameter lists) that don't
+// pass through the filter f. If the declaration is empty afterwards,
+// the declaration is removed from the AST. The pkg.Files list is not
+// changed, so that file names and top-level package comments don't get
+// lost.
+//
+// FilterPackage reports whether there are any top-level declarations
+// left after filtering.
+func FilterPackage(pkg *Package, f Filter) bool {
+	return filterPackage(pkg, f, false)
+}
+
+func filterPackage(pkg *Package, f Filter, export bool) bool {
+	hasDecls := false
+	for _, src := range pkg.Files {
+		if filterFile(src, f, export) {
+			hasDecls = true
+		}
+	}
+	return hasDecls
+}
+
+// ----------------------------------------------------------------------------
+// Merging of package files
+
+// The MergeMode flags control the behavior of MergePackageFiles.
+type MergeMode uint
+
+const (
+	// If set, duplicate function declarations are excluded.
+	FilterFuncDuplicates MergeMode = 1 << iota
+	// If set, comments that are not associated with a specific
+	// AST node (as Doc or Comment) are excluded.
+	FilterUnassociatedComments
+	// If set, duplicate import declarations are excluded.
+	FilterImportDuplicates
+)
+
+// nameOf returns the function (foo) or method name (foo.bar) for
+// the given function declaration. If the AST is incorrect for the
+// receiver, it assumes a function instead.
+func nameOf(f *FuncDecl) string {
+	if r := f.Recv; r != nil && len(r.List) == 1 {
+		// looks like a correct receiver declaration
+		t := r.List[0].Type
+		// dereference pointer receiver types
+		if p, _ := t.(*StarExpr); p != nil {
+			t = p.X
+		}
+		// the receiver type must be a type name
+		if p, _ := t.(*Ident); p != nil {
+			return p.Name + "." + f.Name.Name
+		}
+		// otherwise assume a function instead
+	}
+	return f.Name.Name
+}
+
+// separator is an empty //-style comment that is interspersed between
+// different comment groups when they are concatenated into a single group
+var separator = &Comment{token.NoPos, "//"}
+
+// MergePackageFiles creates a file AST by merging the ASTs of the
+// files belonging to a package. The mode flags control merging behavior.
+func MergePackageFiles(pkg *Package, mode MergeMode) *File {
+	// Count the number of package docs, comments and declarations across
+	// all package files. Also, compute sorted list of filenames, so that
+	// subsequent iterations can always iterate in the same order.
+	ndocs := 0
+	ncomments := 0
+	ndecls := 0
+	filenames := make([]string, len(pkg.Files))
+	i := 0
+	for filename, f := range pkg.Files {
+		filenames[i] = filename
+		i++
+		if f.Doc != nil {
+			ndocs += len(f.Doc.List) + 1 // +1 for separator
+		}
+		ncomments += len(f.Comments)
+		ndecls += len(f.Decls)
+	}
+	sort.Strings(filenames)
+
+	// Collect package comments from all package files into a single
+	// CommentGroup - the collected package documentation. In general
+	// there should be only one file with a package comment; but it's
+	// better to collect extra comments than drop them on the floor.
+	var doc *CommentGroup
+	var pos token.Pos
+	if ndocs > 0 {
+		list := make([]*Comment, ndocs-1) // -1: no separator before first group
+		i := 0
+		for _, filename := range filenames {
+			f := pkg.Files[filename]
+			if f.Doc != nil {
+				if i > 0 {
+					// not the first group - add separator
+					list[i] = separator
+					i++
+				}
+				for _, c := range f.Doc.List {
+					list[i] = c
+					i++
+				}
+				if f.Package > pos {
+					// Keep the maximum package clause position as
+					// position for the package clause of the merged
+					// files.
+					pos = f.Package
+				}
+			}
+		}
+		doc = &CommentGroup{list}
+	}
+
+	// Collect declarations from all package files.
+	var decls []Decl
+	if ndecls > 0 {
+		decls = make([]Decl, ndecls)
+		funcs := make(map[string]int) // map of func name -> decls index
+		i := 0                        // current index
+		n := 0                        // number of filtered entries
+		for _, filename := range filenames {
+			f := pkg.Files[filename]
+			for _, d := range f.Decls {
+				if mode&FilterFuncDuplicates != 0 {
+					// A language entity may be declared multiple
+					// times in different package files; only at
+					// build time declarations must be unique.
+					// For now, exclude multiple declarations of
+					// functions - keep the one with documentation.
+					//
+					// TODO(gri): Expand this filtering to other
+					//            entities (const, type, vars) if
+					//            multiple declarations are common.
+					if f, isFun := d.(*FuncDecl); isFun {
+						name := nameOf(f)
+						if j, exists := funcs[name]; exists {
+							// function declared already
+							if decls[j] != nil && decls[j].(*FuncDecl).Doc == nil {
+								// existing declaration has no documentation;
+								// ignore the existing declaration
+								decls[j] = nil
+							} else {
+								// ignore the new declaration
+								d = nil
+							}
+							n++ // filtered an entry
+						} else {
+							funcs[name] = i
+						}
+					}
+				}
+				decls[i] = d
+				i++
+			}
+		}
+
+		// Eliminate nil entries from the decls list if entries were
+		// filtered. We do this using a 2nd pass in order to not disturb
+		// the original declaration order in the source (otherwise, this
+		// would also invalidate the monotonically increasing position
+		// info within a single file).
+		if n > 0 {
+			i = 0
+			for _, d := range decls {
+				if d != nil {
+					decls[i] = d
+					i++
+				}
+			}
+			decls = decls[0:i]
+		}
+	}
+
+	// Collect import specs from all package files.
+	var imports []*ImportSpec
+	if mode&FilterImportDuplicates != 0 {
+		seen := make(map[string]bool)
+		for _, filename := range filenames {
+			f := pkg.Files[filename]
+			for _, imp := range f.Imports {
+				if path := imp.Path.Value; !seen[path] {
+					// TODO: consider handling cases where:
+					// - 2 imports exist with the same import path but
+					//   have different local names (one should probably
+					//   keep both of them)
+					// - 2 imports exist but only one has a comment
+					// - 2 imports exist and they both have (possibly
+					//   different) comments
+					imports = append(imports, imp)
+					seen[path] = true
+				}
+			}
+		}
+	} else {
+		// Iterate over filenames for deterministic order.
+		for _, filename := range filenames {
+			f := pkg.Files[filename]
+			imports = append(imports, f.Imports...)
+		}
+	}
+
+	// Collect comments from all package files.
+	var comments []*CommentGroup
+	if mode&FilterUnassociatedComments == 0 {
+		comments = make([]*CommentGroup, ncomments)
+		i := 0
+		for _, filename := range filenames {
+			f := pkg.Files[filename]
+			i += copy(comments[i:], f.Comments)
+		}
+	}
+
+	// TODO(gri) need to compute unresolved identifiers!
+	return &File{doc, pos, NewIdent(pkg.Name), decls, pkg.Scope, imports, nil, comments}
+}
diff --git a/internal/backport/go/ast/filter_test.go b/internal/backport/go/ast/filter_test.go
new file mode 100644
index 0000000..fa5da6a
--- /dev/null
+++ b/internal/backport/go/ast/filter_test.go
@@ -0,0 +1,85 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To avoid a cyclic dependency with go/parser, this file is in a separate package.
+
+package ast_test
+
+import (
+	"bytes"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/format"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"testing"
+)
+
+const input = `package p
+
+type t1 struct{}
+type t2 struct{}
+
+func f1() {}
+func f1() {}
+func f2() {}
+
+func (*t1) f1() {}
+func (t1) f1() {}
+func (t1) f2() {}
+
+func (t2) f1() {}
+func (t2) f2() {}
+func (x *t2) f2() {}
+`
+
+// Calling ast.MergePackageFiles with ast.FilterFuncDuplicates
+// keeps a duplicate entry with attached documentation in favor
+// of one without, and it favors duplicate entries appearing
+// later in the source over ones appearing earlier. This is why
+// (*t2).f2 is kept and t2.f2 is eliminated in this test case.
+const golden = `package p
+
+type t1 struct{}
+type t2 struct{}
+
+func f1() {}
+func f2() {}
+
+func (t1) f1() {}
+func (t1) f2() {}
+
+func (t2) f1() {}
+
+func (x *t2) f2() {}
+`
+
+func TestFilterDuplicates(t *testing.T) {
+	// parse input
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "", input, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// create package
+	files := map[string]*ast.File{"": file}
+	pkg, err := ast.NewPackage(fset, files, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// filter
+	merged := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates)
+
+	// pretty-print
+	var buf bytes.Buffer
+	if err := format.Node(&buf, fset, merged); err != nil {
+		t.Fatal(err)
+	}
+	output := buf.String()
+
+	if output != golden {
+		t.Errorf("incorrect output:\n%s", output)
+	}
+}
diff --git a/internal/backport/go/ast/import.go b/internal/backport/go/ast/import.go
new file mode 100644
index 0000000..7c70ef6
--- /dev/null
+++ b/internal/backport/go/ast/import.go
@@ -0,0 +1,230 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"golang.org/x/website/internal/backport/go/token"
+	"sort"
+	"strconv"
+)
+
+// SortImports sorts runs of consecutive import lines in import blocks in f.
+// It also removes duplicate imports when it is possible to do so without data loss.
+func SortImports(fset *token.FileSet, f *File) {
+	for _, d := range f.Decls {
+		d, ok := d.(*GenDecl)
+		if !ok || d.Tok != token.IMPORT {
+			// Not an import declaration, so we're done.
+			// Imports are always first.
+			break
+		}
+
+		if !d.Lparen.IsValid() {
+			// Not a block: sorted by default.
+			continue
+		}
+
+		// Identify and sort runs of specs on successive lines.
+		i := 0
+		specs := d.Specs[:0]
+		for j, s := range d.Specs {
+			if j > i && lineAt(fset, s.Pos()) > 1+lineAt(fset, d.Specs[j-1].End()) {
+				// j begins a new run. End this one.
+				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
+				i = j
+			}
+		}
+		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
+		d.Specs = specs
+
+		// Deduping can leave a blank line before the rparen; clean that up.
+		if len(d.Specs) > 0 {
+			lastSpec := d.Specs[len(d.Specs)-1]
+			lastLine := lineAt(fset, lastSpec.Pos())
+			rParenLine := lineAt(fset, d.Rparen)
+			for rParenLine > lastLine+1 {
+				rParenLine--
+				fset.File(d.Rparen).MergeLine(rParenLine)
+			}
+		}
+	}
+}
+
+func lineAt(fset *token.FileSet, pos token.Pos) int {
+	return fset.PositionFor(pos, false).Line
+}
+
+func importPath(s Spec) string {
+	t, err := strconv.Unquote(s.(*ImportSpec).Path.Value)
+	if err == nil {
+		return t
+	}
+	return ""
+}
+
+func importName(s Spec) string {
+	n := s.(*ImportSpec).Name
+	if n == nil {
+		return ""
+	}
+	return n.Name
+}
+
+func importComment(s Spec) string {
+	c := s.(*ImportSpec).Comment
+	if c == nil {
+		return ""
+	}
+	return c.Text()
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+func collapse(prev, next Spec) bool {
+	if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+		return false
+	}
+	return prev.(*ImportSpec).Comment == nil
+}
+
+type posSpan struct {
+	Start token.Pos
+	End   token.Pos
+}
+
+type cgPos struct {
+	left bool // true if comment is to the left of the spec, false otherwise.
+	cg   *CommentGroup
+}
+
+func sortSpecs(fset *token.FileSet, f *File, specs []Spec) []Spec {
+	// Can't short-circuit here even if specs are already sorted,
+	// since they might yet need deduplication.
+	// A lone import, however, may be safely ignored.
+	if len(specs) <= 1 {
+		return specs
+	}
+
+	// Record positions for specs.
+	pos := make([]posSpan, len(specs))
+	for i, s := range specs {
+		pos[i] = posSpan{s.Pos(), s.End()}
+	}
+
+	// Identify comments in this range.
+	begSpecs := pos[0].Start
+	endSpecs := pos[len(pos)-1].End
+	beg := fset.File(begSpecs).LineStart(lineAt(fset, begSpecs))
+	endLine := lineAt(fset, endSpecs)
+	endFile := fset.File(endSpecs)
+	var end token.Pos
+	if endLine == endFile.LineCount() {
+		end = endSpecs
+	} else {
+		end = endFile.LineStart(endLine + 1) // beginning of next line
+	}
+	first := len(f.Comments)
+	last := -1
+	for i, g := range f.Comments {
+		if g.End() >= end {
+			break
+		}
+		// g.End() < end
+		if beg <= g.Pos() {
+			// comment is within the range [beg, end) of import declarations
+			if i < first {
+				first = i
+			}
+			if i > last {
+				last = i
+			}
+		}
+	}
+
+	var comments []*CommentGroup
+	if last >= 0 {
+		comments = f.Comments[first : last+1]
+	}
+
+	// Assign each comment to the import spec on the same line.
+	importComments := map[*ImportSpec][]cgPos{}
+	specIndex := 0
+	for _, g := range comments {
+		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
+			specIndex++
+		}
+		var left bool
+		// A block comment can appear before the first import spec.
+		if specIndex == 0 && pos[specIndex].Start > g.Pos() {
+			left = true
+		} else if specIndex+1 < len(specs) && // Or it can appear on the left of an import spec.
+			lineAt(fset, pos[specIndex].Start)+1 == lineAt(fset, g.Pos()) {
+			specIndex++
+			left = true
+		}
+		s := specs[specIndex].(*ImportSpec)
+		importComments[s] = append(importComments[s], cgPos{left: left, cg: g})
+	}
+
+	// Sort the import specs by import path.
+	// Remove duplicates, when possible without data loss.
+	// Reassign the import paths to have the same position sequence.
+	// Reassign each comment to the spec on the same line.
+	// Sort the comments by new position.
+	sort.Slice(specs, func(i, j int) bool {
+		ipath := importPath(specs[i])
+		jpath := importPath(specs[j])
+		if ipath != jpath {
+			return ipath < jpath
+		}
+		iname := importName(specs[i])
+		jname := importName(specs[j])
+		if iname != jname {
+			return iname < jname
+		}
+		return importComment(specs[i]) < importComment(specs[j])
+	})
+
+	// Dedup. Thanks to our sorting, we can just consider
+	// adjacent pairs of imports.
+	deduped := specs[:0]
+	for i, s := range specs {
+		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
+			deduped = append(deduped, s)
+		} else {
+			p := s.Pos()
+			fset.File(p).MergeLine(lineAt(fset, p))
+		}
+	}
+	specs = deduped
+
+	// Fix up comment positions
+	for i, s := range specs {
+		s := s.(*ImportSpec)
+		if s.Name != nil {
+			s.Name.NamePos = pos[i].Start
+		}
+		s.Path.ValuePos = pos[i].Start
+		s.EndPos = pos[i].End
+		for _, g := range importComments[s] {
+			for _, c := range g.cg.List {
+				if g.left {
+					c.Slash = pos[i].Start - 1
+				} else {
+					// An import spec can have both block comment and a line comment
+					// to its right. In that case, both of them will have the same pos.
+					// But while formatting the AST, the line comment gets moved to
+					// after the block comment.
+					c.Slash = pos[i].End
+				}
+			}
+		}
+	}
+
+	sort.Slice(comments, func(i, j int) bool {
+		return comments[i].Pos() < comments[j].Pos()
+	})
+
+	return specs
+}
diff --git a/internal/backport/go/ast/issues_test.go b/internal/backport/go/ast/issues_test.go
new file mode 100644
index 0000000..3fb3205
--- /dev/null
+++ b/internal/backport/go/ast/issues_test.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast_test
+
+import (
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"testing"
+)
+
+func TestIssue33649(t *testing.T) {
+	for _, src := range []string{
+		`package p; func _()`,
+		`package p; func _() {`,
+		`package p; func _() { _ = 0`,
+		`package p; func _() { _ = 0 }`,
+	} {
+		fset := token.NewFileSet()
+		f, _ := parser.ParseFile(fset, "", src, parser.AllErrors)
+		if f == nil {
+			panic("invalid test setup: parser didn't return an AST")
+		}
+
+		// find corresponding token.File
+		var tf *token.File
+		fset.Iterate(func(f *token.File) bool {
+			tf = f
+			return true
+		})
+		tfEnd := tf.Base() + tf.Size()
+
+		fd := f.Decls[len(f.Decls)-1].(*ast.FuncDecl)
+		fdEnd := int(fd.End())
+
+		if fdEnd != tfEnd {
+			t.Errorf("%q: got fdEnd = %d; want %d (base = %d, size = %d)", src, fdEnd, tfEnd, tf.Base(), tf.Size())
+		}
+	}
+}
diff --git a/internal/backport/go/ast/print.go b/internal/backport/go/ast/print.go
new file mode 100644
index 0000000..98eeddc
--- /dev/null
+++ b/internal/backport/go/ast/print.go
@@ -0,0 +1,254 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains printing support for ASTs.
+
+package ast
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/token"
+	"io"
+	"os"
+	"reflect"
+)
+
+// A FieldFilter may be provided to Fprint to control the output.
+type FieldFilter func(name string, value reflect.Value) bool
+
+// NotNilFilter returns true for field values that are not nil;
+// it returns false otherwise.
+func NotNilFilter(_ string, v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return !v.IsNil()
+	}
+	return true
+}
+
+// Fprint prints the (sub-)tree starting at AST node x to w.
+// If fset != nil, position information is interpreted relative
+// to that file set. Otherwise positions are printed as integer
+// values (file set specific offsets).
+//
+// A non-nil FieldFilter f may be provided to control the output:
+// struct fields for which f(fieldname, fieldvalue) is true are
+// printed; all others are filtered from the output. Unexported
+// struct fields are never printed.
+func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) error {
+	return fprint(w, fset, x, f)
+}
+
+func fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (err error) {
+	// setup printer
+	p := printer{
+		output: w,
+		fset:   fset,
+		filter: f,
+		ptrmap: make(map[interface{}]int),
+		last:   '\n', // force printing of line number on first line
+	}
+
+	// install error handler
+	defer func() {
+		if e := recover(); e != nil {
+			err = e.(localError).err // re-panics if it's not a localError
+		}
+	}()
+
+	// print x
+	if x == nil {
+		p.printf("nil\n")
+		return
+	}
+	p.print(reflect.ValueOf(x))
+	p.printf("\n")
+
+	return
+}
+
+// Print prints x to standard output, skipping nil fields.
+// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
+func Print(fset *token.FileSet, x interface{}) error {
+	return Fprint(os.Stdout, fset, x, NotNilFilter)
+}
+
+type printer struct {
+	output io.Writer
+	fset   *token.FileSet
+	filter FieldFilter
+	ptrmap map[interface{}]int // *T -> line number
+	indent int                 // current indentation level
+	last   byte                // the last byte processed by Write
+	line   int                 // current line number
+}
+
+var indent = []byte(".  ")
+
+func (p *printer) Write(data []byte) (n int, err error) {
+	var m int
+	for i, b := range data {
+		// invariant: data[0:n] has been written
+		if b == '\n' {
+			m, err = p.output.Write(data[n : i+1])
+			n += m
+			if err != nil {
+				return
+			}
+			p.line++
+		} else if p.last == '\n' {
+			_, err = fmt.Fprintf(p.output, "%6d  ", p.line)
+			if err != nil {
+				return
+			}
+			for j := p.indent; j > 0; j-- {
+				_, err = p.output.Write(indent)
+				if err != nil {
+					return
+				}
+			}
+		}
+		p.last = b
+	}
+	if len(data) > n {
+		m, err = p.output.Write(data[n:])
+		n += m
+	}
+	return
+}
+
+// localError wraps locally caught errors so we can distinguish
+// them from genuine panics which we don't want to return as errors.
+type localError struct {
+	err error
+}
+
+// printf is a convenience wrapper that takes care of print errors.
+func (p *printer) printf(format string, args ...interface{}) {
+	if _, err := fmt.Fprintf(p, format, args...); err != nil {
+		panic(localError{err})
+	}
+}
+
+// Implementation note: Print is written for AST nodes but could be
+// used to print arbitrary data structures; such a version should
+// probably be in a different package.
+//
+// Note: This code detects (some) cycles created via pointers but
+// not cycles that are created via slices or maps containing the
+// same slice or map. Code for general data structures probably
+// should catch those as well.
+
+func (p *printer) print(x reflect.Value) {
+	if !NotNilFilter("", x) {
+		p.printf("nil")
+		return
+	}
+
+	switch x.Kind() {
+	case reflect.Interface:
+		p.print(x.Elem())
+
+	case reflect.Map:
+		p.printf("%s (len = %d) {", x.Type(), x.Len())
+		if x.Len() > 0 {
+			p.indent++
+			p.printf("\n")
+			for _, key := range x.MapKeys() {
+				p.print(key)
+				p.printf(": ")
+				p.print(x.MapIndex(key))
+				p.printf("\n")
+			}
+			p.indent--
+		}
+		p.printf("}")
+
+	case reflect.Ptr:
+		p.printf("*")
+		// type-checked ASTs may contain cycles - use ptrmap
+		// to keep track of objects that have been printed
+		// already and print the respective line number instead
+		ptr := x.Interface()
+		if line, exists := p.ptrmap[ptr]; exists {
+			p.printf("(obj @ %d)", line)
+		} else {
+			p.ptrmap[ptr] = p.line
+			p.print(x.Elem())
+		}
+
+	case reflect.Array:
+		p.printf("%s {", x.Type())
+		if x.Len() > 0 {
+			p.indent++
+			p.printf("\n")
+			for i, n := 0, x.Len(); i < n; i++ {
+				p.printf("%d: ", i)
+				p.print(x.Index(i))
+				p.printf("\n")
+			}
+			p.indent--
+		}
+		p.printf("}")
+
+	case reflect.Slice:
+		if s, ok := x.Interface().([]byte); ok {
+			p.printf("%#q", s)
+			return
+		}
+		p.printf("%s (len = %d) {", x.Type(), x.Len())
+		if x.Len() > 0 {
+			p.indent++
+			p.printf("\n")
+			for i, n := 0, x.Len(); i < n; i++ {
+				p.printf("%d: ", i)
+				p.print(x.Index(i))
+				p.printf("\n")
+			}
+			p.indent--
+		}
+		p.printf("}")
+
+	case reflect.Struct:
+		t := x.Type()
+		p.printf("%s {", t)
+		p.indent++
+		first := true
+		for i, n := 0, t.NumField(); i < n; i++ {
+			// exclude non-exported fields because their
+			// values cannot be accessed via reflection
+			if name := t.Field(i).Name; IsExported(name) {
+				value := x.Field(i)
+				if p.filter == nil || p.filter(name, value) {
+					if first {
+						p.printf("\n")
+						first = false
+					}
+					p.printf("%s: ", name)
+					p.print(value)
+					p.printf("\n")
+				}
+			}
+		}
+		p.indent--
+		p.printf("}")
+
+	default:
+		v := x.Interface()
+		switch v := v.(type) {
+		case string:
+			// print strings in quotes
+			p.printf("%q", v)
+			return
+		case token.Pos:
+			// position values can be printed nicely if we have a file set
+			if p.fset != nil {
+				p.printf("%s", p.fset.Position(v))
+				return
+			}
+		}
+		// default
+		p.printf("%v", v)
+	}
+}
diff --git a/internal/backport/go/ast/print_test.go b/internal/backport/go/ast/print_test.go
new file mode 100644
index 0000000..210f164
--- /dev/null
+++ b/internal/backport/go/ast/print_test.go
@@ -0,0 +1,97 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+var tests = []struct {
+	x interface{} // x is printed as s
+	s string
+}{
+	// basic types
+	{nil, "0  nil"},
+	{true, "0  true"},
+	{42, "0  42"},
+	{3.14, "0  3.14"},
+	{1 + 2.718i, "0  (1+2.718i)"},
+	{"foobar", "0  \"foobar\""},
+
+	// maps
+	{map[Expr]string{}, `0  map[ast.Expr]string (len = 0) {}`},
+	{map[string]int{"a": 1},
+		`0  map[string]int (len = 1) {
+		1  .  "a": 1
+		2  }`},
+
+	// pointers
+	{new(int), "0  *0"},
+
+	// arrays
+	{[0]int{}, `0  [0]int {}`},
+	{[3]int{1, 2, 3},
+		`0  [3]int {
+		1  .  0: 1
+		2  .  1: 2
+		3  .  2: 3
+		4  }`},
+	{[...]int{42},
+		`0  [1]int {
+		1  .  0: 42
+		2  }`},
+
+	// slices
+	{[]int{}, `0  []int (len = 0) {}`},
+	{[]int{1, 2, 3},
+		`0  []int (len = 3) {
+		1  .  0: 1
+		2  .  1: 2
+		3  .  2: 3
+		4  }`},
+
+	// structs
+	{struct{}{}, `0  struct {} {}`},
+	{struct{ x int }{007}, `0  struct { x int } {}`},
+	{struct{ X, y int }{42, 991},
+		`0  struct { X int; y int } {
+		1  .  X: 42
+		2  }`},
+	{struct{ X, Y int }{42, 991},
+		`0  struct { X int; Y int } {
+		1  .  X: 42
+		2  .  Y: 991
+		3  }`},
+}
+
+// Split s into lines, trim whitespace from all lines, and return
+// the concatenated non-empty lines.
+func trim(s string) string {
+	lines := strings.Split(s, "\n")
+	i := 0
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if line != "" {
+			lines[i] = line
+			i++
+		}
+	}
+	return strings.Join(lines[0:i], "\n")
+}
+
+func TestPrint(t *testing.T) {
+	var buf bytes.Buffer
+	for _, test := range tests {
+		buf.Reset()
+		if err := Fprint(&buf, nil, test.x, nil); err != nil {
+			t.Errorf("Fprint failed: %s", err)
+		}
+		if s, ts := trim(buf.String()), trim(test.s); s != ts {
+			t.Errorf("got:\n%s\nexpected:\n%s\n", s, ts)
+		}
+	}
+}
diff --git a/internal/backport/go/ast/resolve.go b/internal/backport/go/ast/resolve.go
new file mode 100644
index 0000000..4d3d3fd
--- /dev/null
+++ b/internal/backport/go/ast/resolve.go
@@ -0,0 +1,173 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements NewPackage.
+
+package ast
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
+	"strconv"
+)
+
+// A pkgBuilder accumulates errors reported while assembling a Package
+// from a set of files that share a single file set.
+type pkgBuilder struct {
+	fset   *token.FileSet
+	errors scanner.ErrorList
+}
+
+// error records msg at pos in the builder's error list.
+func (p *pkgBuilder) error(pos token.Pos, msg string) {
+	p.errors.Add(p.fset.Position(pos), msg)
+}
+
+// errorf records a formatted error message at pos.
+func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
+	p.error(pos, fmt.Sprintf(format, args...))
+}
+
+// declare inserts obj into scope. If the name is already declared in
+// scope — or, when altScope is non-nil, in altScope — a redeclaration
+// error is reported, including the previous declaration's position
+// when it is known.
+func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) {
+	alt := scope.Insert(obj)
+	if alt == nil && altScope != nil {
+		// see if there is a conflicting declaration in altScope
+		alt = altScope.Lookup(obj.Name)
+	}
+	if alt != nil {
+		prevDecl := ""
+		if pos := alt.Pos(); pos.IsValid() {
+			prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.fset.Position(pos))
+		}
+		p.error(obj.Pos(), fmt.Sprintf("%s redeclared in this block%s", obj.Name, prevDecl))
+	}
+}
+
+// resolve looks ident up in scope and its enclosing (outer) scopes.
+// On success it sets ident.Obj and reports true; otherwise it
+// reports false and leaves ident unchanged.
+func resolve(scope *Scope, ident *Ident) bool {
+	for ; scope != nil; scope = scope.Outer {
+		if obj := scope.Lookup(ident.Name); obj != nil {
+			ident.Obj = obj
+			return true
+		}
+	}
+	return false
+}
+
+// An Importer resolves import paths to package Objects.
+// The imports map records the packages already imported,
+// indexed by package id (canonical import path).
+// An Importer must determine the canonical import path and
+// check the map to see if it is already present in the imports map.
+// If so, the Importer can return the map entry. Otherwise, the
+// Importer should load the package data for the given path into
+// a new *Object (pkg), record pkg in the imports map, and then
+// return pkg.
+// NewPackage calls the Importer once per import spec in each file.
+type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
+
+// NewPackage creates a new Package node from a set of File nodes. It resolves
+// unresolved identifiers across files and updates each file's Unresolved list
+// accordingly. If a non-nil importer and universe scope are provided, they are
+// used to resolve identifiers not declared in any of the package files. Any
+// remaining unresolved identifiers are reported as undeclared. If the files
+// belong to different packages, one package name is selected and files with
+// different package names are reported and then ignored.
+// The result is a package node and a scanner.ErrorList if there were errors.
+func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error) {
+	var p pkgBuilder
+	p.fset = fset
+
+	// complete package scope
+	pkgName := ""
+	pkgScope := NewScope(universe)
+	for _, file := range files {
+		// package names must match
+		// (the first name seen wins; map iteration order decides which)
+		switch name := file.Name.Name; {
+		case pkgName == "":
+			pkgName = name
+		case name != pkgName:
+			p.errorf(file.Package, "package %s; expected %s", name, pkgName)
+			continue // ignore this file
+		}
+
+		// collect top-level file objects in package scope
+		for _, obj := range file.Scope.Objects {
+			p.declare(pkgScope, nil, obj)
+		}
+	}
+
+	// package global mapping of imported package ids to package objects
+	imports := make(map[string]*Object)
+
+	// complete file scopes with imports and resolve identifiers
+	for _, file := range files {
+		// ignore file if it belongs to a different package
+		// (error has already been reported)
+		if file.Name.Name != pkgName {
+			continue
+		}
+
+		// build file scope by processing all imports
+		importErrors := false
+		fileScope := NewScope(pkgScope)
+		for _, spec := range file.Imports {
+			if importer == nil {
+				importErrors = true
+				continue
+			}
+			path, _ := strconv.Unquote(spec.Path.Value)
+			pkg, err := importer(imports, path)
+			if err != nil {
+				p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
+				importErrors = true
+				continue
+			}
+			// TODO(gri) If a local package name != "." is provided,
+			// global identifier resolution could proceed even if the
+			// import failed. Consider adjusting the logic here a bit.
+
+			// local name overrides imported package name
+			name := pkg.Name
+			if spec.Name != nil {
+				name = spec.Name.Name
+			}
+
+			// add import to file scope
+			if name == "." {
+				// merge imported scope with file scope
+				for _, obj := range pkg.Data.(*Scope).Objects {
+					p.declare(fileScope, pkgScope, obj)
+				}
+			} else if name != "_" {
+				// declare imported package object in file scope
+				// (do not re-use pkg in the file scope but create
+				// a new object instead; the Decl field is different
+				// for different files)
+				obj := NewObj(Pkg, name)
+				obj.Decl = spec
+				obj.Data = pkg.Data
+				p.declare(fileScope, pkgScope, obj)
+			}
+		}
+
+		// resolve identifiers
+		if importErrors {
+			// don't use the universe scope without correct imports
+			// (objects in the universe may be shadowed by imports;
+			// with missing imports, identifiers might get resolved
+			// incorrectly to universe objects)
+			pkgScope.Outer = nil
+		}
+		i := 0
+		// identifiers that remain unresolved are reported and kept in
+		// file.Unresolved (compacted in place); resolved ones are dropped
+		for _, ident := range file.Unresolved {
+			if !resolve(fileScope, ident) {
+				p.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
+				file.Unresolved[i] = ident
+				i++
+			}
+
+		}
+		file.Unresolved = file.Unresolved[0:i]
+		pkgScope.Outer = universe // reset universe scope
+	}
+
+	p.errors.Sort()
+	return &Package{pkgName, pkgScope, imports, files}, p.errors.Err()
+}
diff --git a/internal/backport/go/ast/scope.go b/internal/backport/go/ast/scope.go
new file mode 100644
index 0000000..165c935
--- /dev/null
+++ b/internal/backport/go/ast/scope.go
@@ -0,0 +1,156 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements scopes and the objects they contain.
+
+package ast
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// A Scope maintains the set of named language entities declared
+// in the scope and a link to the immediately surrounding (outer)
+// scope. The Outer link is nil for the outermost scope.
+type Scope struct {
+	Outer   *Scope
+	Objects map[string]*Object
+}
+
+// NewScope creates a new scope nested in the outer scope.
+// outer may be nil for a top-level scope.
+func NewScope(outer *Scope) *Scope {
+	const n = 4 // initial scope capacity
+	return &Scope{outer, make(map[string]*Object, n)}
+}
+
+// Lookup returns the object with the given name if it is
+// found in scope s, otherwise it returns nil. Outer scopes
+// are ignored. (Use the package-level resolve for a search
+// along the Outer chain.)
+func (s *Scope) Lookup(name string) *Object {
+	return s.Objects[name]
+}
+
+// Insert attempts to insert a named object obj into the scope s.
+// If the scope already contains an object alt with the same name,
+// Insert leaves the scope unchanged and returns alt. Otherwise
+// it inserts obj and returns nil.
+func (s *Scope) Insert(obj *Object) (alt *Object) {
+	if alt = s.Objects[obj.Name]; alt == nil {
+		s.Objects[obj.Name] = obj
+	}
+	return
+}
+
+// Debugging support
+
+// String returns a human-readable rendering of the scope and its
+// objects. It is safe to call on a nil *Scope.
+func (s *Scope) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "scope %p {", s)
+	if s != nil && len(s.Objects) > 0 {
+		fmt.Fprintln(&buf)
+		for _, obj := range s.Objects {
+			fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
+		}
+	}
+	fmt.Fprintf(&buf, "}\n")
+	return buf.String()
+}
+
+// ----------------------------------------------------------------------------
+// Objects
+
+// An Object describes a named language entity such as a package,
+// constant, type, variable, function (incl. methods), or label.
+//
+// The Data fields contains object-specific data:
+//
+//	Kind    Data type         Data value
+//	Pkg     *Scope            package scope
+//	Con     int               iota for the respective declaration
+type Object struct {
+	Kind ObjKind
+	Name string      // declared name
+	Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil
+	Data interface{} // object-specific data; or nil
+	Type interface{} // placeholder for type information; may be nil
+}
+
+// NewObj creates a new object of a given kind and name.
+// The remaining fields (Decl, Data, Type) are left nil.
+func NewObj(kind ObjKind, name string) *Object {
+	return &Object{Kind: kind, Name: name}
+}
+
+// Pos computes the source position of the declaration of an object name.
+// The result may be an invalid position if it cannot be computed
+// (obj.Decl may be nil or not correct).
+// Each case searches obj.Decl for the identifier matching obj.Name.
+func (obj *Object) Pos() token.Pos {
+	name := obj.Name
+	switch d := obj.Decl.(type) {
+	case *Field:
+		for _, n := range d.Names {
+			if n.Name == name {
+				return n.Pos()
+			}
+		}
+	case *ImportSpec:
+		if d.Name != nil && d.Name.Name == name {
+			return d.Name.Pos()
+		}
+		// no local name: report the import path position
+		return d.Path.Pos()
+	case *ValueSpec:
+		for _, n := range d.Names {
+			if n.Name == name {
+				return n.Pos()
+			}
+		}
+	case *TypeSpec:
+		if d.Name.Name == name {
+			return d.Name.Pos()
+		}
+	case *FuncDecl:
+		if d.Name.Name == name {
+			return d.Name.Pos()
+		}
+	case *LabeledStmt:
+		if d.Label.Name == name {
+			return d.Label.Pos()
+		}
+	case *AssignStmt:
+		for _, x := range d.Lhs {
+			if ident, isIdent := x.(*Ident); isIdent && ident.Name == name {
+				return ident.Pos()
+			}
+		}
+	case *Scope:
+		// predeclared object - nothing to do for now
+	}
+	return token.NoPos
+}
+
+// ObjKind describes what an object represents.
+type ObjKind int
+
+// The list of possible Object kinds.
+const (
+	Bad ObjKind = iota // for error handling
+	Pkg                // package
+	Con                // constant
+	Typ                // type
+	Var                // variable
+	Fun                // function or method
+	Lbl                // label
+)
+
+// objKindStrings maps each ObjKind to its display name, indexed by kind.
+var objKindStrings = [...]string{
+	Bad: "bad",
+	Pkg: "package",
+	Con: "const",
+	Typ: "type",
+	Var: "var",
+	Fun: "func",
+	Lbl: "label",
+}
+
+// String returns the display name of the kind (e.g. "const" for Con).
+func (kind ObjKind) String() string { return objKindStrings[kind] }
diff --git a/internal/backport/go/ast/walk.go b/internal/backport/go/ast/walk.go
new file mode 100644
index 0000000..0c64d27
--- /dev/null
+++ b/internal/backport/go/ast/walk.go
@@ -0,0 +1,398 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import "fmt"
+
+// A Visitor's Visit method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil).
+// Returning nil prunes the subtree rooted at node.
+type Visitor interface {
+	Visit(node Node) (w Visitor)
+}
+
+// Helper functions for common node lists. They may be empty.
+
+// walkIdentList walks each identifier in list.
+func walkIdentList(v Visitor, list []*Ident) {
+	for _, x := range list {
+		Walk(v, x)
+	}
+}
+
+// walkExprList walks each expression in list.
+func walkExprList(v Visitor, list []Expr) {
+	for _, x := range list {
+		Walk(v, x)
+	}
+}
+
+// walkStmtList walks each statement in list.
+func walkStmtList(v Visitor, list []Stmt) {
+	for _, x := range list {
+		Walk(v, x)
+	}
+}
+
+// walkDeclList walks each declaration in list.
+func walkDeclList(v Visitor, list []Decl) {
+	for _, x := range list {
+		Walk(v, x)
+	}
+}
+
+// TODO(gri): Investigate if providing a closure to Walk leads to
+//            simpler use (and may help eliminate Inspect in turn).
+
+// Walk traverses an AST in depth-first order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+func Walk(v Visitor, node Node) {
+	if v = v.Visit(node); v == nil {
+		return
+	}
+
+	// walk children
+	// (the order of the cases matches the order
+	// of the corresponding node types in ast.go)
+	switch n := node.(type) {
+	// Comments and fields
+	case *Comment:
+		// nothing to do
+
+	case *CommentGroup:
+		for _, c := range n.List {
+			Walk(v, c)
+		}
+
+	case *Field:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		walkIdentList(v, n.Names)
+		if n.Type != nil {
+			Walk(v, n.Type)
+		}
+		if n.Tag != nil {
+			Walk(v, n.Tag)
+		}
+		if n.Comment != nil {
+			Walk(v, n.Comment)
+		}
+
+	case *FieldList:
+		for _, f := range n.List {
+			Walk(v, f)
+		}
+
+	// Expressions
+	case *BadExpr, *Ident, *BasicLit:
+		// nothing to do
+
+	case *Ellipsis:
+		if n.Elt != nil {
+			Walk(v, n.Elt)
+		}
+
+	case *FuncLit:
+		Walk(v, n.Type)
+		Walk(v, n.Body)
+
+	case *CompositeLit:
+		if n.Type != nil {
+			Walk(v, n.Type)
+		}
+		walkExprList(v, n.Elts)
+
+	case *ParenExpr:
+		Walk(v, n.X)
+
+	case *SelectorExpr:
+		Walk(v, n.X)
+		Walk(v, n.Sel)
+
+	case *IndexExpr:
+		Walk(v, n.X)
+		Walk(v, n.Index)
+
+	// IndexListExpr represents an index/instantiation with multiple
+	// indices, e.g. generic instantiations with several type arguments
+	// (new in Go 1.18; part of what this backport enables).
+	case *IndexListExpr:
+		Walk(v, n.X)
+		for _, index := range n.Indices {
+			Walk(v, index)
+		}
+
+	case *SliceExpr:
+		Walk(v, n.X)
+		if n.Low != nil {
+			Walk(v, n.Low)
+		}
+		if n.High != nil {
+			Walk(v, n.High)
+		}
+		if n.Max != nil {
+			Walk(v, n.Max)
+		}
+
+	case *TypeAssertExpr:
+		Walk(v, n.X)
+		if n.Type != nil {
+			Walk(v, n.Type)
+		}
+
+	case *CallExpr:
+		Walk(v, n.Fun)
+		walkExprList(v, n.Args)
+
+	case *StarExpr:
+		Walk(v, n.X)
+
+	case *UnaryExpr:
+		Walk(v, n.X)
+
+	case *BinaryExpr:
+		Walk(v, n.X)
+		Walk(v, n.Y)
+
+	case *KeyValueExpr:
+		Walk(v, n.Key)
+		Walk(v, n.Value)
+
+	// Types
+	case *ArrayType:
+		if n.Len != nil {
+			Walk(v, n.Len)
+		}
+		Walk(v, n.Elt)
+
+	case *StructType:
+		Walk(v, n.Fields)
+
+	case *FuncType:
+		if n.TypeParams != nil {
+			Walk(v, n.TypeParams)
+		}
+		if n.Params != nil {
+			Walk(v, n.Params)
+		}
+		if n.Results != nil {
+			Walk(v, n.Results)
+		}
+
+	case *InterfaceType:
+		Walk(v, n.Methods)
+
+	case *MapType:
+		Walk(v, n.Key)
+		Walk(v, n.Value)
+
+	case *ChanType:
+		Walk(v, n.Value)
+
+	// Statements
+	case *BadStmt:
+		// nothing to do
+
+	case *DeclStmt:
+		Walk(v, n.Decl)
+
+	case *EmptyStmt:
+		// nothing to do
+
+	case *LabeledStmt:
+		Walk(v, n.Label)
+		Walk(v, n.Stmt)
+
+	case *ExprStmt:
+		Walk(v, n.X)
+
+	case *SendStmt:
+		Walk(v, n.Chan)
+		Walk(v, n.Value)
+
+	case *IncDecStmt:
+		Walk(v, n.X)
+
+	case *AssignStmt:
+		walkExprList(v, n.Lhs)
+		walkExprList(v, n.Rhs)
+
+	case *GoStmt:
+		Walk(v, n.Call)
+
+	case *DeferStmt:
+		Walk(v, n.Call)
+
+	case *ReturnStmt:
+		walkExprList(v, n.Results)
+
+	case *BranchStmt:
+		if n.Label != nil {
+			Walk(v, n.Label)
+		}
+
+	case *BlockStmt:
+		walkStmtList(v, n.List)
+
+	case *IfStmt:
+		if n.Init != nil {
+			Walk(v, n.Init)
+		}
+		Walk(v, n.Cond)
+		Walk(v, n.Body)
+		if n.Else != nil {
+			Walk(v, n.Else)
+		}
+
+	case *CaseClause:
+		walkExprList(v, n.List)
+		walkStmtList(v, n.Body)
+
+	case *SwitchStmt:
+		if n.Init != nil {
+			Walk(v, n.Init)
+		}
+		if n.Tag != nil {
+			Walk(v, n.Tag)
+		}
+		Walk(v, n.Body)
+
+	case *TypeSwitchStmt:
+		if n.Init != nil {
+			Walk(v, n.Init)
+		}
+		Walk(v, n.Assign)
+		Walk(v, n.Body)
+
+	case *CommClause:
+		if n.Comm != nil {
+			Walk(v, n.Comm)
+		}
+		walkStmtList(v, n.Body)
+
+	case *SelectStmt:
+		Walk(v, n.Body)
+
+	case *ForStmt:
+		if n.Init != nil {
+			Walk(v, n.Init)
+		}
+		if n.Cond != nil {
+			Walk(v, n.Cond)
+		}
+		if n.Post != nil {
+			Walk(v, n.Post)
+		}
+		Walk(v, n.Body)
+
+	case *RangeStmt:
+		if n.Key != nil {
+			Walk(v, n.Key)
+		}
+		if n.Value != nil {
+			Walk(v, n.Value)
+		}
+		Walk(v, n.X)
+		Walk(v, n.Body)
+
+	// Declarations
+	case *ImportSpec:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		if n.Name != nil {
+			Walk(v, n.Name)
+		}
+		Walk(v, n.Path)
+		if n.Comment != nil {
+			Walk(v, n.Comment)
+		}
+
+	case *ValueSpec:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		walkIdentList(v, n.Names)
+		if n.Type != nil {
+			Walk(v, n.Type)
+		}
+		walkExprList(v, n.Values)
+		if n.Comment != nil {
+			Walk(v, n.Comment)
+		}
+
+	case *TypeSpec:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		Walk(v, n.Name)
+		if n.TypeParams != nil {
+			Walk(v, n.TypeParams)
+		}
+		Walk(v, n.Type)
+		if n.Comment != nil {
+			Walk(v, n.Comment)
+		}
+
+	case *BadDecl:
+		// nothing to do
+
+	case *GenDecl:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		for _, s := range n.Specs {
+			Walk(v, s)
+		}
+
+	case *FuncDecl:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		if n.Recv != nil {
+			Walk(v, n.Recv)
+		}
+		Walk(v, n.Name)
+		Walk(v, n.Type)
+		if n.Body != nil {
+			Walk(v, n.Body)
+		}
+
+	// Files and packages
+	case *File:
+		if n.Doc != nil {
+			Walk(v, n.Doc)
+		}
+		Walk(v, n.Name)
+		walkDeclList(v, n.Decls)
+		// don't walk n.Comments - they have been
+		// visited already through the individual
+		// nodes
+
+	case *Package:
+		for _, f := range n.Files {
+			Walk(v, f)
+		}
+
+	default:
+		panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
+	}
+
+	v.Visit(nil)
+}
+
+// An inspector adapts a func(Node) bool to the Visitor interface:
+// a true result continues the traversal with the same function.
+type inspector func(Node) bool
+
+func (f inspector) Visit(node Node) Visitor {
+	if f(node) {
+		return f
+	}
+	return nil
+}
+
+// Inspect traverses an AST in depth-first order: It starts by calling
+// f(node); node must not be nil. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of node, followed by a
+// call of f(nil). It is a convenience wrapper around Walk.
+func Inspect(node Node, f func(Node) bool) {
+	Walk(inspector(f), node)
+}
diff --git a/internal/backport/go/build/build.go b/internal/backport/go/build/build.go
new file mode 100644
index 0000000..12866e7
--- /dev/null
+++ b/internal/backport/go/build/build.go
@@ -0,0 +1,1973 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"go/build"
+	"go/build/constraint"
+	"golang.org/x/website/internal/backport/go/doc"
+	"io"
+	"io/fs"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	pathpkg "path"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// A Context specifies the supporting context for a build.
+// It mirrors go/build.Context (see defaultContext, which copies the
+// shared fields from build.Default).
+type Context struct {
+	GOARCH string // target architecture
+	GOOS   string // target operating system
+	GOROOT string // Go root
+	GOPATH string // Go path
+
+	// Dir is the caller's working directory, or the empty string to use
+	// the current directory of the running process. In module mode, this is used
+	// to locate the main module.
+	//
+	// If Dir is non-empty, directories passed to Import and ImportDir must
+	// be absolute.
+	Dir string
+
+	CgoEnabled  bool   // whether cgo files are included
+	UseAllFiles bool   // use files regardless of +build lines, file names
+	Compiler    string // compiler to assume when computing target paths
+
+	// The build, tool, and release tags specify build constraints
+	// that should be considered satisfied when processing +build lines.
+	// Clients creating a new context may customize BuildTags, which
+	// defaults to empty, but it is usually an error to customize ToolTags or ReleaseTags.
+	// ToolTags defaults to build tags appropriate to the current Go toolchain configuration.
+	// ReleaseTags defaults to the list of Go releases the current release is compatible with.
+	// BuildTags is not set for the Default build Context.
+	// In addition to the BuildTags, ToolTags, and ReleaseTags, build constraints
+	// consider the values of GOARCH and GOOS as satisfied tags.
+	// The last element in ReleaseTags is assumed to be the current release.
+	BuildTags   []string
+	ToolTags    []string
+	ReleaseTags []string
+
+	// The install suffix specifies a suffix to use in the name of the installation
+	// directory. By default it is empty, but custom builds that need to keep
+	// their outputs separate can set InstallSuffix to do so. For example, when
+	// using the race detector, the go command uses InstallSuffix = "race", so
+	// that on a Linux/386 system, packages are written to a directory named
+	// "linux_386_race" instead of the usual "linux_386".
+	InstallSuffix string
+
+	// By default, Import uses the operating system's file system calls
+	// to read directories and files. To read from other sources,
+	// callers can set the following functions. They all have default
+	// behaviors that use the local file system, so clients need only set
+	// the functions whose behaviors they wish to change.
+
+	// JoinPath joins the sequence of path fragments into a single path.
+	// If JoinPath is nil, Import uses filepath.Join.
+	JoinPath func(elem ...string) string
+
+	// SplitPathList splits the path list into a slice of individual paths.
+	// If SplitPathList is nil, Import uses filepath.SplitList.
+	SplitPathList func(list string) []string
+
+	// IsAbsPath reports whether path is an absolute path.
+	// If IsAbsPath is nil, Import uses filepath.IsAbs.
+	IsAbsPath func(path string) bool
+
+	// IsDir reports whether the path names a directory.
+	// If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
+	IsDir func(path string) bool
+
+	// HasSubdir reports whether dir is lexically a subdirectory of
+	// root, perhaps multiple levels below. It does not try to check
+	// whether dir exists.
+	// If so, HasSubdir sets rel to a slash-separated path that
+	// can be joined to root to produce a path equivalent to dir.
+	// If HasSubdir is nil, Import uses an implementation built on
+	// filepath.EvalSymlinks.
+	HasSubdir func(root, dir string) (rel string, ok bool)
+
+	// ReadDir returns a slice of fs.FileInfo, sorted by Name,
+	// describing the content of the named directory.
+	// If ReadDir is nil, Import uses ioutil.ReadDir.
+	ReadDir func(dir string) ([]fs.FileInfo, error)
+
+	// OpenFile opens a file (not a directory) for reading.
+	// If OpenFile is nil, Import uses os.Open.
+	OpenFile func(path string) (io.ReadCloser, error)
+}
+
+// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
+func (ctxt *Context) joinPath(elem ...string) string {
+	if f := ctxt.JoinPath; f != nil {
+		return f(elem...)
+	}
+	return filepath.Join(elem...)
+}
+
+// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
+func (ctxt *Context) splitPathList(s string) []string {
+	if f := ctxt.SplitPathList; f != nil {
+		return f(s)
+	}
+	return filepath.SplitList(s)
+}
+
+// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
+func (ctxt *Context) isAbsPath(path string) bool {
+	if f := ctxt.IsAbsPath; f != nil {
+		return f(path)
+	}
+	return filepath.IsAbs(path)
+}
+
+// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat.
+// Stat errors are treated as "not a directory".
+func (ctxt *Context) isDir(path string) bool {
+	if f := ctxt.IsDir; f != nil {
+		return f(path)
+	}
+	fi, err := os.Stat(path)
+	return err == nil && fi.IsDir()
+}
+
+// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question, retrying with
+// symlink-expanded forms of root and dir if the literal paths
+// do not match lexically.
+func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
+	if f := ctxt.HasSubdir; f != nil {
+		return f(root, dir)
+	}
+
+	// Try using paths we received.
+	if rel, ok = hasSubdir(root, dir); ok {
+		return
+	}
+
+	// Try expanding symlinks and comparing
+	// expanded against unexpanded and
+	// expanded against expanded.
+	rootSym, _ := filepath.EvalSymlinks(root)
+	dirSym, _ := filepath.EvalSymlinks(dir)
+
+	if rel, ok = hasSubdir(rootSym, dir); ok {
+		return
+	}
+	if rel, ok = hasSubdir(root, dirSym); ok {
+		return
+	}
+	return hasSubdir(rootSym, dirSym)
+}
+
+// hasSubdir reports if dir is within root by performing lexical analysis only.
+// On success, rel is the slash-separated path of dir relative to root.
+func hasSubdir(root, dir string) (rel string, ok bool) {
+	const sep = string(filepath.Separator)
+	root = filepath.Clean(root)
+	if !strings.HasSuffix(root, sep) {
+		// ensure the prefix test matches whole path elements only
+		root += sep
+	}
+	dir = filepath.Clean(dir)
+	if !strings.HasPrefix(dir, root) {
+		return "", false
+	}
+	return filepath.ToSlash(dir[len(root):]), true
+}
+
+// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir.
+// (ioutil.ReadDir is kept for Go 1.16 compatibility.)
+func (ctxt *Context) readDir(path string) ([]fs.FileInfo, error) {
+	if f := ctxt.ReadDir; f != nil {
+		return f(path)
+	}
+	// TODO: use os.ReadDir
+	return ioutil.ReadDir(path)
+}
+
+// openFile calls ctxt.OpenFile (if not nil) or else os.Open.
+func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
+	if fn := ctxt.OpenFile; fn != nil {
+		return fn(path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err // nil interface
+	}
+	return f, nil
+}
+
+// isFile determines whether path is a file by trying to open it.
+// It reuses openFile instead of adding another function to the
+// list in Context. The file is closed immediately; only the
+// ability to open it is tested.
+func (ctxt *Context) isFile(path string) bool {
+	f, err := ctxt.openFile(path)
+	if err != nil {
+		return false
+	}
+	f.Close()
+	return true
+}
+
+// gopath returns the list of Go path directories, filtering out
+// entries that are empty, equal to GOROOT, or start with an
+// unexpanded "~".
+func (ctxt *Context) gopath() []string {
+	var all []string
+	for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+		if p == "" || p == ctxt.GOROOT {
+			// Empty paths are uninteresting.
+			// If the path is the GOROOT, ignore it.
+			// People sometimes set GOPATH=$GOROOT.
+			// Do not get confused by this common mistake.
+			continue
+		}
+		if strings.HasPrefix(p, "~") {
+			// Path segments starting with ~ on Unix are almost always
+			// users who have incorrectly quoted ~ while setting GOPATH,
+			// preventing it from expanding to $HOME.
+			// The situation is made more confusing by the fact that
+			// bash allows quoted ~ in $PATH (most shells do not).
+			// Do not get confused by this, and do not try to use the path.
+			// It does not exist, and printing errors about it confuses
+			// those users even more, because they think "sure ~ exists!".
+			// The go command diagnoses this situation and prints a
+			// useful error.
+			// On Windows, ~ is used in short names, such as c:\progra~1
+			// for c:\program files.
+			continue
+		}
+		all = append(all, p)
+	}
+	return all
+}
+
+// SrcDirs returns a list of package source root directories.
+// It draws from the current Go root and Go path but omits directories
+// that do not exist.
+// (GOROOT/src is skipped entirely for the gccgo compiler.)
+func (ctxt *Context) SrcDirs() []string {
+	var all []string
+	if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" {
+		dir := ctxt.joinPath(ctxt.GOROOT, "src")
+		if ctxt.isDir(dir) {
+			all = append(all, dir)
+		}
+	}
+	for _, p := range ctxt.gopath() {
+		dir := ctxt.joinPath(p, "src")
+		if ctxt.isDir(dir) {
+			all = append(all, dir)
+		}
+	}
+	return all
+}
+
+// Default is the default Context for builds.
+// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
+// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
+// It is initialized from the standard library's build.Default via defaultContext.
+var Default Context = defaultContext()
+
+// defaultContext builds this package's Default context by copying the
+// corresponding fields from the standard library's go/build.Default.
+// NOTE(review): ToolTags is not copied here — presumably because
+// go/build on Go 1.16 (the deployment target per the backport) does
+// not have that field; confirm if ToolTags should be populated another way.
+func defaultContext() Context {
+	d := build.Default
+	return Context{
+		GOARCH:        d.GOARCH,
+		GOOS:          d.GOOS,
+		GOROOT:        d.GOROOT,
+		GOPATH:        d.GOPATH,
+		Dir:           d.Dir,
+		CgoEnabled:    d.CgoEnabled,
+		UseAllFiles:   d.UseAllFiles,
+		Compiler:      d.Compiler,
+		BuildTags:     d.BuildTags,
+		ReleaseTags:   d.ReleaseTags,
+		InstallSuffix: d.InstallSuffix,
+		JoinPath:      d.JoinPath,
+		SplitPathList: d.SplitPathList,
+		IsAbsPath:     d.IsAbsPath,
+		IsDir:         d.IsDir,
+		HasSubdir:     d.HasSubdir,
+		ReadDir:       d.ReadDir,
+		OpenFile:      d.OpenFile,
+	}
+}
+
+// defaultGOPATH computes the default GOPATH ($HOME/go, or the
+// platform equivalent), returning "" if it would collide with GOROOT.
+// NOTE(review): not referenced in this visible portion of the file —
+// presumably used by Import-path logic later in the file; confirm.
+func defaultGOPATH() string {
+	env := "HOME"
+	if runtime.GOOS == "windows" {
+		env = "USERPROFILE"
+	} else if runtime.GOOS == "plan9" {
+		env = "home"
+	}
+	if home := os.Getenv(env); home != "" {
+		def := filepath.Join(home, "go")
+		if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
+			// Don't set the default GOPATH to GOROOT,
+			// as that will trigger warnings from the go tool.
+			return ""
+		}
+		return def
+	}
+	return ""
+}
+
+// defaultReleaseTags mirrors the release tags of the running toolchain's go/build.
+var defaultReleaseTags = build.Default.ReleaseTags
+
+// envOr returns the value of the environment variable name,
+// or def if the variable is unset or empty.
+func envOr(name, def string) string {
+	s := os.Getenv(name)
+	if s == "" {
+		return def
+	}
+	return s
+}
+
+// An ImportMode controls the behavior of the Import method.
+// Mode values are bit flags and may be combined with bitwise OR.
+type ImportMode uint
+
+const (
+	// If FindOnly is set, Import stops after locating the directory
+	// that should contain the sources for a package. It does not
+	// read any files in the directory.
+	FindOnly ImportMode = 1 << iota
+
+	// If AllowBinary is set, Import can be satisfied by a compiled
+	// package object without corresponding sources.
+	//
+	// Deprecated:
+	// The supported way to create a compiled-only package is to
+	// write source code containing a //go:binary-only-package comment at
+	// the top of the file. Such a package will be recognized
+	// regardless of this flag setting (because it has source code)
+	// and will have BinaryOnly set to true in the returned Package.
+	AllowBinary
+
+	// If ImportComment is set, parse import comments on package statements.
+	// Import returns an error if it finds a comment it cannot understand
+	// or finds conflicting comments in multiple source files.
+	// See golang.org/s/go14customimport for more information.
+	ImportComment
+
+	// By default, Import searches vendor directories
+	// that apply in the given source directory before searching
+	// the GOROOT and GOPATH roots.
+	// If an Import finds and returns a package using a vendor
+	// directory, the resulting ImportPath is the complete path
+	// to the package, including the path elements leading up
+	// to and including "vendor".
+	// For example, if Import("y", "x/subdir", 0) finds
+	// "x/vendor/y", the returned package's ImportPath is "x/vendor/y",
+	// not plain "y".
+	// See golang.org/s/go15vendor for more information.
+	//
+	// Setting IgnoreVendor ignores vendor directories.
+	//
+	// In contrast to the package's ImportPath,
+	// the returned package's Imports, TestImports, and XTestImports
+	// are always the exact import paths from the source files:
+	// Import makes no attempt to resolve or check those paths.
+	IgnoreVendor
+)
+
+// A Package describes the Go package found in a directory.
+type Package struct {
+	Dir           string   // directory containing package sources
+	Name          string   // package name
+	ImportComment string   // path in import comment on package statement
+	Doc           string   // documentation synopsis
+	ImportPath    string   // import path of package ("" if unknown)
+	Root          string   // root of Go tree where this package lives
+	SrcRoot       string   // package source root directory ("" if unknown)
+	PkgRoot       string   // package install root directory ("" if unknown)
+	PkgTargetRoot string   // architecture dependent install root directory ("" if unknown)
+	BinDir        string   // command install directory ("" if unknown)
+	Goroot        bool     // package found in Go root
+	PkgObj        string   // installed .a file
+	AllTags       []string // tags that can influence file selection in this directory
+	ConflictDir   string   // this directory shadows Dir in $GOPATH
+	BinaryOnly    bool     // cannot be rebuilt from source (has //go:binary-only-package comment)
+
+	// Source files
+	GoFiles           []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+	CgoFiles          []string // .go source files that import "C"
+	IgnoredGoFiles    []string // .go source files ignored for this build (including ignored _test.go files)
+	InvalidGoFiles    []string // .go source files with detected problems (parse error, wrong package name, and so on)
+	IgnoredOtherFiles []string // non-.go source files ignored for this build
+	CFiles            []string // .c source files
+	CXXFiles          []string // .cc, .cpp and .cxx source files
+	MFiles            []string // .m (Objective-C) source files
+	HFiles            []string // .h, .hh, .hpp and .hxx source files
+	FFiles            []string // .f, .F, .for and .f90 Fortran source files
+	SFiles            []string // .s source files
+	SwigFiles         []string // .swig files
+	SwigCXXFiles      []string // .swigcxx files
+	SysoFiles         []string // .syso system object files to add to archive
+
+	// Cgo directives
+	CgoCFLAGS    []string // Cgo CFLAGS directives
+	CgoCPPFLAGS  []string // Cgo CPPFLAGS directives
+	CgoCXXFLAGS  []string // Cgo CXXFLAGS directives
+	CgoFFLAGS    []string // Cgo FFLAGS directives
+	CgoLDFLAGS   []string // Cgo LDFLAGS directives
+	CgoPkgConfig []string // Cgo pkg-config directives
+
+	// Test information
+	TestGoFiles  []string // _test.go files in package
+	XTestGoFiles []string // _test.go files outside package
+
+	// Dependency information
+	Imports        []string                    // import paths from GoFiles, CgoFiles
+	ImportPos      map[string][]token.Position // line information for Imports
+	TestImports    []string                    // import paths from TestGoFiles
+	TestImportPos  map[string][]token.Position // line information for TestImports
+	XTestImports   []string                    // import paths from XTestGoFiles
+	XTestImportPos map[string][]token.Position // line information for XTestImports
+
+	// //go:embed patterns found in Go source files
+	// For example, if a source file says
+	//	//go:embed a* b.c
+	// then the list will contain those two strings as separate entries.
+	// (See package embed for more details about //go:embed.)
+	EmbedPatterns        []string                    // patterns from GoFiles, CgoFiles
+	EmbedPatternPos      map[string][]token.Position // line information for EmbedPatterns
+	TestEmbedPatterns    []string                    // patterns from TestGoFiles
+	TestEmbedPatternPos  map[string][]token.Position // line information for TestEmbedPatterns
+	XTestEmbedPatterns   []string                    // patterns from XTestGoFiles
+	XTestEmbedPatternPos map[string][]token.Position // line information for XTestEmbedPatterns
+}
+
+// IsCommand reports whether the package is considered a
+// command to be installed (not just a library).
+// Packages named "main" are treated as commands.
+// (The decision is based solely on p.Name; no files are examined.)
+func (p *Package) IsCommand() bool {
+	return p.Name == "main"
+}
+
+// ImportDir is like Import but processes the Go package found in
+// the named directory.
+// It does so by importing the local path "." relative to dir.
+func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
+	return ctxt.Import(".", dir, mode)
+}
+
+// NoGoError is the error used by Import to describe a directory
+// containing no buildable Go source files. (It may still contain
+// test files, files hidden by build tags, and so on.)
+type NoGoError struct {
+	Dir string // the directory that was examined
+}
+
+// Error implements the error interface.
+func (e *NoGoError) Error() string {
+	return "no buildable Go source files in " + e.Dir
+}
+
+// MultiplePackageError describes a directory containing
+// multiple buildable Go source files for multiple packages.
+type MultiplePackageError struct {
+	Dir      string   // directory containing files
+	Packages []string // package names found
+	Files    []string // corresponding files: Files[i] declares package Packages[i]
+}
+
+// Error reports the first two conflicting package/file pairs.
+func (e *MultiplePackageError) Error() string {
+	// Error string limited to two entries for compatibility.
+	return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir)
+}
+
+// nameExt returns the file name extension of name — the suffix
+// starting at the final dot, dot included — or "" if name has no dot.
+func nameExt(name string) string {
+	if i := strings.LastIndexByte(name, '.'); i >= 0 {
+		return name[i:]
+	}
+	return ""
+}
+
+// Import returns details about the Go package named by the import path,
+// interpreting local import paths relative to the srcDir directory.
+// If the path is a local import path naming a package that can be imported
+// using a standard import path, the returned package will set p.ImportPath
+// to that path.
+//
+// In the directory containing the package, .go, .c, .h, and .s files are
+// considered part of the package except for:
+//
+//   - .go files in package documentation
+//   - files starting with _ or . (likely editor temporary files)
+//   - files with build constraints not satisfied by the context
+//
+// If an error occurs, Import returns a non-nil error and a non-nil
+// *Package containing partial information.
+func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
+	p := &Package{
+		ImportPath: path,
+	}
+	if path == "" {
+		return p, fmt.Errorf("import %q: invalid import path", path)
+	}
+
+	var pkgtargetroot string
+	var pkga string
+	var pkgerr error
+	suffix := ""
+	if ctxt.InstallSuffix != "" {
+		suffix = "_" + ctxt.InstallSuffix
+	}
+	switch ctxt.Compiler {
+	case "gccgo":
+		pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+	case "gc":
+		pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+	default:
+		// Save error for end of function.
+		pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
+	}
+	// setPkga recomputes the installed package object path pkga from
+	// p.ImportPath; it must be re-run whenever p.ImportPath changes.
+	setPkga := func() {
+		switch ctxt.Compiler {
+		case "gccgo":
+			dir, elem := pathpkg.Split(p.ImportPath)
+			pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
+		case "gc":
+			pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
+		}
+	}
+	setPkga()
+
+	// binaryOnly records that only an installed .a file (no source dir)
+	// was found while mode&AllowBinary was in effect.
+	binaryOnly := false
+	if IsLocalImport(path) {
+		pkga = "" // local imports have no installed path
+		if srcDir == "" {
+			return p, fmt.Errorf("import %q: import relative to unknown directory", path)
+		}
+		if !ctxt.isAbsPath(path) {
+			p.Dir = ctxt.joinPath(srcDir, path)
+		}
+		// p.Dir directory may or may not exist. Gather partial information first, check if it exists later.
+		// Determine canonical import path, if any.
+		// Exclude results where the import path would include /testdata/.
+		inTestdata := func(sub string) bool {
+			return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata"
+		}
+		if ctxt.GOROOT != "" {
+			root := ctxt.joinPath(ctxt.GOROOT, "src")
+			if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) {
+				p.Goroot = true
+				p.ImportPath = sub
+				p.Root = ctxt.GOROOT
+				setPkga() // p.ImportPath changed
+				goto Found
+			}
+		}
+		all := ctxt.gopath()
+		for i, root := range all {
+			rootsrc := ctxt.joinPath(root, "src")
+			if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok && !inTestdata(sub) {
+				// We found a potential import path for dir,
+				// but check that using it wouldn't find something
+				// else first.
+				if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" {
+					if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) {
+						p.ConflictDir = dir
+						goto Found
+					}
+				}
+				for _, earlyRoot := range all[:i] {
+					if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
+						p.ConflictDir = dir
+						goto Found
+					}
+				}
+
+				// sub would not name some other directory instead of this one.
+				// Record it.
+				p.ImportPath = sub
+				p.Root = root
+				setPkga() // p.ImportPath changed
+				goto Found
+			}
+		}
+		// It's okay that we didn't find a root containing dir.
+		// Keep going with the information we have.
+	} else {
+		if strings.HasPrefix(path, "/") {
+			return p, fmt.Errorf("import %q: cannot import absolute path", path)
+		}
+
+		// Prefer module-aware lookup via the go command when applicable;
+		// errNoModules means "fall through to GOPATH/GOROOT lookup".
+		if err := ctxt.importGo(p, path, srcDir, mode); err == nil {
+			goto Found
+		} else if err != errNoModules {
+			return p, err
+		}
+
+		gopath := ctxt.gopath() // needed twice below; avoid computing many times
+
+		// tried records the location of unsuccessful package lookups
+		var tried struct {
+			vendor []string
+			goroot string
+			gopath []string
+		}
+
+		// Vendor directories get first chance to satisfy import.
+		if mode&IgnoreVendor == 0 && srcDir != "" {
+			searchVendor := func(root string, isGoroot bool) bool {
+				sub, ok := ctxt.hasSubdir(root, srcDir)
+				if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") {
+					return false
+				}
+				// Walk upward from srcDir, trying each ancestor's vendor dir.
+				for {
+					vendor := ctxt.joinPath(root, sub, "vendor")
+					if ctxt.isDir(vendor) {
+						dir := ctxt.joinPath(vendor, path)
+						if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) {
+							p.Dir = dir
+							p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/")
+							p.Goroot = isGoroot
+							p.Root = root
+							setPkga() // p.ImportPath changed
+							return true
+						}
+						tried.vendor = append(tried.vendor, dir)
+					}
+					i := strings.LastIndex(sub, "/")
+					if i < 0 {
+						break
+					}
+					sub = sub[:i]
+				}
+				return false
+			}
+			if ctxt.Compiler != "gccgo" && searchVendor(ctxt.GOROOT, true) {
+				goto Found
+			}
+			for _, root := range gopath {
+				if searchVendor(root, false) {
+					goto Found
+				}
+			}
+		}
+
+		// Determine directory from import path.
+		if ctxt.GOROOT != "" {
+			// If the package path starts with "vendor/", only search GOROOT before
+			// GOPATH if the importer is also within GOROOT. That way, if the user has
+			// vendored in a package that is subsequently included in the standard
+			// distribution, they'll continue to pick up their own vendored copy.
+			gorootFirst := srcDir == "" || !strings.HasPrefix(path, "vendor/")
+			if !gorootFirst {
+				_, gorootFirst = ctxt.hasSubdir(ctxt.GOROOT, srcDir)
+			}
+			if gorootFirst {
+				dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
+				if ctxt.Compiler != "gccgo" {
+					isDir := ctxt.isDir(dir)
+					binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
+					if isDir || binaryOnly {
+						p.Dir = dir
+						p.Goroot = true
+						p.Root = ctxt.GOROOT
+						goto Found
+					}
+				}
+				tried.goroot = dir
+			}
+		}
+		for _, root := range gopath {
+			dir := ctxt.joinPath(root, "src", path)
+			isDir := ctxt.isDir(dir)
+			binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga))
+			if isDir || binaryOnly {
+				p.Dir = dir
+				p.Root = root
+				goto Found
+			}
+			tried.gopath = append(tried.gopath, dir)
+		}
+
+		// If we tried GOPATH first due to a "vendor/" prefix, fall back to GOPATH.
+		// That way, the user can still get useful results from 'go list' for
+		// standard-vendored paths passed on the command line.
+		if ctxt.GOROOT != "" && tried.goroot == "" {
+			dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
+			if ctxt.Compiler != "gccgo" {
+				isDir := ctxt.isDir(dir)
+				binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
+				if isDir || binaryOnly {
+					p.Dir = dir
+					p.Goroot = true
+					p.Root = ctxt.GOROOT
+					goto Found
+				}
+			}
+			tried.goroot = dir
+		}
+
+		// package was not found
+		var paths []string
+		format := "\t%s (vendor tree)"
+		for _, dir := range tried.vendor {
+			paths = append(paths, fmt.Sprintf(format, dir))
+			format = "\t%s"
+		}
+		if tried.goroot != "" {
+			paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot))
+		} else {
+			paths = append(paths, "\t($GOROOT not set)")
+		}
+		format = "\t%s (from $GOPATH)"
+		for _, dir := range tried.gopath {
+			paths = append(paths, fmt.Sprintf(format, dir))
+			format = "\t%s"
+		}
+		if len(tried.gopath) == 0 {
+			paths = append(paths, "\t($GOPATH not set. For more details see: 'go help gopath')")
+		}
+		return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n"))
+	}
+
+Found:
+	// At this point p.Dir (and possibly p.Root/p.ImportPath) identify
+	// the package location; fill in the derived directory fields.
+	if p.Root != "" {
+		p.SrcRoot = ctxt.joinPath(p.Root, "src")
+		p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
+		p.BinDir = ctxt.joinPath(p.Root, "bin")
+		if pkga != "" {
+			p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
+			p.PkgObj = ctxt.joinPath(p.Root, pkga)
+		}
+	}
+
+	// If it's a local import path, by the time we get here, we still haven't checked
+	// that p.Dir directory exists. This is the right time to do that check.
+	// We can't do it earlier, because we want to gather partial information for the
+	// non-nil *Package returned when an error occurs.
+	// We need to do this before we return early on FindOnly flag.
+	if IsLocalImport(path) && !ctxt.isDir(p.Dir) {
+		if ctxt.Compiler == "gccgo" && p.Goroot {
+			// gccgo has no sources for GOROOT packages.
+			return p, nil
+		}
+
+		// package was not found
+		return p, fmt.Errorf("cannot find package %q in:\n\t%s", p.ImportPath, p.Dir)
+	}
+
+	if mode&FindOnly != 0 {
+		return p, pkgerr
+	}
+	if binaryOnly && (mode&AllowBinary) != 0 {
+		return p, pkgerr
+	}
+
+	if ctxt.Compiler == "gccgo" && p.Goroot {
+		// gccgo has no sources for GOROOT packages.
+		return p, nil
+	}
+
+	dirs, err := ctxt.readDir(p.Dir)
+	if err != nil {
+		return p, err
+	}
+
+	// badFile records a file with a problem: the first such error becomes
+	// the function's returned error, and each file is listed once in
+	// p.InvalidGoFiles.
+	var badGoError error
+	badFiles := make(map[string]bool)
+	badFile := func(name string, err error) {
+		if badGoError == nil {
+			badGoError = err
+		}
+		if !badFiles[name] {
+			p.InvalidGoFiles = append(p.InvalidGoFiles, name)
+			badFiles[name] = true
+		}
+	}
+
+	var Sfiles []string // files with ".S"(capital S)/.sx(capital s equivalent for case insensitive filesystems)
+	var firstFile, firstCommentFile string
+	embedPos := make(map[string][]token.Position)
+	testEmbedPos := make(map[string][]token.Position)
+	xTestEmbedPos := make(map[string][]token.Position)
+	importPos := make(map[string][]token.Position)
+	testImportPos := make(map[string][]token.Position)
+	xTestImportPos := make(map[string][]token.Position)
+	allTags := make(map[string]bool)
+	fset := token.NewFileSet()
+	for _, d := range dirs {
+		if d.IsDir() {
+			continue
+		}
+		if d.Mode()&fs.ModeSymlink != 0 {
+			if ctxt.isDir(ctxt.joinPath(p.Dir, d.Name())) {
+				// Symlinks to directories are not source files.
+				continue
+			}
+		}
+
+		name := d.Name()
+		ext := nameExt(name)
+
+		info, err := ctxt.matchFile(p.Dir, name, allTags, &p.BinaryOnly, fset)
+		if err != nil {
+			badFile(name, err)
+			continue
+		}
+		if info == nil {
+			// nil info (with nil error) means the file is excluded.
+			if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") {
+				// not due to build constraints - don't report
+			} else if ext == ".go" {
+				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+			} else if fileListForExt(p, ext) != nil {
+				p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
+			}
+			continue
+		}
+		data, filename := info.header, info.name
+
+		// Going to save the file. For non-Go files, can stop here.
+		switch ext {
+		case ".go":
+			// keep going
+		case ".S", ".sx":
+			// special case for cgo, handled at end
+			Sfiles = append(Sfiles, name)
+			continue
+		default:
+			if list := fileListForExt(p, ext); list != nil {
+				*list = append(*list, name)
+			}
+			continue
+		}
+
+		if info.parseErr != nil {
+			badFile(name, info.parseErr)
+			// Fall through: we might still have a partial AST in info.parsed,
+			// and we want to list files with parse errors anyway.
+		}
+
+		var pkg string
+		if info.parsed != nil {
+			pkg = info.parsed.Name.Name
+			if pkg == "documentation" {
+				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+				continue
+			}
+		}
+
+		isTest := strings.HasSuffix(name, "_test.go")
+		isXTest := false
+		if isTest && strings.HasSuffix(pkg, "_test") && p.Name != pkg {
+			isXTest = true
+			pkg = pkg[:len(pkg)-len("_test")]
+		}
+
+		if p.Name == "" {
+			p.Name = pkg
+			firstFile = name
+		} else if pkg != p.Name {
+			// TODO(#45999): The choice of p.Name is arbitrary based on file iteration
+			// order. Instead of resolving p.Name arbitrarily, we should clear out the
+			// existing name and mark the existing files as also invalid.
+			badFile(name, &MultiplePackageError{
+				Dir:      p.Dir,
+				Packages: []string{p.Name, pkg},
+				Files:    []string{firstFile, name},
+			})
+		}
+		// Grab the first package comment as docs, provided it is not from a test file.
+		if info.parsed != nil && info.parsed.Doc != nil && p.Doc == "" && !isTest && !isXTest {
+			p.Doc = doc.Synopsis(info.parsed.Doc.Text())
+		}
+
+		if mode&ImportComment != 0 {
+			qcom, line := findImportComment(data)
+			if line != 0 {
+				com, err := strconv.Unquote(qcom)
+				if err != nil {
+					badFile(name, fmt.Errorf("%s:%d: cannot parse import comment", filename, line))
+				} else if p.ImportComment == "" {
+					p.ImportComment = com
+					firstCommentFile = name
+				} else if p.ImportComment != com {
+					badFile(name, fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir))
+				}
+			}
+		}
+
+		// Record imports and information about cgo.
+		isCgo := false
+		for _, imp := range info.imports {
+			if imp.path == "C" {
+				if isTest {
+					badFile(name, fmt.Errorf("use of cgo in test %s not supported", filename))
+					continue
+				}
+				isCgo = true
+				if imp.doc != nil {
+					if err := ctxt.saveCgo(filename, p, imp.doc); err != nil {
+						badFile(name, err)
+					}
+				}
+			}
+		}
+
+		// Route this file's name, imports, and embeds to the right lists
+		// depending on cgo/test/xtest classification.
+		var fileList *[]string
+		var importMap, embedMap map[string][]token.Position
+		switch {
+		case isCgo:
+			allTags["cgo"] = true
+			if ctxt.CgoEnabled {
+				fileList = &p.CgoFiles
+				importMap = importPos
+				embedMap = embedPos
+			} else {
+				// Ignore imports and embeds from cgo files if cgo is disabled.
+				fileList = &p.IgnoredGoFiles
+			}
+		case isXTest:
+			fileList = &p.XTestGoFiles
+			importMap = xTestImportPos
+			embedMap = xTestEmbedPos
+		case isTest:
+			fileList = &p.TestGoFiles
+			importMap = testImportPos
+			embedMap = testEmbedPos
+		default:
+			fileList = &p.GoFiles
+			importMap = importPos
+			embedMap = embedPos
+		}
+		*fileList = append(*fileList, name)
+		if importMap != nil {
+			for _, imp := range info.imports {
+				importMap[imp.path] = append(importMap[imp.path], fset.Position(imp.pos))
+			}
+		}
+		if embedMap != nil {
+			for _, emb := range info.embeds {
+				embedMap[emb.pattern] = append(embedMap[emb.pattern], emb.pos)
+			}
+		}
+	}
+
+	for tag := range allTags {
+		p.AllTags = append(p.AllTags, tag)
+	}
+	sort.Strings(p.AllTags)
+
+	p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
+	p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
+	p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)
+
+	p.Imports, p.ImportPos = cleanDecls(importPos)
+	p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
+	p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)
+
+	// add the .S/.sx files only if we are using cgo
+	// (which means gcc will compile them).
+	// The standard assemblers expect .s files.
+	if len(p.CgoFiles) > 0 {
+		p.SFiles = append(p.SFiles, Sfiles...)
+		sort.Strings(p.SFiles)
+	} else {
+		p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
+		sort.Strings(p.IgnoredOtherFiles)
+	}
+
+	if badGoError != nil {
+		return p, badGoError
+	}
+	if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+		return p, &NoGoError{p.Dir}
+	}
+	return p, pkgerr
+}
+
+// fileListForExt returns a pointer to the list in p that records source
+// files with the given extension (leading dot included), or nil if the
+// extension does not denote a known non-Go source-file kind.
+func fileListForExt(p *Package, ext string) *[]string {
+	switch ext {
+	case ".c":
+		return &p.CFiles
+	case ".cc", ".cpp", ".cxx":
+		return &p.CXXFiles
+	case ".m":
+		return &p.MFiles
+	case ".h", ".hh", ".hpp", ".hxx":
+		return &p.HFiles
+	case ".f", ".F", ".for", ".f90":
+		return &p.FFiles
+	case ".s", ".S", ".sx":
+		return &p.SFiles
+	case ".swig":
+		return &p.SwigFiles
+	case ".swigcxx":
+		return &p.SwigCXXFiles
+	case ".syso":
+		return &p.SysoFiles
+	}
+	return nil
+}
+
+// uniq returns a sorted copy of list with duplicate entries removed.
+// A nil list is returned as nil; the input slice is never modified.
+func uniq(list []string) []string {
+	if list == nil {
+		return nil
+	}
+	sorted := make([]string, len(list))
+	copy(sorted, list)
+	sort.Strings(sorted)
+	// Compact in place over the sorted copy: keep an element only when
+	// it differs from the previously kept one.
+	kept := sorted[:0]
+	for _, s := range sorted {
+		if len(kept) == 0 || kept[len(kept)-1] != s {
+			kept = append(kept, s)
+		}
+	}
+	return kept
+}
+
+// errNoModules is the sentinel returned by importGo when the go command
+// should not be used for the lookup (see importGo below).
+var errNoModules = errors.New("not using modules")
+
+// importGo checks whether it can use the go command to find the directory for path.
+// If using the go command is not appropriate, importGo returns errNoModules.
+// Otherwise, importGo tries using the go command and reports whether that succeeded.
+// Using the go command lets build.Import and build.Context.Import find code
+// in Go modules. In the long term we want tools to use go/packages (currently golang.org/x/tools/go/packages),
+// which will also use the go command.
+// Invoking the go command here is not very efficient in that it computes information
+// about the requested package and all dependencies and then only reports about the requested package.
+// Then we reinvoke it for every dependency. But this is still better than not working at all.
+// See golang.org/issue/26504.
+func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode) error {
+	// To invoke the go command,
+	// we must not being doing special things like AllowBinary or IgnoreVendor,
+	// and all the file system callbacks must be nil (we're meant to use the local file system).
+	if mode&AllowBinary != 0 || mode&IgnoreVendor != 0 ||
+		ctxt.JoinPath != nil || ctxt.SplitPathList != nil || ctxt.IsAbsPath != nil || ctxt.IsDir != nil || ctxt.HasSubdir != nil || ctxt.ReadDir != nil || ctxt.OpenFile != nil || !equal(ctxt.ReleaseTags, defaultReleaseTags) {
+		return errNoModules
+	}
+
+	// Predict whether module aware mode is enabled by checking the value of
+	// GO111MODULE and looking for a go.mod file in the source directory or
+	// one of its parents. Running 'go env GOMOD' in the source directory would
+	// give a canonical answer, but we'd prefer not to execute another command.
+	go111Module := os.Getenv("GO111MODULE")
+	switch go111Module {
+	case "off":
+		return errNoModules
+	default: // "", "on", "auto", anything else
+		// Maybe use modules.
+	}
+
+	if srcDir != "" {
+		var absSrcDir string
+		if filepath.IsAbs(srcDir) {
+			absSrcDir = srcDir
+		} else if ctxt.Dir != "" {
+			return fmt.Errorf("go/build: Dir is non-empty, so relative srcDir is not allowed: %v", srcDir)
+		} else {
+			// Find the absolute source directory. hasSubdir does not handle
+			// relative paths (and can't because the callbacks don't support this).
+			var err error
+			absSrcDir, err = filepath.Abs(srcDir)
+			if err != nil {
+				return errNoModules
+			}
+		}
+
+		// If the source directory is in GOROOT, then the in-process code works fine
+		// and we should keep using it. Moreover, the 'go list' approach below doesn't
+		// take standard-library vendoring into account and will fail.
+		if _, ok := ctxt.hasSubdir(filepath.Join(ctxt.GOROOT, "src"), absSrcDir); ok {
+			return errNoModules
+		}
+	}
+
+	// For efficiency, if path is a standard library package, let the usual lookup code handle it.
+	if ctxt.GOROOT != "" {
+		dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
+		if ctxt.isDir(dir) {
+			return errNoModules
+		}
+	}
+
+	// If GO111MODULE=auto, look to see if there is a go.mod.
+	// Since go1.13, it doesn't matter if we're inside GOPATH.
+	if go111Module == "auto" {
+		var (
+			parent string
+			err    error
+		)
+		if ctxt.Dir == "" {
+			parent, err = os.Getwd()
+			if err != nil {
+				// A nonexistent working directory can't be in a module.
+				return errNoModules
+			}
+		} else {
+			parent, err = filepath.Abs(ctxt.Dir)
+			if err != nil {
+				// If the caller passed a bogus Dir explicitly, that's materially
+				// different from not having modules enabled.
+				return err
+			}
+		}
+		// Walk up toward the filesystem root looking for a readable go.mod.
+		for {
+			if f, err := ctxt.openFile(ctxt.joinPath(parent, "go.mod")); err == nil {
+				buf := make([]byte, 100)
+				_, err := f.Read(buf)
+				f.Close()
+				if err == nil || err == io.EOF {
+					// go.mod exists and is readable (is a file, not a directory).
+					break
+				}
+			}
+			d := filepath.Dir(parent)
+			if len(d) >= len(parent) {
+				return errNoModules // reached top of file system, no go.mod
+			}
+			parent = d
+		}
+	}
+
+	// -e makes 'go list' report per-package problems via the {{.Error}}
+	// template field rather than failing outright.
+	cmd := exec.Command("go", "list", "-e", "-compiler="+ctxt.Compiler, "-tags="+strings.Join(ctxt.BuildTags, ","), "-installsuffix="+ctxt.InstallSuffix, "-f={{.Dir}}\n{{.ImportPath}}\n{{.Root}}\n{{.Goroot}}\n{{if .Error}}{{.Error}}{{end}}\n", "--", path)
+
+	if ctxt.Dir != "" {
+		cmd.Dir = ctxt.Dir
+	}
+
+	var stdout, stderr strings.Builder
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	cgo := "0"
+	if ctxt.CgoEnabled {
+		cgo = "1"
+	}
+	cmd.Env = append(os.Environ(),
+		"GOOS="+ctxt.GOOS,
+		"GOARCH="+ctxt.GOARCH,
+		"GOROOT="+ctxt.GOROOT,
+		"GOPATH="+ctxt.GOPATH,
+		"CGO_ENABLED="+cgo,
+	)
+
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("go/build: go list %s: %v\n%s\n", path, err, stderr.String())
+	}
+
+	// Output is the five newline-separated template fields above.
+	f := strings.SplitN(stdout.String(), "\n", 5)
+	if len(f) != 5 {
+		return fmt.Errorf("go/build: importGo %s: unexpected output:\n%s\n", path, stdout.String())
+	}
+	dir := f[0]
+	errStr := strings.TrimSpace(f[4])
+	if errStr != "" && dir == "" {
+		// If 'go list' could not locate the package (dir is empty),
+		// return the same error that 'go list' reported.
+		return errors.New(errStr)
+	}
+
+	// If 'go list' did locate the package, ignore the error.
+	// It was probably related to loading source files, and we'll
+	// encounter it ourselves shortly if the FindOnly flag isn't set.
+	p.Dir = dir
+	p.ImportPath = f[1]
+	p.Root = f[2]
+	p.Goroot = f[3] == "true"
+	return nil
+}
+
+// equal reports whether x and y have the same length and the same
+// elements in the same order.
+func equal(x, y []string) bool {
+	if len(x) != len(y) {
+		return false
+	}
+	for i := range x {
+		if x[i] != y[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// hasGoFiles reports whether dir contains any files with names ending in .go.
+// For a vendor check we must exclude directories that contain no .go files.
+// Otherwise it is not possible to vendor just a/b/c and still import the
+// non-vendored a/b. See golang.org/issue/13832.
+func hasGoFiles(ctxt *Context, dir string) bool {
+	// A readDir error is treated the same as an empty directory.
+	entries, _ := ctxt.readDir(dir)
+	for _, entry := range entries {
+		if entry.IsDir() {
+			continue
+		}
+		if strings.HasSuffix(entry.Name(), ".go") {
+			return true
+		}
+	}
+	return false
+}
+
+// findImportComment looks for an import comment — `package x // import "path"` —
+// in the file header data. It returns the still-quoted path and the 1-based
+// line number on which the comment appears, or "", 0 if there is none.
+// (The caller unquotes the result with strconv.Unquote.)
+func findImportComment(data []byte) (s string, line int) {
+	// expect keyword package
+	word, data := parseWord(data)
+	if string(word) != "package" {
+		return "", 0
+	}
+
+	// expect package name
+	_, data = parseWord(data)
+
+	// now ready for import comment, a // or /* */ comment
+	// beginning and ending on the current line.
+	for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
+		data = data[1:]
+	}
+
+	var comment []byte
+	switch {
+	case bytes.HasPrefix(data, slashSlash):
+		comment, _, _ = bytesCut(data[2:], newline)
+	case bytes.HasPrefix(data, slashStar):
+		var ok bool
+		comment, _, ok = bytesCut(data[2:], starSlash)
+		if !ok {
+			// malformed comment
+			return "", 0
+		}
+		if bytes.Contains(comment, newline) {
+			return "", 0
+		}
+	}
+	comment = bytes.TrimSpace(comment)
+
+	// split comment into `import`, `"pkg"`
+	word, arg := parseWord(comment)
+	if string(word) != "import" {
+		return "", 0
+	}
+
+	// arg aliases data's backing array, so the capacity difference is
+	// arg's byte offset within data; counting newlines before that
+	// offset yields the comment's line number.
+	line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
+	return strings.TrimSpace(string(arg)), line
+}
+
+// Byte-string tokens used when scanning raw file headers for comments.
+var (
+	slashSlash = []byte("//")
+	slashStar  = []byte("/*")
+	starSlash  = []byte("*/")
+	newline    = []byte("\n")
+)
+
+// skipSpaceOrComment returns data with any leading whitespace and any
+// leading // or /* */ comments removed. It returns nil when a comment
+// is left unterminated at the end of data.
+func skipSpaceOrComment(data []byte) []byte {
+	for len(data) > 0 {
+		switch {
+		case data[0] == ' ' || data[0] == '\t' || data[0] == '\r' || data[0] == '\n':
+			data = data[1:]
+		case bytes.HasPrefix(data, slashSlash):
+			// Line comment: skip through the terminating newline.
+			nl := bytes.IndexByte(data, '\n')
+			if nl < 0 {
+				return nil
+			}
+			data = data[nl+1:]
+		case bytes.HasPrefix(data, slashStar):
+			// General comment: skip through the closing */.
+			end := bytes.Index(data[2:], starSlash)
+			if end < 0 {
+				return nil
+			}
+			data = data[2+end+2:]
+		default:
+			// First byte starts neither whitespace nor a comment: done.
+			return data
+		}
+	}
+	return data
+}
+
+// parseWord skips any leading spaces or comments in data
+// and then parses the beginning of data as an identifier or keyword,
+// returning that word and what remains after the word.
+// It returns nil, nil when no word is present.
+func parseWord(data []byte) (word, rest []byte) {
+	data = skipSpaceOrComment(data)
+
+	// Advance rest past the leading run of word characters
+	// (letters, digits, underscore).
+	rest = data
+	for len(rest) > 0 {
+		r, size := utf8.DecodeRune(rest)
+		if !unicode.IsLetter(r) && (r < '0' || r > '9') && r != '_' {
+			break
+		}
+		rest = rest[size:]
+	}
+
+	word = data[:len(data)-len(rest)]
+	if len(word) == 0 {
+		return nil, nil
+	}
+	return word, rest
+}
+
+// MatchFile reports whether the file with the given name in the given directory
+// matches the context and would be included in a Package created by ImportDir
+// of that directory.
+//
+// MatchFile considers the name of the file and may use ctxt.OpenFile to
+// read some or all of the file's content.
+func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) {
+	// allTags, binaryOnly, and fset are nil: only the include/exclude
+	// decision is needed, not tags, binary-only state, or positions.
+	info, err := ctxt.matchFile(dir, name, nil, nil, nil)
+	return info != nil, err
+}
+
+// dummyPkg is a throwaway Package used by matchFile to query
+// fileListForExt without allocating a Package per call.
+var dummyPkg Package
+
+// fileInfo records information learned about a file included in a build.
+type fileInfo struct {
+	name     string         // full name including dir
+	header   []byte         // header section: through imports for .go files, leading comments otherwise
+	fset     *token.FileSet // file set for positions; may be nil (see MatchFile)
+	parsed   *ast.File      // parse result; may be a partial AST when parseErr != nil
+	parseErr error          // error from parsing, if any
+	imports  []fileImport   // imports found in the file
+	embeds   []fileEmbed    // //go:embed patterns found in the file
+	embedErr error          // error related to //go:embed handling, if any (set outside this chunk — confirm in readGoInfo)
+}
+
+// fileImport records a single import declaration found in a file.
+type fileImport struct {
+	path string            // import path, unquoted (e.g. "C" for cgo)
+	pos  token.Pos         // position of the import path
+	doc  *ast.CommentGroup // doc comment on the import; carries cgo directives for import "C"
+}
+
+// fileEmbed records a single //go:embed pattern and where it appeared.
+type fileEmbed struct {
+	pattern string         // the embed pattern text
+	pos     token.Position // position of the pattern
+}
+
+// matchFile determines whether the file with the given name in the given directory
+// should be included in the package being constructed.
+// If the file should be included, matchFile returns a non-nil *fileInfo (and a nil error).
+// Non-nil errors are reserved for unexpected problems.
+//
+// If name denotes a Go program, matchFile reads until the end of the
+// imports and returns that section of the file in the fileInfo's header field,
+// even though it only considers text until the first non-comment
+// for +build lines.
+//
+// If allTags is non-nil, matchFile records any encountered build tag
+// by setting allTags[tag] = true.
+//
+// If binaryOnly is non-nil, matchFile sets *binaryOnly when a
+// //go:binary-only-package comment is seen (but not for _test.go or
+// non-Go files).
+func (ctxt *Context) matchFile(dir, name string, allTags map[string]bool, binaryOnly *bool, fset *token.FileSet) (*fileInfo, error) {
+	// Files starting with _ or . are never part of a build.
+	if strings.HasPrefix(name, "_") ||
+		strings.HasPrefix(name, ".") {
+		return nil, nil
+	}
+
+	// ext includes the dot; a name with no dot yields ext == "".
+	i := strings.LastIndex(name, ".")
+	if i < 0 {
+		i = len(name)
+	}
+	ext := name[i:]
+
+	if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
+		return nil, nil
+	}
+
+	if ext != ".go" && fileListForExt(&dummyPkg, ext) == nil {
+		// skip
+		return nil, nil
+	}
+
+	info := &fileInfo{name: ctxt.joinPath(dir, name), fset: fset}
+	if ext == ".syso" {
+		// binary, no reading
+		return info, nil
+	}
+
+	f, err := ctxt.openFile(info.name)
+	if err != nil {
+		return nil, err
+	}
+
+	if strings.HasSuffix(name, ".go") {
+		err = readGoInfo(f, info)
+		if strings.HasSuffix(name, "_test.go") {
+			binaryOnly = nil // ignore //go:binary-only-package comments in _test.go files
+		}
+	} else {
+		binaryOnly = nil // ignore //go:binary-only-package comments in non-Go sources
+		info.header, err = readComments(f)
+	}
+	f.Close()
+	if err != nil {
+		return nil, fmt.Errorf("read %s: %v", info.name, err)
+	}
+
+	// Look for +build comments to accept or reject the file.
+	ok, sawBinaryOnly, err := ctxt.shouldBuild(info.header, allTags)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %v", name, err)
+	}
+	if !ok && !ctxt.UseAllFiles {
+		return nil, nil
+	}
+
+	if binaryOnly != nil && sawBinaryOnly {
+		*binaryOnly = true
+	}
+
+	return info, nil
+}
+
+// cleanDecls returns the keys of m in sorted order, along with m itself.
+func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Position) {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys, m
+}
+
+// Import is shorthand for Default.Import.
+// It uses the Default build context.
+func Import(path, srcDir string, mode ImportMode) (*Package, error) {
+	return Default.Import(path, srcDir, mode)
+}
+
+// ImportDir is shorthand for Default.ImportDir.
+// It uses the Default build context.
+func ImportDir(dir string, mode ImportMode) (*Package, error) {
+	return Default.ImportDir(dir, mode)
+}
+
+// Tokens and errors used while scanning build-constraint comments.
+var (
+	bSlashSlash = []byte(slashSlash)
+	bStarSlash  = []byte(starSlash)
+	bSlashStar  = []byte(slashStar)
+	bPlusBuild  = []byte("+build")
+
+	goBuildComment = []byte("//go:build")
+
+	errGoBuildWithoutBuild = errors.New("//go:build comment without // +build comment")
+	errMultipleGoBuild     = errors.New("multiple //go:build comments")
+)
+
+// isGoBuildComment reports whether line is a //go:build constraint
+// comment: the exact //go:build prefix followed by nothing or by
+// whitespace (so //go:buildfoo does not match).
+func isGoBuildComment(line []byte) bool {
+	if !bytes.HasPrefix(line, goBuildComment) {
+		return false
+	}
+	rest := bytes.TrimSpace(line)[len(goBuildComment):]
+	if len(rest) == 0 {
+		return true
+	}
+	// A non-empty remainder must begin with whitespace, i.e. trimming
+	// must shorten it; otherwise the prefix is part of a longer word.
+	return len(bytes.TrimSpace(rest)) < len(rest)
+}
+
+// Special comment denoting a binary-only package.
+// See https://golang.org/design/2775-binary-only-packages
+// for more about the design of binary-only packages.
+var binaryOnlyComment = []byte("//go:binary-only-package")
+
+// shouldBuild reports whether it is okay to use this file,
+// The rule is that in the file's leading run of // comments
+// and blank lines, which must be followed by a blank line
+// (to avoid including a Go package clause doc comment),
+// lines beginning with '// +build' are taken as build directives.
+//
+// The file is accepted only if each such line lists something
+// matching the file. For example:
+//
+//	// +build windows linux
+//
+// marks the file as applicable only on Windows and Linux.
+//
+// For each build tag it consults, shouldBuild sets allTags[tag] = true.
+//
+// shouldBuild reports whether the file should be built
+// and whether a //go:binary-only-package comment was found.
+func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool) (shouldBuild, binaryOnly bool, err error) {
+	// Identify leading run of // comments and blank lines,
+	// which must be followed by a blank line.
+	// Also identify any //go:build comments.
+	content, goBuild, sawBinaryOnly, err := parseFileHeader(content)
+	if err != nil {
+		return false, false, err
+	}
+
+	// If //go:build line is present, it controls.
+	// Otherwise fall back to +build processing.
+	switch {
+	case goBuild != nil:
+		x, err := constraint.Parse(string(goBuild))
+		if err != nil {
+			return false, false, fmt.Errorf("parsing //go:build line: %v", err)
+		}
+		shouldBuild = ctxt.eval(x, allTags)
+
+	default:
+		shouldBuild = true
+		p := content
+		for len(p) > 0 {
+			line := p
+			if i := bytes.IndexByte(line, '\n'); i >= 0 {
+				line, p = line[:i], p[i+1:]
+			} else {
+				p = p[len(p):]
+			}
+			line = bytes.TrimSpace(line)
+			if !bytes.HasPrefix(line, bSlashSlash) || !bytes.Contains(line, bPlusBuild) {
+				continue
+			}
+			text := string(line)
+			if !constraint.IsPlusBuild(text) {
+				continue
+			}
+			if x, err := constraint.Parse(text); err == nil {
+				if !ctxt.eval(x, allTags) {
+					shouldBuild = false
+				}
+			}
+		}
+	}
+
+	return shouldBuild, sawBinaryOnly, nil
+}
+
+// parseFileHeader scans the leading run of blank lines and comments in a
+// Go source file - the only region where build constraints and
+// //go:binary-only-package comments are recognized. It returns:
+//
+//	trimmed: content truncated at the last header blank line, the region
+//		within which // +build lines are honored;
+//	goBuild: the single //go:build line found, if any;
+//	sawBinaryOnly: whether a //go:binary-only-package comment was seen;
+//	err: errMultipleGoBuild if more than one //go:build line appears.
+func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) {
+	end := 0
+	p := content
+	ended := false       // found non-blank, non-// line, so stopped accepting // +build lines
+	inSlashStar := false // in /* */ comment
+
+Lines:
+	for len(p) > 0 {
+		line := p
+		if i := bytes.IndexByte(line, '\n'); i >= 0 {
+			line, p = line[:i], p[i+1:]
+		} else {
+			p = p[len(p):]
+		}
+		line = bytes.TrimSpace(line)
+		if len(line) == 0 && !ended { // Blank line
+			// Remember position of most recent blank line.
+			// When we find the first non-blank, non-// line,
+			// this "end" position marks the latest file position
+			// where a // +build line can appear.
+			// (It must appear _before_ a blank line before the non-blank, non-// line.
+			// Yes, that's confusing, which is part of why we moved to //go:build lines.)
+			// Note that ended==false here means that inSlashStar==false,
+			// since seeing a /* would have set ended==true.
+			end = len(content) - len(p)
+			continue Lines
+		}
+		if !bytes.HasPrefix(line, slashSlash) { // Not comment line
+			ended = true
+		}
+
+		if !inSlashStar && isGoBuildComment(line) {
+			if goBuild != nil {
+				return nil, nil, false, errMultipleGoBuild
+			}
+			goBuild = line
+		}
+		if !inSlashStar && bytes.Equal(line, binaryOnlyComment) {
+			sawBinaryOnly = true
+		}
+
+	Comments:
+		for len(line) > 0 {
+			if inSlashStar {
+				if i := bytes.Index(line, starSlash); i >= 0 {
+					inSlashStar = false
+					line = bytes.TrimSpace(line[i+len(starSlash):])
+					continue Comments
+				}
+				continue Lines
+			}
+			if bytes.HasPrefix(line, bSlashSlash) {
+				continue Lines
+			}
+			if bytes.HasPrefix(line, bSlashStar) {
+				inSlashStar = true
+				line = bytes.TrimSpace(line[len(bSlashStar):])
+				continue Comments
+			}
+			// Found non-comment text.
+			break Lines
+		}
+	}
+
+	return content[:end], goBuild, sawBinaryOnly, nil
+}
+
+// saveCgo saves the information from the #cgo lines in the import "C" comment.
+// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives
+// that affect the way cgo's C code is built.
+func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
+	text := cg.Text()
+	for _, line := range strings.Split(text, "\n") {
+		orig := line
+
+		// Line is
+		//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
+		//
+		line = strings.TrimSpace(line)
+		if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+			continue
+		}
+
+		// Split at colon.
+		line, argstr, ok := stringsCut(strings.TrimSpace(line[4:]), ":")
+		if !ok {
+			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+		}
+
+		// Parse GOOS/GOARCH stuff.
+		f := strings.Fields(line)
+		if len(f) < 1 {
+			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+		}
+
+		cond, verb := f[:len(f)-1], f[len(f)-1]
+		if len(cond) > 0 {
+			ok := false
+			for _, c := range cond {
+				if ctxt.matchAuto(c, nil) {
+					ok = true
+					break
+				}
+			}
+			if !ok {
+				continue
+			}
+		}
+
+		args, err := splitQuoted(argstr)
+		if err != nil {
+			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+		}
+		for i, arg := range args {
+			if arg, ok = expandSrcDir(arg, di.Dir); !ok {
+				return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
+			}
+			args[i] = arg
+		}
+
+		switch verb {
+		case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS":
+			// Change relative paths to absolute.
+			ctxt.makePathsAbsolute(args, di.Dir)
+		}
+
+		switch verb {
+		case "CFLAGS":
+			di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
+		case "CPPFLAGS":
+			di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
+		case "CXXFLAGS":
+			di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
+		case "FFLAGS":
+			di.CgoFFLAGS = append(di.CgoFFLAGS, args...)
+		case "LDFLAGS":
+			di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
+		case "pkg-config":
+			di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
+		default:
+			return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
+		}
+	}
+	return nil
+}
+
+// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
+// the result is safe for the shell.
+// It reports false when any expanded piece fails safeCgoName or
+// when the final result is empty.
+func expandSrcDir(str string, srcdir string) (string, bool) {
+	// "\" delimited paths cause safeCgoName to fail
+	// so convert native paths with a different delimiter
+	// to "/" before starting (eg: on windows).
+	srcdir = filepath.ToSlash(srcdir)
+
+	chunks := strings.Split(str, "${SRCDIR}")
+	if len(chunks) < 2 {
+		// No ${SRCDIR} occurrences: str itself must be shell-safe.
+		return str, safeCgoName(str)
+	}
+	ok := true
+	for _, chunk := range chunks {
+		ok = ok && (chunk == "" || safeCgoName(chunk))
+	}
+	ok = ok && (srcdir == "" || safeCgoName(srcdir))
+	res := strings.Join(chunks, srcdir)
+	return res, ok && res != ""
+}
+
+// makePathsAbsolute looks for compiler options that take paths and
+// makes them absolute. We do this because through the 1.8 release we
+// ran the compiler in the package directory, so any relative -I or -L
+// options would be relative to that directory. In 1.9 we changed to
+// running the compiler in the build directory, to get consistent
+// build results (issue #19964). To keep builds working, we change any
+// relative -I or -L options to be absolute.
+//
+// Using filepath.IsAbs and filepath.Join here means the results will be
+// different on different systems, but that's OK: -I and -L options are
+// inherently system-dependent.
+func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) {
+	nextPath := false
+	for i, arg := range args {
+		if nextPath {
+			if !filepath.IsAbs(arg) {
+				args[i] = filepath.Join(srcDir, arg)
+			}
+			nextPath = false
+		} else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") {
+			if len(arg) == 2 {
+				nextPath = true
+			} else {
+				if !filepath.IsAbs(arg[2:]) {
+					args[i] = arg[:2] + filepath.Join(srcDir, arg[2:])
+				}
+			}
+		}
+	}
+}
+
+// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
+// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
+// See golang.org/issue/6038.
+// The @ is for OS X. See golang.org/issue/13720.
+// The % is for Jenkins. See golang.org/issue/16959.
+// The ! is because module paths may use them. See golang.org/issue/26716.
+// The ~ and ^ are for sr.ht. See golang.org/issue/32260.
+const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%! ~^"
+
+// safeCgoName reports whether s is safe to use in a #cgo directive:
+// non-empty, with every ASCII byte drawn from safeString.
+// Bytes >= utf8.RuneSelf (non-ASCII) are accepted unchecked.
+func safeCgoName(s string) bool {
+	if s == "" {
+		return false
+	}
+	for i := 0; i < len(s); i++ {
+		if c := s[i]; c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+//	a b:"c d" 'e''f'  "g\""
+//
+// Would be parsed as:
+//
+//	[]string{"a", "b:c d", "ef", `g"`}
+func splitQuoted(s string) (r []string, err error) {
+	var args []string
+	// arg is scratch space for the argument being accumulated;
+	// len(s) runes is always sufficient.
+	arg := make([]rune, len(s))
+	escaped := false
+	quoted := false // current arg contained a (possibly empty) quoted region
+	quote := '\x00' // active quote character, or NUL when outside quotes
+	i := 0
+	for _, rune := range s {
+		switch {
+		case escaped:
+			escaped = false
+		case rune == '\\':
+			escaped = true
+			continue
+		case quote != '\x00':
+			if rune == quote {
+				quote = '\x00'
+				continue
+			}
+		case rune == '"' || rune == '\'':
+			quoted = true
+			quote = rune
+			continue
+		case unicode.IsSpace(rune):
+			// quoted allows an empty argument such as '' to be emitted.
+			if quoted || i > 0 {
+				quoted = false
+				args = append(args, string(arg[:i]))
+				i = 0
+			}
+			continue
+		}
+		arg[i] = rune
+		i++
+	}
+	if quoted || i > 0 {
+		args = append(args, string(arg[:i]))
+	}
+	if quote != 0 {
+		err = errors.New("unclosed quote")
+	} else if escaped {
+		err = errors.New("unfinished escaping")
+	}
+	return args, err
+}
+
+// matchAuto interprets text as either a +build or //go:build expression (whichever works),
+// reporting whether the expression matches the build context.
+//
+// matchAuto is only used for testing of tag evaluation
+// and in #cgo lines, which accept either syntax.
+func (ctxt *Context) matchAuto(text string, allTags map[string]bool) bool {
+	// Operators and parentheses occur only in //go:build syntax;
+	// bare +build tag lists never contain them.
+	if strings.ContainsAny(text, "&|()") {
+		text = "//go:build " + text
+	} else {
+		text = "// +build " + text
+	}
+	x, err := constraint.Parse(text)
+	if err != nil {
+		return false
+	}
+	return ctxt.eval(x, allTags)
+}
+
+// eval evaluates the build-constraint expression x against this context,
+// consulting (and recording into allTags) each tag via matchTag.
+func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
+	return x.Eval(func(tag string) bool { return ctxt.matchTag(tag, allTags) })
+}
+
+// matchTag reports whether the name is one of:
+//
+//	cgo (if cgo is enabled)
+//	$GOOS
+//	$GOARCH
+//	ctxt.Compiler
+//	linux (if GOOS = android)
+//	solaris (if GOOS = illumos)
+//	tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
+//
+// It records all consulted tags in allTags.
+func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
+	if allTags != nil {
+		allTags[name] = true
+	}
+
+	// special tags
+	if ctxt.CgoEnabled && name == "cgo" {
+		return true
+	}
+	if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
+		return true
+	}
+	if ctxt.GOOS == "android" && name == "linux" {
+		return true
+	}
+	if ctxt.GOOS == "illumos" && name == "solaris" {
+		return true
+	}
+	if ctxt.GOOS == "ios" && name == "darwin" {
+		return true
+	}
+
+	// other tags
+	for _, tag := range ctxt.BuildTags {
+		if tag == name {
+			return true
+		}
+	}
+	for _, tag := range ctxt.ToolTags {
+		if tag == name {
+			return true
+		}
+	}
+	for _, tag := range ctxt.ReleaseTags {
+		if tag == name {
+			return true
+		}
+	}
+
+	return false
+}
+
+// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
+// suffix which does not match the current system.
+// The recognized name formats are:
+//
+//	name_$(GOOS).*
+//	name_$(GOARCH).*
+//	name_$(GOOS)_$(GOARCH).*
+//	name_$(GOOS)_test.*
+//	name_$(GOARCH)_test.*
+//	name_$(GOOS)_$(GOARCH)_test.*
+//
+// Exceptions:
+// if GOOS=android, then files with GOOS=linux are also matched.
+// if GOOS=illumos, then files with GOOS=solaris are also matched.
+// if GOOS=ios, then files with GOOS=darwin are also matched.
+func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
+	name, _, _ = stringsCut(name, ".")
+
+	// Before Go 1.4, a file called "linux.go" would be equivalent to having a
+	// build tag "linux" in that file. For Go 1.4 and beyond, we require this
+	// auto-tagging to apply only to files with a non-empty prefix, so
+	// "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
+	// systems, such as android, to arrive without breaking existing code with
+	// innocuous source code in "android.go". The easiest fix: cut everything
+	// in the name before the initial _.
+	i := strings.Index(name, "_")
+	if i < 0 {
+		return true
+	}
+	name = name[i:] // ignore everything before first _
+
+	l := strings.Split(name, "_")
+	if n := len(l); n > 0 && l[n-1] == "test" {
+		l = l[:n-1]
+	}
+	n := len(l)
+	if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
+		return ctxt.matchTag(l[n-1], allTags) && ctxt.matchTag(l[n-2], allTags)
+	}
+	if n >= 1 && (knownOS[l[n-1]] || knownArch[l[n-1]]) {
+		return ctxt.matchTag(l[n-1], allTags)
+	}
+	return true
+}
+
+// knownOS and knownArch are the sets of recognized GOOS and GOARCH
+// values, populated at init time from the space-separated goosList
+// and goarchList.
+var knownOS = make(map[string]bool)
+var knownArch = make(map[string]bool)
+
+func init() {
+	for _, v := range strings.Fields(goosList) {
+		knownOS[v] = true
+	}
+	for _, v := range strings.Fields(goarchList) {
+		knownArch[v] = true
+	}
+}
+
+// ToolDir is the directory containing build tools.
+var ToolDir = getToolDir()
+
+// IsLocalImport reports whether the import path is
+// a local import path, like ".", "..", "./foo", or "../foo".
+func IsLocalImport(path string) bool {
+	return path == "." || path == ".." ||
+		strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
+}
+
+// ArchChar returns "?" and an error.
+// In earlier versions of Go, the returned string was used to derive
+// the compiler and linker tool names, the default object file suffix,
+// and the default linker output name. As of Go 1.5, those strings
+// no longer vary by architecture; they are compile, link, .o, and a.out, respectively.
+func ArchChar(goarch string) (string, error) {
+	return "?", errors.New("architecture letter no longer used")
+}
+
+// bytesCut is a local copy of bytes.Cut (new in Go 1.18), kept here so
+// this backport builds with earlier Go releases.
+func bytesCut(s, sep []byte) (before, after []byte, found bool) {
+	if i := bytes.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, nil, false
+}
+
+// stringsCut is a local copy of strings.Cut (new in Go 1.18), kept here
+// so this backport builds with earlier Go releases.
+func stringsCut(s, sep string) (before, after string, found bool) {
+	if i := strings.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, "", false
+}
diff --git a/internal/backport/go/build/doc.go b/internal/backport/go/build/doc.go
new file mode 100644
index 0000000..262f670
--- /dev/null
+++ b/internal/backport/go/build/doc.go
@@ -0,0 +1,97 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package build gathers information about Go packages.
+//
+// # Go Path
+//
+// The Go path is a list of directory trees containing Go source code.
+// It is consulted to resolve imports that cannot be found in the standard
+// Go tree. The default path is the value of the GOPATH environment
+// variable, interpreted as a path list appropriate to the operating system
+// (on Unix, the variable is a colon-separated string;
+// on Windows, a semicolon-separated string;
+// on Plan 9, a list).
+//
+// Each directory listed in the Go path must have a prescribed structure:
+//
+// The src/ directory holds source code. The path below 'src' determines
+// the import path or executable name.
+//
+// The pkg/ directory holds installed package objects.
+// As in the Go tree, each target operating system and
+// architecture pair has its own subdirectory of pkg
+// (pkg/GOOS_GOARCH).
+//
+// If DIR is a directory listed in the Go path, a package with
+// source in DIR/src/foo/bar can be imported as "foo/bar" and
+// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
+// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
+//
+// The bin/ directory holds compiled commands.
+// Each command is named for its source directory, but only
+// using the final element, not the entire path. That is, the
+// command with source in DIR/src/foo/quux is installed into
+// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
+// so that you can add DIR/bin to your PATH to get at the
+// installed commands.
+//
+// Here's an example directory layout:
+//
+//	GOPATH=/home/user/gocode
+//
+//	/home/user/gocode/
+//	    src/
+//	        foo/
+//	            bar/               (go code in package bar)
+//	                x.go
+//	            quux/              (go code in package main)
+//	                y.go
+//	    bin/
+//	        quux                   (installed command)
+//	    pkg/
+//	        linux_amd64/
+//	            foo/
+//	                bar.a          (installed package object)
+//
+// # Build Constraints
+//
+// A build constraint, also known as a build tag, is a line comment that begins
+//
+//	//go:build
+//
+// that lists the conditions under which a file should be included in the
+// package. Build constraints may also be part of a file's name
+// (for example, source_windows.go will only be included if the target
+// operating system is windows).
+//
+// See 'go help buildconstraint'
+// (https://golang.org/cmd/go/#hdr-Build_constraints) for details.
+//
+// # Binary-Only Packages
+//
+// In Go 1.12 and earlier, it was possible to distribute packages in binary
+// form without including the source code used for compiling the package.
+// The package was distributed with a source file not excluded by build
+// constraints and containing a "//go:binary-only-package" comment. Like a
+// build constraint, this comment appeared at the top of a file, preceded
+// only by blank lines and other line comments and with a blank line
+// following the comment, to separate it from the package documentation.
+// Unlike build constraints, this comment is only recognized in non-test
+// Go source files.
+//
+// The minimal source code for a binary-only package was therefore:
+//
+//	//go:binary-only-package
+//
+//	package mypkg
+//
+// The source code could include additional Go code. That code was never
+// compiled but would be processed by tools like godoc and might be useful
+// as end-user documentation.
+//
+// "go build" and other commands no longer support binary-only-packages.
+// Import and ImportDir will still set the BinaryOnly flag in packages
+// containing these comments for use in tools and error messages.
+package build
diff --git a/internal/backport/go/build/gc.go b/internal/backport/go/build/gc.go
new file mode 100644
index 0000000..e16e186
--- /dev/null
+++ b/internal/backport/go/build/gc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+package build
+
+import (
+	"path/filepath"
+	"runtime"
+)
+
+// getToolDir returns the default value of ToolDir.
+func getToolDir() string {
+	return filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
+}
diff --git a/internal/backport/go/build/gccgo.go b/internal/backport/go/build/gccgo.go
new file mode 100644
index 0000000..c8ec704
--- /dev/null
+++ b/internal/backport/go/build/gccgo.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo
+// +build gccgo
+
+package build
+
+import "runtime"
+
+// getToolDir returns the default value of ToolDir.
+func getToolDir() string {
+	return envOr("GCCGOTOOLDIR", runtime.GCCGOTOOLDIR)
+}
diff --git a/internal/backport/go/build/read.go b/internal/backport/go/build/read.go
new file mode 100644
index 0000000..e8fe9a0
--- /dev/null
+++ b/internal/backport/go/build/read.go
@@ -0,0 +1,578 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"io"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// An importReader holds the state for reading just the header of a
+// Go source file: the package clause and any import declarations.
+type importReader struct {
+	b    *bufio.Reader
+	buf  []byte // bytes read so far (the file "header"); appended to by readByte
+	peek byte   // lookahead byte from peekByte, 0 if none
+	err  error  // first sticky error, if any
+	eof  bool   // whether EOF has been reached
+	nerr int    // reads attempted after err was set; guards against loops
+	pos  token.Position // position of the next byte, maintained by readByteNoBuf
+}
+
+// bom is the UTF-8 byte order mark, stripped if it begins the file.
+var bom = []byte{0xef, 0xbb, 0xbf}
+
+func newImportReader(name string, r io.Reader) *importReader {
+	b := bufio.NewReader(r)
+	// Remove leading UTF-8 BOM.
+	// Per https://golang.org/ref/spec#Source_code_representation:
+	// a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
+	// if it is the first Unicode code point in the source text.
+	if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) {
+		b.Discard(3)
+	}
+	return &importReader{
+		b: b,
+		pos: token.Position{
+			Filename: name,
+			Line:     1,
+			Column:   1,
+		},
+	}
+}
+
+// isIdent reports whether c may appear in a Go identifier.
+// All non-ASCII bytes are accepted; precise validation of non-ASCII
+// identifiers is left to the parser.
+func isIdent(c byte) bool {
+	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf
+}
+
+var (
+	errSyntax = errors.New("syntax error")
+	errNUL    = errors.New("unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+	if r.err == nil {
+		r.err = errSyntax
+	}
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+	c, err := r.b.ReadByte()
+	if err == nil {
+		r.buf = append(r.buf, c)
+		if c == 0 {
+			err = errNUL
+		}
+	}
+	if err != nil {
+		if err == io.EOF {
+			r.eof = true
+		} else if r.err == nil {
+			r.err = err
+		}
+		c = 0
+	}
+	return c
+}
+
+// readByteNoBuf is like readByte but doesn't buffer the byte.
+// It exhausts r.buf before reading from r.b.
+func (r *importReader) readByteNoBuf() byte {
+	var c byte
+	var err error
+	if len(r.buf) > 0 {
+		c = r.buf[0]
+		r.buf = r.buf[1:]
+	} else {
+		c, err = r.b.ReadByte()
+		if err == nil && c == 0 {
+			err = errNUL
+		}
+	}
+
+	if err != nil {
+		if err == io.EOF {
+			r.eof = true
+		} else if r.err == nil {
+			r.err = err
+		}
+		return 0
+	}
+	r.pos.Offset++
+	if c == '\n' {
+		r.pos.Line++
+		r.pos.Column = 1
+	} else {
+		r.pos.Column++
+	}
+	return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+func (r *importReader) peekByte(skipSpace bool) byte {
+	if r.err != nil {
+		if r.nerr++; r.nerr > 10000 {
+			panic("go/build: import reader looping")
+		}
+		return 0
+	}
+
+	// Use r.peek as first input byte.
+	// Don't just return r.peek here: it might have been left by peekByte(false)
+	// and this might be peekByte(true).
+	c := r.peek
+	if c == 0 {
+		c = r.readByte()
+	}
+	for r.err == nil && !r.eof {
+		if skipSpace {
+			// For the purposes of this reader, semicolons are never necessary to
+			// understand the input and are treated as spaces.
+			switch c {
+			case ' ', '\f', '\t', '\r', '\n', ';':
+				c = r.readByte()
+				continue
+
+			case '/':
+				c = r.readByte()
+				if c == '/' {
+					for c != '\n' && r.err == nil && !r.eof {
+						c = r.readByte()
+					}
+				} else if c == '*' {
+					var c1 byte
+					for (c != '*' || c1 != '/') && r.err == nil {
+						if r.eof {
+							r.syntaxError()
+						}
+						c, c1 = c1, r.readByte()
+					}
+				} else {
+					r.syntaxError()
+				}
+				c = r.readByte()
+				continue
+			}
+		}
+		break
+	}
+	r.peek = c
+	return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+	c := r.peekByte(skipSpace)
+	r.peek = 0
+	return c
+}
+
+var goEmbed = []byte("go:embed")
+
+// findEmbed advances the input reader to the next //go:embed comment.
+// It reports whether it found a comment.
+// (Otherwise it found an error or EOF.)
+func (r *importReader) findEmbed(first bool) bool {
+	// The import block scan stopped after a non-space character,
+	// so the reader is not at the start of a line on the first call.
+	// After that, each //go:embed extraction leaves the reader
+	// at the end of a line.
+	startLine := !first
+	var c byte
+	for r.err == nil && !r.eof {
+		c = r.readByteNoBuf()
+	Reswitch:
+		switch c {
+		default:
+			startLine = false
+
+		case '\n':
+			startLine = true
+
+		case ' ', '\t':
+			// leave startLine alone
+
+		case '"':
+			startLine = false
+			for r.err == nil {
+				if r.eof {
+					r.syntaxError()
+				}
+				c = r.readByteNoBuf()
+				if c == '\\' {
+					r.readByteNoBuf()
+					if r.err != nil {
+						r.syntaxError()
+						return false
+					}
+					continue
+				}
+				if c == '"' {
+					c = r.readByteNoBuf()
+					goto Reswitch
+				}
+			}
+			goto Reswitch
+
+		case '`':
+			startLine = false
+			for r.err == nil {
+				if r.eof {
+					r.syntaxError()
+				}
+				c = r.readByteNoBuf()
+				if c == '`' {
+					c = r.readByteNoBuf()
+					goto Reswitch
+				}
+			}
+
+		case '\'':
+			startLine = false
+			for r.err == nil {
+				if r.eof {
+					r.syntaxError()
+				}
+				c = r.readByteNoBuf()
+				if c == '\\' {
+					r.readByteNoBuf()
+					if r.err != nil {
+						r.syntaxError()
+						return false
+					}
+					continue
+				}
+				if c == '\'' {
+					c = r.readByteNoBuf()
+					goto Reswitch
+				}
+			}
+
+		case '/':
+			c = r.readByteNoBuf()
+			switch c {
+			default:
+				startLine = false
+				goto Reswitch
+
+			case '*':
+				var c1 byte
+				for (c != '*' || c1 != '/') && r.err == nil {
+					if r.eof {
+						r.syntaxError()
+					}
+					c, c1 = c1, r.readByteNoBuf()
+				}
+				startLine = false
+
+			case '/':
+				if startLine {
+					// Try to read this as a //go:embed comment.
+					for i := range goEmbed {
+						c = r.readByteNoBuf()
+						if c != goEmbed[i] {
+							goto SkipSlashSlash
+						}
+					}
+					c = r.readByteNoBuf()
+					if c == ' ' || c == '\t' {
+						// Found one!
+						return true
+					}
+				}
+			SkipSlashSlash:
+				for c != '\n' && r.err == nil && !r.eof {
+					c = r.readByteNoBuf()
+				}
+				startLine = true
+			}
+		}
+	}
+	return false
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+	r.peekByte(true)
+	for i := 0; i < len(kw); i++ {
+		if r.nextByte(false) != kw[i] {
+			r.syntaxError()
+			return
+		}
+	}
+	if isIdent(r.peekByte(false)) {
+		r.syntaxError()
+	}
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+	c := r.peekByte(true)
+	if !isIdent(c) {
+		r.syntaxError()
+		return
+	}
+	for isIdent(r.peekByte(false)) {
+		r.peek = 0 // consume the peeked byte and keep scanning
+	}
+}
+
+// readString reads a quoted string literal from the input.
+// If an identifier is not present, readString records a syntax error.
+func (r *importReader) readString() {
+	switch r.nextByte(true) {
+	case '`':
+		for r.err == nil {
+			if r.nextByte(false) == '`' {
+				break
+			}
+			if r.eof {
+				r.syntaxError()
+			}
+		}
+	case '"':
+		for r.err == nil {
+			c := r.nextByte(false)
+			if c == '"' {
+				break
+			}
+			if r.eof || c == '\n' {
+				r.syntaxError()
+			}
+			if c == '\\' {
+				r.nextByte(false)
+			}
+		}
+	default:
+		r.syntaxError()
+	}
+}
+
+// readImport reads an import clause - optional identifier followed by quoted string -
+// from the input.
+func (r *importReader) readImport() {
+	c := r.peekByte(true)
+	if c == '.' {
+		r.peek = 0 // consume the '.' of a dot import
+	} else if isIdent(c) {
+		r.readIdent()
+	}
+	r.readString()
+}
+
+// readComments is like io.ReadAll, except that it only reads the leading
+// block of comments in the file.
+func readComments(f io.Reader) ([]byte, error) {
+	r := newImportReader("", f)
+	// peekByte(true) buffers leading spaces and comments into r.buf,
+	// stopping at the first significant byte.
+	r.peekByte(true)
+	if r.err == nil && !r.eof {
+		// Didn't reach EOF, so must have found a non-space byte. Remove it.
+		r.buf = r.buf[:len(r.buf)-1]
+	}
+	return r.buf, r.err
+}
+
+// readGoInfo expects a Go file as input and reads the file up to and including the import section.
+// It records what it learned in *info.
+// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
+// info.imports, info.embeds, and info.embedErr.
+//
+// It only returns an error if there are problems reading the file,
+// not for syntax errors in the file itself.
+func readGoInfo(f io.Reader, info *fileInfo) error {
+	r := newImportReader(info.name, f)
+
+	r.readKeyword("package")
+	r.readIdent()
+	for r.peekByte(true) == 'i' {
+		r.readKeyword("import")
+		if r.peekByte(true) == '(' {
+			r.nextByte(false)
+			for r.peekByte(true) != ')' && r.err == nil {
+				r.readImport()
+			}
+			r.nextByte(false)
+		} else {
+			r.readImport()
+		}
+	}
+
+	info.header = r.buf
+
+	// If we stopped successfully before EOF, we read a byte that told us we were done.
+	// Return all but that last byte, which would cause a syntax error if we let it through.
+	if r.err == nil && !r.eof {
+		info.header = r.buf[:len(r.buf)-1]
+	}
+
+	// If we stopped for a syntax error, consume the whole file so that
+	// we are sure we don't change the errors that go/parser returns.
+	if r.err == errSyntax {
+		r.err = nil
+		for r.err == nil && !r.eof {
+			r.readByte()
+		}
+		info.header = r.buf
+	}
+	if r.err != nil {
+		return r.err
+	}
+
+	if info.fset == nil {
+		return nil
+	}
+
+	// Parse file header & record imports.
+	info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments)
+	if info.parseErr != nil {
+		return nil
+	}
+
+	hasEmbed := false
+	for _, decl := range info.parsed.Decls {
+		d, ok := decl.(*ast.GenDecl)
+		if !ok {
+			continue
+		}
+		for _, dspec := range d.Specs {
+			spec, ok := dspec.(*ast.ImportSpec)
+			if !ok {
+				continue
+			}
+			quoted := spec.Path.Value
+			path, err := strconv.Unquote(quoted)
+			if err != nil {
+				return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
+			}
+			if path == "embed" {
+				hasEmbed = true
+			}
+
+			doc := spec.Doc
+			if doc == nil && len(d.Specs) == 1 {
+				doc = d.Doc
+			}
+			info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
+		}
+	}
+
+	// If the file imports "embed",
+	// we have to look for //go:embed comments
+	// in the remainder of the file.
+	// The compiler will enforce the mapping of comments to
+	// declared variables. We just need to know the patterns.
+	// If there were //go:embed comments earlier in the file
+	// (near the package statement or imports), the compiler
+	// will reject them. They can be (and have already been) ignored.
+	if hasEmbed {
+		var line []byte
+		for first := true; r.findEmbed(first); first = false {
+			line = line[:0]
+			pos := r.pos
+			for {
+				c := r.readByteNoBuf()
+				if c == '\n' || r.err != nil || r.eof {
+					break
+				}
+				line = append(line, c)
+			}
+			// Add args if line is well-formed.
+			// Ignore badly-formed lines - the compiler will report them when it finds them,
+			// and we can pretend they are not there to help go list succeed with what it knows.
+			embs, err := parseGoEmbed(string(line), pos)
+			if err == nil {
+				info.embeds = append(info.embeds, embs...)
+			}
+		}
+	}
+
+	return nil
+}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// This is based on a similar function in cmd/compile/internal/gc/noder.go;
+// this version calculates position information as well.
+func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) {
+	trimBytes := func(n int) {
+		pos.Offset += n
+		pos.Column += utf8.RuneCountInString(args[:n])
+		args = args[n:]
+	}
+	trimSpace := func() {
+		trim := strings.TrimLeftFunc(args, unicode.IsSpace)
+		trimBytes(len(args) - len(trim))
+	}
+
+	var list []fileEmbed
+	for trimSpace(); args != ""; trimSpace() {
+		var path string
+		pathPos := pos
+	Switch:
+		switch args[0] {
+		default:
+			i := len(args)
+			for j, c := range args {
+				if unicode.IsSpace(c) {
+					i = j
+					break
+				}
+			}
+			path = args[:i]
+			trimBytes(i)
+
+		case '`':
+			var ok bool
+			path, _, ok = stringsCut(args[1:], "`")
+			if !ok {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+			trimBytes(1 + len(path) + 1)
+
+		case '"':
+			i := 1
+			for ; i < len(args); i++ {
+				if args[i] == '\\' {
+					i++
+					continue
+				}
+				if args[i] == '"' {
+					q, err := strconv.Unquote(args[:i+1])
+					if err != nil {
+						return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+					}
+					path = q
+					trimBytes(i + 1)
+					break Switch
+				}
+			}
+			if i >= len(args) {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+		}
+
+		if args != "" {
+			r, _ := utf8.DecodeRuneInString(args)
+			if !unicode.IsSpace(r) {
+				return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+			}
+		}
+		list = append(list, fileEmbed{path, pathPos})
+	}
+	return list, nil
+}
diff --git a/internal/backport/go/build/read_test.go b/internal/backport/go/build/read_test.go
new file mode 100644
index 0000000..f15b153
--- /dev/null
+++ b/internal/backport/go/build/read_test.go
@@ -0,0 +1,360 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"testing"
+
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// quote is a back quote character, declared as a constant so it can be
+// spliced into the raw-string test inputs below.
+const quote = "`"
+
+type readTest struct {
+	// Test input contains ℙ where readGoInfo should stop.
+	in  string
+	err string // expected error substring, or "" for expected success
+}
+
+// readGoInfoTests holds inputs for which readGoInfo should consume exactly
+// the package clause and import declarations (the text before ℙ).
+var readGoInfoTests = []readTest{
+	{
+		`package p`,
+		"",
+	},
+	{
+		`package p; import "x"`,
+		"",
+	},
+	{
+		`package p; import . "x"`,
+		"",
+	},
+	{
+		`package p; import "x";ℙvar x = 1`,
+		"",
+	},
+	{
+		`package p
+
+		// comment
+
+		import "x"
+		import _ "x"
+		import a "x"
+
+		/* comment */
+
+		import (
+			"x" /* comment */
+			_ "x"
+			a "x" // comment
+			` + quote + `x` + quote + `
+			_ /*comment*/ ` + quote + `x` + quote + `
+			a ` + quote + `x` + quote + `
+		)
+		import (
+		)
+		import ()
+		import()import()import()
+		import();import();import()
+
+		ℙvar x = 1
+		`,
+		"",
+	},
+	{
+		"\ufeff𝔻" + `package p; import "x";ℙvar x = 1`,
+		"",
+	},
+}
+
+// readCommentsTests holds inputs for readComments, which should consume
+// only the leading comments (the text before ℙ, after any 𝔻-marked BOM).
+var readCommentsTests = []readTest{
+	{
+		`ℙpackage p`,
+		"",
+	},
+	{
+		`ℙpackage p; import "x"`,
+		"",
+	},
+	{
+		`ℙpackage p; import . "x"`,
+		"",
+	},
+	{
+		"\ufeff𝔻" + `ℙpackage p; import . "x"`,
+		"",
+	},
+	{
+		`// foo
+
+		/* bar */
+
+		/* quux */ // baz
+
+		/*/ zot */
+
+		// asdf
+		ℙHello, world`,
+		"",
+	},
+	{
+		"\ufeff𝔻" + `// foo
+
+		/* bar */
+
+		/* quux */ // baz
+
+		/*/ zot */
+
+		// asdf
+		ℙHello, world`,
+		"",
+	},
+}
+
+// testRead runs read over each test's input with the markers stripped.
+// ℙ marks where read is expected to stop; if 𝔻 is present, the text before
+// it (e.g. a BOM) is passed to read but excluded from the expected output.
+func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) {
+	for i, tt := range tests {
+		beforeP, afterP, _ := stringsCut1(tt.in, "ℙ")
+		in := beforeP + afterP
+		testOut := beforeP
+
+		if beforeD, afterD, ok := stringsCut1(beforeP, "𝔻"); ok {
+			in = beforeD + afterD + afterP
+			testOut = afterD
+		}
+
+		r := strings.NewReader(in)
+		buf, err := read(r)
+		if err != nil {
+			if tt.err == "" {
+				t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf))
+			} else if !strings.Contains(err.Error(), tt.err) {
+				t.Errorf("#%d: err=%q, expected %q", i, err, tt.err)
+			}
+			continue
+		}
+		if tt.err != "" {
+			t.Errorf("#%d: success, expected %q", i, tt.err)
+			continue
+		}
+
+		out := string(buf)
+		if out != testOut {
+			t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut)
+		}
+	}
+}
+
+// TestReadGoInfo checks the header (package clause plus imports)
+// captured by readGoInfo for each input.
+func TestReadGoInfo(t *testing.T) {
+	testRead(t, readGoInfoTests, func(r io.Reader) ([]byte, error) {
+		var info fileInfo
+		err := readGoInfo(r, &info)
+		return info.header, err
+	})
+}
+
+// TestReadComments checks the leading comment block captured by readComments.
+func TestReadComments(t *testing.T) {
+	testRead(t, readCommentsTests, readComments)
+}
+
+// readFailuresTests holds malformed inputs; err is the error substring
+// expected when syntax errors are being reported.
+var readFailuresTests = []readTest{
+	{
+		`package`,
+		"syntax error",
+	},
+	{
+		"package p\n\x00\nimport `math`\n",
+		"unexpected NUL in input",
+	},
+	{
+		`package p; import`,
+		"syntax error",
+	},
+	{
+		`package p; import "`,
+		"syntax error",
+	},
+	{
+		"package p; import ` \n\n",
+		"syntax error",
+	},
+	{
+		`package p; import "x`,
+		"syntax error",
+	},
+	{
+		`package p; import _`,
+		"syntax error",
+	},
+	{
+		`package p; import _ "`,
+		"syntax error",
+	},
+	{
+		`package p; import _ "x`,
+		"syntax error",
+	},
+	{
+		`package p; import .`,
+		"syntax error",
+	},
+	{
+		`package p; import . "`,
+		"syntax error",
+	},
+	{
+		`package p; import . "x`,
+		"syntax error",
+	},
+	{
+		`package p; import (`,
+		"syntax error",
+	},
+	{
+		`package p; import ("`,
+		"syntax error",
+	},
+	{
+		`package p; import ("x`,
+		"syntax error",
+	},
+	{
+		`package p; import ("x"`,
+		"syntax error",
+	},
+}
+
+func TestReadFailuresIgnored(t *testing.T) {
+	// Syntax errors should not be reported (false arg to readImports).
+	// Instead, entire file should be the output and no error.
+	// Convert tests not to return syntax errors.
+	// (NUL-in-input errors are reported regardless, so those cases keep tt.err.)
+	tests := make([]readTest, len(readFailuresTests))
+	copy(tests, readFailuresTests)
+	for i := range tests {
+		tt := &tests[i]
+		if !strings.Contains(tt.err, "NUL") {
+			tt.err = ""
+		}
+	}
+	testRead(t, tests, func(r io.Reader) ([]byte, error) {
+		var info fileInfo
+		err := readGoInfo(r, &info)
+		return info.header, err
+	})
+}
+
+// readEmbedTests pairs a source file with its expected //go:embed patterns,
+// one per line as "file:line:col:pattern"; TestReadEmbed normalizes the
+// whitespace in out before comparing, so the table may be indented freely.
+var readEmbedTests = []struct {
+	in, out string
+}{
+	{
+		"package p\n",
+		"",
+	},
+	{
+		"package p\nimport \"embed\"\nvar i int\n//go:embed x y z\nvar files embed.FS",
+		`test:4:12:x
+		 test:4:14:y
+		 test:4:16:z`,
+	},
+	{
+		"package p\nimport \"embed\"\nvar i int\n//go:embed x \"\\x79\" `z`\nvar files embed.FS",
+		`test:4:12:x
+		 test:4:14:y
+		 test:4:21:z`,
+	},
+	{
+		"package p\nimport \"embed\"\nvar i int\n//go:embed x y\n//go:embed z\nvar files embed.FS",
+		`test:4:12:x
+		 test:4:14:y
+		 test:5:12:z`,
+	},
+	{
+		"package p\nimport \"embed\"\nvar i int\n\t //go:embed x y\n\t //go:embed z\n\t var files embed.FS",
+		`test:4:14:x
+		 test:4:16:y
+		 test:5:14:z`,
+	},
+	{
+		"package p\nimport \"embed\"\n//go:embed x y z\nvar files embed.FS",
+		`test:3:12:x
+		 test:3:14:y
+		 test:3:16:z`,
+	},
+	{
+		"\ufeffpackage p\nimport \"embed\"\n//go:embed x y z\nvar files embed.FS",
+		`test:3:12:x
+		 test:3:14:y
+		 test:3:16:z`,
+	},
+	{
+		"package p\nimport \"embed\"\nvar s = \"/*\"\n//go:embed x\nvar files embed.FS",
+		`test:4:12:x`,
+	},
+	{
+		`package p
+		 import "embed"
+		 var s = "\"\\\\"
+		 //go:embed x
+		 var files embed.FS`,
+		`test:4:15:x`,
+	},
+	{
+		"package p\nimport \"embed\"\nvar s = `/*`\n//go:embed x\nvar files embed.FS",
+		`test:4:12:x`,
+	},
+	{
+		"package p\nimport \"embed\"\nvar s = z/ *y\n//go:embed pointer\nvar pointer embed.FS",
+		"test:4:12:pointer",
+	},
+	{
+		"package p\n//go:embed x y z\n", // no import, no scan
+		"",
+	},
+	{
+		"package p\n//go:embed x y z\nvar files embed.FS", // no import, no scan
+		"",
+	},
+	{
+		"\ufeffpackage p\n//go:embed x y z\nvar files embed.FS", // no import, no scan
+		"",
+	},
+}
+
+// TestReadEmbed checks the //go:embed patterns and positions recorded by
+// readGoInfo against the expected "file:line:col:pattern" lines.
+func TestReadEmbed(t *testing.T) {
+	fset := token.NewFileSet()
+	for i, tt := range readEmbedTests {
+		info := fileInfo{
+			name: "test",
+			fset: fset,
+		}
+		err := readGoInfo(strings.NewReader(tt.in), &info)
+		if err != nil {
+			t.Errorf("#%d: %v", i, err)
+			continue
+		}
+		b := &strings.Builder{}
+		sep := ""
+		for _, emb := range info.embeds {
+			fmt.Fprintf(b, "%s%v:%s", sep, emb.pos, emb.pattern)
+			sep = "\n"
+		}
+		got := b.String()
+		// Collapse whitespace in the expected value so table entries
+		// can be indented for readability.
+		want := strings.Join(strings.Fields(tt.out), "\n")
+		if got != want {
+			t.Errorf("#%d: embeds:\n%s\nwant:\n%s", i, got, want)
+		}
+	}
+}
+
+// stringsCut1 is a local copy of strings.Cut (added in Go 1.18) so this
+// backport builds on older toolchains.
+func stringsCut1(s, sep string) (before, after string, found bool) {
+	if i := strings.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, "", false
+}
diff --git a/internal/backport/go/build/syslist.go b/internal/backport/go/build/syslist.go
new file mode 100644
index 0000000..0f6e336
--- /dev/null
+++ b/internal/backport/go/build/syslist.go
@@ -0,0 +1,11 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+// List of past, present, and future known GOOS and GOARCH values.
+// Do not remove from this list, as these are used for go/build filename matching.
+
+// NOTE(review): each entry ends with a trailing space, presumably so
+// consumers can match on "name " exactly — confirm against the matching code.
+const goosList = "aix android darwin dragonfly freebsd hurd illumos ios js linux nacl netbsd openbsd plan9 solaris windows zos "
+const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be loong64 mips mipsle mips64 mips64le mips64p32 mips64p32le ppc ppc64 ppc64le riscv riscv64 s390 s390x sparc sparc64 wasm "
diff --git a/internal/backport/go/build/syslist_test.go b/internal/backport/go/build/syslist_test.go
new file mode 100644
index 0000000..2b7b4c7
--- /dev/null
+++ b/internal/backport/go/build/syslist_test.go
@@ -0,0 +1,62 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+	"runtime"
+	"testing"
+)
+
+// Host platform and a deliberately different platform, used to build
+// file names that should and should not match the current build context.
+var (
+	thisOS    = runtime.GOOS
+	thisArch  = runtime.GOARCH
+	otherOS   = anotherOS()
+	otherArch = anotherArch()
+)
+
+// anotherOS returns a known GOOS different from the host's
+// (treating darwin and ios as one family).
+func anotherOS() string {
+	if thisOS != "darwin" && thisOS != "ios" {
+		return "darwin"
+	}
+	return "linux"
+}
+
+// anotherArch returns a known GOARCH different from the host's.
+func anotherArch() string {
+	if thisArch != "amd64" {
+		return "amd64"
+	}
+	return "386"
+}
+
+type GoodFileTest struct {
+	name   string
+	result bool // whether the name should match the host OS/arch
+}
+
+var tests = []GoodFileTest{
+	{"file.go", true},
+	{"file.c", true},
+	{"file_foo.go", true},
+	{"file_" + thisArch + ".go", true},
+	{"file_" + otherArch + ".go", false},
+	{"file_" + thisOS + ".go", true},
+	{"file_" + otherOS + ".go", false},
+	{"file_" + thisOS + "_" + thisArch + ".go", true},
+	{"file_" + otherOS + "_" + thisArch + ".go", false},
+	{"file_" + thisOS + "_" + otherArch + ".go", false},
+	{"file_" + otherOS + "_" + otherArch + ".go", false},
+	{"file_foo_" + thisArch + ".go", true},
+	{"file_foo_" + otherArch + ".go", false},
+	{"file_" + thisOS + ".c", true},
+	{"file_" + otherOS + ".c", false},
+}
+
+// TestGoodOSArch checks goodOSArchFile's GOOS/GOARCH file-name filtering
+// against the host platform.
+func TestGoodOSArch(t *testing.T) {
+	for _, test := range tests {
+		if Default.goodOSArchFile(test.name, make(map[string]bool)) != test.result {
+			t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
+		}
+	}
+}
diff --git a/internal/backport/go/doc/Makefile b/internal/backport/go/doc/Makefile
new file mode 100644
index 0000000..ca4948f
--- /dev/null
+++ b/internal/backport/go/doc/Makefile
@@ -0,0 +1,7 @@
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Script to test heading detection heuristic
+headscan: headscan.go
+	go build headscan.go
diff --git a/internal/backport/go/doc/comment.go b/internal/backport/go/doc/comment.go
new file mode 100644
index 0000000..6060fcf
--- /dev/null
+++ b/internal/backport/go/doc/comment.go
@@ -0,0 +1,518 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Godoc comment extraction and comment -> HTML formatting.
+
+package doc
+
+import (
+	"bytes"
+	"io"
+	"regexp"
+	"strings"
+	"text/template" // for HTMLEscape
+	"unicode"
+	"unicode/utf8"
+)
+
+const (
+	ldquo = "&ldquo;" // HTML entity for “
+	rdquo = "&rdquo;" // HTML entity for ”
+	ulquo = "“"
+	urquo = "”"
+)
+
+var (
+	htmlQuoteReplacer    = strings.NewReplacer(ulquo, ldquo, urquo, rdquo)
+	unicodeQuoteReplacer = strings.NewReplacer("``", ulquo, "''", urquo)
+)
+
+// Escape comment text for HTML. If nice is set,
+// also turn `` into &ldquo; and '' into &rdquo;.
+func commentEscape(w io.Writer, text string, nice bool) {
+	if nice {
+		// In the first pass, we convert `` and '' into their unicode equivalents.
+		// This prevents them from being escaped in HTMLEscape.
+		text = convertQuotes(text)
+		var buf bytes.Buffer
+		template.HTMLEscape(&buf, []byte(text))
+		// Now we convert the unicode quotes to their HTML escaped entities to maintain old behavior.
+		// We need to use a temp buffer to read the string back and do the conversion,
+		// otherwise HTMLEscape will escape & to &amp;
+		htmlQuoteReplacer.WriteString(w, buf.String())
+		return
+	}
+	template.HTMLEscape(w, []byte(text))
+}
+
+// convertQuotes rewrites `` and '' as the Unicode quotes “ and ”.
+func convertQuotes(text string) string {
+	return unicodeQuoteReplacer.Replace(text)
+}
+
+const (
+	// Regexp for Go identifiers
+	identRx = `[\pL_][\pL_0-9]*`
+
+	// Regexp for URLs
+	// Match parens, and check later for balance - see #5043, #22285
+	// Match .,:;?! within path, but not at end - see #18139, #16565
+	// This excludes some rare yet valid urls ending in common punctuation
+	// in order to allow sentences ending in URLs.
+
+	// protocol (required) e.g. http
+	protoPart = `(https?|ftp|file|gopher|mailto|nntp)`
+	// host (required) e.g. www.example.com or [::1]:8080
+	hostPart = `([a-zA-Z0-9_@\-.\[\]:]+)`
+	// path+query+fragment (optional) e.g. /path/index.html?q=foo#bar
+	pathPart = `([.,:;?!]*[a-zA-Z0-9$'()*+&#=@~_/\-\[\]%])*`
+
+	urlRx = protoPart + `://` + hostPart + pathPart
+)
+
+// matchRx matches either a URL (first submatch) or a Go identifier (second).
+var matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
+
+// Pre-rendered HTML fragments emitted by ToHTML and emphasize.
+var (
+	html_a      = []byte(`<a href="`)
+	html_aq     = []byte(`">`)
+	html_enda   = []byte("</a>")
+	html_i      = []byte("<i>")
+	html_endi   = []byte("</i>")
+	html_p      = []byte("<p>\n")
+	html_endp   = []byte("</p>\n")
+	html_pre    = []byte("<pre>")
+	html_endpre = []byte("</pre>\n")
+	html_h      = []byte(`<h3 id="`)
+	html_hq     = []byte(`">`)
+	html_endh   = []byte("</h3>\n")
+)
+
+// Emphasize and escape a line of text for HTML. URLs are converted into links;
+// if the URL also appears in the words map, the link is taken from the map (if
+// the corresponding map value is the empty string, the URL is not converted
+// into a link). Go identifiers that appear in the words map are italicized; if
+// the corresponding map value is not the empty string, it is considered a URL
+// and the word is converted into a link. If nice is set, the remaining text's
+// appearance is improved where it makes sense (e.g., `` is turned into &ldquo;
+// and '' into &rdquo;).
+func emphasize(w io.Writer, line string, words map[string]string, nice bool) {
+	for {
+		m := matchRx.FindStringSubmatchIndex(line)
+		if m == nil {
+			break
+		}
+		// m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)
+
+		// write text before match
+		commentEscape(w, line[0:m[0]], nice)
+
+		// adjust match for URLs
+		match := line[m[0]:m[1]]
+		if strings.Contains(match, "://") {
+			m0, m1 := m[0], m[1]
+			for _, s := range []string{"()", "{}", "[]"} {
+				open, close := s[:1], s[1:] // E.g., "(" and ")"
+				// require opening parentheses before closing parentheses (#22285)
+				if i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) {
+					m1 = m0 + i
+					match = line[m0:m1]
+				}
+				// require balanced pairs of parentheses (#5043);
+				// the i < 10 bound avoids pathological trimming loops
+				for i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ {
+					m1 = strings.LastIndexAny(line[:m1], s)
+					match = line[m0:m1]
+				}
+			}
+			if m1 != m[1] {
+				// redo matching with shortened line for correct indices
+				m = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)])
+			}
+		}
+
+		// analyze match
+		url := ""
+		italics := false
+		if words != nil {
+			url, italics = words[match]
+		}
+		if m[2] >= 0 {
+			// match against first parenthesized sub-regexp; must be match against urlRx
+			if !italics {
+				// no alternative URL in words list, use match instead
+				url = match
+			}
+			italics = false // don't italicize URLs
+		}
+
+		// write match
+		if len(url) > 0 {
+			w.Write(html_a)
+			template.HTMLEscape(w, []byte(url))
+			w.Write(html_aq)
+		}
+		if italics {
+			w.Write(html_i)
+		}
+		commentEscape(w, match, nice)
+		if italics {
+			w.Write(html_endi)
+		}
+		if len(url) > 0 {
+			w.Write(html_enda)
+		}
+
+		// advance
+		line = line[m[1]:]
+	}
+	commentEscape(w, line, nice)
+}
+
+// indentLen returns the length of the leading run of spaces and tabs in s.
+func indentLen(s string) int {
+	i := 0
+	for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
+		i++
+	}
+	return i
+}
+
+// isBlank reports whether s is empty or consists of a single newline.
+func isBlank(s string) bool {
+	return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
+}
+
+// commonPrefix returns the longest common prefix of a and b.
+func commonPrefix(a, b string) string {
+	i := 0
+	for i < len(a) && i < len(b) && a[i] == b[i] {
+		i++
+	}
+	return a[0:i]
+}
+
+// unindent removes, in place, the maximum common whitespace prefix of
+// the non-blank lines of block.
+func unindent(block []string) {
+	if len(block) == 0 {
+		return
+	}
+
+	// compute maximum common white prefix
+	prefix := block[0][0:indentLen(block[0])]
+	for _, line := range block {
+		if !isBlank(line) {
+			prefix = commonPrefix(prefix, line[0:indentLen(line)])
+		}
+	}
+	n := len(prefix)
+
+	// remove
+	for i, line := range block {
+		if !isBlank(line) {
+			block[i] = line[n:]
+		}
+	}
+}
+
+// heading returns the trimmed line if it passes as a section heading;
+// otherwise it returns the empty string.
+func heading(line string) string {
+	line = strings.TrimSpace(line)
+	if len(line) == 0 {
+		return ""
+	}
+
+	// a heading must start with an uppercase letter
+	r, _ := utf8.DecodeRuneInString(line)
+	if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
+		return ""
+	}
+
+	// it must end in a letter or digit:
+	r, _ = utf8.DecodeLastRuneInString(line)
+	if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
+		return ""
+	}
+
+	// exclude lines with illegal characters. we allow "(),"
+	if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") {
+		return ""
+	}
+
+	// allow "'" for possessive "'s" only
+	for b := line; ; {
+		var ok bool
+		if _, b, ok = stringsCut(b, "'"); !ok {
+			break
+		}
+		if b != "s" && !strings.HasPrefix(b, "s ") {
+			return "" // ' not followed by s and then end-of-word
+		}
+	}
+
+	// allow "." when followed by non-space
+	for b := line; ; {
+		var ok bool
+		if _, b, ok = stringsCut(b, "."); !ok {
+			break
+		}
+		if b == "" || strings.HasPrefix(b, " ") {
+			return "" // not followed by non-space
+		}
+	}
+
+	return line
+}
+
+// op identifies the kind of a comment block.
+type op int
+
+const (
+	opPara op = iota // paragraph
+	opHead           // section heading
+	opPre            // preformatted (indented) section
+)
+
+// block is one paragraph, heading, or preformatted section of a comment.
+type block struct {
+	op    op
+	lines []string
+}
+
+var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`)
+
+// anchorID derives an HTML anchor ID from a heading line.
+func anchorID(line string) string {
+	// Add a "hdr-" prefix to avoid conflicting with IDs used for package symbols.
+	return "hdr-" + nonAlphaNumRx.ReplaceAllString(line, "_")
+}
+
+// ToHTML converts comment text to formatted HTML.
+// The comment was prepared by DocReader,
+// so it is known not to have leading, trailing blank lines
+// nor to have trailing spaces at the end of lines.
+// The comment markers have already been removed.
+//
+// Each span of unindented non-blank lines is converted into
+// a single paragraph. There is one exception to the rule: a span that
+// consists of a single line, is followed by another paragraph span,
+// begins with a capital letter, and contains no punctuation
+// other than parentheses and commas is formatted as a heading.
+//
+// A span of indented lines is converted into a <pre> block,
+// with the common indent prefix removed.
+//
+// URLs in the comment text are converted into links; if the URL also appears
+// in the words map, the link is taken from the map (if the corresponding map
+// value is the empty string, the URL is not converted into a link).
+//
+// A pair of (consecutive) backticks (`) is converted to a unicode left quote (“), and a pair of (consecutive)
+// single quotes (') is converted to a unicode right quote (”).
+//
+// Go identifiers that appear in the words map are italicized; if the corresponding
+// map value is not the empty string, it is considered a URL and the word is converted
+// into a link.
+func ToHTML(w io.Writer, text string, words map[string]string) {
+	for _, b := range blocks(text) {
+		switch b.op {
+		case opPara:
+			w.Write(html_p)
+			for _, line := range b.lines {
+				emphasize(w, line, words, true)
+			}
+			w.Write(html_endp)
+		case opHead:
+			w.Write(html_h)
+			id := ""
+			for _, line := range b.lines {
+				// The anchor ID comes from the first line of the heading.
+				if id == "" {
+					id = anchorID(line)
+					w.Write([]byte(id))
+					w.Write(html_hq)
+				}
+				commentEscape(w, line, true)
+			}
+			// Close the opening tag even if the heading had no lines.
+			if id == "" {
+				w.Write(html_hq)
+			}
+			w.Write(html_endh)
+		case opPre:
+			w.Write(html_pre)
+			for _, line := range b.lines {
+				emphasize(w, line, nil, false)
+			}
+			w.Write(html_endpre)
+		}
+	}
+}
+
+// blocks splits comment text into paragraph, heading, and preformatted
+// blocks, as described in the ToHTML documentation.
+func blocks(text string) []block {
+	var (
+		out  []block
+		para []string
+
+		lastWasBlank   = false
+		lastWasHeading = false
+	)
+
+	// close flushes the paragraph being accumulated, if any.
+	close := func() {
+		if para != nil {
+			out = append(out, block{opPara, para})
+			para = nil
+		}
+	}
+
+	lines := strings.SplitAfter(text, "\n")
+	unindent(lines)
+	for i := 0; i < len(lines); {
+		line := lines[i]
+		if isBlank(line) {
+			// close paragraph
+			close()
+			i++
+			lastWasBlank = true
+			continue
+		}
+		if indentLen(line) > 0 {
+			// close paragraph
+			close()
+
+			// count indented or blank lines
+			j := i + 1
+			for j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {
+				j++
+			}
+			// but not trailing blank lines
+			for j > i && isBlank(lines[j-1]) {
+				j--
+			}
+			pre := lines[i:j]
+			i = j
+
+			unindent(pre)
+
+			// put those lines in a pre block
+			out = append(out, block{opPre, pre})
+			lastWasHeading = false
+			continue
+		}
+
+		if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
+			isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
+			// current line is non-blank, surrounded by blank lines
+			// and the next non-blank line is not indented: this
+			// might be a heading.
+			if head := heading(line); head != "" {
+				close()
+				out = append(out, block{opHead, []string{head}})
+				i += 2
+				lastWasHeading = true
+				continue
+			}
+		}
+
+		// open paragraph
+		lastWasBlank = false
+		lastWasHeading = false
+		para = append(para, lines[i])
+		i++
+	}
+	close()
+
+	return out
+}
+
+// ToText prepares comment text for presentation in textual output.
+// It wraps paragraphs of text to width or fewer Unicode code points
+// and then prefixes each line with the indent. In preformatted sections
+// (such as program text), it prefixes each non-blank line with preIndent.
+//
+// A pair of (consecutive) backticks (`) is converted to a unicode left quote (“), and a pair of (consecutive)
+// single quotes (') is converted to a unicode right quote (”).
+func ToText(w io.Writer, text string, indent, preIndent string, width int) {
+	l := lineWrapper{
+		out:    w,
+		width:  width,
+		indent: indent,
+	}
+	for _, b := range blocks(text) {
+		switch b.op {
+		case opPara:
+			// l.write will add leading newline if required
+			for _, line := range b.lines {
+				line = convertQuotes(line)
+				l.write(line)
+			}
+			l.flush()
+		case opHead:
+			w.Write(nl)
+			for _, line := range b.lines {
+				line = convertQuotes(line)
+				l.write(line + "\n")
+			}
+			l.flush()
+		case opPre:
+			// Preformatted lines are emitted verbatim, not wrapped.
+			w.Write(nl)
+			for _, line := range b.lines {
+				if isBlank(line) {
+					w.Write([]byte("\n"))
+				} else {
+					w.Write([]byte(preIndent))
+					w.Write([]byte(line))
+				}
+			}
+		}
+	}
+}
+
+// lineWrapper greedily wraps words passed to write into lines of at most
+// width runes, each prefixed with indent.
+type lineWrapper struct {
+	out       io.Writer
+	printed   bool // whether any output has been produced yet
+	width     int
+	indent    string
+	n         int // rune count of the current line
+	pendSpace int // pending space to emit before the next word (0 or 1)
+}
+
+var nl = []byte("\n")
+var space = []byte(" ")
+var prefix = []byte("// ")
+
+// write appends the words of text to the output, wrapping as needed.
+// A wrapped continuation of a "//" comment line gets a "// " prefix.
+func (l *lineWrapper) write(text string) {
+	if l.n == 0 && l.printed {
+		l.out.Write(nl) // blank line before new paragraph
+	}
+	l.printed = true
+
+	needsPrefix := false
+	isComment := strings.HasPrefix(text, "//")
+	for _, f := range strings.Fields(text) {
+		w := utf8.RuneCountInString(f)
+		// wrap if line is too long
+		if l.n > 0 && l.n+l.pendSpace+w > l.width {
+			l.out.Write(nl)
+			l.n = 0
+			l.pendSpace = 0
+			needsPrefix = isComment && !strings.HasPrefix(f, "//")
+		}
+		if l.n == 0 {
+			l.out.Write([]byte(l.indent))
+		}
+		if needsPrefix {
+			l.out.Write(prefix)
+			needsPrefix = false
+		}
+		l.out.Write(space[:l.pendSpace])
+		l.out.Write([]byte(f))
+		l.n += l.pendSpace + w
+		l.pendSpace = 1
+	}
+}
+
+// flush terminates the current line, if one is in progress.
+func (l *lineWrapper) flush() {
+	if l.n == 0 {
+		return
+	}
+	l.out.Write(nl)
+	l.pendSpace = 0
+	l.n = 0
+}
+
+// stringsCut is a local copy of strings.Cut (added in Go 1.18) so this
+// backport builds on older toolchains.
+func stringsCut(s, sep string) (before, after string, found bool) {
+	if i := strings.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, "", false
+}
diff --git a/internal/backport/go/doc/comment_test.go b/internal/backport/go/doc/comment_test.go
new file mode 100644
index 0000000..6d1b209
--- /dev/null
+++ b/internal/backport/go/doc/comment_test.go
@@ -0,0 +1,247 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+	"bytes"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+// headingTests lists candidate lines and whether heading should accept them.
+var headingTests = []struct {
+	line string
+	ok   bool
+}{
+	{"Section", true},
+	{"A typical usage", true},
+	{"ΔΛΞ is Greek", true},
+	{"Foo 42", true},
+	{"", false},
+	{"section", false},
+	{"A typical usage:", false},
+	{"This code:", false},
+	{"δ is Greek", false},
+	{"Foo §", false},
+	{"Fermat's Last Sentence", true},
+	{"Fermat's", true},
+	{"'sX", false},
+	{"Ted 'Too' Bar", false},
+	{"Use n+m", false},
+	{"Scanning:", false},
+	{"N:M", false},
+}
+
+// TestIsHeading checks the heading-detection heuristic against headingTests.
+func TestIsHeading(t *testing.T) {
+	for _, tt := range headingTests {
+		if h := heading(tt.line); (len(h) > 0) != tt.ok {
+			t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok)
+		}
+	}
+}
+
+// blocksTests pairs comment text with the block structure expected from
+// blocks (out) and the text expected from ToText with indent ".   " and
+// preIndent "$\t" at width 40 (text).
+var blocksTests = []struct {
+	in   string
+	out  []block
+	text string
+}{
+	{
+		in: `Para 1.
+Para 1 line 2.
+
+Para 2.
+
+Section
+
+Para 3.
+
+	pre
+	pre1
+
+Para 4.
+
+	pre
+	pre1
+
+	pre2
+
+Para 5.
+
+
+	pre
+
+
+	pre1
+	pre2
+
+Para 6.
+	pre
+	pre2
+`,
+		out: []block{
+			{opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}},
+			{opPara, []string{"Para 2.\n"}},
+			{opHead, []string{"Section"}},
+			{opPara, []string{"Para 3.\n"}},
+			{opPre, []string{"pre\n", "pre1\n"}},
+			{opPara, []string{"Para 4.\n"}},
+			{opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}},
+			{opPara, []string{"Para 5.\n"}},
+			{opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}},
+			{opPara, []string{"Para 6.\n"}},
+			{opPre, []string{"pre\n", "pre2\n"}},
+		},
+		text: `.   Para 1. Para 1 line 2.
+
+.   Para 2.
+
+
+.   Section
+
+.   Para 3.
+
+$	pre
+$	pre1
+
+.   Para 4.
+
+$	pre
+$	pre1
+
+$	pre2
+
+.   Para 5.
+
+$	pre
+
+
+$	pre1
+$	pre2
+
+.   Para 6.
+
+$	pre
+$	pre2
+`,
+	},
+	{
+		in: "Para.\n\tshould not be ``escaped''",
+		out: []block{
+			{opPara, []string{"Para.\n"}},
+			{opPre, []string{"should not be ``escaped''"}},
+		},
+		text: ".   Para.\n\n$	should not be ``escaped''",
+	},
+	{
+		in: "// A very long line of 46 char for line wrapping.",
+		out: []block{
+			{opPara, []string{"// A very long line of 46 char for line wrapping."}},
+		},
+		text: `.   // A very long line of 46 char for line
+.   // wrapping.
+`,
+	},
+	{
+		in: `/* A very long line of 46 char for line wrapping.
+A very long line of 46 char for line wrapping. */`,
+		out: []block{
+			{opPara, []string{"/* A very long line of 46 char for line wrapping.\n", "A very long line of 46 char for line wrapping. */"}},
+		},
+		text: `.   /* A very long line of 46 char for line
+.   wrapping. A very long line of 46 char
+.   for line wrapping. */
+`,
+	},
+	{
+		in: `A line of 36 char for line wrapping.
+//Another line starting with //`,
+		out: []block{
+			{opPara, []string{"A line of 36 char for line wrapping.\n",
+				"//Another line starting with //"}},
+		},
+		text: `.   A line of 36 char for line wrapping.
+.   //Another line starting with //
+`,
+	},
+}
+
+// TestBlocks checks the block structure produced by blocks.
+func TestBlocks(t *testing.T) {
+	for i, tt := range blocksTests {
+		b := blocks(tt.in)
+		if !reflect.DeepEqual(b, tt.out) {
+			t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out)
+		}
+	}
+}
+
+// TestToText checks ToText's wrapped plain-text rendering.
+func TestToText(t *testing.T) {
+	var buf bytes.Buffer
+	for i, tt := range blocksTests {
+		ToText(&buf, tt.in, ".   ", "$\t", 40)
+		if have := buf.String(); have != tt.text {
+			t.Errorf("#%d: mismatch\nhave: %s\nwant: %s\nhave vs want:\n%q\n%q", i, have, tt.text, have, tt.text)
+		}
+		buf.Reset()
+	}
+}
+
+// emphasizeTests pairs input lines with the HTML expected from emphasize
+// (nil words map, nice=true), mostly exercising URL linkification.
+var emphasizeTests = []struct {
+	in, out string
+}{
+	{"", ""},
+	{"http://[::1]:8080/foo.txt", `<a href="http://[::1]:8080/foo.txt">http://[::1]:8080/foo.txt</a>`},
+	{"before (https://www.google.com) after", `before (<a href="https://www.google.com">https://www.google.com</a>) after`},
+	{"before https://www.google.com:30/x/y/z:b::c. After", `before <a href="https://www.google.com:30/x/y/z:b::c">https://www.google.com:30/x/y/z:b::c</a>. After`},
+	{"http://www.google.com/path/:;!-/?query=%34b#093124", `<a href="http://www.google.com/path/:;!-/?query=%34b#093124">http://www.google.com/path/:;!-/?query=%34b#093124</a>`},
+	{"http://www.google.com/path/:;!-/?query=%34bar#093124", `<a href="http://www.google.com/path/:;!-/?query=%34bar#093124">http://www.google.com/path/:;!-/?query=%34bar#093124</a>`},
+	{"http://www.google.com/index.html! After", `<a href="http://www.google.com/index.html">http://www.google.com/index.html</a>! After`},
+	{"http://www.google.com/", `<a href="http://www.google.com/">http://www.google.com/</a>`},
+	{"https://www.google.com/", `<a href="https://www.google.com/">https://www.google.com/</a>`},
+	{"http://www.google.com/path.", `<a href="http://www.google.com/path">http://www.google.com/path</a>.`},
+	{"http://en.wikipedia.org/wiki/Camellia_(cipher)", `<a href="http://en.wikipedia.org/wiki/Camellia_(cipher)">http://en.wikipedia.org/wiki/Camellia_(cipher)</a>`},
+	{"(http://www.google.com/)", `(<a href="http://www.google.com/">http://www.google.com/</a>)`},
+	{"http://gmail.com)", `<a href="http://gmail.com">http://gmail.com</a>)`},
+	{"((http://gmail.com))", `((<a href="http://gmail.com">http://gmail.com</a>))`},
+	{"http://gmail.com ((http://gmail.com)) ()", `<a href="http://gmail.com">http://gmail.com</a> ((<a href="http://gmail.com">http://gmail.com</a>)) ()`},
+	{"Foo bar http://example.com/ quux!", `Foo bar <a href="http://example.com/">http://example.com/</a> quux!`},
+	{"Hello http://example.com/%2f/ /world.", `Hello <a href="http://example.com/%2f/">http://example.com/%2f/</a> /world.`},
+	{"Lorem http: ipsum //host/path", "Lorem http: ipsum //host/path"},
+	{"javascript://is/not/linked", "javascript://is/not/linked"},
+	{"http://foo", `<a href="http://foo">http://foo</a>`},
+	{"art by [[https://www.example.com/person/][Person Name]]", `art by [[<a href="https://www.example.com/person/">https://www.example.com/person/</a>][Person Name]]`},
+	{"please visit (http://golang.org/)", `please visit (<a href="http://golang.org/">http://golang.org/</a>)`},
+	{"please visit http://golang.org/hello())", `please visit <a href="http://golang.org/hello()">http://golang.org/hello()</a>)`},
+	{"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `<a href="http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD">http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD</a>`},
+	{"https://foo.bar/bal/x(])", `<a href="https://foo.bar/bal/x(">https://foo.bar/bal/x(</a>])`}, // inner ] causes (]) to be cut off from URL
+	{"foo [ http://bar(])", `foo [ <a href="http://bar(">http://bar(</a>])`},                      // outer [ causes ]) to be cut off from URL
+}
+
+// TestEmphasize checks emphasize's HTML output against emphasizeTests.
+func TestEmphasize(t *testing.T) {
+	for i, tt := range emphasizeTests {
+		var buf bytes.Buffer
+		emphasize(&buf, tt.in, nil, true)
+		out := buf.String()
+		if out != tt.out {
+			t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
+		}
+	}
+}
+
+// TestCommentEscape checks commentEscape's quote conversion in nice mode.
+func TestCommentEscape(t *testing.T) {
+	commentTests := []struct {
+		in, out string
+	}{
+		{"typically invoked as ``go tool asm'',", "typically invoked as " + ldquo + "go tool asm" + rdquo + ","},
+		{"For more detail, run ``go help test'' and ``go help testflag''", "For more detail, run " + ldquo + "go help test" + rdquo + " and " + ldquo + "go help testflag" + rdquo},
+	}
+	for i, tt := range commentTests {
+		var buf strings.Builder
+		commentEscape(&buf, tt.in, true)
+		out := buf.String()
+		if out != tt.out {
+			t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
+		}
+	}
+}
diff --git a/internal/backport/go/doc/doc.go b/internal/backport/go/doc/doc.go
new file mode 100644
index 0000000..43246d0
--- /dev/null
+++ b/internal/backport/go/doc/doc.go
@@ -0,0 +1,220 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package doc extracts source code documentation from a Go AST.
+package doc
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+	"strings"
+)
+
+// Package is the documentation for an entire package.
+type Package struct {
+	Doc        string             // package documentation comment text
+	Name       string             // package name
+	ImportPath string             // import path of the package
+	Imports    []string           // sorted list of import paths seen in the package's files
+	Filenames  []string           // names of the files making up the package
+	Notes      map[string][]*Note // notes grouped by marker ("BUG", "TODO", ...); see Note
+
+	// Deprecated: For backward compatibility Bugs is still populated,
+	// but all new code should use Notes instead.
+	Bugs []string
+
+	// declarations
+	Consts []*Value
+	Types  []*Type
+	Vars   []*Value
+	Funcs  []*Func
+
+	// Examples is a sorted list of examples associated with
+	// the package. Examples are extracted from _test.go files
+	// provided to NewFromFiles.
+	Examples []*Example
+}
+
+// Value is the documentation for a (possibly grouped) var or const declaration.
+type Value struct {
+	Doc   string
+	Names []string // var or const names in declaration order
+	Decl  *ast.GenDecl
+
+	order int // source order of the declaration (internal bookkeeping used when sorting)
+}
+
+// Type is the documentation for a type declaration.
+type Type struct {
+	Doc  string       // associated documentation comment
+	Name string       // type name
+	Decl *ast.GenDecl // the declaration containing the type
+
+	// associated declarations
+	Consts  []*Value // sorted list of constants of (mostly) this type
+	Vars    []*Value // sorted list of variables of (mostly) this type
+	Funcs   []*Func  // sorted list of functions returning this type
+	Methods []*Func  // sorted list of methods (including embedded ones) of this type
+
+	// Examples is a sorted list of examples associated with
+	// this type. Examples are extracted from _test.go files
+	// provided to NewFromFiles.
+	Examples []*Example
+}
+
+// Func is the documentation for a func declaration.
+type Func struct {
+	Doc  string        // associated documentation comment
+	Name string        // function or method name
+	Decl *ast.FuncDecl // the declaration of the function
+
+	// methods
+	// (for functions, these fields have the respective zero value)
+	Recv  string // actual   receiver "T" or "*T"
+	Orig  string // original receiver "T" or "*T"
+	Level int    // embedding level; 0 means not embedded
+
+	// Examples is a sorted list of examples associated with this
+	// function or method. Examples are extracted from _test.go files
+	// provided to NewFromFiles.
+	Examples []*Example
+}
+
+// A Note represents a marked comment starting with "MARKER(uid): note body".
+// Any note with a marker of 2 or more upper case [A-Z] letters and a uid of
+// at least one character is recognized. The ":" following the uid is optional.
+// Notes are collected in the Package.Notes map indexed by the notes marker.
+type Note struct {
+	Pos, End token.Pos // position range of the comment containing the marker
+	UID      string    // uid found with the marker
+	Body     string    // note body text
+}
+
+// Mode values control the operation of New and NewFromFiles.
+// Mode is a bit set; values may be combined with the | operator.
+type Mode int
+
+const (
+	// AllDecls says to extract documentation for all package-level
+	// declarations, not just exported ones.
+	AllDecls Mode = 1 << iota
+
+	// AllMethods says to show all embedded methods, not just the ones of
+	// invisible (unexported) anonymous fields.
+	AllMethods
+
+	// PreserveAST says to leave the AST unmodified. Originally, pieces of
+	// the AST such as function bodies were nil-ed out to save memory in
+	// godoc, but not all programs want that behavior.
+	PreserveAST
+)
+
+// New computes the package documentation for the given package AST.
+// New takes ownership of the AST pkg and may edit or overwrite it.
+// To have the Examples fields populated, use NewFromFiles and include
+// the package's _test.go files.
+func New(pkg *ast.Package, importPath string, mode Mode) *Package {
+	// Collect documentation state via the (unexported) reader,
+	// then assemble the public result from it.
+	var r reader
+	r.readPackage(pkg, mode)
+	r.computeMethodSets()
+	r.cleanupTypes()
+	return &Package{
+		Doc:        r.doc,
+		Name:       pkg.Name,
+		ImportPath: importPath,
+		Imports:    sortedKeys(r.imports),
+		Filenames:  r.filenames,
+		Notes:      r.notes,
+		Bugs:       noteBodies(r.notes["BUG"]), // legacy field, derived from the "BUG" notes
+		Consts:     sortedValues(r.values, token.CONST),
+		Types:      sortedTypes(r.types, mode&AllMethods != 0),
+		Vars:       sortedValues(r.values, token.VAR),
+		Funcs:      sortedFuncs(r.funcs, true),
+	}
+}
+
+// NewFromFiles computes documentation for a package.
+//
+// The package is specified by a list of *ast.Files and corresponding
+// file set, which must not be nil.
+// NewFromFiles uses all provided files when computing documentation,
+// so it is the caller's responsibility to provide only the files that
+// match the desired build context. "go/build".Context.MatchFile can
+// be used for determining whether a file matches a build context with
+// the desired GOOS and GOARCH values, and other build constraints.
+// The import path of the package is specified by importPath.
+//
+// Examples found in _test.go files are associated with the corresponding
+// type, function, method, or the package, based on their name.
+// If the example has a suffix in its name, it is set in the
+// Example.Suffix field. Examples with malformed names are skipped.
+//
+// Optionally, a single extra argument of type Mode can be provided to
+// control low-level aspects of the documentation extraction behavior.
+//
+// NewFromFiles takes ownership of the AST files and may edit them,
+// unless the PreserveAST Mode bit is on.
+func NewFromFiles(fset *token.FileSet, files []*ast.File, importPath string, opts ...interface{}) (*Package, error) {
+	// Check for invalid API usage.
+	if fset == nil {
+		panic(fmt.Errorf("doc.NewFromFiles: no token.FileSet provided (fset == nil)"))
+	}
+	var mode Mode
+	switch len(opts) { // There can only be 0 or 1 options, so a simple switch works for now.
+	case 0:
+		// Nothing to do.
+	case 1:
+		m, ok := opts[0].(Mode)
+		if !ok {
+			panic(fmt.Errorf("doc.NewFromFiles: option argument type must be doc.Mode"))
+		}
+		mode = m
+	default:
+		panic(fmt.Errorf("doc.NewFromFiles: there must not be more than 1 option argument"))
+	}
+
+	// Collect .go and _test.go files, rejecting anything else.
+	var (
+		goFiles     = make(map[string]*ast.File)
+		testGoFiles []*ast.File // fed to Examples below; only _test.go files yield examples
+	)
+	for i := range files {
+		f := fset.File(files[i].Pos())
+		if f == nil {
+			return nil, fmt.Errorf("file files[%d] is not found in the provided file set", i)
+		}
+		switch name := f.Name(); {
+		case strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go"):
+			goFiles[name] = files[i]
+		case strings.HasSuffix(name, "_test.go"):
+			testGoFiles = append(testGoFiles, files[i])
+		default:
+			return nil, fmt.Errorf("file files[%d] filename %q does not have a .go extension", i, name)
+		}
+	}
+
+	// TODO(dmitshur,gri): A relatively high level call to ast.NewPackage with a simpleImporter
+	// ast.Importer implementation is made below. It might be possible to short-circuit and simplify.
+
+	// Compute package documentation.
+	pkg, _ := ast.NewPackage(fset, goFiles, simpleImporter, nil) // Ignore errors that can happen due to unresolved identifiers.
+	p := New(pkg, importPath, mode)
+	classifyExamples(p, Examples(testGoFiles...))
+	return p, nil
+}
+
+// simpleImporter returns a (dummy) package object named by the last path
+// component of the provided package path (as is the convention for packages).
+// This is sufficient to resolve package identifiers without doing an actual
+// import. It never returns an error.
+func simpleImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
+	pkg := imports[path]
+	if pkg == nil {
+		// Memoize in imports so repeated imports of the same path
+		// share one object.
+		// note that strings.LastIndex returns -1 if there is no "/"
+		pkg = ast.NewObj(ast.Pkg, path[strings.LastIndex(path, "/")+1:])
+		pkg.Data = ast.NewScope(nil) // required by ast.NewPackage for dot-import
+		imports[path] = pkg
+	}
+	return pkg, nil
+}
diff --git a/internal/backport/go/doc/doc_test.go b/internal/backport/go/doc/doc_test.go
new file mode 100644
index 0000000..a4a427a
--- /dev/null
+++ b/internal/backport/go/doc/doc_test.go
@@ -0,0 +1,246 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"testing"
+	"text/template"
+
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/printer"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// Command-line flags and fixtures shared by the tests in this file.
+var update = flag.Bool("update", false, "update golden (.out) files")
+var files = flag.String("files", "", "consider only Go test files matching this regular expression")
+
+const dataDir = "testdata"
+
+var templateTxt = readTemplate("template.txt")
+
+// readTemplate parses the named template from the testdata directory,
+// registering the formatting helpers used by the golden-file output.
+func readTemplate(filename string) *template.Template {
+	t := template.New(filename)
+	t.Funcs(template.FuncMap{
+		"node":     nodeFmt,
+		"synopsis": synopsisFmt,
+		"indent":   indentFmt,
+	})
+	return template.Must(t.ParseFiles(filepath.Join(dataDir, filename)))
+}
+
+// nodeFmt renders an AST node as source text, indenting interior
+// newlines by one tab so the result nests in the golden-file layout.
+func nodeFmt(node interface{}, fset *token.FileSet) string {
+	var buf bytes.Buffer
+	printer.Fprint(&buf, fset, node) // print error deliberately ignored in this test helper
+	return strings.ReplaceAll(strings.TrimSpace(buf.String()), "\n", "\n\t")
+}
+
+// synopsisFmt shortens s to at most 64 bytes, cutting back to a word
+// boundary and appending " ...", and formats the result as a line comment.
+func synopsisFmt(s string) string {
+	const n = 64
+	if len(s) > n {
+		// cut off excess text and go back to a word boundary
+		s = s[0:n]
+		if i := strings.LastIndexAny(s, "\t\n "); i >= 0 {
+			s = s[0:i]
+		}
+		s = strings.TrimSpace(s) + " ..."
+	}
+	return "// " + strings.ReplaceAll(s, "\n", " ")
+}
+
+// indentFmt prefixes every line of s with indent, without indenting
+// the empty text after a trailing newline.
+func indentFmt(indent, s string) string {
+	end := ""
+	if strings.HasSuffix(s, "\n") {
+		end = "\n"
+		s = s[:len(s)-1]
+	}
+	return indent + strings.ReplaceAll(s, "\n", "\n"+indent) + end
+}
+
+// isGoFile reports whether fi is a non-directory, non-hidden .go file.
+func isGoFile(fi fs.FileInfo) bool {
+	name := fi.Name()
+	return !fi.IsDir() &&
+		len(name) > 0 && name[0] != '.' && // ignore .files
+		filepath.Ext(name) == ".go"
+}
+
+// bundle pairs a Package with the FileSet it was parsed from,
+// so both are available to the golden-file template.
+type bundle struct {
+	*Package
+	FSet *token.FileSet
+}
+
+// test computes documentation for every package under testdata with the
+// given mode and compares the template-rendered result against the
+// corresponding .golden file (regenerate with -update).
+func test(t *testing.T, mode Mode) {
+	// determine file filter
+	filter := isGoFile
+	if *files != "" {
+		rx, err := regexp.Compile(*files)
+		if err != nil {
+			t.Fatal(err)
+		}
+		filter = func(fi fs.FileInfo) bool {
+			return isGoFile(fi) && rx.MatchString(fi.Name())
+		}
+	}
+
+	// get packages
+	fset := token.NewFileSet()
+	pkgs, err := parser.ParseDir(fset, dataDir, filter, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// test packages
+	for _, pkg := range pkgs {
+		t.Run(pkg.Name, func(t *testing.T) {
+			importPath := dataDir + "/" + pkg.Name
+			var files []*ast.File
+			for _, f := range pkg.Files {
+				files = append(files, f)
+			}
+			doc, err := NewFromFiles(fset, files, importPath, mode)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// golden files always use / in filenames - canonicalize them
+			for i, filename := range doc.Filenames {
+				doc.Filenames[i] = filepath.ToSlash(filename)
+			}
+
+			// print documentation
+			var buf bytes.Buffer
+			if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil {
+				t.Fatal(err)
+			}
+			got := buf.Bytes()
+
+			// update golden file if necessary
+			golden := filepath.Join(dataDir, fmt.Sprintf("%s.%d.golden", pkg.Name, mode))
+			if *update {
+				err := os.WriteFile(golden, got, 0644)
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+
+			// get golden file
+			want, err := os.ReadFile(golden)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// compare
+			if !bytes.Equal(got, want) {
+				t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want)
+			}
+		})
+	}
+}
+
+// Test runs the golden-file tests for the default mode, AllDecls, and AllMethods.
+func Test(t *testing.T) {
+	t.Run("default", func(t *testing.T) { test(t, 0) })
+	t.Run("AllDecls", func(t *testing.T) { test(t, AllDecls) })
+	t.Run("AllMethods", func(t *testing.T) { test(t, AllMethods) })
+}
+
+// TestAnchorID checks that anchorID sanitizes a section title into an
+// "hdr-"-prefixed anchor with non-identifier characters replaced by '_'.
+func TestAnchorID(t *testing.T) {
+	const in = "Important Things 2 Know & Stuff"
+	const want = "hdr-Important_Things_2_Know___Stuff"
+	got := anchorID(in)
+	if got != want {
+		t.Errorf("anchorID(%q) = %q; want %q", in, got, want)
+	}
+}
+
+// funcsTestFile is Go source (including generic types) used to exercise
+// function and method classification; funcsPackage below describes the
+// classification expected from it.
+const funcsTestFile = `
+package funcs
+
+func F() {}
+
+type S1 struct {
+	S2  // embedded, exported
+	s3  // embedded, unexported
+}
+
+func NewS1()  S1 {return S1{} }
+func NewS1p() *S1 { return &S1{} }
+
+func (S1) M1() {}
+func (r S1) M2() {}
+func(S1) m3() {}		// unexported not shown
+func (*S1) P1() {}		// pointer receiver
+
+type S2 int
+func (S2) M3() {}		// shown on S2
+
+type s3 int
+func (s3) M4() {}		// shown on S1
+
+type G1[T any] struct {
+	*s3
+}
+
+func NewG1[T any]() G1[T] { return G1[T]{} }
+
+func (G1[T]) MG1() {}
+func (*G1[U]) MG2() {}
+
+type G2[T, U any] struct {}
+
+func NewG2[T, U any]() G2[T, U] { return G2[T, U]{} }
+
+func (G2[T, U]) MG3() {}
+func (*G2[A, B]) MG4() {}
+
+
+`
+
+// funcsPackage is the documentation structure expected when computing
+// docs for funcsTestFile above (the comparing test lives outside this
+// file chunk — confirm there which fields are checked).
+var funcsPackage = &Package{
+	Funcs: []*Func{{Name: "F"}},
+	Types: []*Type{
+		{
+			Name:  "G1",
+			Funcs: []*Func{{Name: "NewG1"}},
+			Methods: []*Func{
+				{Name: "M4", Recv: "G1", // TODO: synthesize a param for G1?
+					Orig: "s3", Level: 1},
+				{Name: "MG1", Recv: "G1[T]", Orig: "G1[T]", Level: 0},
+				{Name: "MG2", Recv: "*G1[U]", Orig: "*G1[U]", Level: 0},
+			},
+		},
+		{
+			Name:  "G2",
+			Funcs: []*Func{{Name: "NewG2"}},
+			Methods: []*Func{
+				{Name: "MG3", Recv: "G2[T, U]", Orig: "G2[T, U]", Level: 0},
+				{Name: "MG4", Recv: "*G2[A, B]", Orig: "*G2[A, B]", Level: 0},
+			},
+		},
+		{
+			Name:  "S1",
+			Funcs: []*Func{{Name: "NewS1"}, {Name: "NewS1p"}},
+			Methods: []*Func{
+				{Name: "M1", Recv: "S1", Orig: "S1", Level: 0},
+				{Name: "M2", Recv: "S1", Orig: "S1", Level: 0},
+				{Name: "M4", Recv: "S1", Orig: "s3", Level: 1},
+				{Name: "P1", Recv: "*S1", Orig: "*S1", Level: 0},
+			},
+		},
+		{
+			Name: "S2",
+			Methods: []*Func{
+				{Name: "M3", Recv: "S2", Orig: "S2", Level: 0},
+			},
+		},
+	},
+}
diff --git a/internal/backport/go/doc/example.go b/internal/backport/go/doc/example.go
new file mode 100644
index 0000000..f4aaa83
--- /dev/null
+++ b/internal/backport/go/doc/example.go
@@ -0,0 +1,550 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extract example functions from file ASTs.
+
+package doc
+
+import (
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// An Example represents an example function found in a test source file.
+type Example struct {
+	Name        string // name of the item being exemplified (including optional suffix)
+	Suffix      string // example suffix, without leading '_' (only populated by NewFromFiles)
+	Doc         string // example function doc string
+	Code        ast.Node // example body, or the entire file for whole-file examples
+	Play        *ast.File // a whole program version of the example
+	Comments    []*ast.CommentGroup
+	Output      string // expected output
+	Unordered   bool
+	EmptyOutput bool // expect empty output
+	Order       int  // original source code order
+}
+
+// Examples returns the examples found in testFiles, sorted by Name field.
+// The Order fields record the order in which the examples were encountered.
+// The Suffix field is not populated when Examples is called directly, it is
+// only populated by NewFromFiles for examples it finds in _test.go files.
+//
+// Playable Examples must be in a package whose name ends in "_test".
+// An Example is "playable" (the Play field is non-nil) in either of these
+// circumstances:
+//   - The example function is self-contained: the function references only
+//     identifiers from other packages (or predeclared identifiers, such as
+//     "int") and the test file does not include a dot import.
+//   - The entire test file is the example: the file contains exactly one
+//     example function, zero test, fuzz test, or benchmark function, and at
+//     least one top-level function, type, variable, or constant declaration
+//     other than the example function.
+func Examples(testFiles ...*ast.File) []*Example {
+	var list []*Example
+	for _, file := range testFiles {
+		hasTests := false // file contains tests, fuzz test, or benchmarks
+		numDecl := 0      // number of non-import declarations in the file
+		var flist []*Example
+		for _, decl := range file.Decls {
+			if g, ok := decl.(*ast.GenDecl); ok && g.Tok != token.IMPORT {
+				// Non-import declarations count toward the
+				// whole-file-example heuristic below.
+				numDecl++
+				continue
+			}
+			f, ok := decl.(*ast.FuncDecl)
+			if !ok || f.Recv != nil {
+				continue
+			}
+			numDecl++
+			name := f.Name.Name
+			if isTest(name, "Test") || isTest(name, "Benchmark") || isTest(name, "Fuzz") {
+				hasTests = true
+				continue
+			}
+			if !isTest(name, "Example") {
+				continue
+			}
+			if params := f.Type.Params; len(params.List) != 0 {
+				continue // function has params; not a valid example
+			}
+			if f.Body == nil { // ast.File.Body nil dereference (see issue 28044)
+				continue
+			}
+			var doc string
+			if f.Doc != nil {
+				doc = f.Doc.Text()
+			}
+			output, unordered, hasOutput := exampleOutput(f.Body, file.Comments)
+			flist = append(flist, &Example{
+				Name:        name[len("Example"):],
+				Doc:         doc,
+				Code:        f.Body,
+				Play:        playExample(file, f),
+				Comments:    file.Comments,
+				Output:      output,
+				Unordered:   unordered,
+				EmptyOutput: output == "" && hasOutput,
+				Order:       len(flist),
+			})
+		}
+		if !hasTests && numDecl > 1 && len(flist) == 1 {
+			// If this file only has one example function, some
+			// other top-level declarations, and no tests or
+			// benchmarks, use the whole file as the example.
+			flist[0].Code = file
+			flist[0].Play = playExampleFile(file)
+		}
+		list = append(list, flist...)
+	}
+	// sort by name
+	sort.Slice(list, func(i, j int) bool {
+		return list[i].Name < list[j].Name
+	})
+	return list
+}
+
+// outputPrefix matches an "output:" or "unordered output:" prefix,
+// case-insensitively, at the start of a comment.
+var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*(unordered )?output:`)
+
+// exampleOutput extracts the expected output of an example from the last
+// comment in the block and reports whether a valid output comment was found.
+func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, unordered, ok bool) {
+	if _, last := lastComment(b, comments); last != nil {
+		// test that it begins with the correct prefix
+		text := last.Text()
+		if loc := outputPrefix.FindStringSubmatchIndex(text); loc != nil {
+			if loc[2] != -1 {
+				// The optional "unordered " group matched.
+				unordered = true
+			}
+			text = text[loc[1]:]
+			// Strip zero or more spaces followed by \n or a single space.
+			text = strings.TrimLeft(text, " ")
+			if len(text) > 0 && text[0] == '\n' {
+				text = text[1:]
+			}
+			return text, unordered, true
+		}
+	}
+	return "", false, false // no suitable comment found
+}
+
+// isTest tells whether name looks like a test, example, fuzz test, or
+// benchmark. It is a Test (say) if there is a character after Test that is not
+// a lower-case letter. (We don't want Testiness.)
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) // NOTE: shadows the builtin rune (kept as in upstream)
+	return !unicode.IsLower(rune)
+}
+
+// playExample synthesizes a new *ast.File based on the provided
+// file with the provided function body as the body of main.
+// It returns nil when a playable version cannot be constructed.
+func playExample(file *ast.File, f *ast.FuncDecl) *ast.File {
+	body := f.Body
+
+	if !strings.HasSuffix(file.Name.Name, "_test") {
+		// We don't support examples that are part of the
+		// greater package (yet).
+		return nil
+	}
+
+	// Collect top-level declarations in the file.
+	topDecls := make(map[*ast.Object]ast.Decl) // declared object -> its declaration
+	typMethods := make(map[string][]ast.Decl)  // receiver type name -> its method decls
+
+	for _, decl := range file.Decls {
+		switch d := decl.(type) {
+		case *ast.FuncDecl:
+			if d.Recv == nil {
+				topDecls[d.Name.Obj] = d
+			} else {
+				if len(d.Recv.List) == 1 {
+					t := d.Recv.List[0].Type
+					tname, _ := baseTypeName(t)
+					typMethods[tname] = append(typMethods[tname], d)
+				}
+			}
+		case *ast.GenDecl:
+			for _, spec := range d.Specs {
+				switch s := spec.(type) {
+				case *ast.TypeSpec:
+					topDecls[s.Name.Obj] = d
+				case *ast.ValueSpec:
+					for _, name := range s.Names {
+						topDecls[name.Obj] = d
+					}
+				}
+			}
+		}
+	}
+
+	// Find unresolved identifiers and uses of top-level declarations.
+	unresolved := make(map[string]bool)
+	var depDecls []ast.Decl
+	hasDepDecls := make(map[ast.Decl]bool)
+
+	var inspectFunc func(ast.Node) bool
+	inspectFunc = func(n ast.Node) bool {
+		switch e := n.(type) {
+		case *ast.Ident:
+			if e.Obj == nil && e.Name != "_" {
+				unresolved[e.Name] = true
+			} else if d := topDecls[e.Obj]; d != nil {
+				if !hasDepDecls[d] {
+					hasDepDecls[d] = true
+					depDecls = append(depDecls, d)
+				}
+			}
+			return true
+		case *ast.SelectorExpr:
+			// For selector expressions, only inspect the left hand side.
+			// (For an expression like fmt.Println, only add "fmt" to the
+			// set of unresolved names, not "Println".)
+			ast.Inspect(e.X, inspectFunc)
+			return false
+		case *ast.KeyValueExpr:
+			// For key value expressions, only inspect the value
+			// as the key should be resolved by the type of the
+			// composite literal.
+			ast.Inspect(e.Value, inspectFunc)
+			return false
+		}
+		return true
+	}
+	ast.Inspect(body, inspectFunc)
+	// depDecls may grow during this loop (type methods are appended
+	// below), so iterate by index rather than with range.
+	for i := 0; i < len(depDecls); i++ {
+		switch d := depDecls[i].(type) {
+		case *ast.FuncDecl:
+			// Inspect types of parameters and results. See #28492.
+			if d.Type.Params != nil {
+				for _, p := range d.Type.Params.List {
+					ast.Inspect(p.Type, inspectFunc)
+				}
+			}
+			if d.Type.Results != nil {
+				for _, r := range d.Type.Results.List {
+					ast.Inspect(r.Type, inspectFunc)
+				}
+			}
+
+			// Functions might not have a body. See #42706.
+			if d.Body != nil {
+				ast.Inspect(d.Body, inspectFunc)
+			}
+		case *ast.GenDecl:
+			for _, spec := range d.Specs {
+				switch s := spec.(type) {
+				case *ast.TypeSpec:
+					ast.Inspect(s.Type, inspectFunc)
+
+					depDecls = append(depDecls, typMethods[s.Name.Name]...)
+				case *ast.ValueSpec:
+					if s.Type != nil {
+						ast.Inspect(s.Type, inspectFunc)
+					}
+					for _, val := range s.Values {
+						ast.Inspect(val, inspectFunc)
+					}
+				}
+			}
+		}
+	}
+
+	// Remove predeclared identifiers from unresolved list.
+	for n := range unresolved {
+		if predeclaredTypes[n] || predeclaredConstants[n] || predeclaredFuncs[n] {
+			delete(unresolved, n)
+		}
+	}
+
+	// Use unresolved identifiers to determine the imports used by this
+	// example. The heuristic assumes package names match base import
+	// paths for imports w/o renames (should be good enough most of the time).
+	namedImports := make(map[string]string) // [name]path
+	var blankImports []ast.Spec             // _ imports
+	for _, s := range file.Imports {
+		p, err := strconv.Unquote(s.Path.Value)
+		if err != nil {
+			continue
+		}
+		if p == "syscall/js" {
+			// We don't support examples that import syscall/js,
+			// because the package syscall/js is not available in the playground.
+			return nil
+		}
+		n := path.Base(p)
+		if s.Name != nil {
+			n = s.Name.Name
+			switch n {
+			case "_":
+				blankImports = append(blankImports, s)
+				continue
+			case ".":
+				// We can't resolve dot imports (yet).
+				return nil
+			}
+		}
+		if unresolved[n] {
+			namedImports[n] = p
+			delete(unresolved, n)
+		}
+	}
+
+	// If there are other unresolved identifiers, give up because this
+	// synthesized file is not going to build.
+	if len(unresolved) > 0 {
+		return nil
+	}
+
+	// Include documentation belonging to blank imports.
+	var comments []*ast.CommentGroup
+	for _, s := range blankImports {
+		if c := s.(*ast.ImportSpec).Doc; c != nil {
+			comments = append(comments, c)
+		}
+	}
+
+	// Include comments that are inside the function body.
+	for _, c := range file.Comments {
+		if body.Pos() <= c.Pos() && c.End() <= body.End() {
+			comments = append(comments, c)
+		}
+	}
+
+	// Strip the "Output:" or "Unordered output:" comment and adjust body
+	// end position.
+	body, comments = stripOutputComment(body, comments)
+
+	// Include documentation belonging to dependent declarations.
+	for _, d := range depDecls {
+		switch d := d.(type) {
+		case *ast.GenDecl:
+			if d.Doc != nil {
+				comments = append(comments, d.Doc)
+			}
+		case *ast.FuncDecl:
+			if d.Doc != nil {
+				comments = append(comments, d.Doc)
+			}
+		}
+	}
+
+	// Synthesize import declaration.
+	importDecl := &ast.GenDecl{
+		Tok:    token.IMPORT,
+		Lparen: 1, // Need non-zero Lparen and Rparen so that printer
+		Rparen: 1, // treats this as a factored import.
+	}
+	for n, p := range namedImports {
+		s := &ast.ImportSpec{Path: &ast.BasicLit{Value: strconv.Quote(p)}}
+		if path.Base(p) != n {
+			s.Name = ast.NewIdent(n)
+		}
+		importDecl.Specs = append(importDecl.Specs, s)
+	}
+	importDecl.Specs = append(importDecl.Specs, blankImports...)
+
+	// Synthesize main function.
+	funcDecl := &ast.FuncDecl{
+		Name: ast.NewIdent("main"),
+		Type: f.Type,
+		Body: body,
+	}
+
+	decls := make([]ast.Decl, 0, 2+len(depDecls))
+	decls = append(decls, importDecl)
+	decls = append(decls, depDecls...)
+	decls = append(decls, funcDecl)
+
+	sort.Slice(decls, func(i, j int) bool {
+		return decls[i].Pos() < decls[j].Pos()
+	})
+
+	sort.Slice(comments, func(i, j int) bool {
+		return comments[i].Pos() < comments[j].Pos()
+	})
+
+	// Synthesize file.
+	return &ast.File{
+		Name:     ast.NewIdent("main"),
+		Decls:    decls,
+		Comments: comments,
+	}
+}
+
+// playExampleFile takes a whole file example and synthesizes a new *ast.File
+// such that the example is function main in package main.
+// The ExampleX function is rewritten to main and its output comment stripped.
+func playExampleFile(file *ast.File) *ast.File {
+	// Strip copyright comment if present.
+	comments := file.Comments
+	if len(comments) > 0 && strings.HasPrefix(comments[0].Text(), "Copyright") {
+		comments = comments[1:]
+	}
+
+	// Copy declaration slice, rewriting the ExampleX function to main.
+	var decls []ast.Decl
+	for _, d := range file.Decls {
+		if f, ok := d.(*ast.FuncDecl); ok && isTest(f.Name.Name, "Example") {
+			// Copy the FuncDecl, as it may be used elsewhere.
+			newF := *f
+			newF.Name = ast.NewIdent("main")
+			newF.Body, comments = stripOutputComment(f.Body, comments)
+			d = &newF
+		}
+		decls = append(decls, d)
+	}
+
+	// Copy the File, as it may be used elsewhere.
+	f := *file
+	f.Name = ast.NewIdent("main")
+	f.Decls = decls
+	f.Comments = comments
+	return &f
+}
+
+// stripOutputComment finds and removes the "Output:" or "Unordered output:"
+// comment from body and comments, and adjusts the body block's end position.
+func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
+	// Do nothing if there is no "Output:" or "Unordered output:" comment.
+	i, last := lastComment(body, comments)
+	if last == nil || !outputPrefix.MatchString(last.Text()) {
+		return body, comments
+	}
+
+	// Copy body and comments, as the originals may be used elsewhere.
+	newBody := &ast.BlockStmt{
+		Lbrace: body.Lbrace,
+		List:   body.List,
+		Rbrace: last.Pos(),
+	}
+	// Splice comments[:i] and comments[i+1:] together, dropping
+	// entry i (the output comment).
+	newComments := make([]*ast.CommentGroup, len(comments)-1)
+	copy(newComments, comments[:i])
+	copy(newComments[i:], comments[i+1:])
+	return newBody, newComments
+}
+
+// lastComment returns the last comment inside the provided block, along
+// with its index in c. It returns the zero values (0, nil) when the
+// block is nil or contains no comments.
+func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) {
+	if b == nil {
+		return
+	}
+	pos, end := b.Pos(), b.End()
+	for j, cg := range c {
+		if cg.Pos() < pos {
+			continue
+		}
+		if cg.End() > end {
+			break
+		}
+		i, last = j, cg
+	}
+	return
+}
+
+// classifyExamples classifies examples and assigns them to the Examples field
+// of the relevant Func, Type, or Package that the example is associated with.
+//
+// The classification process is ambiguous in some cases:
+//
+//   - ExampleFoo_Bar matches a type named Foo_Bar
+//     or a method named Foo.Bar.
+//   - ExampleFoo_bar matches a type named Foo_bar
+//     or Foo (with a "bar" suffix).
+//
+// Examples with malformed names are not associated with anything.
+func classifyExamples(p *Package, examples []*Example) {
+	if len(examples) == 0 {
+		return
+	}
+
+	// Mapping of names for funcs, types, and methods to the example listing.
+	ids := make(map[string]*[]*Example)
+	ids[""] = &p.Examples // package-level examples have an empty name
+	for _, f := range p.Funcs {
+		if !token.IsExported(f.Name) {
+			continue
+		}
+		ids[f.Name] = &f.Examples
+	}
+	for _, t := range p.Types {
+		if !token.IsExported(t.Name) {
+			continue
+		}
+		ids[t.Name] = &t.Examples
+		for _, f := range t.Funcs {
+			if !token.IsExported(f.Name) {
+				continue
+			}
+			ids[f.Name] = &f.Examples
+		}
+		for _, m := range t.Methods {
+			if !token.IsExported(m.Name) {
+				continue
+			}
+			// Drop the "*" of pointer receivers when forming the key.
+			ids[strings.TrimPrefix(m.Recv, "*")+"_"+m.Name] = &m.Examples
+		}
+	}
+
+	// Group each example with the associated func, type, or method.
+	for _, ex := range examples {
+		// Consider all possible split points for the suffix
+		// by starting at the end of string (no suffix case),
+		// then trying all positions that contain a '_' character.
+		//
+		// An association is made on the first successful match.
+		// Examples with malformed names that match nothing are skipped.
+		for i := len(ex.Name); i >= 0; i = strings.LastIndexByte(ex.Name[:i], '_') {
+			prefix, suffix, ok := splitExampleName(ex.Name, i)
+			if !ok {
+				continue
+			}
+			exs, ok := ids[prefix]
+			if !ok {
+				continue
+			}
+			ex.Suffix = suffix
+			*exs = append(*exs, ex)
+			break
+		}
+	}
+
+	// Sort list of example according to the user-specified suffix name.
+	for _, exs := range ids {
+		sort.Slice((*exs), func(i, j int) bool {
+			return (*exs)[i].Suffix < (*exs)[j].Suffix
+		})
+	}
+}
+
+// splitExampleName attempts to split example name s at index i,
+// and reports if that produces a valid split. The suffix may be
+// absent. Otherwise, it must start with a lower-case letter and
+// be preceded by '_'.
+//
+// One of i == len(s) or s[i] == '_' must be true.
+func splitExampleName(s string, i int) (prefix, suffix string, ok bool) {
+	if i == len(s) {
+		return s, "", true
+	}
+	if i == len(s)-1 {
+		// A trailing '_' with nothing after it is not a valid suffix.
+		return "", "", false
+	}
+	prefix, suffix = s[:i], s[i+1:]
+	return prefix, suffix, isExampleSuffix(suffix)
+}
+
+// isExampleSuffix reports whether s begins with a lower-case letter,
+// making it a valid example name suffix.
+func isExampleSuffix(s string) bool {
+	r, size := utf8.DecodeRuneInString(s)
+	return size > 0 && unicode.IsLower(r)
+}
diff --git a/internal/backport/go/doc/example_test.go b/internal/backport/go/doc/example_test.go
new file mode 100644
index 0000000..fc5b83c
--- /dev/null
+++ b/internal/backport/go/doc/example_test.go
@@ -0,0 +1,749 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc_test
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/doc"
+	"golang.org/x/website/internal/backport/go/format"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+const exampleTestFile = `
+package foo_test
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"sort"
+	"os/exec"
+)
+
+func ExampleHello() {
+	fmt.Println("Hello, world!")
+	// Output: Hello, world!
+}
+
+func ExampleImport() {
+	out, err := exec.Command("date").Output()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("The date is %s\n", out)
+}
+
+func ExampleKeyValue() {
+	v := struct {
+		a string
+		b int
+	}{
+		a: "A",
+		b: 1,
+	}
+	fmt.Print(v)
+	// Output: a: "A", b: 1
+}
+
+func ExampleKeyValueImport() {
+	f := flag.Flag{
+		Name: "play",
+	}
+	fmt.Print(f)
+	// Output: Name: "play"
+}
+
+var keyValueTopDecl = struct {
+	a string
+	b int
+}{
+	a: "B",
+	b: 2,
+}
+
+func ExampleKeyValueTopDecl() {
+	fmt.Print(keyValueTopDecl)
+	// Output: a: "B", b: 2
+}
+
+// Person represents a person by name and age.
+type Person struct {
+    Name string
+    Age  int
+}
+
+// String returns a string representation of the Person.
+func (p Person) String() string {
+    return fmt.Sprintf("%s: %d", p.Name, p.Age)
+}
+
+// ByAge implements sort.Interface for []Person based on
+// the Age field.
+type ByAge []Person
+
+// Len returns the number of elements in ByAge.
+func (a (ByAge)) Len() int { return len(a) }
+
+// Swap swaps the elements in ByAge.
+func (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }
+
+// people is the array of Person
+var people = []Person{
+	{"Bob", 31},
+	{"John", 42},
+	{"Michael", 17},
+	{"Jenny", 26},
+}
+
+func ExampleSort() {
+    fmt.Println(people)
+    sort.Sort(ByAge(people))
+    fmt.Println(people)
+    // Output:
+    // [Bob: 31 John: 42 Michael: 17 Jenny: 26]
+    // [Michael: 17 Jenny: 26 Bob: 31 John: 42]
+}
+`
+
+var exampleTestCases = []struct {
+	Name, Play, Output string
+}{
+	{
+		Name:   "Hello",
+		Play:   exampleHelloPlay,
+		Output: "Hello, world!\n",
+	},
+	{
+		Name: "Import",
+		Play: exampleImportPlay,
+	},
+	{
+		Name:   "KeyValue",
+		Play:   exampleKeyValuePlay,
+		Output: "a: \"A\", b: 1\n",
+	},
+	{
+		Name:   "KeyValueImport",
+		Play:   exampleKeyValueImportPlay,
+		Output: "Name: \"play\"\n",
+	},
+	{
+		Name:   "KeyValueTopDecl",
+		Play:   exampleKeyValueTopDeclPlay,
+		Output: "a: \"B\", b: 2\n",
+	},
+	{
+		Name:   "Sort",
+		Play:   exampleSortPlay,
+		Output: "[Bob: 31 John: 42 Michael: 17 Jenny: 26]\n[Michael: 17 Jenny: 26 Bob: 31 John: 42]\n",
+	},
+}
+
+const exampleHelloPlay = `package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	fmt.Println("Hello, world!")
+}
+`
+const exampleImportPlay = `package main
+
+import (
+	"fmt"
+	"log"
+	"os/exec"
+)
+
+func main() {
+	out, err := exec.Command("date").Output()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("The date is %s\n", out)
+}
+`
+
+const exampleKeyValuePlay = `package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	v := struct {
+		a string
+		b int
+	}{
+		a: "A",
+		b: 1,
+	}
+	fmt.Print(v)
+}
+`
+
+const exampleKeyValueImportPlay = `package main
+
+import (
+	"flag"
+	"fmt"
+)
+
+func main() {
+	f := flag.Flag{
+		Name: "play",
+	}
+	fmt.Print(f)
+}
+`
+
+const exampleKeyValueTopDeclPlay = `package main
+
+import (
+	"fmt"
+)
+
+var keyValueTopDecl = struct {
+	a string
+	b int
+}{
+	a: "B",
+	b: 2,
+}
+
+func main() {
+	fmt.Print(keyValueTopDecl)
+}
+`
+
+const exampleSortPlay = `package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Person represents a person by name and age.
+type Person struct {
+	Name string
+	Age  int
+}
+
+// String returns a string representation of the Person.
+func (p Person) String() string {
+	return fmt.Sprintf("%s: %d", p.Name, p.Age)
+}
+
+// ByAge implements sort.Interface for []Person based on
+// the Age field.
+type ByAge []Person
+
+// Len returns the number of elements in ByAge.
+func (a ByAge) Len() int { return len(a) }
+
+// Swap swaps the elements in ByAge.
+func (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }
+
+// people is the array of Person
+var people = []Person{
+	{"Bob", 31},
+	{"John", 42},
+	{"Michael", 17},
+	{"Jenny", 26},
+}
+
+func main() {
+	fmt.Println(people)
+	sort.Sort(ByAge(people))
+	fmt.Println(people)
+}
+`
+
+func TestExamples(t *testing.T) {
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleTestFile), parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, e := range doc.Examples(file) {
+		c := exampleTestCases[i]
+		if e.Name != c.Name {
+			t.Errorf("got Name == %q, want %q", e.Name, c.Name)
+		}
+		if w := c.Play; w != "" {
+			g := formatFile(t, fset, e.Play)
+			if g != w {
+				t.Errorf("%s: got Play == %q, want %q", c.Name, g, w)
+			}
+		}
+		if g, w := e.Output, c.Output; g != w {
+			t.Errorf("%s: got Output == %q, want %q", c.Name, g, w)
+		}
+	}
+}
+
+const exampleWholeFile = `package foo_test
+
+type X int
+
+func (X) Foo() {
+}
+
+func (X) TestBlah() {
+}
+
+func (X) BenchmarkFoo() {
+}
+
+func (X) FuzzFoo() {
+}
+
+func Example() {
+	fmt.Println("Hello, world!")
+	// Output: Hello, world!
+}
+`
+
+const exampleWholeFileOutput = `package main
+
+type X int
+
+func (X) Foo() {
+}
+
+func (X) TestBlah() {
+}
+
+func (X) BenchmarkFoo() {
+}
+
+func (X) FuzzFoo() {
+}
+
+func main() {
+	fmt.Println("Hello, world!")
+}
+`
+
+const exampleWholeFileFunction = `package foo_test
+
+func Foo(x int) {
+}
+
+func Example() {
+	fmt.Println("Hello, world!")
+	// Output: Hello, world!
+}
+`
+
+const exampleWholeFileFunctionOutput = `package main
+
+func Foo(x int) {
+}
+
+func main() {
+	fmt.Println("Hello, world!")
+}
+`
+
+const exampleWholeFileExternalFunction = `package foo_test
+
+func foo(int)
+
+func Example() {
+	foo(42)
+	// Output:
+}
+`
+
+const exampleWholeFileExternalFunctionOutput = `package main
+
+func foo(int)
+
+func main() {
+	foo(42)
+}
+`
+
+var exampleWholeFileTestCases = []struct {
+	Title, Source, Play, Output string
+}{
+	{
+		"Methods",
+		exampleWholeFile,
+		exampleWholeFileOutput,
+		"Hello, world!\n",
+	},
+	{
+		"Function",
+		exampleWholeFileFunction,
+		exampleWholeFileFunctionOutput,
+		"Hello, world!\n",
+	},
+	{
+		"ExternalFunction",
+		exampleWholeFileExternalFunction,
+		exampleWholeFileExternalFunctionOutput,
+		"",
+	},
+}
+
+func TestExamplesWholeFile(t *testing.T) {
+	for _, c := range exampleWholeFileTestCases {
+		fset := token.NewFileSet()
+		file, err := parser.ParseFile(fset, "test.go", strings.NewReader(c.Source), parser.ParseComments)
+		if err != nil {
+			t.Fatal(err)
+		}
+		es := doc.Examples(file)
+		if len(es) != 1 {
+			t.Fatalf("%s: wrong number of examples; got %d want 1", c.Title, len(es))
+		}
+		e := es[0]
+		if e.Name != "" {
+			t.Errorf("%s: got Name == %q, want %q", c.Title, e.Name, "")
+		}
+		if g, w := formatFile(t, fset, e.Play), c.Play; g != w {
+			t.Errorf("%s: got Play == %q, want %q", c.Title, g, w)
+		}
+		if g, w := e.Output, c.Output; g != w {
+			t.Errorf("%s: got Output == %q, want %q", c.Title, g, w)
+		}
+	}
+}
+
+const exampleInspectSignature = `package foo_test
+
+import (
+	"bytes"
+	"io"
+)
+
+func getReader() io.Reader { return nil }
+
+func do(b bytes.Reader) {}
+
+func Example() {
+	getReader()
+	do()
+	// Output:
+}
+
+func ExampleIgnored() {
+}
+`
+
+const exampleInspectSignatureOutput = `package main
+
+import (
+	"bytes"
+	"io"
+)
+
+func getReader() io.Reader { return nil }
+
+func do(b bytes.Reader) {}
+
+func main() {
+	getReader()
+	do()
+}
+`
+
+func TestExampleInspectSignature(t *testing.T) {
+	// Verify that "bytes" and "io" are imported. See issue #28492.
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleInspectSignature), parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	es := doc.Examples(file)
+	if len(es) != 2 {
+		t.Fatalf("wrong number of examples; got %d want 2", len(es))
+	}
+	// We are interested in the first example only.
+	e := es[0]
+	if e.Name != "" {
+		t.Errorf("got Name == %q, want %q", e.Name, "")
+	}
+	if g, w := formatFile(t, fset, e.Play), exampleInspectSignatureOutput; g != w {
+		t.Errorf("got Play == %q, want %q", g, w)
+	}
+	if g, w := e.Output, ""; g != w {
+		t.Errorf("got Output == %q, want %q", g, w)
+	}
+}
+
+const exampleEmpty = `
+package p
+func Example() {}
+func Example_a()
+`
+
+const exampleEmptyOutput = `package main
+
+func main() {}
+func main()
+`
+
+func TestExampleEmpty(t *testing.T) {
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleEmpty), parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	es := doc.Examples(file)
+	if len(es) != 1 {
+		t.Fatalf("wrong number of examples; got %d want 1", len(es))
+	}
+	e := es[0]
+	if e.Name != "" {
+		t.Errorf("got Name == %q, want %q", e.Name, "")
+	}
+	if g, w := formatFile(t, fset, e.Play), exampleEmptyOutput; g != w {
+		t.Errorf("got Play == %q, want %q", g, w)
+	}
+	if g, w := e.Output, ""; g != w {
+		t.Errorf("got Output == %q, want %q", g, w)
+	}
+}
+
+func formatFile(t *testing.T, fset *token.FileSet, n *ast.File) string {
+	if n == nil {
+		return "<nil>"
+	}
+	var buf bytes.Buffer
+	if err := format.Node(&buf, fset, n); err != nil {
+		t.Fatal(err)
+	}
+	return buf.String()
+}
+
+// This example illustrates how to use NewFromFiles
+// to compute package documentation with examples.
+func ExampleNewFromFiles() {
+	// src and test are two source files that make up
+	// a package whose documentation will be computed.
+	const src = `
+// This is the package comment.
+package p
+
+import "fmt"
+
+// This comment is associated with the Greet function.
+func Greet(who string) {
+	fmt.Printf("Hello, %s!\n", who)
+}
+`
+	const test = `
+package p_test
+
+// This comment is associated with the ExampleGreet_world example.
+func ExampleGreet_world() {
+	Greet("world")
+}
+`
+
+	// Create the AST by parsing src and test.
+	fset := token.NewFileSet()
+	files := []*ast.File{
+		mustParse(fset, "src.go", src),
+		mustParse(fset, "src_test.go", test),
+	}
+
+	// Compute package documentation with examples.
+	p, err := doc.NewFromFiles(fset, files, "example.com/p")
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("package %s - %s", p.Name, p.Doc)
+	fmt.Printf("func %s - %s", p.Funcs[0].Name, p.Funcs[0].Doc)
+	fmt.Printf(" ⤷ example with suffix %q - %s", p.Funcs[0].Examples[0].Suffix, p.Funcs[0].Examples[0].Doc)
+
+	// Output:
+	// package p - This is the package comment.
+	// func Greet - This comment is associated with the Greet function.
+	//  ⤷ example with suffix "world" - This comment is associated with the ExampleGreet_world example.
+}
+
+func TestClassifyExamples(t *testing.T) {
+	const src = `
+package p
+
+const Const1 = 0
+var   Var1   = 0
+
+type (
+	Type1     int
+	Type1_Foo int
+	Type1_foo int
+	type2     int
+
+	Embed struct { Type1 }
+	Uembed struct { type2 }
+)
+
+func Func1()     {}
+func Func1_Foo() {}
+func Func1_foo() {}
+func func2()     {}
+
+func (Type1) Func1() {}
+func (Type1) Func1_Foo() {}
+func (Type1) Func1_foo() {}
+func (Type1) func2() {}
+
+func (type2) Func1() {}
+
+type (
+	Conflict          int
+	Conflict_Conflict int
+	Conflict_conflict int
+)
+
+func (Conflict) Conflict() {}
+`
+	const test = `
+package p_test
+
+func ExampleConst1() {} // invalid - no support for consts and vars
+func ExampleVar1()   {} // invalid - no support for consts and vars
+
+func Example()               {}
+func Example_()              {} // invalid - suffix must start with a lower-case letter
+func Example_suffix()        {}
+func Example_suffix_xX_X_x() {}
+func Example_世界()           {} // invalid - suffix must start with a lower-case letter
+func Example_123()           {} // invalid - suffix must start with a lower-case letter
+func Example_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+
+func ExampleType1()               {}
+func ExampleType1_()              {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_suffix()        {}
+func ExampleType1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Foo()           {}
+func ExampleType1_Foo_suffix()    {}
+func ExampleType1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_foo()           {}
+func ExampleType1_foo_suffix()    {}
+func ExampleType1_foo_Suffix()    {} // matches Type1, instead of Type1_foo
+func Exampletype2()               {} // invalid - cannot match unexported
+
+func ExampleFunc1()               {}
+func ExampleFunc1_()              {} // invalid - suffix must start with a lower-case letter
+func ExampleFunc1_suffix()        {}
+func ExampleFunc1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+func ExampleFunc1_Foo()           {}
+func ExampleFunc1_Foo_suffix()    {}
+func ExampleFunc1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
+func ExampleFunc1_foo()           {}
+func ExampleFunc1_foo_suffix()    {}
+func ExampleFunc1_foo_Suffix()    {} // matches Func1, instead of Func1_foo
+func Examplefunc1()               {} // invalid - cannot match unexported
+
+func ExampleType1_Func1()               {}
+func ExampleType1_Func1_()              {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Func1_suffix()        {}
+func ExampleType1_Func1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Func1_Foo()           {}
+func ExampleType1_Func1_Foo_suffix()    {}
+func ExampleType1_Func1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Func1_foo()           {}
+func ExampleType1_Func1_foo_suffix()    {}
+func ExampleType1_Func1_foo_Suffix()    {} // matches Type1.Func1, instead of Type1.Func1_foo
+func ExampleType1_func2()               {} // matches Type1, instead of Type1.func2
+
+func ExampleEmbed_Func1()         {} // invalid - no support for forwarded methods from embedding exported type
+func ExampleUembed_Func1()        {} // methods from embedding unexported types are OK
+func ExampleUembed_Func1_suffix() {}
+
+func ExampleConflict_Conflict()        {} // ambiguous with either Conflict or Conflict_Conflict type
+func ExampleConflict_conflict()        {} // ambiguous with either Conflict or Conflict_conflict type
+func ExampleConflict_Conflict_suffix() {} // ambiguous with either Conflict or Conflict_Conflict type
+func ExampleConflict_conflict_suffix() {} // ambiguous with either Conflict or Conflict_conflict type
+`
+
+	// Parse literal source code as a *doc.Package.
+	fset := token.NewFileSet()
+	files := []*ast.File{
+		mustParse(fset, "src.go", src),
+		mustParse(fset, "src_test.go", test),
+	}
+	p, err := doc.NewFromFiles(fset, files, "example.com/p")
+	if err != nil {
+		t.Fatalf("doc.NewFromFiles: %v", err)
+	}
+
+	// Collect the association of examples to top-level identifiers.
+	got := map[string][]string{}
+	got[""] = exampleNames(p.Examples)
+	for _, f := range p.Funcs {
+		got[f.Name] = exampleNames(f.Examples)
+	}
+	for _, t := range p.Types {
+		got[t.Name] = exampleNames(t.Examples)
+		for _, f := range t.Funcs {
+			got[f.Name] = exampleNames(f.Examples)
+		}
+		for _, m := range t.Methods {
+			got[t.Name+"."+m.Name] = exampleNames(m.Examples)
+		}
+	}
+
+	want := map[string][]string{
+		"": {"", "suffix", "suffix_xX_X_x"}, // Package-level examples.
+
+		"Type1":     {"", "foo_Suffix", "func2", "suffix"},
+		"Type1_Foo": {"", "suffix"},
+		"Type1_foo": {"", "suffix"},
+
+		"Func1":     {"", "foo_Suffix", "suffix"},
+		"Func1_Foo": {"", "suffix"},
+		"Func1_foo": {"", "suffix"},
+
+		"Type1.Func1":     {"", "foo_Suffix", "suffix"},
+		"Type1.Func1_Foo": {"", "suffix"},
+		"Type1.Func1_foo": {"", "suffix"},
+
+		"Uembed.Func1": {"", "suffix"},
+
+		// These are implementation dependent due to the ambiguous parsing.
+		"Conflict_Conflict": {"", "suffix"},
+		"Conflict_conflict": {"", "suffix"},
+	}
+
+	for id := range got {
+		if !reflect.DeepEqual(got[id], want[id]) {
+			t.Errorf("classification mismatch for %q:\ngot  %q\nwant %q", id, got[id], want[id])
+		}
+	}
+}
+
+func exampleNames(exs []*doc.Example) (out []string) {
+	for _, ex := range exs {
+		out = append(out, ex.Suffix)
+	}
+	return out
+}
+
+func mustParse(fset *token.FileSet, filename, src string) *ast.File {
+	f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
diff --git a/internal/backport/go/doc/exports.go b/internal/backport/go/doc/exports.go
new file mode 100644
index 0000000..c2b91d5
--- /dev/null
+++ b/internal/backport/go/doc/exports.go
@@ -0,0 +1,324 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements export filtering of an AST.
+
+package doc
+
+import (
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// filterIdentList removes unexported names from list in place
+// and returns the resulting list.
+func filterIdentList(list []*ast.Ident) []*ast.Ident {
+	j := 0
+	for _, x := range list {
+		if token.IsExported(x.Name) {
+			list[j] = x
+			j++
+		}
+	}
+	return list[0:j]
+}
+
+var underscore = ast.NewIdent("_")
+
+func filterCompositeLit(lit *ast.CompositeLit, filter Filter, export bool) {
+	n := len(lit.Elts)
+	lit.Elts = filterExprList(lit.Elts, filter, export)
+	if len(lit.Elts) < n {
+		lit.Incomplete = true
+	}
+}
+
+func filterExprList(list []ast.Expr, filter Filter, export bool) []ast.Expr {
+	j := 0
+	for _, exp := range list {
+		switch x := exp.(type) {
+		case *ast.CompositeLit:
+			filterCompositeLit(x, filter, export)
+		case *ast.KeyValueExpr:
+			if x, ok := x.Key.(*ast.Ident); ok && !filter(x.Name) {
+				continue
+			}
+			if x, ok := x.Value.(*ast.CompositeLit); ok {
+				filterCompositeLit(x, filter, export)
+			}
+		}
+		list[j] = exp
+		j++
+	}
+	return list[0:j]
+}
+
+// updateIdentList replaces all unexported identifiers with underscore
+// and reports whether at least one exported name exists.
+func updateIdentList(list []*ast.Ident) (hasExported bool) {
+	for i, x := range list {
+		if token.IsExported(x.Name) {
+			hasExported = true
+		} else {
+			list[i] = underscore
+		}
+	}
+	return hasExported
+}
+
+// hasExportedName reports whether list contains any exported names.
+func hasExportedName(list []*ast.Ident) bool {
+	for _, x := range list {
+		if x.IsExported() {
+			return true
+		}
+	}
+	return false
+}
+
+// removeAnonymousField removes anonymous fields named name from an interface.
+func removeAnonymousField(name string, ityp *ast.InterfaceType) {
+	list := ityp.Methods.List // we know that ityp.Methods != nil
+	j := 0
+	for _, field := range list {
+		keepField := true
+		if n := len(field.Names); n == 0 {
+			// anonymous field
+			if fname, _ := baseTypeName(field.Type); fname == name {
+				keepField = false
+			}
+		}
+		if keepField {
+			list[j] = field
+			j++
+		}
+	}
+	if j < len(list) {
+		ityp.Incomplete = true
+	}
+	ityp.Methods.List = list[0:j]
+}
+
+// filterFieldList removes unexported fields (field names) from the field list
+// in place and reports whether fields were removed. Anonymous fields are
+// recorded with the parent type. filterType is called with the types of
+// all remaining fields.
+func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {
+	if fields == nil {
+		return
+	}
+	list := fields.List
+	j := 0
+	for _, field := range list {
+		keepField := false
+		if n := len(field.Names); n == 0 {
+			// anonymous field or embedded type or union element
+			fname := r.recordAnonymousField(parent, field.Type)
+			if fname != "" {
+				if token.IsExported(fname) {
+					keepField = true
+				} else if ityp != nil && predeclaredTypes[fname] {
+					// possibly an embedded predeclared type; keep it for now but
+					// remember this interface so that it can be fixed if name is also
+					// defined locally
+					keepField = true
+					r.remember(fname, ityp)
+				}
+			} else {
+				// If we're operating on an interface, assume that this is an embedded
+				// type or union element.
+				//
+				// TODO(rfindley): consider traversing into approximation/unions
+				// elements to see if they are entirely unexported.
+				keepField = ityp != nil
+			}
+		} else {
+			field.Names = filterIdentList(field.Names)
+			if len(field.Names) < n {
+				removedFields = true
+			}
+			if len(field.Names) > 0 {
+				keepField = true
+			}
+		}
+		if keepField {
+			r.filterType(nil, field.Type)
+			list[j] = field
+			j++
+		}
+	}
+	if j < len(list) {
+		removedFields = true
+	}
+	fields.List = list[0:j]
+	return
+}
+
+// filterParamList applies filterType to each parameter type in fields.
+func (r *reader) filterParamList(fields *ast.FieldList) {
+	if fields != nil {
+		for _, f := range fields.List {
+			r.filterType(nil, f.Type)
+		}
+	}
+}
+
+// filterType strips any unexported struct fields or method types from typ
+// in place. If fields (or methods) have been removed, the corresponding
+// struct or interface type has the Incomplete field set to true.
+func (r *reader) filterType(parent *namedType, typ ast.Expr) {
+	switch t := typ.(type) {
+	case *ast.Ident:
+		// nothing to do
+	case *ast.ParenExpr:
+		r.filterType(nil, t.X)
+	case *ast.StarExpr: // possibly an embedded type literal
+		r.filterType(nil, t.X)
+	case *ast.UnaryExpr:
+		if t.Op == token.TILDE { // approximation element
+			r.filterType(nil, t.X)
+		}
+	case *ast.BinaryExpr:
+		if t.Op == token.OR { // union
+			r.filterType(nil, t.X)
+			r.filterType(nil, t.Y)
+		}
+	case *ast.ArrayType:
+		r.filterType(nil, t.Elt)
+	case *ast.StructType:
+		if r.filterFieldList(parent, t.Fields, nil) {
+			t.Incomplete = true
+		}
+	case *ast.FuncType:
+		r.filterParamList(t.TypeParams)
+		r.filterParamList(t.Params)
+		r.filterParamList(t.Results)
+	case *ast.InterfaceType:
+		if r.filterFieldList(parent, t.Methods, t) {
+			t.Incomplete = true
+		}
+	case *ast.MapType:
+		r.filterType(nil, t.Key)
+		r.filterType(nil, t.Value)
+	case *ast.ChanType:
+		r.filterType(nil, t.Value)
+	}
+}
+
+func (r *reader) filterSpec(spec ast.Spec) bool {
+	switch s := spec.(type) {
+	case *ast.ImportSpec:
+		// always keep imports so we can collect them
+		return true
+	case *ast.ValueSpec:
+		s.Values = filterExprList(s.Values, token.IsExported, true)
+		if len(s.Values) > 0 || s.Type == nil && len(s.Values) == 0 {
+			// If there are values declared on RHS, just replace the unexported
+			// identifiers on the LHS with underscore, so that it matches
+			// the sequence of expressions on the RHS.
+			//
+			// Similarly, if there are no type and values, then this expression
+			// must be following an iota expression, where order matters.
+			if updateIdentList(s.Names) {
+				r.filterType(nil, s.Type)
+				return true
+			}
+		} else {
+			s.Names = filterIdentList(s.Names)
+			if len(s.Names) > 0 {
+				r.filterType(nil, s.Type)
+				return true
+			}
+		}
+	case *ast.TypeSpec:
+		// Don't filter type parameters here, by analogy with function parameters
+		// which are not filtered for top-level function declarations.
+		if name := s.Name.Name; token.IsExported(name) {
+			r.filterType(r.lookupType(s.Name.Name), s.Type)
+			return true
+		} else if IsPredeclared(name) {
+			if r.shadowedPredecl == nil {
+				r.shadowedPredecl = make(map[string]bool)
+			}
+			r.shadowedPredecl[name] = true
+		}
+	}
+	return false
+}
+
+// copyConstType returns a copy of typ with position pos.
+// typ must be a valid constant type.
+// In practice, only (possibly qualified) identifiers are possible.
+func copyConstType(typ ast.Expr, pos token.Pos) ast.Expr {
+	switch typ := typ.(type) {
+	case *ast.Ident:
+		return &ast.Ident{Name: typ.Name, NamePos: pos}
+	case *ast.SelectorExpr:
+		if id, ok := typ.X.(*ast.Ident); ok {
+			// presumably a qualified identifier
+			return &ast.SelectorExpr{
+				Sel: ast.NewIdent(typ.Sel.Name),
+				X:   &ast.Ident{Name: id.Name, NamePos: pos},
+			}
+		}
+	}
+	return nil // shouldn't happen, but be conservative and don't panic
+}
+
+func (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec {
+	if tok == token.CONST {
+		// Propagate any type information that would get lost otherwise
+		// when unexported constants are filtered.
+		var prevType ast.Expr
+		for _, spec := range list {
+			spec := spec.(*ast.ValueSpec)
+			if spec.Type == nil && len(spec.Values) == 0 && prevType != nil {
+				// provide current spec with an explicit type
+				spec.Type = copyConstType(prevType, spec.Pos())
+			}
+			if hasExportedName(spec.Names) {
+				// exported names are preserved so there's no need to propagate the type
+				prevType = nil
+			} else {
+				prevType = spec.Type
+			}
+		}
+	}
+
+	j := 0
+	for _, s := range list {
+		if r.filterSpec(s) {
+			list[j] = s
+			j++
+		}
+	}
+	return list[0:j]
+}
+
+func (r *reader) filterDecl(decl ast.Decl) bool {
+	switch d := decl.(type) {
+	case *ast.GenDecl:
+		d.Specs = r.filterSpecList(d.Specs, d.Tok)
+		return len(d.Specs) > 0
+	case *ast.FuncDecl:
+		// ok to filter these methods early because any
+		// conflicting method will be filtered here, too -
+		// thus, removing these methods early will not lead
+		// to the false removal of possible conflicts
+		return token.IsExported(d.Name.Name)
+	}
+	return false
+}
+
+// fileExports removes unexported declarations from src in place.
+func (r *reader) fileExports(src *ast.File) {
+	j := 0
+	for _, d := range src.Decls {
+		if r.filterDecl(d) {
+			src.Decls[j] = d
+			j++
+		}
+	}
+	src.Decls = src.Decls[0:j]
+}
diff --git a/internal/backport/go/doc/filter.go b/internal/backport/go/doc/filter.go
new file mode 100644
index 0000000..ddf0368
--- /dev/null
+++ b/internal/backport/go/doc/filter.go
@@ -0,0 +1,106 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "golang.org/x/website/internal/backport/go/ast"
+
+type Filter func(string) bool
+
+func matchFields(fields *ast.FieldList, f Filter) bool {
+	if fields != nil {
+		for _, field := range fields.List {
+			for _, name := range field.Names {
+				if f(name.Name) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func matchDecl(d *ast.GenDecl, f Filter) bool {
+	for _, d := range d.Specs {
+		switch v := d.(type) {
+		case *ast.ValueSpec:
+			for _, name := range v.Names {
+				if f(name.Name) {
+					return true
+				}
+			}
+		case *ast.TypeSpec:
+			if f(v.Name.Name) {
+				return true
+			}
+			// We don't match ordinary parameters in filterFuncs, so by analogy don't
+			// match type parameters here.
+			switch t := v.Type.(type) {
+			case *ast.StructType:
+				if matchFields(t.Fields, f) {
+					return true
+				}
+			case *ast.InterfaceType:
+				if matchFields(t.Methods, f) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func filterValues(a []*Value, f Filter) []*Value {
+	w := 0
+	for _, vd := range a {
+		if matchDecl(vd.Decl, f) {
+			a[w] = vd
+			w++
+		}
+	}
+	return a[0:w]
+}
+
+func filterFuncs(a []*Func, f Filter) []*Func {
+	w := 0
+	for _, fd := range a {
+		if f(fd.Name) {
+			a[w] = fd
+			w++
+		}
+	}
+	return a[0:w]
+}
+
+func filterTypes(a []*Type, f Filter) []*Type {
+	w := 0
+	for _, td := range a {
+		n := 0 // number of matches
+		if matchDecl(td.Decl, f) {
+			n = 1
+		} else {
+			// type name doesn't match, but we may have matching consts, vars, factories or methods
+			td.Consts = filterValues(td.Consts, f)
+			td.Vars = filterValues(td.Vars, f)
+			td.Funcs = filterFuncs(td.Funcs, f)
+			td.Methods = filterFuncs(td.Methods, f)
+			n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
+		}
+		if n > 0 {
+			a[w] = td
+			w++
+		}
+	}
+	return a[0:w]
+}
+
+// Filter eliminates documentation for names that don't pass through the filter f.
+// TODO(gri): Recognize "Type.Method" as a name.
+func (p *Package) Filter(f Filter) {
+	p.Consts = filterValues(p.Consts, f)
+	p.Vars = filterValues(p.Vars, f)
+	p.Types = filterTypes(p.Types, f)
+	p.Funcs = filterFuncs(p.Funcs, f)
+	p.Doc = "" // don't show top-level package doc
+}
diff --git a/internal/backport/go/doc/reader.go b/internal/backport/go/doc/reader.go
new file mode 100644
index 0000000..45f1280
--- /dev/null
+++ b/internal/backport/go/doc/reader.go
@@ -0,0 +1,953 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// ----------------------------------------------------------------------------
+// function/method sets
+//
+// Internally, we treat functions like methods and collect them in method sets.
+
+// A methodSet describes a set of methods. Entries where Decl == nil are conflict
+// entries (more than one method with the same name at the same embedding level).
+type methodSet map[string]*Func
+
+// recvString returns a string representation of recv of the form "T", "*T",
+// "T[A, ...]", "*T[A, ...]" or "BADRECV" (if not a proper receiver type).
+func recvString(recv ast.Expr) string {
+	switch t := recv.(type) {
+	case *ast.Ident:
+		return t.Name
+	case *ast.StarExpr:
+		return "*" + recvString(t.X)
+	case *ast.IndexExpr:
+		// Generic type with one parameter.
+		return fmt.Sprintf("%s[%s]", recvString(t.X), recvParam(t.Index))
+	case *ast.IndexListExpr:
+		// Generic type with multiple parameters.
+		if len(t.Indices) > 0 {
+			var b strings.Builder
+			b.WriteString(recvString(t.X))
+			b.WriteByte('[')
+			b.WriteString(recvParam(t.Indices[0]))
+			for _, e := range t.Indices[1:] {
+				b.WriteString(", ")
+				b.WriteString(recvParam(e))
+			}
+			b.WriteByte(']')
+			return b.String()
+		}
+	}
+	// Unrecognized receiver form (or an empty type-argument list):
+	// the AST is incorrect; report a placeholder name.
+	return "BADRECV"
+}
+
+// recvParam returns the name of the type-parameter expression p,
+// or "BADPARAM" if p is not a plain identifier.
+func recvParam(p ast.Expr) string {
+	if id, ok := p.(*ast.Ident); ok {
+		return id.Name
+	}
+	return "BADPARAM"
+}
+
+// set creates the corresponding Func for f and adds it to mset.
+// If there are multiple f's with the same name, set keeps the first
+// one with documentation; conflicts are ignored. The boolean
+// specifies whether to leave the AST untouched.
+func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) {
+	name := f.Name.Name
+	if g := mset[name]; g != nil && g.Doc != "" {
+		// A function with the same name has already been registered;
+		// since it has documentation, assume f is simply another
+		// implementation and ignore it. This does not happen if the
+		// caller is using go/build.ScanDir to determine the list of
+		// files implementing a package.
+		return
+	}
+	// function doesn't exist or has no documentation; use f
+	recv := ""
+	if f.Recv != nil {
+		var typ ast.Expr
+		// be careful in case of incorrect ASTs
+		if list := f.Recv.List; len(list) == 1 {
+			typ = list[0].Type
+		}
+		// typ may still be nil here; recvString then yields "BADRECV".
+		recv = recvString(typ)
+	}
+	mset[name] = &Func{
+		Doc:  f.Doc.Text(),
+		Name: name,
+		Decl: f,
+		Recv: recv,
+		Orig: recv,
+	}
+	if !preserveAST {
+		f.Doc = nil // doc consumed - remove from AST
+	}
+}
+
+// add adds method m to the method set; m is ignored if the method set
+// already contains a method with the same name at the same or a higher
+// level than m. (A lower level means a shallower embedding, which
+// dominates deeper embeddings, mirroring Go's method promotion rules.)
+func (mset methodSet) add(m *Func) {
+	old := mset[m.Name]
+	if old == nil || m.Level < old.Level {
+		mset[m.Name] = m
+		return
+	}
+	if m.Level == old.Level {
+		// conflict - mark it using a method with nil Decl
+		mset[m.Name] = &Func{
+			Name:  m.Name,
+			Level: m.Level,
+		}
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Named types
+
+// baseTypeName returns the name of the base type of x (or "")
+// and whether the type is imported or not.
+func baseTypeName(x ast.Expr) (name string, imported bool) {
+	switch t := x.(type) {
+	case *ast.Ident:
+		return t.Name, false
+	case *ast.IndexExpr:
+		// instantiated generic type: strip the type argument
+		return baseTypeName(t.X)
+	case *ast.IndexListExpr:
+		// instantiated generic type: strip the type arguments
+		return baseTypeName(t.X)
+	case *ast.SelectorExpr:
+		if _, ok := t.X.(*ast.Ident); ok {
+			// only possible for qualified type names;
+			// assume type is imported
+			return t.Sel.Name, true
+		}
+	case *ast.ParenExpr:
+		return baseTypeName(t.X)
+	case *ast.StarExpr:
+		return baseTypeName(t.X)
+	}
+	// anything else (function types, composite literals, etc.) has no base name
+	return "", false
+}
+
+// An embeddedSet describes a set of embedded types.
+type embeddedSet map[*namedType]bool
+
+// A namedType represents a named unqualified (package local, or possibly
+// predeclared) type. The namedType for a type name is always found via
+// reader.lookupType.
+type namedType struct {
+	doc  string       // doc comment for type
+	name string       // type name
+	decl *ast.GenDecl // nil if declaration hasn't been seen yet
+
+	isEmbedded bool        // true if this type is embedded
+	isStruct   bool        // true if this type is a struct
+	embedded   embeddedSet // directly embedded types; mapped value is true if embedded as a pointer
+
+	// associated declarations
+	values  []*Value // consts and vars
+	funcs   methodSet
+	methods methodSet
+}
+
+// ----------------------------------------------------------------------------
+// AST reader
+
+// reader accumulates documentation for a single package.
+// It modifies the AST: Comments (declaration documentation)
+// that have been collected by the reader are set to nil
+// in the respective AST nodes so that they are not printed
+// twice (once when printing the documentation and once when
+// printing the corresponding AST node).
+type reader struct {
+	mode Mode
+
+	// package properties
+	doc       string // package documentation, if any
+	filenames []string
+	notes     map[string][]*Note
+
+	// declarations
+	imports   map[string]int
+	hasDotImp bool     // if set, package contains a dot import
+	values    []*Value // consts and vars
+	order     int      // sort order of const and var declarations (when we can't use a name)
+	types     map[string]*namedType
+	funcs     methodSet // package-level (non-method) functions, keyed by name
+
+	// support for package-local shadowing of predeclared types
+	shadowedPredecl map[string]bool
+	fixmap          map[string][]*ast.InterfaceType
+}
+
+// isVisible reports whether name should be included in the documentation:
+// either the name is exported, or the AllDecls mode bit requests all names.
+func (r *reader) isVisible(name string) bool {
+	return r.mode&AllDecls != 0 || token.IsExported(name)
+}
+
+// lookupType returns the base type with the given name.
+// If the base type has not been encountered yet, a new
+// type with the given name but no associated declaration
+// is added to the type map.
+func (r *reader) lookupType(name string) *namedType {
+	if name == "" || name == "_" {
+		return nil // no type docs for anonymous types
+	}
+	if typ, found := r.types[name]; found {
+		return typ
+	}
+	// type not found - add one without declaration
+	// (the declaration, if any, is filled in later by readType)
+	typ := &namedType{
+		name:     name,
+		embedded: make(embeddedSet),
+		funcs:    make(methodSet),
+		methods:  make(methodSet),
+	}
+	r.types[name] = typ
+	return typ
+}
+
+// recordAnonymousField registers fieldType as the type of an
+// anonymous field in the parent type. If the field is imported
+// (qualified name) or the parent is nil, the field is ignored.
+// The function returns the field name.
+func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) {
+	fname, imp := baseTypeName(fieldType)
+	if parent == nil || imp {
+		return
+	}
+	if ftype := r.lookupType(fname); ftype != nil {
+		ftype.isEmbedded = true
+		_, ptr := fieldType.(*ast.StarExpr)
+		parent.embedded[ftype] = ptr
+	}
+	return
+}
+
+// readDoc adds comment to the package documentation.
+func (r *reader) readDoc(comment *ast.CommentGroup) {
+	// By convention there should be only one package comment
+	// but collect all of them if there are more than one.
+	text := comment.Text()
+	if r.doc == "" {
+		r.doc = text
+		return
+	}
+	r.doc += "\n" + text
+}
+
+// remember records an interface type that embeds the predeclared type
+// named predecl, so the embedding can be removed later if predecl turns
+// out to be shadowed by a package-local declaration (see computeMethodSets).
+func (r *reader) remember(predecl string, typ *ast.InterfaceType) {
+	if r.fixmap == nil {
+		r.fixmap = make(map[string][]*ast.InterfaceType)
+	}
+	r.fixmap[predecl] = append(r.fixmap[predecl], typ)
+}
+
+// specNames returns the names declared by the given value specs,
+// in declaration order.
+func specNames(specs []ast.Spec) []string {
+	names := make([]string, 0, len(specs)) // reasonable estimate
+	for _, s := range specs {
+		// s guaranteed to be an *ast.ValueSpec by readValue
+		for _, ident := range s.(*ast.ValueSpec).Names {
+			names = append(names, ident.Name)
+		}
+	}
+	return names
+}
+
+// readValue processes a const or var declaration.
+func (r *reader) readValue(decl *ast.GenDecl) {
+	// determine if decl should be associated with a type
+	// Heuristic: For each typed entry, determine the type name, if any.
+	//            If there is exactly one type name that is sufficiently
+	//            frequent, associate the decl with the respective type.
+	domName := "" // dominant (single) type name seen so far
+	domFreq := 0  // number of entries carrying domName
+	prev := ""
+	n := 0
+	for _, spec := range decl.Specs {
+		s, ok := spec.(*ast.ValueSpec)
+		if !ok {
+			continue // should not happen, but be conservative
+		}
+		name := ""
+		switch {
+		case s.Type != nil:
+			// a type is present; determine its name
+			if n, imp := baseTypeName(s.Type); !imp {
+				name = n
+			}
+		case decl.Tok == token.CONST && len(s.Values) == 0:
+			// no type or value is present but we have a constant declaration;
+			// use the previous type name (possibly the empty string)
+			name = prev
+		}
+		if name != "" {
+			// entry has a named type
+			if domName != "" && domName != name {
+				// more than one type name - do not associate
+				// with any type
+				domName = ""
+				break
+			}
+			domName = name
+			domFreq++
+		}
+		prev = name
+		n++
+	}
+
+	// nothing to do w/o a legal declaration
+	if n == 0 {
+		return
+	}
+
+	// determine values list with which to associate the Value for this decl
+	values := &r.values
+	const threshold = 0.75 // fraction of specs that must share the dominant type name
+	if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) {
+		// typed entries are sufficiently frequent
+		if typ := r.lookupType(domName); typ != nil {
+			values = &typ.values // associate with that type
+		}
+	}
+
+	*values = append(*values, &Value{
+		Doc:   decl.Doc.Text(),
+		Names: specNames(decl.Specs),
+		Decl:  decl,
+		order: r.order,
+	})
+	if r.mode&PreserveAST == 0 {
+		decl.Doc = nil // doc consumed - remove from AST
+	}
+	// Note: It's important that the order used here is global because the cleanupTypes
+	// methods may move values associated with types back into the global list. If the
+	// order is list-specific, sorting is not deterministic because the same order value
+	// may appear multiple times (was bug, found when fixing #16153).
+	r.order++
+}
+
+// fields returns a struct's fields or an interface's methods.
+// For any other type expression, list is nil and isStruct is false.
+func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) {
+	var fields *ast.FieldList
+	switch t := typ.(type) {
+	case *ast.StructType:
+		fields = t.Fields
+		isStruct = true
+	case *ast.InterfaceType:
+		fields = t.Methods
+	}
+	if fields != nil {
+		list = fields.List
+	}
+	return
+}
+
+// readType processes a type declaration.
+func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
+	typ := r.lookupType(spec.Name.Name)
+	if typ == nil {
+		return // no name or blank name - ignore the type
+	}
+
+	// A type should be added at most once, so typ.decl
+	// should be nil - if it is not, simply overwrite it.
+	typ.decl = decl
+
+	// compute documentation
+	doc := spec.Doc
+	if doc == nil {
+		// no doc associated with the spec, use the declaration doc, if any
+		doc = decl.Doc
+	}
+	if r.mode&PreserveAST == 0 {
+		spec.Doc = nil // doc consumed - remove from AST
+		decl.Doc = nil // doc consumed - remove from AST
+	}
+	typ.doc = doc.Text()
+
+	// record anonymous fields (they may contribute methods)
+	// (some fields may have been recorded already when filtering
+	// exports, but that's ok)
+	var list []*ast.Field
+	list, typ.isStruct = fields(spec.Type)
+	for _, field := range list {
+		if len(field.Names) == 0 {
+			r.recordAnonymousField(typ, field.Type)
+		}
+	}
+}
+
+// isPredeclared reports whether n denotes a predeclared type
+// that is not shadowed by a package-local declaration.
+func (r *reader) isPredeclared(n string) bool {
+	return predeclaredTypes[n] && r.types[n] == nil
+}
+
+// readFunc processes a func or method declaration.
+func (r *reader) readFunc(fun *ast.FuncDecl) {
+	// strip function body if requested.
+	if r.mode&PreserveAST == 0 {
+		fun.Body = nil
+	}
+
+	// associate methods with the receiver type, if any
+	if fun.Recv != nil {
+		// method
+		if len(fun.Recv.List) == 0 {
+			// should not happen (incorrect AST); (See issue 17788)
+			// don't show this method
+			return
+		}
+		recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type)
+		if imp {
+			// should not happen (incorrect AST);
+			// don't show this method
+			return
+		}
+		if typ := r.lookupType(recvTypeName); typ != nil {
+			typ.methods.set(fun, r.mode&PreserveAST != 0)
+		}
+		// otherwise ignore the method
+		// TODO(gri): There may be exported methods of non-exported types
+		// that can be called because of exported values (consts, vars, or
+		// function results) of that type. Could determine if that is the
+		// case and then show those methods in an appropriate section.
+		return
+	}
+
+	// Associate factory functions with the first visible result type, as long as
+	// others are predeclared types.
+	if fun.Type.Results.NumFields() >= 1 {
+		var typ *namedType // type to associate the function with
+		numResultTypes := 0
+		for _, res := range fun.Type.Results.List {
+			factoryType := res.Type
+			if t, ok := factoryType.(*ast.ArrayType); ok {
+				// We consider functions that return slices or arrays of type
+				// T (or pointers to T) as factory functions of T.
+				factoryType = t.Elt
+			}
+			if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) && !r.isPredeclared(n) {
+				if lookupTypeParam(n, fun.Type.TypeParams) != nil {
+					// Issue #49477: don't associate fun with its type parameter result.
+					// A type parameter is not a defined type.
+					continue
+				}
+				if t := r.lookupType(n); t != nil {
+					typ = t
+					numResultTypes++
+					if numResultTypes > 1 {
+						// more than one named result type: ambiguous, stop counting
+						break
+					}
+				}
+			}
+		}
+		// If there is exactly one result type,
+		// associate the function with that type.
+		if numResultTypes == 1 {
+			typ.funcs.set(fun, r.mode&PreserveAST != 0)
+			return
+		}
+	}
+
+	// just an ordinary function
+	r.funcs.set(fun, r.mode&PreserveAST != 0)
+}
+
+// lookupTypeParam searches for type parameters named name within the tparams
+// field list, returning the relevant identifier if found, or nil if not.
+func lookupTypeParam(name string, tparams *ast.FieldList) *ast.Ident {
+	if tparams == nil {
+		return nil // the declaration has no type parameters at all
+	}
+	for _, field := range tparams.List {
+		for _, id := range field.Names {
+			if id.Name == name {
+				return id
+			}
+		}
+	}
+	return nil
+}
+
+var (
+	noteMarker    = `([A-Z][A-Z]+)\(([^)]+)\):?`                    // MARKER(uid), MARKER at least 2 chars, uid at least 1 char
+	noteMarkerRx  = regexp.MustCompile(`^[ \t]*` + noteMarker)      // MARKER(uid) at text start
+	noteCommentRx = regexp.MustCompile(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start
+)
+
+// readNote collects a single note from a sequence of comments.
+func (r *reader) readNote(list []*ast.Comment) {
+	text := (&ast.CommentGroup{List: list}).Text()
+	if m := noteMarkerRx.FindStringSubmatchIndex(text); m != nil {
+		// The note body starts after the marker.
+		// We remove any formatting so that we don't
+		// get spurious line breaks/indentation when
+		// showing the TODO body.
+		// m[2]:m[3] is the MARKER submatch, m[4]:m[5] the uid submatch,
+		// and m[1] is the end of the whole match (start of the body).
+		body := clean(text[m[1]:], keepNL)
+		if body != "" {
+			marker := text[m[2]:m[3]]
+			r.notes[marker] = append(r.notes[marker], &Note{
+				Pos:  list[0].Pos(),
+				End:  list[len(list)-1].End(),
+				UID:  text[m[4]:m[5]],
+				Body: body,
+			})
+		}
+	}
+}
+
+// readNotes extracts notes from comments.
+// A note must start at the beginning of a comment with "MARKER(uid):"
+// and is followed by the note body (e.g., "// BUG(gri): fix this").
+// The note ends at the end of the comment group or at the start of
+// another note in the same comment group, whichever comes first.
+func (r *reader) readNotes(comments []*ast.CommentGroup) {
+	for _, group := range comments {
+		i := -1 // comment index of most recent note start, valid if >= 0
+		list := group.List
+		for j, c := range list {
+			if noteCommentRx.MatchString(c.Text) {
+				if i >= 0 {
+					// a new marker ends the previous note
+					r.readNote(list[i:j])
+				}
+				i = j
+			}
+		}
+		if i >= 0 {
+			// flush the trailing note, if any
+			r.readNote(list[i:])
+		}
+	}
+}
+
+// readFile adds the AST for a source file to the reader.
+func (r *reader) readFile(src *ast.File) {
+	// add package documentation
+	if src.Doc != nil {
+		r.readDoc(src.Doc)
+		if r.mode&PreserveAST == 0 {
+			src.Doc = nil // doc consumed - remove from AST
+		}
+	}
+
+	// add all declarations but for functions which are processed in a separate pass
+	for _, decl := range src.Decls {
+		switch d := decl.(type) {
+		case *ast.GenDecl:
+			switch d.Tok {
+			case token.IMPORT:
+				// imports are handled individually
+				for _, spec := range d.Specs {
+					if s, ok := spec.(*ast.ImportSpec); ok {
+						if import_, err := strconv.Unquote(s.Path.Value); err == nil {
+							r.imports[import_] = 1
+							if s.Name != nil && s.Name.Name == "." {
+								r.hasDotImp = true
+							}
+						}
+					}
+				}
+			case token.CONST, token.VAR:
+				// constants and variables are always handled as a group
+				r.readValue(d)
+			case token.TYPE:
+				// types are handled individually
+				if len(d.Specs) == 1 && !d.Lparen.IsValid() {
+					// common case: single declaration w/o parentheses
+					// (if a single declaration is parenthesized,
+					// create a new fake declaration below, so that
+					// go/doc type declarations always appear w/o
+					// parentheses)
+					if s, ok := d.Specs[0].(*ast.TypeSpec); ok {
+						r.readType(d, s)
+					}
+					break
+				}
+				for _, spec := range d.Specs {
+					if s, ok := spec.(*ast.TypeSpec); ok {
+						// use an individual (possibly fake) declaration
+						// for each type; this also ensures that each type
+						// gets to (re-)use the declaration documentation
+						// if there's none associated with the spec itself
+						fake := &ast.GenDecl{
+							Doc: d.Doc,
+							// don't use the existing TokPos because it
+							// will lead to the wrong selection range for
+							// the fake declaration if there are more
+							// than one type in the group (this affects
+							// src/cmd/godoc/godoc.go's posLink_urlFunc)
+							TokPos: s.Pos(),
+							Tok:    token.TYPE,
+							Specs:  []ast.Spec{s},
+						}
+						r.readType(fake, s)
+					}
+				}
+			}
+			// token.FUNC declarations are handled by readFunc in a later pass
+		}
+	}
+
+	// collect MARKER(...): annotations
+	r.readNotes(src.Comments)
+	if r.mode&PreserveAST == 0 {
+		src.Comments = nil // consumed unassociated comments - remove from AST
+	}
+}
+
+// readPackage reads all files of pkg, collecting documentation, values,
+// types, functions, and notes. Files are processed in sorted filename
+// order so the result is independent of map iteration order.
+func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
+	// initialize reader
+	r.filenames = make([]string, len(pkg.Files))
+	r.imports = make(map[string]int)
+	r.mode = mode
+	r.types = make(map[string]*namedType)
+	r.funcs = make(methodSet)
+	r.notes = make(map[string][]*Note)
+
+	// sort package files before reading them so that the
+	// result does not depend on map iteration order
+	i := 0
+	for filename := range pkg.Files {
+		r.filenames[i] = filename
+		i++
+	}
+	sort.Strings(r.filenames)
+
+	// process files in sorted order
+	for _, filename := range r.filenames {
+		f := pkg.Files[filename]
+		if mode&AllDecls == 0 {
+			r.fileExports(f)
+		}
+		r.readFile(f)
+	}
+
+	// process functions now that we have better type information
+	// (file order doesn't matter here: functions are keyed by name)
+	for _, f := range pkg.Files {
+		for _, decl := range f.Decls {
+			if d, ok := decl.(*ast.FuncDecl); ok {
+				r.readFunc(d)
+			}
+		}
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// customizeRecv returns a copy of embedded method f with its receiver
+// type rewritten to recvTypeName and its embedding level set to level.
+// The new receiver is a pointer (*recvTypeName) only if the original
+// receiver was a pointer and the embedding itself was by value.
+// All AST nodes on the path to the receiver are copied, so the
+// original declaration is left untouched.
+func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
+	if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
+		return f // shouldn't happen, but be safe
+	}
+
+	// copy existing receiver field and set new type
+	newField := *f.Decl.Recv.List[0]
+	origPos := newField.Type.Pos()
+	_, origRecvIsPtr := newField.Type.(*ast.StarExpr)
+	newIdent := &ast.Ident{NamePos: origPos, Name: recvTypeName}
+	var typ ast.Expr = newIdent
+	if !embeddedIsPtr && origRecvIsPtr {
+		newIdent.NamePos++ // '*' is one character
+		typ = &ast.StarExpr{Star: origPos, X: newIdent}
+	}
+	newField.Type = typ
+
+	// copy existing receiver field list and set new receiver field
+	newFieldList := *f.Decl.Recv
+	newFieldList.List = []*ast.Field{&newField}
+
+	// copy existing function declaration and set new receiver field list
+	newFuncDecl := *f.Decl
+	newFuncDecl.Recv = &newFieldList
+
+	// copy existing function documentation and set new declaration
+	newF := *f
+	newF.Decl = &newFuncDecl
+	newF.Recv = recvString(typ)
+	// the Orig field never changes
+	newF.Level = level
+
+	return &newF
+}
+
+// collectEmbeddedMethods collects the embedded methods of typ in mset.
+// The visited set guards against infinite recursion on (incorrect)
+// cyclic embedding graphs.
+func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) {
+	visited[typ] = true
+	for embedded, isPtr := range typ.embedded {
+		// Once an embedded type is embedded as a pointer type
+		// all embedded types in those types are treated like
+		// pointer types for the purpose of the receiver type
+		// computation; i.e., embeddedIsPtr is sticky for this
+		// embedding hierarchy.
+		thisEmbeddedIsPtr := embeddedIsPtr || isPtr
+		for _, m := range embedded.methods {
+			// only top-level methods are embedded
+			if m.Level == 0 {
+				mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level))
+			}
+		}
+		if !visited[embedded] {
+			r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited)
+		}
+	}
+	delete(visited, typ)
+}
+
+// computeMethodSets determines the actual method sets for each type encountered.
+func (r *reader) computeMethodSets() {
+	for _, t := range r.types {
+		// collect embedded methods for t
+		if t.isStruct {
+			// struct
+			r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet))
+		} else {
+			// interface
+			// TODO(gri) fix this
+		}
+	}
+
+	// For any predeclared names that are declared locally, don't treat them as
+	// exported fields anymore.
+	// (r.fixmap was populated by remember; r.shadowedPredecl by the exports filter.)
+	for predecl := range r.shadowedPredecl {
+		for _, ityp := range r.fixmap[predecl] {
+			removeAnonymousField(predecl, ityp)
+		}
+	}
+}
+
+// cleanupTypes removes the association of functions and methods with
+// types that have no declaration. Instead, these functions and methods
+// are shown at the package level. It also removes types with missing
+// declarations or which are not visible.
+func (r *reader) cleanupTypes() {
+	for _, t := range r.types {
+		visible := r.isVisible(t.name)
+		predeclared := predeclaredTypes[t.name]
+
+		if t.decl == nil && (predeclared || visible && (t.isEmbedded || r.hasDotImp)) {
+			// t.name is a predeclared type (and was not redeclared in this package),
+			// or it was embedded somewhere but its declaration is missing (because
+			// the AST is incomplete), or we have a dot-import (and all bets are off):
+			// move any associated values, funcs, and methods back to the top-level so
+			// that they are not lost.
+			// 1) move values
+			r.values = append(r.values, t.values...)
+			// 2) move factory functions
+			for name, f := range t.funcs {
+				// in a correct AST, package-level function names
+				// are all different - no need to check for conflicts
+				r.funcs[name] = f
+			}
+			// 3) move methods
+			if !predeclared {
+				for name, m := range t.methods {
+					// don't overwrite functions with the same name - drop them
+					if _, found := r.funcs[name]; !found {
+						r.funcs[name] = m
+					}
+				}
+			}
+		}
+		// remove types w/o declaration or which are not visible
+		// (deleting from a map while ranging over it is safe in Go)
+		if t.decl == nil || !visible {
+			delete(r.types, t.name)
+		}
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Sorting
+
+// data adapts caller-supplied closures to sort.Interface.
+type data struct {
+	n    int
+	swap func(i, j int)
+	less func(i, j int) bool
+}
+
+func (d *data) Len() int           { return d.n }
+func (d *data) Swap(i, j int)      { d.swap(i, j) }
+func (d *data) Less(i, j int) bool { return d.less(i, j) }
+
+// sortBy is a helper function for sorting
+func sortBy(less func(i, j int) bool, swap func(i, j int), n int) {
+	sort.Sort(&data{n, swap, less})
+}
+
+// sortedKeys returns the keys of m as a sorted slice.
+func sortedKeys(m map[string]int) []string {
+	list := make([]string, len(m))
+	i := 0
+	for key := range m {
+		list[i] = key
+		i++
+	}
+	sort.Strings(list)
+	return list
+}
+
+// sortingName returns the name to use when sorting d into place.
+// Only single-spec declarations sort by name; grouped declarations
+// keep their declaration order (empty sorting name).
+func sortingName(d *ast.GenDecl) string {
+	if len(d.Specs) == 1 {
+		if s, ok := d.Specs[0].(*ast.ValueSpec); ok {
+			return s.Names[0].Name
+		}
+	}
+	return ""
+}
+
+// sortedValues returns the values in m declared with token tok
+// (const or var), sorted primarily by sorting name and secondarily
+// by declaration order.
+func sortedValues(m []*Value, tok token.Token) []*Value {
+	list := make([]*Value, len(m)) // big enough in any case
+	i := 0
+	for _, val := range m {
+		if val.Decl.Tok == tok {
+			list[i] = val
+			i++
+		}
+	}
+	list = list[0:i]
+
+	sortBy(
+		func(i, j int) bool {
+			if ni, nj := sortingName(list[i].Decl), sortingName(list[j].Decl); ni != nj {
+				return ni < nj
+			}
+			return list[i].order < list[j].order
+		},
+		func(i, j int) { list[i], list[j] = list[j], list[i] },
+		len(list),
+	)
+
+	return list
+}
+
+// sortedTypes converts the internal namedType map into the exported
+// []*Type form, sorted by type name, with each type's associated
+// consts, vars, funcs, and methods sorted as well.
+func sortedTypes(m map[string]*namedType, allMethods bool) []*Type {
+	list := make([]*Type, len(m))
+	i := 0
+	for _, t := range m {
+		list[i] = &Type{
+			Doc:     t.doc,
+			Name:    t.name,
+			Decl:    t.decl,
+			Consts:  sortedValues(t.values, token.CONST),
+			Vars:    sortedValues(t.values, token.VAR),
+			Funcs:   sortedFuncs(t.funcs, true),
+			Methods: sortedFuncs(t.methods, allMethods),
+		}
+		i++
+	}
+
+	sortBy(
+		func(i, j int) bool { return list[i].Name < list[j].Name },
+		func(i, j int) { list[i], list[j] = list[j], list[i] },
+		len(list),
+	)
+
+	return list
+}
+
+// removeStar strips a single leading '*' from s, if present.
+func removeStar(s string) string {
+	if len(s) > 0 && s[0] == '*' {
+		return s[1:]
+	}
+	return s
+}
+
+// sortedFuncs returns the methods in m as a slice sorted by name.
+// Conflict entries (nil Decl) are always dropped; embedded methods
+// are dropped unless allMethods is set or the original receiver type
+// is unexported.
+func sortedFuncs(m methodSet, allMethods bool) []*Func {
+	list := make([]*Func, len(m))
+	i := 0
+	for _, m := range m {
+		// determine which methods to include
+		switch {
+		case m.Decl == nil:
+			// exclude conflict entry
+		case allMethods, m.Level == 0, !token.IsExported(removeStar(m.Orig)):
+			// forced inclusion, method not embedded, or method
+			// embedded but original receiver type not exported
+			list[i] = m
+			i++
+		}
+	}
+	list = list[0:i]
+	sortBy(
+		func(i, j int) bool { return list[i].Name < list[j].Name },
+		func(i, j int) { list[i], list[j] = list[j], list[i] },
+		len(list),
+	)
+	return list
+}
+
+// noteBodies returns a list of note body strings given a list of notes.
+// This is only used to populate the deprecated Package.Bugs field.
+// (An empty notes slice yields a nil result.)
+func noteBodies(notes []*Note) []string {
+	var list []string
+	for _, n := range notes {
+		list = append(list, n.Body)
+	}
+	return list
+}
+
+// ----------------------------------------------------------------------------
+// Predeclared identifiers
+
+// IsPredeclared reports whether s is a predeclared identifier.
+func IsPredeclared(s string) bool {
+	return predeclaredTypes[s] || predeclaredFuncs[s] || predeclaredConstants[s]
+}
+
+// predeclaredTypes lists the predeclared type names, including the
+// Go 1.18 additions "any" and "comparable" (this backport must
+// recognize Go 1.18 source).
+var predeclaredTypes = map[string]bool{
+	"any":        true,
+	"bool":       true,
+	"byte":       true,
+	"comparable": true,
+	"complex64":  true,
+	"complex128": true,
+	"error":      true,
+	"float32":    true,
+	"float64":    true,
+	"int":        true,
+	"int8":       true,
+	"int16":      true,
+	"int32":      true,
+	"int64":      true,
+	"rune":       true,
+	"string":     true,
+	"uint":       true,
+	"uint8":      true,
+	"uint16":     true,
+	"uint32":     true,
+	"uint64":     true,
+	"uintptr":    true,
+}
+
+// predeclaredFuncs lists the predeclared (built-in) function names.
+var predeclaredFuncs = map[string]bool{
+	"append":  true,
+	"cap":     true,
+	"close":   true,
+	"complex": true,
+	"copy":    true,
+	"delete":  true,
+	"imag":    true,
+	"len":     true,
+	"make":    true,
+	"new":     true,
+	"panic":   true,
+	"print":   true,
+	"println": true,
+	"real":    true,
+	"recover": true,
+}
+
+// predeclaredConstants lists the predeclared constant names (and nil).
+var predeclaredConstants = map[string]bool{
+	"false": true,
+	"iota":  true,
+	"nil":   true,
+	"true":  true,
+}
diff --git a/internal/backport/go/doc/synopsis.go b/internal/backport/go/doc/synopsis.go
new file mode 100644
index 0000000..ca607cc
--- /dev/null
+++ b/internal/backport/go/doc/synopsis.go
@@ -0,0 +1,81 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+	"strings"
+	"unicode"
+)
+
+// firstSentenceLen returns the length of the first sentence in s.
+// The sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter.
+func firstSentenceLen(s string) int {
+	// ppp, pp, p hold the three runes immediately preceding q.
+	var ppp, pp, p rune
+	for i, q := range s {
+		if q == '\n' || q == '\r' || q == '\t' {
+			q = ' '
+		}
+		if q == ' ' && p == '.' && (!unicode.IsUpper(pp) || unicode.IsUpper(ppp)) {
+			return i
+		}
+		// An ideographic full stop (U+3002) or full-width period ends
+		// the sentence immediately, without requiring a trailing space.
+		if p == '。' || p == '.' {
+			return i
+		}
+		ppp, pp, p = pp, p, q
+	}
+	return len(s)
+}
+
+const (
+	keepNL = 1 << iota // flag for clean: preserve newline characters
+)
+
+// clean replaces each sequence of space, \n, \r, or \t characters
+// with a single space and removes any trailing and leading spaces.
+// If the keepNL flag is set, newline characters are passed through
+// instead of being changed to spaces.
+func clean(s string, flags int) string {
+	var b []byte
+	p := byte(' ')
+	for i := 0; i < len(s); i++ {
+		q := s[i]
+		// Note: && binds tighter than ||, so keepNL only exempts '\n';
+		// '\r' and '\t' are always replaced by a space.
+		if (flags&keepNL) == 0 && q == '\n' || q == '\r' || q == '\t' {
+			q = ' '
+		}
+		if q != ' ' || p != ' ' {
+			b = append(b, q)
+			p = q
+		}
+	}
+	// remove trailing blank, if any
+	if n := len(b); n > 0 && p == ' ' {
+		b = b[0 : n-1]
+	}
+	return string(b)
+}
+
+// Synopsis returns a cleaned version of the first sentence in s.
+// That sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter. The result string
+// has no \n, \r, or \t characters and uses only single spaces between
+// words. If s starts with any of the IllegalPrefixes, the result
+// is the empty string.
+func Synopsis(s string) string {
+	s = clean(s[0:firstSentenceLen(s)], 0)
+	for _, prefix := range IllegalPrefixes {
+		if strings.HasPrefix(strings.ToLower(s), prefix) {
+			return ""
+		}
+	}
+	// convertQuotes is defined elsewhere in this package; presumably it
+	// normalizes ``/'' quote pairs (see the synopsis tests) — confirm there.
+	s = convertQuotes(s)
+	return s
+}
+
+// IllegalPrefixes lists lower-case prefixes that identify a comment as
+// boilerplate (e.g. a copyright notice) rather than a package synopsis.
+var IllegalPrefixes = []string{
+	"copyright",
+	"all rights",
+	"author",
+}
diff --git a/internal/backport/go/doc/synopsis_test.go b/internal/backport/go/doc/synopsis_test.go
new file mode 100644
index 0000000..3f443dc
--- /dev/null
+++ b/internal/backport/go/doc/synopsis_test.go
@@ -0,0 +1,52 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "testing"
+
+var tests = []struct {
+	txt string // input text
+	fsl int    // expected firstSentenceLen(txt)
+	syn string // expected Synopsis(txt)
+}{
+	{"", 0, ""},
+	{"foo", 3, "foo"},
+	{"foo.", 4, "foo."},
+	{"foo.bar", 7, "foo.bar"},
+	{"  foo.  ", 6, "foo."},
+	{"  foo\t  bar.\n", 12, "foo bar."},
+	// NOTE(review): duplicate of the previous case; harmless but redundant.
+	{"  foo\t  bar.\n", 12, "foo bar."},
+	{"a  b\n\nc\r\rd\t\t", 12, "a b c d"},
+	{"a  b\n\nc\r\rd\t\t  . BLA", 15, "a b c d ."},
+	{"Package poems by T.S.Eliot. To rhyme...", 27, "Package poems by T.S.Eliot."},
+	{"Package poems by T. S. Eliot. To rhyme...", 29, "Package poems by T. S. Eliot."},
+	{"foo implements the foo ABI. The foo ABI is...", 27, "foo implements the foo ABI."},
+	{"Package\nfoo. ..", 12, "Package foo."},
+	{"P . Q.", 3, "P ."},
+	{"P. Q.   ", 8, "P. Q."},
+	{"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."},
+	{"Package こんにちは 世界\n", 31, "Package こんにちは 世界"},
+	{"Package こんにちは。世界", 26, "Package こんにちは。"},
+	{"Package 안녕.世界", 17, "Package 안녕."},
+	{"Package foo does bar.", 21, "Package foo does bar."},
+	{"Copyright 2012 Google, Inc. Package foo does bar.", 27, ""},
+	{"All Rights reserved. Package foo does bar.", 20, ""},
+	{"All rights reserved. Package foo does bar.", 20, ""},
+	{"Authors: foo@bar.com. Package foo does bar.", 21, ""},
+	// ulquo/urquo are package-level quote constants defined elsewhere in this package.
+	{"typically invoked as ``go tool asm'',", 37, "typically invoked as " + ulquo + "go tool asm" + urquo + ","},
+}
+
+// TestSynopsis checks firstSentenceLen and Synopsis against the table above.
+func TestSynopsis(t *testing.T) {
+	for _, e := range tests {
+		fsl := firstSentenceLen(e.txt)
+		if fsl != e.fsl {
+			t.Errorf("got fsl = %d; want %d for %q\n", fsl, e.fsl, e.txt)
+		}
+		syn := Synopsis(e.txt)
+		if syn != e.syn {
+			t.Errorf("got syn = %q; want %q for %q\n", syn, e.syn, e.txt)
+		}
+	}
+}
diff --git a/internal/backport/go/doc/testdata/a.0.golden b/internal/backport/go/doc/testdata/a.0.golden
new file mode 100644
index 0000000..7e680b8
--- /dev/null
+++ b/internal/backport/go/doc/testdata/a.0.golden
@@ -0,0 +1,52 @@
+// comment 0  comment 1 
+PACKAGE a
+
+IMPORTPATH
+	testdata/a
+
+FILENAMES
+	testdata/a0.go
+	testdata/a1.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+	bug0
+
+	bug1
+
+
+BUGS
+BUG(uid)	bug0
+
+BUG(uid)	bug1
+
+
+NOTES
+NOTE(uid)	
+
+NOTE(foo)	1 of 4 - this is the first line of note 1
+	- note 1 continues on this 2nd line
+	- note 1 continues on this 3rd line
+
+NOTE(foo)	2 of 4
+
+NOTE(bar)	3 of 4
+
+NOTE(bar)	4 of 4
+	- this is the last line of note 4
+
+NOTE(bam)	This note which contains a (parenthesized) subphrase
+	 must appear in its entirety.
+
+NOTE(xxx)	The ':' after the marker and uid is optional.
+
+
+SECBUGS
+SECBUG(uid)	sec hole 0
+	need to fix asap
+
+
+TODOS
+TODO(uid)	todo0
+
+TODO(uid)	todo1
+
diff --git a/internal/backport/go/doc/testdata/a.1.golden b/internal/backport/go/doc/testdata/a.1.golden
new file mode 100644
index 0000000..7e680b8
--- /dev/null
+++ b/internal/backport/go/doc/testdata/a.1.golden
@@ -0,0 +1,52 @@
+// comment 0  comment 1 
+PACKAGE a
+
+IMPORTPATH
+	testdata/a
+
+FILENAMES
+	testdata/a0.go
+	testdata/a1.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+	bug0
+
+	bug1
+
+
+BUGS
+BUG(uid)	bug0
+
+BUG(uid)	bug1
+
+
+NOTES
+NOTE(uid)	
+
+NOTE(foo)	1 of 4 - this is the first line of note 1
+	- note 1 continues on this 2nd line
+	- note 1 continues on this 3rd line
+
+NOTE(foo)	2 of 4
+
+NOTE(bar)	3 of 4
+
+NOTE(bar)	4 of 4
+	- this is the last line of note 4
+
+NOTE(bam)	This note which contains a (parenthesized) subphrase
+	 must appear in its entirety.
+
+NOTE(xxx)	The ':' after the marker and uid is optional.
+
+
+SECBUGS
+SECBUG(uid)	sec hole 0
+	need to fix asap
+
+
+TODOS
+TODO(uid)	todo0
+
+TODO(uid)	todo1
+
diff --git a/internal/backport/go/doc/testdata/a.2.golden b/internal/backport/go/doc/testdata/a.2.golden
new file mode 100644
index 0000000..7e680b8
--- /dev/null
+++ b/internal/backport/go/doc/testdata/a.2.golden
@@ -0,0 +1,52 @@
+// comment 0  comment 1 
+PACKAGE a
+
+IMPORTPATH
+	testdata/a
+
+FILENAMES
+	testdata/a0.go
+	testdata/a1.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+	bug0
+
+	bug1
+
+
+BUGS
+BUG(uid)	bug0
+
+BUG(uid)	bug1
+
+
+NOTES
+NOTE(uid)	
+
+NOTE(foo)	1 of 4 - this is the first line of note 1
+	- note 1 continues on this 2nd line
+	- note 1 continues on this 3rd line
+
+NOTE(foo)	2 of 4
+
+NOTE(bar)	3 of 4
+
+NOTE(bar)	4 of 4
+	- this is the last line of note 4
+
+NOTE(bam)	This note which contains a (parenthesized) subphrase
+	 must appear in its entirety.
+
+NOTE(xxx)	The ':' after the marker and uid is optional.
+
+
+SECBUGS
+SECBUG(uid)	sec hole 0
+	need to fix asap
+
+
+TODOS
+TODO(uid)	todo0
+
+TODO(uid)	todo1
+
diff --git a/internal/backport/go/doc/testdata/a0.go b/internal/backport/go/doc/testdata/a0.go
new file mode 100644
index 0000000..2420c8a
--- /dev/null
+++ b/internal/backport/go/doc/testdata/a0.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comment 0
+package a
+
+//BUG(uid): bug0
+
+//TODO(uid): todo0
+
+// A note with some spaces after it, should be ignored (watch out for
+// emacs modes that remove trailing whitespace).
+//NOTE(uid):
+
+// SECBUG(uid): sec hole 0
+// need to fix asap
+
+// Multiple notes may be in the same comment group and should be
+// recognized individually. Notes may start in the middle of a
+// comment group as long as they start at the beginning of an
+// individual comment.
+//
+// NOTE(foo): 1 of 4 - this is the first line of note 1
+// - note 1 continues on this 2nd line
+// - note 1 continues on this 3rd line
+// NOTE(foo): 2 of 4
+// NOTE(bar): 3 of 4
+/* NOTE(bar): 4 of 4 */
+// - this is the last line of note 4
+//
+//
+
+// NOTE(bam): This note which contains a (parenthesized) subphrase
+//            must appear in its entirety.
+
+// NOTE(xxx) The ':' after the marker and uid is optional.
+
+// NOTE(): NO uid - should not show up.
+// NOTE()  NO uid - should not show up.
diff --git a/internal/backport/go/doc/testdata/a1.go b/internal/backport/go/doc/testdata/a1.go
new file mode 100644
index 0000000..9fad1e0
--- /dev/null
+++ b/internal/backport/go/doc/testdata/a1.go
@@ -0,0 +1,12 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comment 1
+package a
+
+//BUG(uid): bug1
+
+//TODO(uid): todo1
+
+//TODO(): ignored
diff --git a/internal/backport/go/doc/testdata/b.0.golden b/internal/backport/go/doc/testdata/b.0.golden
new file mode 100644
index 0000000..c06246a
--- /dev/null
+++ b/internal/backport/go/doc/testdata/b.0.golden
@@ -0,0 +1,74 @@
+// 
+PACKAGE b
+
+IMPORTPATH
+	testdata/b
+
+IMPORTS
+	a
+
+FILENAMES
+	testdata/b.go
+
+CONSTANTS
+	// 
+	const (
+		C1	notExported	= iota
+		C2
+	
+		C4
+		C5
+	)
+
+	// 
+	const C notExported = 0
+
+	// 
+	const Pi = 3.14	// Pi
+
+
+VARIABLES
+	// 
+	var (
+		U1, U2, U4, U5	notExported
+	
+		U7	notExported	= 7
+	)
+
+	// 
+	var MaxInt int	// MaxInt
+
+	// 
+	var V notExported
+
+	// 
+	var V1, V2, V4, V5 notExported
+
+
+FUNCTIONS
+	// Associated with comparable type if AllDecls is set. 
+	func ComparableFactory() comparable
+
+	// 
+	func F(x int) int
+
+	// 
+	func F1() notExported
+
+	// Always under the package functions list. 
+	func NotAFactory() int
+
+	// Associated with uint type if AllDecls is set. 
+	func UintFactory() uint
+
+
+TYPES
+	// 
+	type T struct{}	// T
+
+	// 
+	var V T	// v
+
+	// 
+	func (x *T) M()
+
diff --git a/internal/backport/go/doc/testdata/b.1.golden b/internal/backport/go/doc/testdata/b.1.golden
new file mode 100644
index 0000000..2b62c34
--- /dev/null
+++ b/internal/backport/go/doc/testdata/b.1.golden
@@ -0,0 +1,89 @@
+// 
+PACKAGE b
+
+IMPORTPATH
+	testdata/b
+
+IMPORTS
+	a
+
+FILENAMES
+	testdata/b.go
+
+CONSTANTS
+	// 
+	const Pi = 3.14	// Pi
+
+
+VARIABLES
+	// 
+	var MaxInt int	// MaxInt
+
+
+FUNCTIONS
+	// 
+	func F(x int) int
+
+	// Always under the package functions list. 
+	func NotAFactory() int
+
+
+TYPES
+	// 
+	type T struct{}	// T
+
+	// 
+	var V T	// v
+
+	// 
+	func (x *T) M()
+
+	// Should only appear if AllDecls is set. 
+	type comparable struct{}	// overrides a predeclared type comparable
+
+	// Associated with comparable type if AllDecls is set. 
+	func ComparableFactory() comparable
+
+	// 
+	type notExported int
+
+	// 
+	const (
+		C1	notExported	= iota
+		C2
+		c3
+		C4
+		C5
+	)
+
+	// 
+	const C notExported = 0
+
+	// 
+	var (
+		U1, U2, u3, U4, U5	notExported
+		u6			notExported
+		U7			notExported	= 7
+	)
+
+	// 
+	var V notExported
+
+	// 
+	var V1, V2, v3, V4, V5 notExported
+
+	// 
+	func F1() notExported
+
+	// 
+	func f2() notExported
+
+	// Should only appear if AllDecls is set. 
+	type uint struct{}	// overrides a predeclared type uint
+
+	// Associated with uint type if AllDecls is set. 
+	func UintFactory() uint
+
+	// Associated with uint type if AllDecls is set. 
+	func uintFactory() uint
+
diff --git a/internal/backport/go/doc/testdata/b.2.golden b/internal/backport/go/doc/testdata/b.2.golden
new file mode 100644
index 0000000..c06246a
--- /dev/null
+++ b/internal/backport/go/doc/testdata/b.2.golden
@@ -0,0 +1,74 @@
+// 
+PACKAGE b
+
+IMPORTPATH
+	testdata/b
+
+IMPORTS
+	a
+
+FILENAMES
+	testdata/b.go
+
+CONSTANTS
+	// 
+	const (
+		C1	notExported	= iota
+		C2
+	
+		C4
+		C5
+	)
+
+	// 
+	const C notExported = 0
+
+	// 
+	const Pi = 3.14	// Pi
+
+
+VARIABLES
+	// 
+	var (
+		U1, U2, U4, U5	notExported
+	
+		U7	notExported	= 7
+	)
+
+	// 
+	var MaxInt int	// MaxInt
+
+	// 
+	var V notExported
+
+	// 
+	var V1, V2, V4, V5 notExported
+
+
+FUNCTIONS
+	// Associated with comparable type if AllDecls is set. 
+	func ComparableFactory() comparable
+
+	// 
+	func F(x int) int
+
+	// 
+	func F1() notExported
+
+	// Always under the package functions list. 
+	func NotAFactory() int
+
+	// Associated with uint type if AllDecls is set. 
+	func UintFactory() uint
+
+
+TYPES
+	// 
+	type T struct{}	// T
+
+	// 
+	var V T	// v
+
+	// 
+	func (x *T) M()
+
diff --git a/internal/backport/go/doc/testdata/b.go b/internal/backport/go/doc/testdata/b.go
new file mode 100644
index 0000000..61b512b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/b.go
@@ -0,0 +1,64 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "a"
+
+// ----------------------------------------------------------------------------
+// Basic declarations
+
+const Pi = 3.14   // Pi
+var MaxInt int    // MaxInt
+type T struct{}   // T
+var V T           // v
+func F(x int) int {} // F
+func (x *T) M()   {} // M
+
+// Corner cases: association with (presumed) predeclared types
+
+// Always under the package functions list.
+func NotAFactory() int {}
+
+// Associated with uint type if AllDecls is set.
+func UintFactory() uint {}
+
+// Associated with uint type if AllDecls is set.
+func uintFactory() uint {}
+
+// Associated with comparable type if AllDecls is set.
+func ComparableFactory() comparable {}
+
+// Should only appear if AllDecls is set.
+type uint struct{} // overrides a predeclared type uint
+
+// Should only appear if AllDecls is set.
+type comparable struct{} // overrides a predeclared type comparable
+
+// ----------------------------------------------------------------------------
+// Exported declarations associated with non-exported types must always be shown.
+
+type notExported int
+
+const C notExported = 0
+
+const (
+	C1 notExported = iota
+	C2
+	c3
+	C4
+	C5
+)
+
+var V notExported
+var V1, V2, v3, V4, V5 notExported
+
+var (
+	U1, U2, u3, U4, U5 notExported
+	u6                 notExported
+	U7                 notExported = 7
+)
+
+func F1() notExported {}
+func f2() notExported {}
diff --git a/internal/backport/go/doc/testdata/benchmark.go b/internal/backport/go/doc/testdata/benchmark.go
new file mode 100644
index 0000000..d27bf11
--- /dev/null
+++ b/internal/backport/go/doc/testdata/benchmark.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"runtime"
+	"time"
+)
+
+var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of go test.
+type InternalBenchmark struct {
+	Name string
+	F    func(b *B)
+}
+
+// B is a type passed to Benchmark functions to manage benchmark
+// timing and to specify the number of iterations to run.
+type B struct {
+	common
+	N         int
+	benchmark InternalBenchmark
+	bytes     int64
+	timerOn   bool
+	result    BenchmarkResult
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also used to resume timing after
+// a call to StopTimer.
+func (b *B) StartTimer() {
+	if !b.timerOn {
+		b.start = time.Now()
+		b.timerOn = true
+	}
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (b *B) StopTimer() {
+	if b.timerOn {
+		b.duration += time.Now().Sub(b.start)
+		b.timerOn = false
+	}
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+	if b.timerOn {
+		b.start = time.Now()
+	}
+	b.duration = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+func (b *B) nsPerOp() int64 {
+	if b.N <= 0 {
+		return 0
+	}
+	return b.duration.Nanoseconds() / int64(b.N)
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+	// Try to get a comparable environment for each run
+	// by clearing garbage from previous runs.
+	runtime.GC()
+	b.N = n
+	b.ResetTimer()
+	b.StartTimer()
+	b.benchmark.F(b)
+	b.StopTimer()
+}
+
+func min(x, y int) int {
+	if x > y {
+		return y
+	}
+	return x
+}
+
+func max(x, y int) int {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+	var tens = 0
+	// tens = floor(log_10(n))
+	for n > 10 {
+		n = n / 10
+		tens++
+	}
+	// result = 10^tens
+	result := 1
+	for i := 0; i < tens; i++ {
+		result *= 10
+	}
+	return result
+}
+
+// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+	base := roundDown10(n)
+	if n < (2 * base) {
+		return 2 * base
+	}
+	if n < (5 * base) {
+		return 5 * base
+	}
+	return 10 * base
+}
+
+// run times the benchmark function in a separate goroutine.
+func (b *B) run() BenchmarkResult {
+	go b.launch()
+	<-b.signal
+	return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
+// of benchmark iterations until the benchmark runs for a second in order
+// to get a reasonable measurement. It prints timing information in this form
+//		testing.BenchmarkHello	100000		19 ns/op
+// launch is run by the fun function as a separate goroutine.
+func (b *B) launch() {
+	// Run the benchmark for a single iteration in case it's expensive.
+	n := 1
+
+	// Signal that we're done whether we return normally
+	// or by FailNow's runtime.Goexit.
+	defer func() {
+		b.signal <- b
+	}()
+
+	b.runN(n)
+	// Run the benchmark for at least the specified amount of time.
+	d := *benchTime
+	for !b.failed && b.duration < d && n < 1e9 {
+		last := n
+		// Predict iterations/sec.
+		if b.nsPerOp() == 0 {
+			n = 1e9
+		} else {
+			n = int(d.Nanoseconds() / b.nsPerOp())
+		}
+		// Run more iterations than we think we'll need for a second (1.5x).
+		// Don't grow too fast in case we had timing errors previously.
+		// Be sure to run at least one more than last time.
+		n = max(min(n+n/2, 100*last), last+1)
+		// Round up to something easy to read.
+		n = roundUp(n)
+		b.runN(n)
+	}
+	b.result = BenchmarkResult{b.N, b.duration, b.bytes}
+}
+
+// The results of a benchmark run.
+type BenchmarkResult struct {
+	N     int           // The number of iterations.
+	T     time.Duration // The total time taken.
+	Bytes int64         // Bytes processed in one iteration.
+}
+
+func (r BenchmarkResult) NsPerOp() int64 {
+	if r.N <= 0 {
+		return 0
+	}
+	return r.T.Nanoseconds() / int64(r.N)
+}
+
+func (r BenchmarkResult) mbPerSec() float64 {
+	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
+		return 0
+	}
+	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
+func (r BenchmarkResult) String() string {
+	mbs := r.mbPerSec()
+	mb := ""
+	if mbs != 0 {
+		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+	}
+	nsop := r.NsPerOp()
+	ns := fmt.Sprintf("%10d ns/op", nsop)
+	if r.N > 0 && nsop < 100 {
+		// The format specifiers here make sure that
+		// the ones digits line up for all three possible formats.
+		if nsop < 10 {
+			ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+		} else {
+			ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+		}
+	}
+	return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of go test.
+func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
+	// If no flag was specified, don't run benchmarks.
+	if len(*matchBenchmarks) == 0 {
+		return
+	}
+	for _, Benchmark := range benchmarks {
+		matched, err := matchString(*matchBenchmarks, Benchmark.Name)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
+			os.Exit(1)
+		}
+		if !matched {
+			continue
+		}
+		for _, procs := range cpuList {
+			runtime.GOMAXPROCS(procs)
+			b := &B{
+				common: common{
+					signal: make(chan any),
+				},
+				benchmark: Benchmark,
+			}
+			benchName := Benchmark.Name
+			if procs != 1 {
+				benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
+			}
+			fmt.Printf("%s\t", benchName)
+			r := b.run()
+			if b.failed {
+				// The output could be very long here, but probably isn't.
+				// We print it all, regardless, because we don't want to trim the reason
+				// the benchmark failed.
+				fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
+				continue
+			}
+			fmt.Printf("%v\n", r)
+			// Unlike with tests, we ignore the -chatty flag and always print output for
+			// benchmarks since the output generation time will skew the results.
+			if len(b.output) > 0 {
+				b.trimOutput()
+				fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
+			}
+			if p := runtime.GOMAXPROCS(-1); p != procs {
+				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+			}
+		}
+	}
+}
+
+// trimOutput shortens the output from a benchmark, which can be very long.
+func (b *B) trimOutput() {
+	// The output is likely to appear multiple times because the benchmark
+	// is run multiple times, but at least it will be seen. This is not a big deal
+	// because benchmarks rarely print, but just in case, we trim it if it's too long.
+	const maxNewlines = 10
+	for nlCount, j := 0, 0; j < len(b.output); j++ {
+		if b.output[j] == '\n' {
+			nlCount++
+			if nlCount >= maxNewlines {
+				b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
+				break
+			}
+		}
+	}
+}
+
+// Benchmark benchmarks a single function. Useful for creating
+// custom benchmarks that do not use go test.
+func Benchmark(f func(b *B)) BenchmarkResult {
+	b := &B{
+		common: common{
+			signal: make(chan any),
+		},
+		benchmark: InternalBenchmark{"", f},
+	}
+	return b.run()
+}
diff --git a/internal/backport/go/doc/testdata/blank.0.golden b/internal/backport/go/doc/testdata/blank.0.golden
new file mode 100644
index 0000000..70f2929
--- /dev/null
+++ b/internal/backport/go/doc/testdata/blank.0.golden
@@ -0,0 +1,62 @@
+// Package blank is a go/doc test for the handling of _. See issue ...
+PACKAGE blank
+
+IMPORTPATH
+	testdata/blank
+
+IMPORTS
+	os
+
+FILENAMES
+	testdata/blank.go
+
+CONSTANTS
+	// T constants counting from unexported constants. 
+	const (
+		C1	T
+		C2
+	
+		C3
+	
+		C4	int
+	)
+
+	// Constants with a single type that is not propagated. 
+	const (
+		Default		= 0644
+		Useless		= 0312
+		WideOpen	= 0777
+	)
+
+	// Constants with an imported type that is propagated. 
+	const (
+		M1	os.FileMode
+		M2
+		M3
+	)
+
+	// Package constants. 
+	const (
+		I1	int
+		I2
+	)
+
+
+TYPES
+	// S has a padding field. 
+	type S struct {
+		H	uint32
+	
+		A	uint8
+		// contains filtered or unexported fields
+	}
+
+	// 
+	type T int
+
+	// T constants counting from a blank constant. 
+	const (
+		T1	T
+		T2
+	)
+
diff --git a/internal/backport/go/doc/testdata/blank.1.golden b/internal/backport/go/doc/testdata/blank.1.golden
new file mode 100644
index 0000000..8098cb6
--- /dev/null
+++ b/internal/backport/go/doc/testdata/blank.1.golden
@@ -0,0 +1,83 @@
+// Package blank is a go/doc test for the handling of _. See issue ...
+PACKAGE blank
+
+IMPORTPATH
+	testdata/blank
+
+IMPORTS
+	os
+
+FILENAMES
+	testdata/blank.go
+
+CONSTANTS
+	// T constants counting from unexported constants. 
+	const (
+		tweedledee	T	= iota
+		tweedledum
+		C1
+		C2
+		alice
+		C3
+		redQueen	int	= iota
+		C4
+	)
+
+	// Constants with a single type that is not propagated. 
+	const (
+		zero		os.FileMode	= 0
+		Default				= 0644
+		Useless				= 0312
+		WideOpen			= 0777
+	)
+
+	// Constants with an imported type that is propagated. 
+	const (
+		zero	os.FileMode	= 0
+		M1
+		M2
+		M3
+	)
+
+	// Package constants. 
+	const (
+		_	int	= iota
+		I1
+		I2
+	)
+
+	// Unexported constants counting from blank iota. See issue 9615. 
+	const (
+		_	= iota
+		one	= iota + 1
+	)
+
+
+VARIABLES
+	// 
+	var _ = T(55)
+
+
+FUNCTIONS
+	// 
+	func _()
+
+
+TYPES
+	// S has a padding field. 
+	type S struct {
+		H	uint32
+		_	uint8
+		A	uint8
+	}
+
+	// 
+	type T int
+
+	// T constants counting from a blank constant. 
+	const (
+		_	T	= iota
+		T1
+		T2
+	)
+
diff --git a/internal/backport/go/doc/testdata/blank.2.golden b/internal/backport/go/doc/testdata/blank.2.golden
new file mode 100644
index 0000000..70f2929
--- /dev/null
+++ b/internal/backport/go/doc/testdata/blank.2.golden
@@ -0,0 +1,62 @@
+// Package blank is a go/doc test for the handling of _. See issue ...
+PACKAGE blank
+
+IMPORTPATH
+	testdata/blank
+
+IMPORTS
+	os
+
+FILENAMES
+	testdata/blank.go
+
+CONSTANTS
+	// T constants counting from unexported constants. 
+	const (
+		C1	T
+		C2
+	
+		C3
+	
+		C4	int
+	)
+
+	// Constants with a single type that is not propagated. 
+	const (
+		Default		= 0644
+		Useless		= 0312
+		WideOpen	= 0777
+	)
+
+	// Constants with an imported type that is propagated. 
+	const (
+		M1	os.FileMode
+		M2
+		M3
+	)
+
+	// Package constants. 
+	const (
+		I1	int
+		I2
+	)
+
+
+TYPES
+	// S has a padding field. 
+	type S struct {
+		H	uint32
+	
+		A	uint8
+		// contains filtered or unexported fields
+	}
+
+	// 
+	type T int
+
+	// T constants counting from a blank constant. 
+	const (
+		T1	T
+		T2
+	)
+
diff --git a/internal/backport/go/doc/testdata/blank.go b/internal/backport/go/doc/testdata/blank.go
new file mode 100644
index 0000000..5ea6186
--- /dev/null
+++ b/internal/backport/go/doc/testdata/blank.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blank is a go/doc test for the handling of _.
+// See issue 5397.
+package blank
+
+import "os"
+
+type T int
+
+// T constants counting from a blank constant.
+const (
+	_ T = iota
+	T1
+	T2
+)
+
+// T constants counting from unexported constants.
+const (
+	tweedledee T = iota
+	tweedledum
+	C1
+	C2
+	alice
+	C3
+	redQueen int = iota
+	C4
+)
+
+// Constants with a single type that is not propagated.
+const (
+	zero     os.FileMode = 0
+	Default              = 0644
+	Useless              = 0312
+	WideOpen             = 0777
+)
+
+// Constants with an imported type that is propagated.
+const (
+	zero os.FileMode = 0
+	M1
+	M2
+	M3
+)
+
+// Package constants.
+const (
+	_ int = iota
+	I1
+	I2
+)
+
+// Unexported constants counting from blank iota.
+// See issue 9615.
+const (
+	_   = iota
+	one = iota + 1
+)
+
+// Blanks not in doc output:
+
+// S has a padding field.
+type S struct {
+	H uint32
+	_ uint8
+	A uint8
+}
+
+func _() {}
+
+type _ T
+
+var _ = T(55)
diff --git a/internal/backport/go/doc/testdata/bugpara.0.golden b/internal/backport/go/doc/testdata/bugpara.0.golden
new file mode 100644
index 0000000..5804859
--- /dev/null
+++ b/internal/backport/go/doc/testdata/bugpara.0.golden
@@ -0,0 +1,20 @@
+// 
+PACKAGE bugpara
+
+IMPORTPATH
+	testdata/bugpara
+
+FILENAMES
+	testdata/bugpara.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+	Sometimes bugs have multiple paragraphs.
+	
+	Like this one.
+
+
+BUGS
+BUG(rsc)	Sometimes bugs have multiple paragraphs.
+	
+	Like this one.
+
diff --git a/internal/backport/go/doc/testdata/bugpara.1.golden b/internal/backport/go/doc/testdata/bugpara.1.golden
new file mode 100644
index 0000000..5804859
--- /dev/null
+++ b/internal/backport/go/doc/testdata/bugpara.1.golden
@@ -0,0 +1,20 @@
+// 
+PACKAGE bugpara
+
+IMPORTPATH
+	testdata/bugpara
+
+FILENAMES
+	testdata/bugpara.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+	Sometimes bugs have multiple paragraphs.
+	
+	Like this one.
+
+
+BUGS
+BUG(rsc)	Sometimes bugs have multiple paragraphs.
+	
+	Like this one.
+
diff --git a/internal/backport/go/doc/testdata/bugpara.2.golden b/internal/backport/go/doc/testdata/bugpara.2.golden
new file mode 100644
index 0000000..5804859
--- /dev/null
+++ b/internal/backport/go/doc/testdata/bugpara.2.golden
@@ -0,0 +1,20 @@
+// 
+PACKAGE bugpara
+
+IMPORTPATH
+	testdata/bugpara
+
+FILENAMES
+	testdata/bugpara.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+	Sometimes bugs have multiple paragraphs.
+	
+	Like this one.
+
+
+BUGS
+BUG(rsc)	Sometimes bugs have multiple paragraphs.
+	
+	Like this one.
+
diff --git a/internal/backport/go/doc/testdata/bugpara.go b/internal/backport/go/doc/testdata/bugpara.go
new file mode 100644
index 0000000..0360a6f
--- /dev/null
+++ b/internal/backport/go/doc/testdata/bugpara.go
@@ -0,0 +1,9 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bugpara
+
+// BUG(rsc): Sometimes bugs have multiple paragraphs.
+//
+// Like this one.
diff --git a/internal/backport/go/doc/testdata/c.0.golden b/internal/backport/go/doc/testdata/c.0.golden
new file mode 100644
index 0000000..e21959b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/c.0.golden
@@ -0,0 +1,48 @@
+// 
+PACKAGE c
+
+IMPORTPATH
+	testdata/c
+
+IMPORTS
+	a
+
+FILENAMES
+	testdata/c.go
+
+TYPES
+	// A (should see this) 
+	type A struct{}
+
+	// B (should see this) 
+	type B struct{}
+
+	// C (should see this) 
+	type C struct{}
+
+	// D (should see this) 
+	type D struct{}
+
+	// E1 (should see this) 
+	type E1 struct{}
+
+	// E (should see this for E2 and E3) 
+	type E2 struct{}
+
+	// E (should see this for E2 and E3) 
+	type E3 struct{}
+
+	// E4 (should see this) 
+	type E4 struct{}
+
+	// 
+	type T1 struct{}
+
+	// 
+	func (t1 *T1) M()
+
+	// T2 must not show methods of local T1 
+	type T2 struct {
+		a.T1	// not the same as locally declared T1
+	}
+
diff --git a/internal/backport/go/doc/testdata/c.1.golden b/internal/backport/go/doc/testdata/c.1.golden
new file mode 100644
index 0000000..e21959b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/c.1.golden
@@ -0,0 +1,48 @@
+// 
+PACKAGE c
+
+IMPORTPATH
+	testdata/c
+
+IMPORTS
+	a
+
+FILENAMES
+	testdata/c.go
+
+TYPES
+	// A (should see this) 
+	type A struct{}
+
+	// B (should see this) 
+	type B struct{}
+
+	// C (should see this) 
+	type C struct{}
+
+	// D (should see this) 
+	type D struct{}
+
+	// E1 (should see this) 
+	type E1 struct{}
+
+	// E (should see this for E2 and E3) 
+	type E2 struct{}
+
+	// E (should see this for E2 and E3) 
+	type E3 struct{}
+
+	// E4 (should see this) 
+	type E4 struct{}
+
+	// 
+	type T1 struct{}
+
+	// 
+	func (t1 *T1) M()
+
+	// T2 must not show methods of local T1 
+	type T2 struct {
+		a.T1	// not the same as locally declared T1
+	}
+
diff --git a/internal/backport/go/doc/testdata/c.2.golden b/internal/backport/go/doc/testdata/c.2.golden
new file mode 100644
index 0000000..e21959b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/c.2.golden
@@ -0,0 +1,48 @@
+// 
+PACKAGE c
+
+IMPORTPATH
+	testdata/c
+
+IMPORTS
+	a
+
+FILENAMES
+	testdata/c.go
+
+TYPES
+	// A (should see this) 
+	type A struct{}
+
+	// B (should see this) 
+	type B struct{}
+
+	// C (should see this) 
+	type C struct{}
+
+	// D (should see this) 
+	type D struct{}
+
+	// E1 (should see this) 
+	type E1 struct{}
+
+	// E (should see this for E2 and E3) 
+	type E2 struct{}
+
+	// E (should see this for E2 and E3) 
+	type E3 struct{}
+
+	// E4 (should see this) 
+	type E4 struct{}
+
+	// 
+	type T1 struct{}
+
+	// 
+	func (t1 *T1) M()
+
+	// T2 must not show methods of local T1 
+	type T2 struct {
+		a.T1	// not the same as locally declared T1
+	}
+
diff --git a/internal/backport/go/doc/testdata/c.go b/internal/backport/go/doc/testdata/c.go
new file mode 100644
index 0000000..e0f3919
--- /dev/null
+++ b/internal/backport/go/doc/testdata/c.go
@@ -0,0 +1,62 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package c
+
+import "a"
+
+// ----------------------------------------------------------------------------
+// Test that empty declarations don't cause problems
+
+const ()
+
+type ()
+
+var ()
+
+// ----------------------------------------------------------------------------
+// Test that types with documentation on both, the Decl and the Spec node
+// are handled correctly.
+
+// A (should see this)
+type A struct{}
+
+// B (should see this)
+type (
+	B struct{}
+)
+
+type (
+	// C (should see this)
+	C struct{}
+)
+
+// D (should not see this)
+type (
+	// D (should see this)
+	D struct{}
+)
+
+// E (should see this for E2 and E3)
+type (
+	// E1 (should see this)
+	E1 struct{}
+	E2 struct{}
+	E3 struct{}
+	// E4 (should see this)
+	E4 struct{}
+)
+
+// ----------------------------------------------------------------------------
+// Test that local and imported types are different when
+// handling anonymous fields.
+
+type T1 struct{}
+
+func (t1 *T1) M() {}
+
+// T2 must not show methods of local T1
+type T2 struct {
+	a.T1 // not the same as locally declared T1
+}
diff --git a/internal/backport/go/doc/testdata/d.0.golden b/internal/backport/go/doc/testdata/d.0.golden
new file mode 100644
index 0000000..c005199
--- /dev/null
+++ b/internal/backport/go/doc/testdata/d.0.golden
@@ -0,0 +1,104 @@
+// 
+PACKAGE d
+
+IMPORTPATH
+	testdata/d
+
+FILENAMES
+	testdata/d1.go
+	testdata/d2.go
+
+CONSTANTS
+	// CBx constants should appear before CAx constants. 
+	const (
+		CB2	= iota	// before CB1
+		CB1		// before CB0
+		CB0		// at end
+	)
+
+	// CAx constants should appear after CBx constants. 
+	const (
+		CA2	= iota	// before CA1
+		CA1		// before CA0
+		CA0		// at end
+	)
+
+	// C0 should be first. 
+	const C0 = 0
+
+	// C1 should be second. 
+	const C1 = 1
+
+	// C2 should be third. 
+	const C2 = 2
+
+	// 
+	const (
+		// Single const declarations inside ()'s are considered ungrouped
+		// and show up in sorted order.
+		Cungrouped = 0
+	)
+
+
+VARIABLES
+	// VBx variables should appear before VAx variables. 
+	var (
+		VB2	int	// before VB1
+		VB1	int	// before VB0
+		VB0	int	// at end
+	)
+
+	// VAx variables should appear after VBx variables. 
+	var (
+		VA2	int	// before VA1
+		VA1	int	// before VA0
+		VA0	int	// at end
+	)
+
+	// V0 should be first. 
+	var V0 uintptr
+
+	// V1 should be second. 
+	var V1 uint
+
+	// V2 should be third. 
+	var V2 int
+
+	// 
+	var (
+		// Single var declarations inside ()'s are considered ungrouped
+		// and show up in sorted order.
+		Vungrouped = 0
+	)
+
+
+FUNCTIONS
+	// F0 should be first. 
+	func F0()
+
+	// F1 should be second. 
+	func F1()
+
+	// F2 should be third. 
+	func F2()
+
+
+TYPES
+	// T0 should be first. 
+	type T0 struct{}
+
+	// T1 should be second. 
+	type T1 struct{}
+
+	// T2 should be third. 
+	type T2 struct{}
+
+	// TG0 should be first. 
+	type TG0 struct{}
+
+	// TG1 should be second. 
+	type TG1 struct{}
+
+	// TG2 should be third. 
+	type TG2 struct{}
+
diff --git a/internal/backport/go/doc/testdata/d.1.golden b/internal/backport/go/doc/testdata/d.1.golden
new file mode 100644
index 0000000..c005199
--- /dev/null
+++ b/internal/backport/go/doc/testdata/d.1.golden
@@ -0,0 +1,104 @@
+// 
+PACKAGE d
+
+IMPORTPATH
+	testdata/d
+
+FILENAMES
+	testdata/d1.go
+	testdata/d2.go
+
+CONSTANTS
+	// CBx constants should appear before CAx constants. 
+	const (
+		CB2	= iota	// before CB1
+		CB1		// before CB0
+		CB0		// at end
+	)
+
+	// CAx constants should appear after CBx constants. 
+	const (
+		CA2	= iota	// before CA1
+		CA1		// before CA0
+		CA0		// at end
+	)
+
+	// C0 should be first. 
+	const C0 = 0
+
+	// C1 should be second. 
+	const C1 = 1
+
+	// C2 should be third. 
+	const C2 = 2
+
+	// 
+	const (
+		// Single const declarations inside ()'s are considered ungrouped
+		// and show up in sorted order.
+		Cungrouped = 0
+	)
+
+
+VARIABLES
+	// VBx variables should appear before VAx variables. 
+	var (
+		VB2	int	// before VB1
+		VB1	int	// before VB0
+		VB0	int	// at end
+	)
+
+	// VAx variables should appear after VBx variables. 
+	var (
+		VA2	int	// before VA1
+		VA1	int	// before VA0
+		VA0	int	// at end
+	)
+
+	// V0 should be first. 
+	var V0 uintptr
+
+	// V1 should be second. 
+	var V1 uint
+
+	// V2 should be third. 
+	var V2 int
+
+	// 
+	var (
+		// Single var declarations inside ()'s are considered ungrouped
+		// and show up in sorted order.
+		Vungrouped = 0
+	)
+
+
+FUNCTIONS
+	// F0 should be first. 
+	func F0()
+
+	// F1 should be second. 
+	func F1()
+
+	// F2 should be third. 
+	func F2()
+
+
+TYPES
+	// T0 should be first. 
+	type T0 struct{}
+
+	// T1 should be second. 
+	type T1 struct{}
+
+	// T2 should be third. 
+	type T2 struct{}
+
+	// TG0 should be first. 
+	type TG0 struct{}
+
+	// TG1 should be second. 
+	type TG1 struct{}
+
+	// TG2 should be third. 
+	type TG2 struct{}
+
diff --git a/internal/backport/go/doc/testdata/d.2.golden b/internal/backport/go/doc/testdata/d.2.golden
new file mode 100644
index 0000000..c005199
--- /dev/null
+++ b/internal/backport/go/doc/testdata/d.2.golden
@@ -0,0 +1,104 @@
+// 
+PACKAGE d
+
+IMPORTPATH
+	testdata/d
+
+FILENAMES
+	testdata/d1.go
+	testdata/d2.go
+
+CONSTANTS
+	// CBx constants should appear before CAx constants. 
+	const (
+		CB2	= iota	// before CB1
+		CB1		// before CB0
+		CB0		// at end
+	)
+
+	// CAx constants should appear after CBx constants. 
+	const (
+		CA2	= iota	// before CA1
+		CA1		// before CA0
+		CA0		// at end
+	)
+
+	// C0 should be first. 
+	const C0 = 0
+
+	// C1 should be second. 
+	const C1 = 1
+
+	// C2 should be third. 
+	const C2 = 2
+
+	// 
+	const (
+		// Single const declarations inside ()'s are considered ungrouped
+		// and show up in sorted order.
+		Cungrouped = 0
+	)
+
+
+VARIABLES
+	// VBx variables should appear before VAx variables. 
+	var (
+		VB2	int	// before VB1
+		VB1	int	// before VB0
+		VB0	int	// at end
+	)
+
+	// VAx variables should appear after VBx variables. 
+	var (
+		VA2	int	// before VA1
+		VA1	int	// before VA0
+		VA0	int	// at end
+	)
+
+	// V0 should be first. 
+	var V0 uintptr
+
+	// V1 should be second. 
+	var V1 uint
+
+	// V2 should be third. 
+	var V2 int
+
+	// 
+	var (
+		// Single var declarations inside ()'s are considered ungrouped
+		// and show up in sorted order.
+		Vungrouped = 0
+	)
+
+
+FUNCTIONS
+	// F0 should be first. 
+	func F0()
+
+	// F1 should be second. 
+	func F1()
+
+	// F2 should be third. 
+	func F2()
+
+
+TYPES
+	// T0 should be first. 
+	type T0 struct{}
+
+	// T1 should be second. 
+	type T1 struct{}
+
+	// T2 should be third. 
+	type T2 struct{}
+
+	// TG0 should be first. 
+	type TG0 struct{}
+
+	// TG1 should be second. 
+	type TG1 struct{}
+
+	// TG2 should be third. 
+	type TG2 struct{}
+
diff --git a/internal/backport/go/doc/testdata/d1.go b/internal/backport/go/doc/testdata/d1.go
new file mode 100644
index 0000000..ebd6941
--- /dev/null
+++ b/internal/backport/go/doc/testdata/d1.go
@@ -0,0 +1,57 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for sort order of declarations.
+
+package d
+
+// C2 should be third.
+const C2 = 2
+
+// V2 should be third.
+var V2 int
+
+// CBx constants should appear before CAx constants.
+const (
+	CB2 = iota // before CB1
+	CB1        // before CB0
+	CB0        // at end
+)
+
+// VBx variables should appear before VAx variables.
+var (
+	VB2 int // before VB1
+	VB1 int // before VB0
+	VB0 int // at end
+)
+
+const (
+	// Single const declarations inside ()'s are considered ungrouped
+	// and show up in sorted order.
+	Cungrouped = 0
+)
+
+var (
+	// Single var declarations inside ()'s are considered ungrouped
+	// and show up in sorted order.
+	Vungrouped = 0
+)
+
+// T2 should be third.
+type T2 struct{}
+
+// Grouped types are sorted nevertheless.
+type (
+	// TG2 should be third.
+	TG2 struct{}
+
+	// TG1 should be second.
+	TG1 struct{}
+
+	// TG0 should be first.
+	TG0 struct{}
+)
+
+// F2 should be third.
+func F2() {}
diff --git a/internal/backport/go/doc/testdata/d2.go b/internal/backport/go/doc/testdata/d2.go
new file mode 100644
index 0000000..2f56f4f
--- /dev/null
+++ b/internal/backport/go/doc/testdata/d2.go
@@ -0,0 +1,45 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for sort order of declarations.
+
+package d
+
+// C1 should be second.
+const C1 = 1
+
+// C0 should be first.
+const C0 = 0
+
+// V1 should be second.
+var V1 uint
+
+// V0 should be first.
+var V0 uintptr
+
+// CAx constants should appear after CBx constants.
+const (
+	CA2 = iota // before CA1
+	CA1        // before CA0
+	CA0        // at end
+)
+
+// VAx variables should appear after VBx variables.
+var (
+	VA2 int // before VA1
+	VA1 int // before VA0
+	VA0 int // at end
+)
+
+// T1 should be second.
+type T1 struct{}
+
+// T0 should be first.
+type T0 struct{}
+
+// F1 should be second.
+func F1() {}
+
+// F0 should be first.
+func F0() {}
diff --git a/internal/backport/go/doc/testdata/e.0.golden b/internal/backport/go/doc/testdata/e.0.golden
new file mode 100644
index 0000000..6987e58
--- /dev/null
+++ b/internal/backport/go/doc/testdata/e.0.golden
@@ -0,0 +1,109 @@
+// The package e is a go/doc test for embedded methods. 
+PACKAGE e
+
+IMPORTPATH
+	testdata/e
+
+FILENAMES
+	testdata/e.go
+
+TYPES
+	// T1 has no embedded (level 1) M method due to conflict. 
+	type T1 struct {
+		// contains filtered or unexported fields
+	}
+
+	// T2 has only M as top-level method. 
+	type T2 struct {
+		// contains filtered or unexported fields
+	}
+
+	// T2.M should appear as method of T2. 
+	func (T2) M()
+
+	// T3 has only M as top-level method. 
+	type T3 struct {
+		// contains filtered or unexported fields
+	}
+
+	// T3.M should appear as method of T3. 
+	func (T3) M()
+
+	// 
+	type T4 struct{}
+
+	// T4.M should appear as method of T5 only if AllMethods is set. 
+	func (*T4) M()
+
+	// 
+	type T5 struct {
+		T4
+	}
+
+	// 
+	type U1 struct {
+		*U1
+	}
+
+	// U1.M should appear as method of U1. 
+	func (*U1) M()
+
+	// 
+	type U2 struct {
+		*U3
+	}
+
+	// U2.M should appear as method of U2 and as method of U3 only if ...
+	func (*U2) M()
+
+	// 
+	type U3 struct {
+		*U2
+	}
+
+	// U3.N should appear as method of U3 and as method of U2 only if ...
+	func (*U3) N()
+
+	// 
+	type U4 struct {
+		// contains filtered or unexported fields
+	}
+
+	// U4.M should appear as method of U4. 
+	func (*U4) M()
+
+	// 
+	type V1 struct {
+		*V2
+		*V5
+	}
+
+	// 
+	type V2 struct {
+		*V3
+	}
+
+	// 
+	type V3 struct {
+		*V4
+	}
+
+	// 
+	type V4 struct {
+		*V5
+	}
+
+	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
+	func (*V4) M()
+
+	// 
+	type V5 struct {
+		*V6
+	}
+
+	// 
+	type V6 struct{}
+
+	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
+	func (*V6) M()
+
diff --git a/internal/backport/go/doc/testdata/e.1.golden b/internal/backport/go/doc/testdata/e.1.golden
new file mode 100644
index 0000000..cbe22e0
--- /dev/null
+++ b/internal/backport/go/doc/testdata/e.1.golden
@@ -0,0 +1,144 @@
+// The package e is a go/doc test for embedded methods. 
+PACKAGE e
+
+IMPORTPATH
+	testdata/e
+
+FILENAMES
+	testdata/e.go
+
+TYPES
+	// T1 has no embedded (level 1) M method due to conflict. 
+	type T1 struct {
+		t1
+		t2
+	}
+
+	// T2 has only M as top-level method. 
+	type T2 struct {
+		t1
+	}
+
+	// T2.M should appear as method of T2. 
+	func (T2) M()
+
+	// T3 has only M as top-level method. 
+	type T3 struct {
+		t1e
+		t2e
+	}
+
+	// T3.M should appear as method of T3. 
+	func (T3) M()
+
+	// 
+	type T4 struct{}
+
+	// T4.M should appear as method of T5 only if AllMethods is set. 
+	func (*T4) M()
+
+	// 
+	type T5 struct {
+		T4
+	}
+
+	// 
+	type U1 struct {
+		*U1
+	}
+
+	// U1.M should appear as method of U1. 
+	func (*U1) M()
+
+	// 
+	type U2 struct {
+		*U3
+	}
+
+	// U2.M should appear as method of U2 and as method of U3 only if ...
+	func (*U2) M()
+
+	// 
+	type U3 struct {
+		*U2
+	}
+
+	// U3.N should appear as method of U3 and as method of U2 only if ...
+	func (*U3) N()
+
+	// 
+	type U4 struct {
+		*u5
+	}
+
+	// U4.M should appear as method of U4. 
+	func (*U4) M()
+
+	// 
+	type V1 struct {
+		*V2
+		*V5
+	}
+
+	// 
+	type V2 struct {
+		*V3
+	}
+
+	// 
+	type V3 struct {
+		*V4
+	}
+
+	// 
+	type V4 struct {
+		*V5
+	}
+
+	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
+	func (*V4) M()
+
+	// 
+	type V5 struct {
+		*V6
+	}
+
+	// 
+	type V6 struct{}
+
+	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
+	func (*V6) M()
+
+	// 
+	type t1 struct{}
+
+	// t1.M should not appear as method in a Tx type. 
+	func (t1) M()
+
+	// 
+	type t1e struct {
+		t1
+	}
+
+	// t1.M should not appear as method in a Tx type. 
+	func (t1e) M()
+
+	// 
+	type t2 struct{}
+
+	// t2.M should not appear as method in a Tx type. 
+	func (t2) M()
+
+	// 
+	type t2e struct {
+		t2
+	}
+
+	// t2.M should not appear as method in a Tx type. 
+	func (t2e) M()
+
+	// 
+	type u5 struct {
+		*U4
+	}
+
diff --git a/internal/backport/go/doc/testdata/e.2.golden b/internal/backport/go/doc/testdata/e.2.golden
new file mode 100644
index 0000000..e7b05e8
--- /dev/null
+++ b/internal/backport/go/doc/testdata/e.2.golden
@@ -0,0 +1,130 @@
+// The package e is a go/doc test for embedded methods. 
+PACKAGE e
+
+IMPORTPATH
+	testdata/e
+
+FILENAMES
+	testdata/e.go
+
+TYPES
+	// T1 has no embedded (level 1) M method due to conflict. 
+	type T1 struct {
+		// contains filtered or unexported fields
+	}
+
+	// T2 has only M as top-level method. 
+	type T2 struct {
+		// contains filtered or unexported fields
+	}
+
+	// T2.M should appear as method of T2. 
+	func (T2) M()
+
+	// T3 has only M as top-level method. 
+	type T3 struct {
+		// contains filtered or unexported fields
+	}
+
+	// T3.M should appear as method of T3. 
+	func (T3) M()
+
+	// 
+	type T4 struct{}
+
+	// T4.M should appear as method of T5 only if AllMethods is set. 
+	func (*T4) M()
+
+	// 
+	type T5 struct {
+		T4
+	}
+
+	// T4.M should appear as method of T5 only if AllMethods is set. 
+	func (*T5) M()
+
+	// 
+	type U1 struct {
+		*U1
+	}
+
+	// U1.M should appear as method of U1. 
+	func (*U1) M()
+
+	// 
+	type U2 struct {
+		*U3
+	}
+
+	// U2.M should appear as method of U2 and as method of U3 only if ...
+	func (*U2) M()
+
+	// U3.N should appear as method of U3 and as method of U2 only if ...
+	func (U2) N()
+
+	// 
+	type U3 struct {
+		*U2
+	}
+
+	// U2.M should appear as method of U2 and as method of U3 only if ...
+	func (U3) M()
+
+	// U3.N should appear as method of U3 and as method of U2 only if ...
+	func (*U3) N()
+
+	// 
+	type U4 struct {
+		// contains filtered or unexported fields
+	}
+
+	// U4.M should appear as method of U4. 
+	func (*U4) M()
+
+	// 
+	type V1 struct {
+		*V2
+		*V5
+	}
+
+	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
+	func (V1) M()
+
+	// 
+	type V2 struct {
+		*V3
+	}
+
+	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
+	func (V2) M()
+
+	// 
+	type V3 struct {
+		*V4
+	}
+
+	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
+	func (V3) M()
+
+	// 
+	type V4 struct {
+		*V5
+	}
+
+	// V4.M should appear as method of V2 and V3 if AllMethods is set. 
+	func (*V4) M()
+
+	// 
+	type V5 struct {
+		*V6
+	}
+
+	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
+	func (V5) M()
+
+	// 
+	type V6 struct{}
+
+	// V6.M should appear as method of V1 and V5 if AllMethods is set. 
+	func (*V6) M()
+
diff --git a/internal/backport/go/doc/testdata/e.go b/internal/backport/go/doc/testdata/e.go
new file mode 100644
index 0000000..ec432e3
--- /dev/null
+++ b/internal/backport/go/doc/testdata/e.go
@@ -0,0 +1,147 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package e is a go/doc test for embedded methods.
+package e
+
+// ----------------------------------------------------------------------------
+// Conflicting methods M must not show up.
+
+type t1 struct{}
+
+// t1.M should not appear as method in a Tx type.
+func (t1) M() {}
+
+type t2 struct{}
+
+// t2.M should not appear as method in a Tx type.
+func (t2) M() {}
+
+// T1 has no embedded (level 1) M method due to conflict.
+type T1 struct {
+	t1
+	t2
+}
+
+// ----------------------------------------------------------------------------
+// Higher-level method M wins over lower-level method M.
+
+// T2 has only M as top-level method.
+type T2 struct {
+	t1
+}
+
+// T2.M should appear as method of T2.
+func (T2) M() {}
+
+// ----------------------------------------------------------------------------
+// Higher-level method M wins over lower-level conflicting methods M.
+
+type t1e struct {
+	t1
+}
+
+type t2e struct {
+	t2
+}
+
+// T3 has only M as top-level method.
+type T3 struct {
+	t1e
+	t2e
+}
+
+// T3.M should appear as method of T3.
+func (T3) M() {}
+
+// ----------------------------------------------------------------------------
+// Don't show conflicting methods M embedded via an exported and non-exported
+// type.
+
+// T1 has no embedded (level 1) M method due to conflict.
+type T4 struct {
+	t2
+	T2
+}
+
+// ----------------------------------------------------------------------------
+// Don't show embedded methods of exported anonymous fields unless AllMethods
+// is set.
+
+type T4 struct{}
+
+// T4.M should appear as method of T5 only if AllMethods is set.
+func (*T4) M() {}
+
+type T5 struct {
+	T4
+}
+
+// ----------------------------------------------------------------------------
+// Recursive type declarations must not lead to endless recursion.
+
+type U1 struct {
+	*U1
+}
+
+// U1.M should appear as method of U1.
+func (*U1) M() {}
+
+type U2 struct {
+	*U3
+}
+
+// U2.M should appear as method of U2 and as method of U3 only if AllMethods is set.
+func (*U2) M() {}
+
+type U3 struct {
+	*U2
+}
+
+// U3.N should appear as method of U3 and as method of U2 only if AllMethods is set.
+func (*U3) N() {}
+
+type U4 struct {
+	*u5
+}
+
+// U4.M should appear as method of U4.
+func (*U4) M() {}
+
+type u5 struct {
+	*U4
+}
+
+// ----------------------------------------------------------------------------
+// A higher-level embedded type (and its methods) wins over the same type (and
+// its methods) embedded at a lower level.
+
+type V1 struct {
+	*V2
+	*V5
+}
+
+type V2 struct {
+	*V3
+}
+
+type V3 struct {
+	*V4
+}
+
+type V4 struct {
+	*V5
+}
+
+type V5 struct {
+	*V6
+}
+
+type V6 struct{}
+
+// V4.M should appear as method of V2 and V3 if AllMethods is set.
+func (*V4) M() {}
+
+// V6.M should appear as method of V1 and V5 if AllMethods is set.
+func (*V6) M() {}
diff --git a/internal/backport/go/doc/testdata/error1.0.golden b/internal/backport/go/doc/testdata/error1.0.golden
new file mode 100644
index 0000000..6c6fe5d
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error1.0.golden
@@ -0,0 +1,30 @@
+// 
+PACKAGE error1
+
+IMPORTPATH
+	testdata/error1
+
+FILENAMES
+	testdata/error1.go
+
+TYPES
+	// 
+	type I0 interface {
+		// When embedded, the predeclared error interface
+		// must remain visible in interface types.
+		error
+	}
+
+	// 
+	type S0 struct {
+		// contains filtered or unexported fields
+	}
+
+	// 
+	type T0 struct {
+		ExportedField interface {
+			// error should be visible
+			error
+		}
+	}
+
diff --git a/internal/backport/go/doc/testdata/error1.1.golden b/internal/backport/go/doc/testdata/error1.1.golden
new file mode 100644
index 0000000..a8dc2e7
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error1.1.golden
@@ -0,0 +1,32 @@
+// 
+PACKAGE error1
+
+IMPORTPATH
+	testdata/error1
+
+FILENAMES
+	testdata/error1.go
+
+TYPES
+	// 
+	type I0 interface {
+		// When embedded, the predeclared error interface
+		// must remain visible in interface types.
+		error
+	}
+
+	// 
+	type S0 struct {
+		// In struct types, an embedded error must only be visible
+		// if AllDecls is set.
+		error
+	}
+
+	// 
+	type T0 struct {
+		ExportedField interface {
+			// error should be visible
+			error
+		}
+	}
+
diff --git a/internal/backport/go/doc/testdata/error1.2.golden b/internal/backport/go/doc/testdata/error1.2.golden
new file mode 100644
index 0000000..6c6fe5d
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error1.2.golden
@@ -0,0 +1,30 @@
+// 
+PACKAGE error1
+
+IMPORTPATH
+	testdata/error1
+
+FILENAMES
+	testdata/error1.go
+
+TYPES
+	// 
+	type I0 interface {
+		// When embedded, the predeclared error interface
+		// must remain visible in interface types.
+		error
+	}
+
+	// 
+	type S0 struct {
+		// contains filtered or unexported fields
+	}
+
+	// 
+	type T0 struct {
+		ExportedField interface {
+			// error should be visible
+			error
+		}
+	}
+
diff --git a/internal/backport/go/doc/testdata/error1.go b/internal/backport/go/doc/testdata/error1.go
new file mode 100644
index 0000000..3c777a7
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error1.go
@@ -0,0 +1,24 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package error1
+
+type I0 interface {
+	// When embedded, the predeclared error interface
+	// must remain visible in interface types.
+	error
+}
+
+type T0 struct {
+	ExportedField interface {
+		// error should be visible
+		error
+	}
+}
+
+type S0 struct {
+	// In struct types, an embedded error must only be visible
+	// if AllDecls is set.
+	error
+}
diff --git a/internal/backport/go/doc/testdata/error2.0.golden b/internal/backport/go/doc/testdata/error2.0.golden
new file mode 100644
index 0000000..dedfe41
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error2.0.golden
@@ -0,0 +1,27 @@
+// 
+PACKAGE error2
+
+IMPORTPATH
+	testdata/error2
+
+FILENAMES
+	testdata/error2.go
+
+TYPES
+	// 
+	type I0 interface {
+		// contains filtered or unexported methods
+	}
+
+	// 
+	type S0 struct {
+		// contains filtered or unexported fields
+	}
+
+	// 
+	type T0 struct {
+		ExportedField interface {
+			// contains filtered or unexported methods
+		}
+	}
+
diff --git a/internal/backport/go/doc/testdata/error2.1.golden b/internal/backport/go/doc/testdata/error2.1.golden
new file mode 100644
index 0000000..dbcc1b0
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error2.1.golden
@@ -0,0 +1,37 @@
+// 
+PACKAGE error2
+
+IMPORTPATH
+	testdata/error2
+
+FILENAMES
+	testdata/error2.go
+
+TYPES
+	// 
+	type I0 interface {
+		// When embedded, the locally-declared error interface
+		// is only visible if all declarations are shown.
+		error
+	}
+
+	// 
+	type S0 struct {
+		// In struct types, an embedded error must only be visible
+		// if AllDecls is set.
+		error
+	}
+
+	// 
+	type T0 struct {
+		ExportedField interface {
+			// error should not be visible
+			error
+		}
+	}
+
+	// This error declaration shadows the predeclared error type. 
+	type error interface {
+		Error() string
+	}
+
diff --git a/internal/backport/go/doc/testdata/error2.2.golden b/internal/backport/go/doc/testdata/error2.2.golden
new file mode 100644
index 0000000..dedfe41
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error2.2.golden
@@ -0,0 +1,27 @@
+// 
+PACKAGE error2
+
+IMPORTPATH
+	testdata/error2
+
+FILENAMES
+	testdata/error2.go
+
+TYPES
+	// 
+	type I0 interface {
+		// contains filtered or unexported methods
+	}
+
+	// 
+	type S0 struct {
+		// contains filtered or unexported fields
+	}
+
+	// 
+	type T0 struct {
+		ExportedField interface {
+			// contains filtered or unexported methods
+		}
+	}
+
diff --git a/internal/backport/go/doc/testdata/error2.go b/internal/backport/go/doc/testdata/error2.go
new file mode 100644
index 0000000..6ee96c2
--- /dev/null
+++ b/internal/backport/go/doc/testdata/error2.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package error2
+
+type I0 interface {
+	// When embedded, the locally-declared error interface
+	// is only visible if all declarations are shown.
+	error
+}
+
+type T0 struct {
+	ExportedField interface {
+		// error should not be visible
+		error
+	}
+}
+
+type S0 struct {
+	// In struct types, an embedded error must only be visible
+	// if AllDecls is set.
+	error
+}
+
+// This error declaration shadows the predeclared error type.
+type error interface {
+	Error() string
+}
diff --git a/internal/backport/go/doc/testdata/example.go b/internal/backport/go/doc/testdata/example.go
new file mode 100644
index 0000000..fdeda13
--- /dev/null
+++ b/internal/backport/go/doc/testdata/example.go
@@ -0,0 +1,81 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+)
+
+type InternalExample struct {
+	Name   string
+	F      func()
+	Output string
+}
+
+func RunExamples(examples []InternalExample) (ok bool) {
+	ok = true
+
+	var eg InternalExample
+
+	stdout, stderr := os.Stdout, os.Stderr
+	defer func() {
+		os.Stdout, os.Stderr = stdout, stderr
+		if e := recover(); e != nil {
+			fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
+			os.Exit(1)
+		}
+	}()
+
+	for _, eg = range examples {
+		if *chatty {
+			fmt.Printf("=== RUN: %s\n", eg.Name)
+		}
+
+		// capture stdout and stderr
+		r, w, err := os.Pipe()
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(1)
+		}
+		os.Stdout, os.Stderr = w, w
+		outC := make(chan string)
+		go func() {
+			buf := new(bytes.Buffer)
+			_, err := io.Copy(buf, r)
+			if err != nil {
+				fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
+				os.Exit(1)
+			}
+			outC <- buf.String()
+		}()
+
+		// run example
+		t0 := time.Now()
+		eg.F()
+		dt := time.Now().Sub(t0)
+
+		// close pipe, restore stdout/stderr, get output
+		w.Close()
+		os.Stdout, os.Stderr = stdout, stderr
+		out := <-outC
+
+		// report any errors
+		tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
+		if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
+			fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
+				eg.Name, tstr, g, e)
+			ok = false
+		} else if *chatty {
+			fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
+		}
+	}
+
+	return
+}
diff --git a/internal/backport/go/doc/testdata/f.0.golden b/internal/backport/go/doc/testdata/f.0.golden
new file mode 100644
index 0000000..8175901
--- /dev/null
+++ b/internal/backport/go/doc/testdata/f.0.golden
@@ -0,0 +1,13 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+	testdata/f
+
+FILENAMES
+	testdata/f.go
+
+FUNCTIONS
+	// Exported must always be visible. Was issue 2824. 
+	func Exported() private
+
diff --git a/internal/backport/go/doc/testdata/f.1.golden b/internal/backport/go/doc/testdata/f.1.golden
new file mode 100644
index 0000000..ba68e88
--- /dev/null
+++ b/internal/backport/go/doc/testdata/f.1.golden
@@ -0,0 +1,16 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+	testdata/f
+
+FILENAMES
+	testdata/f.go
+
+TYPES
+	// 
+	type private struct{}
+
+	// Exported must always be visible. Was issue 2824. 
+	func Exported() private
+
diff --git a/internal/backport/go/doc/testdata/f.2.golden b/internal/backport/go/doc/testdata/f.2.golden
new file mode 100644
index 0000000..8175901
--- /dev/null
+++ b/internal/backport/go/doc/testdata/f.2.golden
@@ -0,0 +1,13 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+	testdata/f
+
+FILENAMES
+	testdata/f.go
+
+FUNCTIONS
+	// Exported must always be visible. Was issue 2824. 
+	func Exported() private
+
diff --git a/internal/backport/go/doc/testdata/f.go b/internal/backport/go/doc/testdata/f.go
new file mode 100644
index 0000000..7e9add9
--- /dev/null
+++ b/internal/backport/go/doc/testdata/f.go
@@ -0,0 +1,14 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package f is a go/doc test for functions and factory methods.
+package f
+
+// ----------------------------------------------------------------------------
+// Factory functions for non-exported types must not get lost.
+
+type private struct{}
+
+// Exported must always be visible. Was issue 2824.
+func Exported() private {}
diff --git a/internal/backport/go/doc/testdata/g.0.golden b/internal/backport/go/doc/testdata/g.0.golden
new file mode 100644
index 0000000..487cf06
--- /dev/null
+++ b/internal/backport/go/doc/testdata/g.0.golden
@@ -0,0 +1,32 @@
+// The package g is a go/doc test for mixed exported/unexported ...
+PACKAGE g
+
+IMPORTPATH
+	testdata/g
+
+FILENAMES
+	testdata/g.go
+
+CONSTANTS
+	// 
+	const (
+		A, _	= iota, iota
+		_, D
+		E, _
+		G, H
+	)
+
+
+VARIABLES
+	// 
+	var (
+		_, C2, _	= 1, 2, 3
+		C4, _, C6	= 4, 5, 6
+		_, C8, _	= 7, 8, 9
+	)
+
+	// 
+	var (
+		_, X = f()
+	)
+
diff --git a/internal/backport/go/doc/testdata/g.1.golden b/internal/backport/go/doc/testdata/g.1.golden
new file mode 100644
index 0000000..438441a
--- /dev/null
+++ b/internal/backport/go/doc/testdata/g.1.golden
@@ -0,0 +1,34 @@
+// The package g is a go/doc test for mixed exported/unexported ...
+PACKAGE g
+
+IMPORTPATH
+	testdata/g
+
+FILENAMES
+	testdata/g.go
+
+CONSTANTS
+	// 
+	const (
+		A, b	= iota, iota
+		c, D
+		E, f
+		G, H
+	)
+
+
+VARIABLES
+	// 
+	var (
+		c1, C2, c3	= 1, 2, 3
+		C4, c5, C6	= 4, 5, 6
+		c7, C8, c9	= 7, 8, 9
+		xx, yy, zz	= 0, 0, 0	// all unexported and hidden
+	)
+
+	// 
+	var (
+		x, X	= f()
+		y, z	= f()
+	)
+
diff --git a/internal/backport/go/doc/testdata/g.2.golden b/internal/backport/go/doc/testdata/g.2.golden
new file mode 100644
index 0000000..487cf06
--- /dev/null
+++ b/internal/backport/go/doc/testdata/g.2.golden
@@ -0,0 +1,32 @@
+// The package g is a go/doc test for mixed exported/unexported ...
+PACKAGE g
+
+IMPORTPATH
+	testdata/g
+
+FILENAMES
+	testdata/g.go
+
+CONSTANTS
+	// 
+	const (
+		A, _	= iota, iota
+		_, D
+		E, _
+		G, H
+	)
+
+
+VARIABLES
+	// 
+	var (
+		_, C2, _	= 1, 2, 3
+		C4, _, C6	= 4, 5, 6
+		_, C8, _	= 7, 8, 9
+	)
+
+	// 
+	var (
+		_, X = f()
+	)
+
diff --git a/internal/backport/go/doc/testdata/g.go b/internal/backport/go/doc/testdata/g.go
new file mode 100644
index 0000000..ceeb417
--- /dev/null
+++ b/internal/backport/go/doc/testdata/g.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package g is a go/doc test for mixed exported/unexported values.
+package g
+
+const (
+	A, b = iota, iota
+	c, D
+	E, f
+	G, H
+)
+
+var (
+	c1, C2, c3 = 1, 2, 3
+	C4, c5, C6 = 4, 5, 6
+	c7, C8, c9 = 7, 8, 9
+	xx, yy, zz = 0, 0, 0 // all unexported and hidden
+)
+
+var (
+	x, X = f()
+	y, z = f()
+)
diff --git a/internal/backport/go/doc/testdata/generics.0.golden b/internal/backport/go/doc/testdata/generics.0.golden
new file mode 100644
index 0000000..91c874c
--- /dev/null
+++ b/internal/backport/go/doc/testdata/generics.0.golden
@@ -0,0 +1,76 @@
+// Package generics contains the new syntax supporting generic ...
+PACKAGE generics
+
+IMPORTPATH
+	testdata/generics
+
+FILENAMES
+	testdata/generics.go
+
+FUNCTIONS
+	// AnotherFunc has an implicit constraint interface.  Neither type ...
+	func AnotherFunc[T ~struct{ f int }](_ struct{ f int })
+
+	// Func has an instantiated constraint. 
+	func Func[T Constraint[string, Type[int]]]()
+
+	// Single is not a factory function. 
+	func Single[T any]() *T
+
+	// Slice is not a factory function. 
+	func Slice[T any]() []T
+
+
+TYPES
+	// AFuncType demonstrates filtering of parameters and type ...
+	type AFuncType[T ~struct{ f int }] func(_ struct {
+		// contains filtered or unexported fields
+	})
+
+	// Constraint is a constraint interface with two type parameters. 
+	type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
+		~int | ~byte | Type[string]
+		M() P
+	}
+
+	// NewEmbeddings demonstrates how we filter the new embedded ...
+	type NewEmbeddings interface {
+		string	// should not be filtered
+	
+		struct {
+			// contains filtered or unexported fields
+		}
+		~struct {
+			// contains filtered or unexported fields
+		}
+		*struct {
+			// contains filtered or unexported fields
+		}
+		struct {
+			// contains filtered or unexported fields
+		} | ~struct {
+			// contains filtered or unexported fields
+		}
+		// contains filtered or unexported methods
+	}
+
+	// Parameterized types should be shown. 
+	type Type[P any] struct {
+		Field P
+	}
+
+	// Variables with an instantiated type should be shown. 
+	var X Type[int]
+
+	// Constructors for parameterized types should be shown. 
+	func Constructor[lowerCase any]() Type[lowerCase]
+
+	// MethodA uses a different name for its receiver type parameter. 
+	func (t Type[A]) MethodA(p A)
+
+	// MethodB has a blank receiver type parameter. 
+	func (t Type[_]) MethodB()
+
+	// MethodC has a lower-case receiver type parameter. 
+	func (t Type[c]) MethodC()
+
diff --git a/internal/backport/go/doc/testdata/generics.1.golden b/internal/backport/go/doc/testdata/generics.1.golden
new file mode 100644
index 0000000..923a4ce
--- /dev/null
+++ b/internal/backport/go/doc/testdata/generics.1.golden
@@ -0,0 +1,66 @@
+// Package generics contains the new syntax supporting generic ...
+PACKAGE generics
+
+IMPORTPATH
+	testdata/generics
+
+FILENAMES
+	testdata/generics.go
+
+FUNCTIONS
+	// AnotherFunc has an implicit constraint interface.  Neither type ...
+	func AnotherFunc[T ~struct{ f int }](_ struct{ f int })
+
+	// Func has an instantiated constraint. 
+	func Func[T Constraint[string, Type[int]]]()
+
+	// Single is not a factory function. 
+	func Single[T any]() *T
+
+	// Slice is not a factory function. 
+	func Slice[T any]() []T
+
+
+TYPES
+	// AFuncType demonstrates filtering of parameters and type ...
+	type AFuncType[T ~struct{ f int }] func(_ struct{ f int })
+
+	// Constraint is a constraint interface with two type parameters. 
+	type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
+		~int | ~byte | Type[string]
+		M() P
+	}
+
+	// NewEmbeddings demonstrates how we filter the new embedded ...
+	type NewEmbeddings interface {
+		string	// should not be filtered
+		int16
+		struct{ f int }
+		~struct{ f int }
+		*struct{ f int }
+		struct{ f int } | ~struct{ f int }
+	}
+
+	// Parameterized types should be shown. 
+	type Type[P any] struct {
+		Field P
+	}
+
+	// Variables with an instantiated type should be shown. 
+	var X Type[int]
+
+	// Constructors for parameterized types should be shown. 
+	func Constructor[lowerCase any]() Type[lowerCase]
+
+	// MethodA uses a different name for its receiver type parameter. 
+	func (t Type[A]) MethodA(p A)
+
+	// MethodB has a blank receiver type parameter. 
+	func (t Type[_]) MethodB()
+
+	// MethodC has a lower-case receiver type parameter. 
+	func (t Type[c]) MethodC()
+
+	// int16 shadows the predeclared type int16. 
+	type int16 int
+
diff --git a/internal/backport/go/doc/testdata/generics.2.golden b/internal/backport/go/doc/testdata/generics.2.golden
new file mode 100644
index 0000000..91c874c
--- /dev/null
+++ b/internal/backport/go/doc/testdata/generics.2.golden
@@ -0,0 +1,76 @@
+// Package generics contains the new syntax supporting generic ...
+PACKAGE generics
+
+IMPORTPATH
+	testdata/generics
+
+FILENAMES
+	testdata/generics.go
+
+FUNCTIONS
+	// AnotherFunc has an implicit constraint interface.  Neither type ...
+	func AnotherFunc[T ~struct{ f int }](_ struct{ f int })
+
+	// Func has an instantiated constraint. 
+	func Func[T Constraint[string, Type[int]]]()
+
+	// Single is not a factory function. 
+	func Single[T any]() *T
+
+	// Slice is not a factory function. 
+	func Slice[T any]() []T
+
+
+TYPES
+	// AFuncType demonstrates filtering of parameters and type ...
+	type AFuncType[T ~struct{ f int }] func(_ struct {
+		// contains filtered or unexported fields
+	})
+
+	// Constraint is a constraint interface with two type parameters. 
+	type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
+		~int | ~byte | Type[string]
+		M() P
+	}
+
+	// NewEmbeddings demonstrates how we filter the new embedded ...
+	type NewEmbeddings interface {
+		string	// should not be filtered
+	
+		struct {
+			// contains filtered or unexported fields
+		}
+		~struct {
+			// contains filtered or unexported fields
+		}
+		*struct {
+			// contains filtered or unexported fields
+		}
+		struct {
+			// contains filtered or unexported fields
+		} | ~struct {
+			// contains filtered or unexported fields
+		}
+		// contains filtered or unexported methods
+	}
+
+	// Parameterized types should be shown. 
+	type Type[P any] struct {
+		Field P
+	}
+
+	// Variables with an instantiated type should be shown. 
+	var X Type[int]
+
+	// Constructors for parameterized types should be shown. 
+	func Constructor[lowerCase any]() Type[lowerCase]
+
+	// MethodA uses a different name for its receiver type parameter. 
+	func (t Type[A]) MethodA(p A)
+
+	// MethodB has a blank receiver type parameter. 
+	func (t Type[_]) MethodB()
+
+	// MethodC has a lower-case receiver type parameter. 
+	func (t Type[c]) MethodC()
+
diff --git a/internal/backport/go/doc/testdata/generics.go b/internal/backport/go/doc/testdata/generics.go
new file mode 100644
index 0000000..ba7187e
--- /dev/null
+++ b/internal/backport/go/doc/testdata/generics.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package generics contains the new syntax supporting generic programming in
+// Go.
+package generics
+
+// Variables with an instantiated type should be shown.
+var X Type[int]
+
+// Parameterized types should be shown.
+type Type[P any] struct {
+	Field P
+}
+
+// Constructors for parameterized types should be shown.
+func Constructor[lowerCase any]() Type[lowerCase] {
+	return Type[lowerCase]{}
+}
+
+// MethodA uses a different name for its receiver type parameter.
+func (t Type[A]) MethodA(p A) {}
+
+// MethodB has a blank receiver type parameter.
+func (t Type[_]) MethodB() {}
+
+// MethodC has a lower-case receiver type parameter.
+func (t Type[c]) MethodC() {}
+
+// Constraint is a constraint interface with two type parameters.
+type Constraint[P, Q interface{ string | ~int | Type[int] }] interface {
+	~int | ~byte | Type[string]
+	M() P
+}
+
+// int16 shadows the predeclared type int16.
+type int16 int
+
+// NewEmbeddings demonstrates how we filter the new embedded elements.
+type NewEmbeddings interface {
+	string // should not be filtered
+	int16
+	struct{ f int }
+	~struct{ f int }
+	*struct{ f int }
+	struct{ f int } | ~struct{ f int }
+}
+
+// Func has an instantiated constraint.
+func Func[T Constraint[string, Type[int]]]() {}
+
+// AnotherFunc has an implicit constraint interface.
+//
+// Neither type parameters nor regular parameters should be filtered.
+func AnotherFunc[T ~struct{ f int }](_ struct{ f int }) {}
+
+// AFuncType demonstrates filtering of parameters and type parameters. Here we
+// don't filter type parameters (to be consistent with function declarations),
+// but DO filter the RHS.
+type AFuncType[T ~struct{ f int }] func(_ struct{ f int })
+
+// See issue #49477: type parameters should not be interpreted as named types
+// for the purpose of determining whether a function is a factory function.
+
+// Slice is not a factory function.
+func Slice[T any]() []T {
+	return nil
+}
+
+// Single is not a factory function.
+func Single[T any]() *T {
+	return nil
+}
diff --git a/internal/backport/go/doc/testdata/issue12839.0.golden b/internal/backport/go/doc/testdata/issue12839.0.golden
new file mode 100644
index 0000000..6b59774
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue12839.0.golden
@@ -0,0 +1,51 @@
+// Package issue12839 is a go/doc test to test association of a ...
+PACKAGE issue12839
+
+IMPORTPATH
+	testdata/issue12839
+
+IMPORTS
+	p
+
+FILENAMES
+	testdata/issue12839.go
+
+FUNCTIONS
+	// F1 should not be associated with T1 
+	func F1() (*T1, *T2)
+
+	// F10 should not be associated with T1. 
+	func F10() (T1, T2, error)
+
+	// F4 should not be associated with a type (same as F1) 
+	func F4() (a T1, b T2)
+
+	// F9 should not be associated with T1. 
+	func F9() (int, T1, T2)
+
+
+TYPES
+	// 
+	type T1 struct{}
+
+	// F2 should be associated with T1 
+	func F2() (a, b, c T1)
+
+	// F3 should be associated with T1 because b.T3 is from a ...
+	func F3() (a T1, b p.T3)
+
+	// F5 should be associated with T1. 
+	func F5() (T1, error)
+
+	// F6 should be associated with T1. 
+	func F6() (*T1, error)
+
+	// F7 should be associated with T1. 
+	func F7() (T1, string)
+
+	// F8 should be associated with T1. 
+	func F8() (int, T1, string)
+
+	// 
+	type T2 struct{}
+
diff --git a/internal/backport/go/doc/testdata/issue12839.1.golden b/internal/backport/go/doc/testdata/issue12839.1.golden
new file mode 100644
index 0000000..4b9b9f6
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue12839.1.golden
@@ -0,0 +1,54 @@
+// Package issue12839 is a go/doc test to test association of a ...
+PACKAGE issue12839
+
+IMPORTPATH
+	testdata/issue12839
+
+IMPORTS
+	p
+
+FILENAMES
+	testdata/issue12839.go
+
+FUNCTIONS
+	// F1 should not be associated with T1 
+	func F1() (*T1, *T2)
+
+	// F10 should not be associated with T1. 
+	func F10() (T1, T2, error)
+
+	// F4 should not be associated with a type (same as F1) 
+	func F4() (a T1, b T2)
+
+	// F9 should not be associated with T1. 
+	func F9() (int, T1, T2)
+
+
+TYPES
+	// 
+	type T1 struct{}
+
+	// F2 should be associated with T1 
+	func F2() (a, b, c T1)
+
+	// F3 should be associated with T1 because b.T3 is from a ...
+	func F3() (a T1, b p.T3)
+
+	// F5 should be associated with T1. 
+	func F5() (T1, error)
+
+	// F6 should be associated with T1. 
+	func F6() (*T1, error)
+
+	// F7 should be associated with T1. 
+	func F7() (T1, string)
+
+	// F8 should be associated with T1. 
+	func F8() (int, T1, string)
+
+	// 
+	func (t T1) hello() string
+
+	// 
+	type T2 struct{}
+
diff --git a/internal/backport/go/doc/testdata/issue12839.2.golden b/internal/backport/go/doc/testdata/issue12839.2.golden
new file mode 100644
index 0000000..6b59774
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue12839.2.golden
@@ -0,0 +1,51 @@
+// Package issue12839 is a go/doc test to test association of a ...
+PACKAGE issue12839
+
+IMPORTPATH
+	testdata/issue12839
+
+IMPORTS
+	p
+
+FILENAMES
+	testdata/issue12839.go
+
+FUNCTIONS
+	// F1 should not be associated with T1 
+	func F1() (*T1, *T2)
+
+	// F10 should not be associated with T1. 
+	func F10() (T1, T2, error)
+
+	// F4 should not be associated with a type (same as F1) 
+	func F4() (a T1, b T2)
+
+	// F9 should not be associated with T1. 
+	func F9() (int, T1, T2)
+
+
+TYPES
+	// 
+	type T1 struct{}
+
+	// F2 should be associated with T1 
+	func F2() (a, b, c T1)
+
+	// F3 should be associated with T1 because b.T3 is from a ...
+	func F3() (a T1, b p.T3)
+
+	// F5 should be associated with T1. 
+	func F5() (T1, error)
+
+	// F6 should be associated with T1. 
+	func F6() (*T1, error)
+
+	// F7 should be associated with T1. 
+	func F7() (T1, string)
+
+	// F8 should be associated with T1. 
+	func F8() (int, T1, string)
+
+	// 
+	type T2 struct{}
+
diff --git a/internal/backport/go/doc/testdata/issue12839.go b/internal/backport/go/doc/testdata/issue12839.go
new file mode 100644
index 0000000..51c7ac1
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue12839.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package issue12839 is a go/doc test to test association of a function
+// that returns multiple types.
+// See golang.org/issue/12839.
+// (See also golang.org/issue/27928.)
+package issue12839
+
+import "p"
+
+type T1 struct{}
+
+type T2 struct{}
+
+func (t T1) hello() string {
+	return "hello"
+}
+
+// F1 should not be associated with T1
+func F1() (*T1, *T2) {
+	return &T1{}, &T2{}
+}
+
+// F2 should be associated with T1
+func F2() (a, b, c T1) {
+	return T1{}, T1{}, T1{}
+}
+
+// F3 should be associated with T1 because b.T3 is from a different package
+func F3() (a T1, b p.T3) {
+	return T1{}, p.T3{}
+}
+
+// F4 should not be associated with a type (same as F1)
+func F4() (a T1, b T2) {
+	return T1{}, T2{}
+}
+
+// F5 should be associated with T1.
+func F5() (T1, error) {
+	return T1{}, nil
+}
+
+// F6 should be associated with T1.
+func F6() (*T1, error) {
+	return &T1{}, nil
+}
+
+// F7 should be associated with T1.
+func F7() (T1, string) {
+	return T1{}, nil
+}
+
+// F8 should be associated with T1.
+func F8() (int, T1, string) {
+	return 0, T1{}, nil
+}
+
+// F9 should not be associated with T1.
+func F9() (int, T1, T2) {
+	return 0, T1{}, T2{}
+}
+
+// F10 should not be associated with T1.
+func F10() (T1, T2, error) {
+	return T1{}, T2{}, nil
+}
diff --git a/internal/backport/go/doc/testdata/issue13742.0.golden b/internal/backport/go/doc/testdata/issue13742.0.golden
new file mode 100644
index 0000000..8dee9aa
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue13742.0.golden
@@ -0,0 +1,25 @@
+// 
+PACKAGE issue13742
+
+IMPORTPATH
+	testdata/issue13742
+
+IMPORTS
+	go/ast
+
+FILENAMES
+	testdata/issue13742.go
+
+FUNCTIONS
+	// Both F0 and G0 should appear as functions. 
+	func F0(Node)
+
+	// Both F1 and G1 should appear as functions. 
+	func F1(ast.Node)
+
+	// 
+	func G0() Node
+
+	// 
+	func G1() ast.Node
+
diff --git a/internal/backport/go/doc/testdata/issue13742.1.golden b/internal/backport/go/doc/testdata/issue13742.1.golden
new file mode 100644
index 0000000..8dee9aa
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue13742.1.golden
@@ -0,0 +1,25 @@
+// 
+PACKAGE issue13742
+
+IMPORTPATH
+	testdata/issue13742
+
+IMPORTS
+	go/ast
+
+FILENAMES
+	testdata/issue13742.go
+
+FUNCTIONS
+	// Both F0 and G0 should appear as functions. 
+	func F0(Node)
+
+	// Both F1 and G1 should appear as functions. 
+	func F1(ast.Node)
+
+	// 
+	func G0() Node
+
+	// 
+	func G1() ast.Node
+
diff --git a/internal/backport/go/doc/testdata/issue13742.2.golden b/internal/backport/go/doc/testdata/issue13742.2.golden
new file mode 100644
index 0000000..8dee9aa
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue13742.2.golden
@@ -0,0 +1,25 @@
+// 
+PACKAGE issue13742
+
+IMPORTPATH
+	testdata/issue13742
+
+IMPORTS
+	go/ast
+
+FILENAMES
+	testdata/issue13742.go
+
+FUNCTIONS
+	// Both F0 and G0 should appear as functions. 
+	func F0(Node)
+
+	// Both F1 and G1 should appear as functions. 
+	func F1(ast.Node)
+
+	// 
+	func G0() Node
+
+	// 
+	func G1() ast.Node
+
diff --git a/internal/backport/go/doc/testdata/issue13742.go b/internal/backport/go/doc/testdata/issue13742.go
new file mode 100644
index 0000000..dbc1941
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue13742.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue13742
+
+import (
+	"go/ast"
+	. "go/ast"
+)
+
+// Both F0 and G0 should appear as functions.
+func F0(Node)  {}
+func G0() Node { return nil }
+
+// Both F1 and G1 should appear as functions.
+func F1(ast.Node)  {}
+func G1() ast.Node { return nil }
diff --git a/internal/backport/go/doc/testdata/issue16153.0.golden b/internal/backport/go/doc/testdata/issue16153.0.golden
new file mode 100644
index 0000000..189260b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue16153.0.golden
@@ -0,0 +1,32 @@
+// 
+PACKAGE issue16153
+
+IMPORTPATH
+	testdata/issue16153
+
+FILENAMES
+	testdata/issue16153.go
+
+CONSTANTS
+	// 
+	const (
+		X3	int64	= iota
+		Y3		= 1
+	)
+
+	// 
+	const (
+		X4	int64	= iota
+		Y4
+	)
+
+	// original test case 
+	const (
+		Y1 = 256
+	)
+
+	// variations 
+	const (
+		Y2 uint8
+	)
+
diff --git a/internal/backport/go/doc/testdata/issue16153.1.golden b/internal/backport/go/doc/testdata/issue16153.1.golden
new file mode 100644
index 0000000..803df3e
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue16153.1.golden
@@ -0,0 +1,34 @@
+// 
+PACKAGE issue16153
+
+IMPORTPATH
+	testdata/issue16153
+
+FILENAMES
+	testdata/issue16153.go
+
+CONSTANTS
+	// original test case 
+	const (
+		x1	uint8	= 255
+		Y1		= 256
+	)
+
+	// variations 
+	const (
+		x2	uint8	= 255
+		Y2
+	)
+
+	// 
+	const (
+		X3	int64	= iota
+		Y3		= 1
+	)
+
+	// 
+	const (
+		X4	int64	= iota
+		Y4
+	)
+
diff --git a/internal/backport/go/doc/testdata/issue16153.2.golden b/internal/backport/go/doc/testdata/issue16153.2.golden
new file mode 100644
index 0000000..189260b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue16153.2.golden
@@ -0,0 +1,32 @@
+// 
+PACKAGE issue16153
+
+IMPORTPATH
+	testdata/issue16153
+
+FILENAMES
+	testdata/issue16153.go
+
+CONSTANTS
+	// 
+	const (
+		X3	int64	= iota
+		Y3		= 1
+	)
+
+	// 
+	const (
+		X4	int64	= iota
+		Y4
+	)
+
+	// original test case 
+	const (
+		Y1 = 256
+	)
+
+	// variations 
+	const (
+		Y2 uint8
+	)
+
diff --git a/internal/backport/go/doc/testdata/issue16153.go b/internal/backport/go/doc/testdata/issue16153.go
new file mode 100644
index 0000000..528be42
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue16153.go
@@ -0,0 +1,27 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue16153
+
+// original test case
+const (
+	x1 uint8 = 255
+	Y1       = 256
+)
+
+// variations
+const (
+	x2 uint8 = 255
+	Y2
+)
+
+const (
+	X3 int64 = iota
+	Y3       = 1
+)
+
+const (
+	X4 int64 = iota
+	Y4
+)
diff --git a/internal/backport/go/doc/testdata/issue17788.0.golden b/internal/backport/go/doc/testdata/issue17788.0.golden
new file mode 100644
index 0000000..42c00da
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue17788.0.golden
@@ -0,0 +1,8 @@
+// 
+PACKAGE issue17788
+
+IMPORTPATH
+	testdata/issue17788
+
+FILENAMES
+	testdata/issue17788.go
diff --git a/internal/backport/go/doc/testdata/issue17788.1.golden b/internal/backport/go/doc/testdata/issue17788.1.golden
new file mode 100644
index 0000000..42c00da
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue17788.1.golden
@@ -0,0 +1,8 @@
+// 
+PACKAGE issue17788
+
+IMPORTPATH
+	testdata/issue17788
+
+FILENAMES
+	testdata/issue17788.go
diff --git a/internal/backport/go/doc/testdata/issue17788.2.golden b/internal/backport/go/doc/testdata/issue17788.2.golden
new file mode 100644
index 0000000..42c00da
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue17788.2.golden
@@ -0,0 +1,8 @@
+// 
+PACKAGE issue17788
+
+IMPORTPATH
+	testdata/issue17788
+
+FILENAMES
+	testdata/issue17788.go
diff --git a/internal/backport/go/doc/testdata/issue17788.go b/internal/backport/go/doc/testdata/issue17788.go
new file mode 100644
index 0000000..883ad5f
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue17788.go
@@ -0,0 +1,8 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue17788
+
+func ( /* receiver type */ ) f0() {
+}
diff --git a/internal/backport/go/doc/testdata/issue22856.0.golden b/internal/backport/go/doc/testdata/issue22856.0.golden
new file mode 100644
index 0000000..a88f43f
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue22856.0.golden
@@ -0,0 +1,45 @@
+// 
+PACKAGE issue22856
+
+IMPORTPATH
+	testdata/issue22856
+
+FILENAMES
+	testdata/issue22856.go
+
+FUNCTIONS
+	// NewPointerSliceOfSlice is not a factory function because slices ...
+	func NewPointerSliceOfSlice() [][]*T
+
+	// NewSlice3 is not a factory function because 3 nested slices of ...
+	func NewSlice3() [][][]T
+
+	// NewSliceOfSlice is not a factory function because slices of a ...
+	func NewSliceOfSlice() [][]T
+
+
+TYPES
+	// 
+	type T struct{}
+
+	// 
+	func New() T
+
+	// 
+	func NewArray() [1]T
+
+	// 
+	func NewPointer() *T
+
+	// 
+	func NewPointerArray() [1]*T
+
+	// 
+	func NewPointerOfPointer() **T
+
+	// 
+	func NewPointerSlice() []*T
+
+	// 
+	func NewSlice() []T
+
diff --git a/internal/backport/go/doc/testdata/issue22856.1.golden b/internal/backport/go/doc/testdata/issue22856.1.golden
new file mode 100644
index 0000000..a88f43f
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue22856.1.golden
@@ -0,0 +1,45 @@
+// 
+PACKAGE issue22856
+
+IMPORTPATH
+	testdata/issue22856
+
+FILENAMES
+	testdata/issue22856.go
+
+FUNCTIONS
+	// NewPointerSliceOfSlice is not a factory function because slices ...
+	func NewPointerSliceOfSlice() [][]*T
+
+	// NewSlice3 is not a factory function because 3 nested slices of ...
+	func NewSlice3() [][][]T
+
+	// NewSliceOfSlice is not a factory function because slices of a ...
+	func NewSliceOfSlice() [][]T
+
+
+TYPES
+	// 
+	type T struct{}
+
+	// 
+	func New() T
+
+	// 
+	func NewArray() [1]T
+
+	// 
+	func NewPointer() *T
+
+	// 
+	func NewPointerArray() [1]*T
+
+	// 
+	func NewPointerOfPointer() **T
+
+	// 
+	func NewPointerSlice() []*T
+
+	// 
+	func NewSlice() []T
+
diff --git a/internal/backport/go/doc/testdata/issue22856.2.golden b/internal/backport/go/doc/testdata/issue22856.2.golden
new file mode 100644
index 0000000..a88f43f
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue22856.2.golden
@@ -0,0 +1,45 @@
+// 
+PACKAGE issue22856
+
+IMPORTPATH
+	testdata/issue22856
+
+FILENAMES
+	testdata/issue22856.go
+
+FUNCTIONS
+	// NewPointerSliceOfSlice is not a factory function because slices ...
+	func NewPointerSliceOfSlice() [][]*T
+
+	// NewSlice3 is not a factory function because 3 nested slices of ...
+	func NewSlice3() [][][]T
+
+	// NewSliceOfSlice is not a factory function because slices of a ...
+	func NewSliceOfSlice() [][]T
+
+
+TYPES
+	// 
+	type T struct{}
+
+	// 
+	func New() T
+
+	// 
+	func NewArray() [1]T
+
+	// 
+	func NewPointer() *T
+
+	// 
+	func NewPointerArray() [1]*T
+
+	// 
+	func NewPointerOfPointer() **T
+
+	// 
+	func NewPointerSlice() []*T
+
+	// 
+	func NewSlice() []T
+
diff --git a/internal/backport/go/doc/testdata/issue22856.go b/internal/backport/go/doc/testdata/issue22856.go
new file mode 100644
index 0000000..f456998
--- /dev/null
+++ b/internal/backport/go/doc/testdata/issue22856.go
@@ -0,0 +1,27 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue22856
+
+type T struct{}
+
+func New() T                   { return T{} }
+func NewPointer() *T           { return &T{} }
+func NewPointerSlice() []*T    { return []*T{&T{}} }
+func NewSlice() []T            { return []T{T{}} }
+func NewPointerOfPointer() **T { x := &T{}; return &x }
+func NewArray() [1]T           { return [1]T{T{}} }
+func NewPointerArray() [1]*T   { return [1]*T{&T{}} }
+
+// NewSliceOfSlice is not a factory function because slices of a slice of
+// type *T are not factory functions of type T.
+func NewSliceOfSlice() [][]T { return []T{[]T{}} }
+
+// NewPointerSliceOfSlice is not a factory function because slices of a
+// slice of type *T are not factory functions of type T.
+func NewPointerSliceOfSlice() [][]*T { return []*T{[]*T{}} }
+
+// NewSlice3 is not a factory function because 3 nested slices of type T
+// are not factory functions of type T.
+func NewSlice3() [][][]T { return []T{[]T{[]T{}}} }
diff --git a/internal/backport/go/doc/testdata/predeclared.0.golden b/internal/backport/go/doc/testdata/predeclared.0.golden
new file mode 100644
index 0000000..9f37b06
--- /dev/null
+++ b/internal/backport/go/doc/testdata/predeclared.0.golden
@@ -0,0 +1,8 @@
+// Package predeclared is a go/doc test for handling of exported ...
+PACKAGE predeclared
+
+IMPORTPATH
+	testdata/predeclared
+
+FILENAMES
+	testdata/predeclared.go
diff --git a/internal/backport/go/doc/testdata/predeclared.1.golden b/internal/backport/go/doc/testdata/predeclared.1.golden
new file mode 100644
index 0000000..2ff8ee6
--- /dev/null
+++ b/internal/backport/go/doc/testdata/predeclared.1.golden
@@ -0,0 +1,22 @@
+// Package predeclared is a go/doc test for handling of exported ...
+PACKAGE predeclared
+
+IMPORTPATH
+	testdata/predeclared
+
+FILENAMES
+	testdata/predeclared.go
+
+TYPES
+	// 
+	type bool int
+
+	// Must not be visible. 
+	func (b bool) String() string
+
+	// 
+	type error struct{}
+
+	// Must not be visible. 
+	func (e error) Error() string
+
diff --git a/internal/backport/go/doc/testdata/predeclared.2.golden b/internal/backport/go/doc/testdata/predeclared.2.golden
new file mode 100644
index 0000000..9f37b06
--- /dev/null
+++ b/internal/backport/go/doc/testdata/predeclared.2.golden
@@ -0,0 +1,8 @@
+// Package predeclared is a go/doc test for handling of exported ...
+PACKAGE predeclared
+
+IMPORTPATH
+	testdata/predeclared
+
+FILENAMES
+	testdata/predeclared.go
diff --git a/internal/backport/go/doc/testdata/predeclared.go b/internal/backport/go/doc/testdata/predeclared.go
new file mode 100644
index 0000000..c6dd806
--- /dev/null
+++ b/internal/backport/go/doc/testdata/predeclared.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package predeclared is a go/doc test for handling of
+// exported methods on locally-defined predeclared types.
+// See issue 9860.
+package predeclared
+
+type error struct{}
+
+// Must not be visible.
+func (e error) Error() string {
+	return ""
+}
+
+type bool int
+
+// Must not be visible.
+func (b bool) String() string {
+	return ""
+}
diff --git a/internal/backport/go/doc/testdata/template.txt b/internal/backport/go/doc/testdata/template.txt
new file mode 100644
index 0000000..1b07382
--- /dev/null
+++ b/internal/backport/go/doc/testdata/template.txt
@@ -0,0 +1,68 @@
+{{synopsis .Doc}}
+PACKAGE {{.Name}}
+
+IMPORTPATH
+	{{.ImportPath}}
+
+{{with .Imports}}IMPORTS
+{{range .}}	{{.}}
+{{end}}
+{{end}}{{/*
+
+*/}}FILENAMES
+{{range .Filenames}}	{{.}}
+{{end}}{{/*
+
+*/}}{{with .Consts}}
+CONSTANTS
+{{range .}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Vars}}
+VARIABLES
+{{range .}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Funcs}}
+FUNCTIONS
+{{range .}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Types}}
+TYPES
+{{range .}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{range .Consts}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Vars}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Funcs}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Methods}}	{{synopsis .Doc}}
+	{{node .Decl $.FSet}}
+
+{{end}}{{end}}{{end}}{{/*
+
+*/}}{{with .Bugs}}
+BUGS .Bugs is now deprecated, please use .Notes instead
+{{range .}}{{indent "\t" .}}
+{{end}}{{end}}{{with .Notes}}{{range $marker, $content := .}}
+{{$marker}}S
+{{range $content}}{{$marker}}({{.UID}}){{indent "\t" .Body}}
+{{end}}{{end}}{{end}}
\ No newline at end of file
diff --git a/internal/backport/go/doc/testdata/testing.0.golden b/internal/backport/go/doc/testdata/testing.0.golden
new file mode 100644
index 0000000..61dac8b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/testing.0.golden
@@ -0,0 +1,156 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+	testdata/testing
+
+IMPORTS
+	bytes
+	flag
+	fmt
+	io
+	os
+	runtime
+	runtime/pprof
+	strconv
+	strings
+	time
+
+FILENAMES
+	testdata/benchmark.go
+	testdata/example.go
+	testdata/testing.go
+
+FUNCTIONS
+	// An internal function but exported because it is cross-package; ...
+	func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+	// An internal function but exported because it is cross-package; ...
+	func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+	// 
+	func RunExamples(examples []InternalExample) (ok bool)
+
+	// 
+	func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+	// Short reports whether the -test.short flag is set. 
+	func Short() bool
+
+
+TYPES
+	// B is a type passed to Benchmark functions to manage benchmark ...
+	type B struct {
+		N int
+		// contains filtered or unexported fields
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *B) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *B) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *B) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *B) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *B) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *B) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *B) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *B) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *B) Logf(format string, args ...any)
+
+	// ResetTimer sets the elapsed benchmark time to zero. It does not ...
+	func (b *B) ResetTimer()
+
+	// SetBytes records the number of bytes processed in a single ...
+	func (b *B) SetBytes(n int64)
+
+	// StartTimer starts timing a test. This function is called ...
+	func (b *B) StartTimer()
+
+	// StopTimer stops timing a test. This can be used to pause the ...
+	func (b *B) StopTimer()
+
+	// The results of a benchmark run. 
+	type BenchmarkResult struct {
+		N	int		// The number of iterations.
+		T	time.Duration	// The total time taken.
+		Bytes	int64		// Bytes processed in one iteration.
+	}
+
+	// Benchmark benchmarks a single function. Useful for creating ...
+	func Benchmark(f func(b *B)) BenchmarkResult
+
+	// 
+	func (r BenchmarkResult) NsPerOp() int64
+
+	// 
+	func (r BenchmarkResult) String() string
+
+	// An internal type but exported because it is cross-package; part ...
+	type InternalBenchmark struct {
+		Name	string
+		F	func(b *B)
+	}
+
+	// 
+	type InternalExample struct {
+		Name	string
+		F	func()
+		Output	string
+	}
+
+	// An internal type but exported because it is cross-package; part ...
+	type InternalTest struct {
+		Name	string
+		F	func(*T)
+	}
+
+	// T is a type passed to Test functions to manage test state and ...
+	type T struct {
+		// contains filtered or unexported fields
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *T) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *T) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *T) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *T) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *T) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *T) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *T) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *T) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *T) Logf(format string, args ...any)
+
+	// Parallel signals that this test is to be run in parallel with ...
+	func (t *T) Parallel()
+
diff --git a/internal/backport/go/doc/testdata/testing.1.golden b/internal/backport/go/doc/testdata/testing.1.golden
new file mode 100644
index 0000000..1655af1
--- /dev/null
+++ b/internal/backport/go/doc/testdata/testing.1.golden
@@ -0,0 +1,298 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+	testdata/testing
+
+IMPORTS
+	bytes
+	flag
+	fmt
+	io
+	os
+	runtime
+	runtime/pprof
+	strconv
+	strings
+	time
+
+FILENAMES
+	testdata/benchmark.go
+	testdata/example.go
+	testdata/testing.go
+
+VARIABLES
+	// 
+	var (
+		// The short flag requests that tests run more quickly, but its functionality
+		// is provided by test writers themselves. The testing package is just its
+		// home. The all.bash installation script sets it to make installation more
+		// efficient, but by default the flag is off so a plain "go test" will do a
+		// full test of the package.
+		short	= flag.Bool("test.short", false, "run smaller test suite to save time")
+	
+		// Report as tests are run; default is silent for success.
+		chatty		= flag.Bool("test.v", false, "verbose: print additional output")
+		match		= flag.String("test.run", "", "regular expression to select tests to run")
+		memProfile	= flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+		memProfileRate	= flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+		cpuProfile	= flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+		timeout		= flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+		cpuListStr	= flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+		parallel	= flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+	
+		cpuList	[]int
+	)
+
+	// 
+	var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
+
+	// 
+	var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+
+	// 
+	var timer *time.Timer
+
+
+FUNCTIONS
+	// An internal function but exported because it is cross-package; ...
+	func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+	// An internal function but exported because it is cross-package; ...
+	func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+	// 
+	func RunExamples(examples []InternalExample) (ok bool)
+
+	// 
+	func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+	// Short reports whether the -test.short flag is set. 
+	func Short() bool
+
+	// after runs after all testing. 
+	func after()
+
+	// alarm is called if the timeout expires. 
+	func alarm()
+
+	// before runs before all testing. 
+	func before()
+
+	// decorate inserts the final newline if needed and indentation ...
+	func decorate(s string, addFileLine bool) string
+
+	// 
+	func max(x, y int) int
+
+	// 
+	func min(x, y int) int
+
+	// 
+	func parseCpuList()
+
+	// roundDown10 rounds a number down to the nearest power of 10. 
+	func roundDown10(n int) int
+
+	// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. 
+	func roundUp(n int) int
+
+	// startAlarm starts an alarm if requested. 
+	func startAlarm()
+
+	// stopAlarm turns off the alarm. 
+	func stopAlarm()
+
+	// 
+	func tRunner(t *T, test *InternalTest)
+
+
+TYPES
+	// B is a type passed to Benchmark functions to manage benchmark ...
+	type B struct {
+		common
+		N		int
+		benchmark	InternalBenchmark
+		bytes		int64
+		timerOn		bool
+		result		BenchmarkResult
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *B) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *B) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *B) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *B) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *B) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *B) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *B) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *B) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *B) Logf(format string, args ...any)
+
+	// ResetTimer sets the elapsed benchmark time to zero. It does not ...
+	func (b *B) ResetTimer()
+
+	// SetBytes records the number of bytes processed in a single ...
+	func (b *B) SetBytes(n int64)
+
+	// StartTimer starts timing a test. This function is called ...
+	func (b *B) StartTimer()
+
+	// StopTimer stops timing a test. This can be used to pause the ...
+	func (b *B) StopTimer()
+
+	// launch launches the benchmark function. It gradually increases ...
+	func (b *B) launch()
+
+	// log generates the output. It's always at the same stack depth. 
+	func (c *B) log(s string)
+
+	// 
+	func (b *B) nsPerOp() int64
+
+	// run times the benchmark function in a separate goroutine. 
+	func (b *B) run() BenchmarkResult
+
+	// runN runs a single benchmark for the specified number of ...
+	func (b *B) runN(n int)
+
+	// trimOutput shortens the output from a benchmark, which can be ...
+	func (b *B) trimOutput()
+
+	// The results of a benchmark run. 
+	type BenchmarkResult struct {
+		N	int		// The number of iterations.
+		T	time.Duration	// The total time taken.
+		Bytes	int64		// Bytes processed in one iteration.
+	}
+
+	// Benchmark benchmarks a single function. Useful for creating ...
+	func Benchmark(f func(b *B)) BenchmarkResult
+
+	// 
+	func (r BenchmarkResult) NsPerOp() int64
+
+	// 
+	func (r BenchmarkResult) String() string
+
+	// 
+	func (r BenchmarkResult) mbPerSec() float64
+
+	// An internal type but exported because it is cross-package; part ...
+	type InternalBenchmark struct {
+		Name	string
+		F	func(b *B)
+	}
+
+	// 
+	type InternalExample struct {
+		Name	string
+		F	func()
+		Output	string
+	}
+
+	// An internal type but exported because it is cross-package; part ...
+	type InternalTest struct {
+		Name	string
+		F	func(*T)
+	}
+
+	// T is a type passed to Test functions to manage test state and ...
+	type T struct {
+		common
+		name		string		// Name of test.
+		startParallel	chan bool	// Parallel tests will wait on this.
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *T) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *T) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *T) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *T) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *T) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *T) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *T) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *T) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *T) Logf(format string, args ...any)
+
+	// Parallel signals that this test is to be run in parallel with ...
+	func (t *T) Parallel()
+
+	// log generates the output. It's always at the same stack depth. 
+	func (c *T) log(s string)
+
+	// 
+	func (t *T) report()
+
+	// common holds the elements common between T and B and captures ...
+	type common struct {
+		output		[]byte		// Output generated by test or benchmark.
+		failed		bool		// Test or benchmark has failed.
+		start		time.Time	// Time test or benchmark started
+		duration	time.Duration
+		self		any		// To be sent on signal channel when done.
+		signal		chan any	// Output for serial tests.
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *common) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *common) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *common) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *common) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *common) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *common) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *common) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *common) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *common) Logf(format string, args ...any)
+
+	// log generates the output. It's always at the same stack depth. 
+	func (c *common) log(s string)
+
diff --git a/internal/backport/go/doc/testdata/testing.2.golden b/internal/backport/go/doc/testdata/testing.2.golden
new file mode 100644
index 0000000..61dac8b
--- /dev/null
+++ b/internal/backport/go/doc/testdata/testing.2.golden
@@ -0,0 +1,156 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+	testdata/testing
+
+IMPORTS
+	bytes
+	flag
+	fmt
+	io
+	os
+	runtime
+	runtime/pprof
+	strconv
+	strings
+	time
+
+FILENAMES
+	testdata/benchmark.go
+	testdata/example.go
+	testdata/testing.go
+
+FUNCTIONS
+	// An internal function but exported because it is cross-package; ...
+	func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+	// An internal function but exported because it is cross-package; ...
+	func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+	// 
+	func RunExamples(examples []InternalExample) (ok bool)
+
+	// 
+	func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+	// Short reports whether the -test.short flag is set. 
+	func Short() bool
+
+
+TYPES
+	// B is a type passed to Benchmark functions to manage benchmark ...
+	type B struct {
+		N int
+		// contains filtered or unexported fields
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *B) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *B) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *B) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *B) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *B) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *B) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *B) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *B) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *B) Logf(format string, args ...any)
+
+	// ResetTimer sets the elapsed benchmark time to zero. It does not ...
+	func (b *B) ResetTimer()
+
+	// SetBytes records the number of bytes processed in a single ...
+	func (b *B) SetBytes(n int64)
+
+	// StartTimer starts timing a test. This function is called ...
+	func (b *B) StartTimer()
+
+	// StopTimer stops timing a test. This can be used to pause the ...
+	func (b *B) StopTimer()
+
+	// The results of a benchmark run. 
+	type BenchmarkResult struct {
+		N	int		// The number of iterations.
+		T	time.Duration	// The total time taken.
+		Bytes	int64		// Bytes processed in one iteration.
+	}
+
+	// Benchmark benchmarks a single function. Useful for creating ...
+	func Benchmark(f func(b *B)) BenchmarkResult
+
+	// 
+	func (r BenchmarkResult) NsPerOp() int64
+
+	// 
+	func (r BenchmarkResult) String() string
+
+	// An internal type but exported because it is cross-package; part ...
+	type InternalBenchmark struct {
+		Name	string
+		F	func(b *B)
+	}
+
+	// 
+	type InternalExample struct {
+		Name	string
+		F	func()
+		Output	string
+	}
+
+	// An internal type but exported because it is cross-package; part ...
+	type InternalTest struct {
+		Name	string
+		F	func(*T)
+	}
+
+	// T is a type passed to Test functions to manage test state and ...
+	type T struct {
+		// contains filtered or unexported fields
+	}
+
+	// Error is equivalent to Log() followed by Fail(). 
+	func (c *T) Error(args ...any)
+
+	// Errorf is equivalent to Logf() followed by Fail(). 
+	func (c *T) Errorf(format string, args ...any)
+
+	// Fail marks the function as having failed but continues ...
+	func (c *T) Fail()
+
+	// FailNow marks the function as having failed and stops its ...
+	func (c *T) FailNow()
+
+	// Failed reports whether the function has failed. 
+	func (c *T) Failed() bool
+
+	// Fatal is equivalent to Log() followed by FailNow(). 
+	func (c *T) Fatal(args ...any)
+
+	// Fatalf is equivalent to Logf() followed by FailNow(). 
+	func (c *T) Fatalf(format string, args ...any)
+
+	// Log formats its arguments using default formatting, analogous ...
+	func (c *T) Log(args ...any)
+
+	// Logf formats its arguments according to the format, analogous ...
+	func (c *T) Logf(format string, args ...any)
+
+	// Parallel signals that this test is to be run in parallel with ...
+	func (t *T) Parallel()
+
diff --git a/internal/backport/go/doc/testdata/testing.go b/internal/backport/go/doc/testdata/testing.go
new file mode 100644
index 0000000..80238df
--- /dev/null
+++ b/internal/backport/go/doc/testdata/testing.go
@@ -0,0 +1,404 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides support for automated testing of Go packages.
+// It is intended to be used in concert with the ``go test'' utility, which automates
+// execution of any function of the form
+//     func TestXxx(*testing.T)
+// where Xxx can be any alphanumeric string (but the first letter must not be in
+// [a-z]) and serves to identify the test routine.
+// These TestXxx routines should be declared within the package they are testing.
+//
+// Functions of the form
+//     func BenchmarkXxx(*testing.B)
+// are considered benchmarks, and are executed by go test when the -test.bench
+// flag is provided.
+//
+// A sample benchmark function looks like this:
+//     func BenchmarkHello(b *testing.B) {
+//         for i := 0; i < b.N; i++ {
+//             fmt.Sprintf("hello")
+//         }
+//     }
+// The benchmark package will vary b.N until the benchmark function lasts
+// long enough to be timed reliably. The output
+//     testing.BenchmarkHello    10000000    282 ns/op
+// means that the loop ran 10000000 times at a speed of 282 ns per loop.
+//
+// If a benchmark needs some expensive setup before running, the timer
+// may be stopped:
+//     func BenchmarkBigLen(b *testing.B) {
+//         b.StopTimer()
+//         big := NewBig()
+//         b.StartTimer()
+//         for i := 0; i < b.N; i++ {
+//             big.Len()
+//         }
+//     }
+package testing
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"runtime"
+	"runtime/pprof"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	// The short flag requests that tests run more quickly, but its functionality
+	// is provided by test writers themselves. The testing package is just its
+	// home. The all.bash installation script sets it to make installation more
+	// efficient, but by default the flag is off so a plain "go test" will do a
+	// full test of the package.
+	short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+	// Report as tests are run; default is silent for success.
+	chatty         = flag.Bool("test.v", false, "verbose: print additional output")
+	match          = flag.String("test.run", "", "regular expression to select tests to run")
+	memProfile     = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+	memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+	cpuProfile     = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+	timeout        = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+	cpuListStr     = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+	parallel       = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+
+	cpuList []int
+)
+
+// common holds the elements common between T and B and
+// captures common methods such as Errorf.
+type common struct {
+	output   []byte    // Output generated by test or benchmark.
+	failed   bool      // Test or benchmark has failed.
+	start    time.Time // Time test or benchmark started
+	duration time.Duration
+	self     any      // To be sent on signal channel when done.
+	signal   chan any // Output for serial tests.
+}
+
+// Short reports whether the -test.short flag is set.
+func Short() bool {
+	return *short
+}
+
+// decorate inserts the final newline if needed and indentation tabs for formatting.
+// If addFileLine is true, it also prefixes the string with the file and line of the call site.
+func decorate(s string, addFileLine bool) string {
+	if addFileLine {
+		_, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+		if ok {
+			// Truncate file name at last file name separator.
+			if index := strings.LastIndex(file, "/"); index >= 0 {
+				file = file[index+1:]
+			} else if index = strings.LastIndex(file, "\\"); index >= 0 {
+				file = file[index+1:]
+			}
+		} else {
+			file = "???"
+			line = 1
+		}
+		s = fmt.Sprintf("%s:%d: %s", file, line, s)
+	}
+	s = "\t" + s // Every line is indented at least one tab.
+	n := len(s)
+	if n > 0 && s[n-1] != '\n' {
+		s += "\n"
+		n++
+	}
+	for i := 0; i < n-1; i++ { // -1 to avoid final newline
+		if s[i] == '\n' {
+			// Second and subsequent lines are indented an extra tab.
+			return s[0:i+1] + "\t" + decorate(s[i+1:n], false)
+		}
+	}
+	return s
+}
+
+// T is a type passed to Test functions to manage test state and support formatted test logs.
+// Logs are accumulated during execution and dumped to standard error when done.
+type T struct {
+	common
+	name          string    // Name of test.
+	startParallel chan bool // Parallel tests will wait on this.
+}
+
+// Fail marks the function as having failed but continues execution.
+func (c *common) Fail() { c.failed = true }
+
+// Failed reports whether the function has failed.
+func (c *common) Failed() bool { return c.failed }
+
+// FailNow marks the function as having failed and stops its execution.
+// Execution will continue at the next Test.
+func (c *common) FailNow() {
+	c.Fail()
+
+	// Calling runtime.Goexit will exit the goroutine, which
+	// will run the deferred functions in this goroutine,
+	// which will eventually run the deferred lines in tRunner,
+	// which will signal to the test loop that this test is done.
+	//
+	// A previous version of this code said:
+	//
+	//	c.duration = ...
+	//	c.signal <- c.self
+	//	runtime.Goexit()
+	//
+	// This previous version duplicated code (those lines are in
+	// tRunner no matter what), but worse the goroutine teardown
+	// implicit in runtime.Goexit was not guaranteed to complete
+	// before the test exited. If a test deferred an important cleanup
+	// function (like removing temporary files), there was no guarantee
+	// it would run on a test failure. Because we send on c.signal during
+	// a top-of-stack deferred function now, we know that the send
+	// only happens after any other stacked defers have completed.
+	runtime.Goexit()
+}
+
+// log generates the output. It's always at the same stack depth.
+func (c *common) log(s string) {
+	c.output = append(c.output, decorate(s, true)...)
+}
+
+// Log formats its arguments using default formatting, analogous to Println(),
+// and records the text in the error log.
+func (c *common) Log(args ...any) { c.log(fmt.Sprintln(args...)) }
+
+// Logf formats its arguments according to the format, analogous to Printf(),
+// and records the text in the error log.
+func (c *common) Logf(format string, args ...any) { c.log(fmt.Sprintf(format, args...)) }
+
+// Error is equivalent to Log() followed by Fail().
+func (c *common) Error(args ...any) {
+	c.log(fmt.Sprintln(args...))
+	c.Fail()
+}
+
+// Errorf is equivalent to Logf() followed by Fail().
+func (c *common) Errorf(format string, args ...any) {
+	c.log(fmt.Sprintf(format, args...))
+	c.Fail()
+}
+
+// Fatal is equivalent to Log() followed by FailNow().
+func (c *common) Fatal(args ...any) {
+	c.log(fmt.Sprintln(args...))
+	c.FailNow()
+}
+
+// Fatalf is equivalent to Logf() followed by FailNow().
+func (c *common) Fatalf(format string, args ...any) {
+	c.log(fmt.Sprintf(format, args...))
+	c.FailNow()
+}
+
+// Parallel signals that this test is to be run in parallel with (and only with)
+// other parallel tests in this CPU group.
+func (t *T) Parallel() {
+	t.signal <- (*T)(nil) // Release main testing loop
+	<-t.startParallel     // Wait for serial tests to finish
+}
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of go test.
+type InternalTest struct {
+	Name string
+	F    func(*T)
+}
+
+func tRunner(t *T, test *InternalTest) {
+	t.start = time.Now()
+
+	// When this goroutine is done, either because test.F(t)
+	// returned normally or because a test failure triggered
+	// a call to runtime.Goexit, record the duration and send
+	// a signal saying that the test is done.
+	defer func() {
+		t.duration = time.Now().Sub(t.start)
+		t.signal <- t
+	}()
+
+	test.F(t)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of go test.
+func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+	flag.Parse()
+	parseCpuList()
+
+	before()
+	startAlarm()
+	testOk := RunTests(matchString, tests)
+	exampleOk := RunExamples(examples)
+	if !testOk || !exampleOk {
+		fmt.Println("FAIL")
+		os.Exit(1)
+	}
+	fmt.Println("PASS")
+	stopAlarm()
+	RunBenchmarks(matchString, benchmarks)
+	after()
+}
+
+func (t *T) report() {
+	tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds())
+	format := "--- %s: %s %s\n%s"
+	if t.failed {
+		fmt.Printf(format, "FAIL", t.name, tstr, t.output)
+	} else if *chatty {
+		fmt.Printf(format, "PASS", t.name, tstr, t.output)
+	}
+}
+
+func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
+	ok = true
+	if len(tests) == 0 {
+		fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
+		return
+	}
+	for _, procs := range cpuList {
+		runtime.GOMAXPROCS(procs)
+		// We build a new channel tree for each run of the loop.
+		// collector merges in one channel all the upstream signals from parallel tests.
+		// If all tests pump to the same channel, a bug can occur where a test
+		// kicks off a goroutine that Fails, yet the test still delivers a completion signal,
+		// which skews the counting.
+		var collector = make(chan any)
+
+		numParallel := 0
+		startParallel := make(chan bool)
+
+		for i := 0; i < len(tests); i++ {
+			matched, err := matchString(*match, tests[i].Name)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err)
+				os.Exit(1)
+			}
+			if !matched {
+				continue
+			}
+			testName := tests[i].Name
+			if procs != 1 {
+				testName = fmt.Sprintf("%s-%d", tests[i].Name, procs)
+			}
+			t := &T{
+				common: common{
+					signal: make(chan any),
+				},
+				name:          testName,
+				startParallel: startParallel,
+			}
+			t.self = t
+			if *chatty {
+				fmt.Printf("=== RUN %s\n", t.name)
+			}
+			go tRunner(t, &tests[i])
+			out := (<-t.signal).(*T)
+			if out == nil { // Parallel run.
+				go func() {
+					collector <- <-t.signal
+				}()
+				numParallel++
+				continue
+			}
+			t.report()
+			ok = ok && !out.failed
+		}
+
+		running := 0
+		for numParallel+running > 0 {
+			if running < *parallel && numParallel > 0 {
+				startParallel <- true
+				running++
+				numParallel--
+				continue
+			}
+			t := (<-collector).(*T)
+			t.report()
+			ok = ok && !t.failed
+			running--
+		}
+	}
+	return
+}
+
+// before runs before all testing.
+func before() {
+	if *memProfileRate > 0 {
+		runtime.MemProfileRate = *memProfileRate
+	}
+	if *cpuProfile != "" {
+		f, err := os.Create(*cpuProfile)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "testing: %s", err)
+			return
+		}
+		if err := pprof.StartCPUProfile(f); err != nil {
+			fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s", err)
+			f.Close()
+			return
+		}
+		// Could save f so after can call f.Close; not worth the effort.
+	}
+
+}
+
+// after runs after all testing.
+func after() {
+	if *cpuProfile != "" {
+		pprof.StopCPUProfile() // flushes profile to disk
+	}
+	if *memProfile != "" {
+		f, err := os.Create(*memProfile)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "testing: %s", err)
+			return
+		}
+		if err = pprof.WriteHeapProfile(f); err != nil {
+			fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *memProfile, err)
+		}
+		f.Close()
+	}
+}
+
+var timer *time.Timer
+
+// startAlarm starts an alarm if requested.
+func startAlarm() {
+	if *timeout > 0 {
+		timer = time.AfterFunc(*timeout, alarm)
+	}
+}
+
+// stopAlarm turns off the alarm.
+func stopAlarm() {
+	if *timeout > 0 {
+		timer.Stop()
+	}
+}
+
+// alarm is called if the timeout expires.
+func alarm() {
+	panic("test timed out")
+}
+
+func parseCpuList() {
+	if len(*cpuListStr) == 0 {
+		cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
+	} else {
+		for _, val := range strings.Split(*cpuListStr, ",") {
+			cpu, err := strconv.Atoi(val)
+			if err != nil || cpu <= 0 {
+				fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu", val)
+				os.Exit(1)
+			}
+			cpuList = append(cpuList, cpu)
+		}
+	}
+}
diff --git a/internal/backport/go/format/benchmark_test.go b/internal/backport/go/format/benchmark_test.go
new file mode 100644
index 0000000..e2a6491
--- /dev/null
+++ b/internal/backport/go/format/benchmark_test.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides a simple framework to add benchmarks
+// based on generated input (source) files.
+
+package format_test
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/format"
+	"os"
+	"testing"
+)
+
+var debug = flag.Bool("debug", false, "write .src files containing formatting input; for debugging")
+
+// array1 generates an array literal with n elements of the form:
+//
+// var _ = [...]byte{
+//
+//	// 0
+//	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+//	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+//	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+//	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+//	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+//	// 40
+//	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+//	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+//	...
+func array1(buf *bytes.Buffer, n int) {
+	buf.WriteString("var _ = [...]byte{\n")
+	for i := 0; i < n; {
+		if i%10 == 0 {
+			fmt.Fprintf(buf, "\t// %d\n", i)
+		}
+		buf.WriteByte('\t')
+		for j := 0; j < 8; j++ {
+			fmt.Fprintf(buf, "0x%02x, ", byte(i))
+			i++
+		}
+		buf.WriteString("\n")
+	}
+	buf.WriteString("}\n")
+}
+
+var tests = []struct {
+	name string
+	gen  func(*bytes.Buffer, int)
+	n    int
+}{
+	{"array1", array1, 10000},
+	// add new test cases here as needed
+}
+
+func BenchmarkFormat(b *testing.B) {
+	var src bytes.Buffer
+	for _, t := range tests {
+		src.Reset()
+		src.WriteString("package p\n")
+		t.gen(&src, t.n)
+		data := src.Bytes()
+
+		if *debug {
+			filename := t.name + ".src"
+			err := os.WriteFile(filename, data, 0660)
+			if err != nil {
+				b.Fatalf("couldn't write %s: %v", filename, err)
+			}
+		}
+
+		b.Run(fmt.Sprintf("%s-%d", t.name, t.n), func(b *testing.B) {
+			b.SetBytes(int64(len(data)))
+			b.ReportAllocs()
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				var err error
+				sink, err = format.Source(data)
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+var sink []byte
diff --git a/internal/backport/go/format/example_test.go b/internal/backport/go/format/example_test.go
new file mode 100644
index 0000000..5e7ceee
--- /dev/null
+++ b/internal/backport/go/format/example_test.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format_test
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/format"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"log"
+)
+
+func ExampleNode() {
+	const expr = "(6+2*3)/4"
+
+	// parser.ParseExpr parses the argument and returns the
+	// corresponding ast.Node.
+	node, err := parser.ParseExpr(expr)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a FileSet for node. Since the node does not come
+	// from a real source file, fset will be empty.
+	fset := token.NewFileSet()
+
+	var buf bytes.Buffer
+	err = format.Node(&buf, fset, node)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println(buf.String())
+
+	// Output: (6 + 2*3) / 4
+}
diff --git a/internal/backport/go/format/format.go b/internal/backport/go/format/format.go
new file mode 100644
index 0000000..0268883
--- /dev/null
+++ b/internal/backport/go/format/format.go
@@ -0,0 +1,133 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format implements standard formatting of Go source.
+//
+// Note that formatting of Go source code changes over time, so tools relying on
+// consistent formatting should execute a specific version of the gofmt binary
+// instead of using this package. That way, the formatting will be stable, and
+// the tools won't need to be recompiled each time gofmt changes.
+//
+// For example, pre-submit checks that use this package directly would behave
+// differently depending on what Go version each developer uses, causing the
+// check to be inherently fragile.
+package format
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/printer"
+	"golang.org/x/website/internal/backport/go/token"
+	"io"
+)
+
+// Keep these in sync with cmd/gofmt/gofmt.go.
+const (
+	tabWidth    = 8
+	printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers
+
+	// printerNormalizeNumbers means to canonicalize number literal prefixes
+	// and exponents while printing. See https://golang.org/doc/go1.13#gofmt.
+	//
+	// This value is defined in go/printer specifically for go/format and cmd/gofmt.
+	printerNormalizeNumbers = 1 << 30
+)
+
+// config is the single printer configuration used by Node and Source;
+// it produces canonical gofmt output.
+var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth}
+
+// parserMode keeps comments so they survive the format round trip.
+const parserMode = parser.ParseComments
+
+// Node formats node in canonical gofmt style and writes the result to dst.
+//
+// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl,
+// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec,
+// or ast.Stmt. Node does not modify node. Imports are not sorted for
+// nodes representing partial source files (for instance, if the node is
+// not an *ast.File or a *printer.CommentedNode not wrapping an *ast.File).
+//
+// The function may return early (before the entire result is written)
+// and return a formatting error, for instance due to an incorrect AST.
+func Node(dst io.Writer, fset *token.FileSet, node interface{}) error {
+	// Determine if we have a complete source file (file != nil).
+	var file *ast.File
+	var cnode *printer.CommentedNode
+	switch n := node.(type) {
+	case *ast.File:
+		file = n
+	case *printer.CommentedNode:
+		if f, ok := n.Node.(*ast.File); ok {
+			file = f
+			cnode = n
+		}
+	}
+
+	// Sort imports if necessary.
+	if file != nil && hasUnsortedImports(file) {
+		// Make a copy of the AST because ast.SortImports is destructive.
+		// TODO(gri) Do this more efficiently.
+		// The copy is made by printing the file and re-parsing it,
+		// which preserves position information in fset.
+		var buf bytes.Buffer
+		err := config.Fprint(&buf, fset, file)
+		if err != nil {
+			return err
+		}
+		file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode)
+		if err != nil {
+			// We should never get here. If we do, provide good diagnostic.
+			return fmt.Errorf("format.Node internal error (%s)", err)
+		}
+		ast.SortImports(fset, file)
+
+		// Use new file with sorted imports.
+		node = file
+		if cnode != nil {
+			// Preserve the caller's comment list around the new file.
+			node = &printer.CommentedNode{Node: file, Comments: cnode.Comments}
+		}
+	}
+
+	return config.Fprint(dst, fset, node)
+}
+
+// Source formats src in canonical gofmt style and returns the result
+// or an (I/O or syntax) error. src is expected to be a syntactically
+// correct Go source file, or a list of Go declarations or statements.
+//
+// If src is a partial source file, the leading and trailing space of src
+// is applied to the result (such that it has the same leading and trailing
+// space as src), and the result is indented by the same amount as the first
+// line of src containing code. Imports are not sorted for partial source files.
+func Source(src []byte) ([]byte, error) {
+	fset := token.NewFileSet()
+	// parse (see internal.go) returns sourceAdj == nil for a complete file,
+	// or a post-processing function for declaration/statement fragments.
+	file, sourceAdj, indentAdj, err := parse(fset, "", src, true)
+	if err != nil {
+		return nil, err
+	}
+
+	if sourceAdj == nil {
+		// Complete source file.
+		// TODO(gri) consider doing this always.
+		ast.SortImports(fset, file)
+	}
+
+	return format(fset, file, sourceAdj, indentAdj, src, config)
+}
+
+// hasUnsortedImports reports whether file contains import declarations
+// that may need sorting. It is conservative: any grouped (parenthesized)
+// import declaration is treated as potentially unsorted.
+func hasUnsortedImports(file *ast.File) bool {
+	for _, d := range file.Decls {
+		d, ok := d.(*ast.GenDecl)
+		if !ok || d.Tok != token.IMPORT {
+			// Not an import declaration, so we're done.
+			// Imports are always first.
+			return false
+		}
+		if d.Lparen.IsValid() {
+			// For now assume all grouped imports are unsorted.
+			// TODO(gri) Should check if they are sorted already.
+			return true
+		}
+		// Ungrouped imports are sorted by default.
+	}
+	return false
+}
diff --git a/internal/backport/go/format/format_test.go b/internal/backport/go/format/format_test.go
new file mode 100644
index 0000000..d984eec
--- /dev/null
+++ b/internal/backport/go/format/format_test.go
@@ -0,0 +1,187 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format
+
+import (
+	"bytes"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"os"
+	"strings"
+	"testing"
+)
+
+// testfile names this very file: the tests below use it as an
+// already-gofmt-ed input that must format to itself.
+const testfile = "format_test.go"
+
+// diff reports (via t.Errorf) the first line at which dst and src differ,
+// or a length mismatch if one is a prefix of the other.
+func diff(t *testing.T, dst, src []byte) {
+	line := 1
+	offs := 0 // line offset
+	for i := 0; i < len(dst) && i < len(src); i++ {
+		d := dst[i]
+		s := src[i]
+		if d != s {
+			// Print both lines up to and including the first differing byte.
+			t.Errorf("dst:%d: %s\n", line, dst[offs:i+1])
+			t.Errorf("src:%d: %s\n", line, src[offs:i+1])
+			return
+		}
+		if s == '\n' {
+			line++
+			offs = i + 1
+		}
+	}
+	if len(dst) != len(src) {
+		t.Errorf("len(dst) = %d, len(src) = %d\nsrc = %q", len(dst), len(src), src)
+	}
+}
+
+// TestNode checks that formatting this (already formatted) file
+// with Node reproduces it byte for byte.
+func TestNode(t *testing.T) {
+	src, err := os.ReadFile(testfile)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, testfile, src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+
+	if err = Node(&buf, fset, file); err != nil {
+		t.Fatal("Node failed:", err)
+	}
+
+	diff(t, buf.Bytes(), src)
+}
+
+// Node is documented to not modify the AST.
+// Test that it is so even when numbers are normalized.
+func TestNodeNoModify(t *testing.T) {
+	const (
+		// 0000000123i is printed as 123i by number normalization,
+		// but the AST literal itself must keep its original value.
+		src    = "package p\n\nconst _ = 0000000123i\n"
+		golden = "package p\n\nconst _ = 123i\n"
+	)
+
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Capture original address and value of a BasicLit node
+	// which will undergo formatting changes during printing.
+	wantLit := file.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Values[0].(*ast.BasicLit)
+	wantVal := wantLit.Value
+
+	var buf bytes.Buffer
+	if err = Node(&buf, fset, file); err != nil {
+		t.Fatal("Node failed:", err)
+	}
+	diff(t, buf.Bytes(), []byte(golden))
+
+	// Check if anything changed after Node returned.
+	gotLit := file.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec).Values[0].(*ast.BasicLit)
+	gotVal := gotLit.Value
+
+	if gotLit != wantLit {
+		t.Errorf("got *ast.BasicLit address %p, want %p", gotLit, wantLit)
+	}
+	if gotVal != wantVal {
+		t.Errorf("got *ast.BasicLit value %q, want %q", gotVal, wantVal)
+	}
+}
+
+// TestSource checks that Source is idempotent on this already
+// formatted file.
+func TestSource(t *testing.T) {
+	src, err := os.ReadFile(testfile)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := Source(src)
+	if err != nil {
+		t.Fatal("Source failed:", err)
+	}
+
+	diff(t, res, src)
+}
+
+// Test cases that are expected to fail are marked by the prefix "ERROR".
+// The formatted result must look the same as the input for successful tests.
+var tests = []string{
+	// declaration lists
+	`import "golang.org/x/website/internal/backport/go/format"`,
+	"var x int",
+	"var x int\n\ntype T struct{}",
+
+	// statement lists
+	"x := 0",
+	"f(a, b, c)\nvar x int = f(1, 2, 3)",
+
+	// indentation, leading and trailing space
+	"\tx := 0\n\tgo f()",
+	"\tx := 0\n\tgo f()\n\n\n",
+	"\n\t\t\n\n\tx := 0\n\tgo f()\n\n\n",
+	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tgo f()\n\n\n",
+	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n",     // no indentation added inside raw strings
+	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\n\t\tfoo\n`\n\n\n", // no indentation removed inside raw strings
+
+	// comments
+	"/* Comment */",
+	"\t/* Comment */ ",
+	"\n/* Comment */ ",
+	"i := 5 /* Comment */",         // issue #5551
+	"\ta()\n//line :1",             // issue #11276
+	"\t//xxx\n\ta()\n//line :2",    // issue #11276
+	"\ta() //line :1\n\tb()\n",     // issue #11276
+	"x := 0\n//line :1\n//line :2", // issue #11276
+
+	// whitespace
+	"",     // issue #11275
+	" ",    // issue #11275
+	"\t",   // issue #11275
+	"\t\t", // issue #11275
+	"\n",   // issue #11275
+	"\n\n", // issue #11275
+	"\t\n", // issue #11275
+
+	// erroneous programs
+	"ERROR1 + 2 +",
+	"ERRORx :=  0",
+
+	// build comments; formatting must preserve //go:build lines
+	// both with and without a paired // +build line
+	"// copyright\n\n//go:build x\n\npackage p\n",
+	"// copyright\n\n//go:build x\n// +build x\n\npackage p\n",
+}
+
+// String is a string-typed convenience wrapper around Source,
+// used by TestPartial below.
+func String(s string) (string, error) {
+	res, err := Source([]byte(s))
+	if err != nil {
+		return "", err
+	}
+	return string(res), nil
+}
+
+// TestPartial runs every entry in tests through Source:
+// entries with the "ERROR" prefix must fail (or at least not round-trip),
+// all others must format to themselves unchanged.
+func TestPartial(t *testing.T) {
+	for _, src := range tests {
+		if strings.HasPrefix(src, "ERROR") {
+			// test expected to fail
+			src = src[5:] // remove ERROR prefix
+			res, err := String(src)
+			if err == nil && res == src {
+				t.Errorf("formatting succeeded but was expected to fail:\n%q", src)
+			}
+		} else {
+			// test expected to succeed
+			res, err := String(src)
+			if err != nil {
+				t.Errorf("formatting failed (%s):\n%q", err, src)
+			} else if res != src {
+				t.Errorf("formatting incorrect:\nsource: %q\nresult: %q", src, res)
+			}
+		}
+	}
+}
diff --git a/internal/backport/go/format/internal.go b/internal/backport/go/format/internal.go
new file mode 100644
index 0000000..c83432d
--- /dev/null
+++ b/internal/backport/go/format/internal.go
@@ -0,0 +1,176 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(gri): This file and the file src/cmd/gofmt/internal.go are
+// the same (but for this comment and the package name). Do not modify
+// one without the other. Determine if we can factor out functionality
+// in a public API. See also #11844 for context.
+
+package format
+
+import (
+	"bytes"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/printer"
+	"golang.org/x/website/internal/backport/go/token"
+	"strings"
+)
+
+// parse parses src, which was read from the named file,
+// as a Go source file, declaration, or statement list.
+//
+// On success it returns the parsed (possibly synthetically wrapped) file.
+// For source fragments it also returns a sourceAdj function that strips
+// the synthetic wrapping from formatted output, and an indentAdj
+// correction for the extra indentation the wrapping introduces
+// (consumed by format below).
+func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) (
+	file *ast.File,
+	sourceAdj func(src []byte, indent int) []byte,
+	indentAdj int,
+	err error,
+) {
+	// Try as whole source file.
+	file, err = parser.ParseFile(fset, filename, src, parserMode)
+	// If there's no error, return. If the error is that the source file didn't begin with a
+	// package line and source fragments are ok, fall through to
+	// try as a source fragment. Stop and return on any other error.
+	if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") {
+		return
+	}
+
+	// If this is a declaration list, make it a source file
+	// by inserting a package clause.
+	// Insert using a ';', not a newline, so that the line numbers
+	// in psrc match the ones in src.
+	psrc := append([]byte("package p;"), src...)
+	file, err = parser.ParseFile(fset, filename, psrc, parserMode)
+	if err == nil {
+		sourceAdj = func(src []byte, indent int) []byte {
+			// Remove the package clause.
+			// Gofmt has turned the ';' into a '\n'.
+			src = src[indent+len("package p\n"):]
+			return bytes.TrimSpace(src)
+		}
+		return
+	}
+	// If the error is that the source file didn't begin with a
+	// declaration, fall through to try as a statement list.
+	// Stop and return on any other error.
+	if !strings.Contains(err.Error(), "expected declaration") {
+		return
+	}
+
+	// If this is a statement list, make it a source file
+	// by inserting a package clause and turning the list
+	// into a function body. This handles expressions too.
+	// Insert using a ';', not a newline, so that the line numbers
+	// in fsrc match the ones in src. Add an extra '\n' before the '}'
+	// to make sure comments are flushed before the '}'.
+	fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}')
+	file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
+	if err == nil {
+		sourceAdj = func(src []byte, indent int) []byte {
+			// Cap adjusted indent to zero.
+			if indent < 0 {
+				indent = 0
+			}
+			// Remove the wrapping.
+			// Gofmt has turned the "; " into a "\n\n".
+			// There will be two non-blank lines with indent, hence 2*indent.
+			src = src[2*indent+len("package p\n\nfunc _() {"):]
+			// Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway
+			src = src[:len(src)-len("}\n")]
+			return bytes.TrimSpace(src)
+		}
+		// Gofmt has also indented the function body one level.
+		// Adjust that with indentAdj.
+		indentAdj = -1
+	}
+
+	// Succeeded, or out of options.
+	return
+}
+
+// format formats the given package file originally obtained from src
+// and adjusts the result based on the original source via sourceAdj
+// and indentAdj.
+//
+// A nil sourceAdj means src was a complete file; otherwise the original
+// leading/trailing whitespace and first-line indentation of src are
+// re-applied around the formatted fragment.
+func format(
+	fset *token.FileSet,
+	file *ast.File,
+	sourceAdj func(src []byte, indent int) []byte,
+	indentAdj int,
+	src []byte,
+	cfg printer.Config,
+) ([]byte, error) {
+	if sourceAdj == nil {
+		// Complete source file.
+		var buf bytes.Buffer
+		err := cfg.Fprint(&buf, fset, file)
+		if err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	}
+
+	// Partial source file.
+	// Determine and prepend leading space.
+	// i is the byte offset of the start of the last all-space line;
+	// j is the offset of the first non-space byte.
+	i, j := 0, 0
+	for j < len(src) && isSpace(src[j]) {
+		if src[j] == '\n' {
+			i = j + 1 // byte offset of last line in leading space
+		}
+		j++
+	}
+	var res []byte
+	res = append(res, src[:i]...)
+
+	// Determine and prepend indentation of first code line.
+	// Spaces are ignored unless there are no tabs,
+	// in which case spaces count as one tab.
+	indent := 0
+	hasSpace := false
+	for _, b := range src[i:j] {
+		switch b {
+		case ' ':
+			hasSpace = true
+		case '\t':
+			indent++
+		}
+	}
+	if indent == 0 && hasSpace {
+		indent = 1
+	}
+	for i := 0; i < indent; i++ {
+		res = append(res, '\t')
+	}
+
+	// Format the source.
+	// Write it without any leading and trailing space.
+	cfg.Indent = indent + indentAdj
+	var buf bytes.Buffer
+	err := cfg.Fprint(&buf, fset, file)
+	if err != nil {
+		return nil, err
+	}
+	// sourceAdj strips the synthetic "package p"/func wrapping (see parse).
+	out := sourceAdj(buf.Bytes(), cfg.Indent)
+
+	// If the adjusted output is empty, the source
+	// was empty but (possibly) for white space.
+	// The result is the incoming source.
+	if len(out) == 0 {
+		return src, nil
+	}
+
+	// Otherwise, append output to leading space.
+	res = append(res, out...)
+
+	// Determine and append trailing space.
+	i = len(src)
+	for i > 0 && isSpace(src[i-1]) {
+		i--
+	}
+	return append(res, src[i:]...), nil
+}
+
+// isSpace reports whether the byte is a space character.
+// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'.
+// Note: deliberately narrower than unicode.IsSpace (no '\v' or '\f').
+func isSpace(b byte) bool {
+	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
diff --git a/internal/backport/go/internal/typeparams/common.go b/internal/backport/go/internal/typeparams/common.go
new file mode 100644
index 0000000..9b82e60
--- /dev/null
+++ b/internal/backport/go/internal/typeparams/common.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams provides functions to work with type parameter data
+// stored in the AST, while these AST changes are guarded by a build
+// constraint.
+package typeparams
+
+// 'Hidden' parser modes to control the parsing of type-parameter related
+// features.
+// NOTE(review): the high bit positions appear chosen to stay clear of the
+// exported parser.Mode flags — confirm against parser.Mode before adding bits.
+const (
+	DisallowTypeSets = 1 << 29 // Disallow eliding 'interface' in constraint type sets.
+	DisallowParsing  = 1 << 30 // Disallow type parameters entirely.
+)
diff --git a/internal/backport/go/internal/typeparams/typeparams.go b/internal/backport/go/internal/typeparams/typeparams.go
new file mode 100644
index 0000000..2f7a5ce
--- /dev/null
+++ b/internal/backport/go/internal/typeparams/typeparams.go
@@ -0,0 +1,54 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// PackIndexExpr returns an *ast.IndexExpr for a single index expression,
+// or an *ast.IndexListExpr for multiple indices, spanning x[lbrack:rbrack].
+// It panics if exprs is empty.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, exprs []ast.Expr, rbrack token.Pos) ast.Expr {
+	switch len(exprs) {
+	case 0:
+		panic("internal error: PackIndexExpr with empty expr slice")
+	case 1:
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  exprs[0],
+			Rbrack: rbrack,
+		}
+	default:
+		return &ast.IndexListExpr{
+			X:       x,
+			Lbrack:  lbrack,
+			Indices: exprs,
+			Rbrack:  rbrack,
+		}
+	}
+}
+
+// IndexExpr wraps an ast.IndexExpr or ast.IndexListExpr.
+//
+// Orig holds the original ast.Expr from which this IndexExpr was derived.
+// The embedded IndexListExpr gives uniform access to the indices whether
+// the original had one index or several.
+type IndexExpr struct {
+	Orig ast.Expr // the wrapped expr, which may be distinct from the IndexListExpr below.
+	*ast.IndexListExpr
+}
+
+// UnpackIndexExpr wraps n in an IndexExpr, normalizing a single-index
+// *ast.IndexExpr into list form. It returns nil if n is neither an
+// *ast.IndexExpr nor an *ast.IndexListExpr.
+func UnpackIndexExpr(n ast.Node) *IndexExpr {
+	switch e := n.(type) {
+	case *ast.IndexExpr:
+		return &IndexExpr{e, &ast.IndexListExpr{
+			X:       e.X,
+			Lbrack:  e.Lbrack,
+			Indices: []ast.Expr{e.Index},
+			Rbrack:  e.Rbrack,
+		}}
+	case *ast.IndexListExpr:
+		return &IndexExpr{e, e}
+	}
+	return nil
+}
diff --git a/internal/backport/go/parser/error_test.go b/internal/backport/go/parser/error_test.go
new file mode 100644
index 0000000..38658c2
--- /dev/null
+++ b/internal/backport/go/parser/error_test.go
@@ -0,0 +1,202 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a parser test harness. The files in the testdata
+// directory are parsed and the errors reported are compared against the
+// error messages expected in the test files. The test files must end in
+// .src rather than .go so that they are not disturbed by gofmt runs.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+//	package p
+//	func f() {
+//		_ = x /* ERROR "not declared" */ + 1
+//	}
+
+package parser
+
+import (
+	"flag"
+	"golang.org/x/website/internal/backport/go/internal/typeparams"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"testing"
+)
+
+// traceErrs enables parser tracing for the error tests via -trace_errs.
+var traceErrs = flag.Bool("trace_errs", false, "whether to enable tracing for error tests")
+
+// testdata is the directory holding the .src/.go2 error test files.
+const testdata = "testdata"
+
+// getFile assumes that each filename occurs at most once
+// in fset; it returns the matching token.File, or nil if absent,
+// and panics on a duplicate filename.
+func getFile(fset *token.FileSet, filename string) (file *token.File) {
+	fset.Iterate(func(f *token.File) bool {
+		if f.Name() == filename {
+			if file != nil {
+				panic(filename + " used multiple times")
+			}
+			file = f
+		}
+		return true
+	})
+	return file
+}
+
+// getPos converts a (filename, byte offset) pair into a token.Pos
+// within fset, or token.NoPos if the file is not in the set.
+func getPos(fset *token.FileSet, filename string, offset int) token.Pos {
+	if f := getFile(fset, filename); f != nil {
+		return f.Pos(offset)
+	}
+	return token.NoPos
+}
+
+// ERROR comments must be of the form /* ERROR "rx" */ and rx is
+// a regular expression that matches the expected error message.
+// The special form /* ERROR HERE "rx" */ must be used for error
+// messages that appear immediately after a token, rather than at
+// a token's position.
+// Submatch 1 is the optional "HERE" marker; submatch 2 is rx.
+var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`)
+
+// expectedErrors collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+// Each expected error is keyed by the position of the token preceding the
+// ERROR comment, or (for the HERE form) the position just past that token.
+func expectedErrors(fset *token.FileSet, filename string, src []byte) map[token.Pos]string {
+	errors := make(map[token.Pos]string)
+
+	var s scanner.Scanner
+	// file was parsed already - do not add it again to the file
+	// set otherwise the position information returned here will
+	// not match the position information collected by the parser
+	s.Init(getFile(fset, filename), src, nil, scanner.ScanComments)
+	var prev token.Pos // position of last non-comment, non-semicolon token
+	var here token.Pos // position immediately after the token at position prev
+
+	for {
+		pos, tok, lit := s.Scan()
+		switch tok {
+		case token.EOF:
+			return errors
+		case token.COMMENT:
+			s := errRx.FindStringSubmatch(lit)
+			if len(s) == 3 {
+				pos := prev
+				if s[1] == "HERE" {
+					pos = here
+				}
+				errors[pos] = s[2]
+			}
+		case token.SEMICOLON:
+			// don't use the position of auto-inserted (invisible) semicolons
+			if lit != ";" {
+				break
+			}
+			fallthrough
+		default:
+			prev = pos
+			var l int // token length
+			if tok.IsLiteral() {
+				l = len(lit)
+			} else {
+				l = len(tok.String())
+			}
+			here = prev + token.Pos(l)
+		}
+	}
+}
+
+// compareErrors compares the map of expected error messages with the list
+// of found errors and reports discrepancies.
+// Matched entries are deleted from expected; anything left over at the end
+// is an error the parser failed to report.
+func compareErrors(t *testing.T, fset *token.FileSet, expected map[token.Pos]string, found scanner.ErrorList) {
+	t.Helper()
+	for _, error := range found {
+		// error.Pos is a token.Position, but we want
+		// a token.Pos so we can do a map lookup
+		pos := getPos(fset, error.Pos.Filename, error.Pos.Offset)
+		if msg, found := expected[pos]; found {
+			// we expect a message at pos; check if it matches
+			rx, err := regexp.Compile(msg)
+			if err != nil {
+				t.Errorf("%s: %v", error.Pos, err)
+				continue
+			}
+			if match := rx.MatchString(error.Msg); !match {
+				t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
+				continue
+			}
+			// we have a match - eliminate this error
+			delete(expected, pos)
+		} else {
+			// To keep in mind when analyzing failed test output:
+			// If the same error position occurs multiple times in errors,
+			// this message will be triggered (because the first error at
+			// the position removes this position from the expected errors).
+			t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
+		}
+	}
+
+	// there should be no expected errors left
+	if len(expected) > 0 {
+		t.Errorf("%d errors not reported:", len(expected))
+		for pos, msg := range expected {
+			t.Errorf("%s: %s\n", fset.Position(pos), msg)
+		}
+	}
+}
+
+// checkErrors parses filename (or input, if non-nil) with the given mode
+// and verifies the parser's reported errors against the file's ERROR
+// comments when expectErrors is set (no errors are expected otherwise).
+func checkErrors(t *testing.T, filename string, input interface{}, mode Mode, expectErrors bool) {
+	t.Helper()
+	src, err := readSource(filename, input)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	fset := token.NewFileSet()
+	_, err = ParseFile(fset, filename, src, mode)
+	found, ok := err.(scanner.ErrorList)
+	if err != nil && !ok {
+		// Non-ErrorList errors (e.g. I/O) are failures, not parse results.
+		t.Error(err)
+		return
+	}
+	found.RemoveMultiples()
+
+	expected := map[token.Pos]string{}
+	if expectErrors {
+		// we are expecting the following errors
+		// (collect these after parsing a file so that it is found in the file set)
+		expected = expectedErrors(fset, filename, src)
+	}
+
+	// verify errors returned by the parser
+	compareErrors(t, fset, expected, found)
+}
+
+// TestErrors runs checkErrors over every .src and .go2 file in testdata.
+// Type-parameter parsing is disabled for .src files (only .go2 files
+// exercise generics syntax).
+func TestErrors(t *testing.T) {
+	list, err := os.ReadDir(testdata)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, d := range list {
+		name := d.Name()
+		t.Run(name, func(t *testing.T) {
+			if !d.IsDir() && !strings.HasPrefix(name, ".") && (strings.HasSuffix(name, ".src") || strings.HasSuffix(name, ".go2")) {
+				mode := DeclarationErrors | AllErrors
+				if !strings.HasSuffix(name, ".go2") {
+					mode |= typeparams.DisallowParsing
+				}
+				if *traceErrs {
+					mode |= Trace
+				}
+				checkErrors(t, filepath.Join(testdata, name), nil, mode, true)
+			}
+		})
+	}
+}
diff --git a/internal/backport/go/parser/example_test.go b/internal/backport/go/parser/example_test.go
new file mode 100644
index 0000000..35fdd33
--- /dev/null
+++ b/internal/backport/go/parser/example_test.go
@@ -0,0 +1,43 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser_test
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// ExampleParseFile parses a small source text in ImportsOnly mode and
+// prints the import paths found in the resulting AST.
+func ExampleParseFile() {
+	fset := token.NewFileSet() // positions are relative to fset
+
+	src := `package foo
+
+import (
+	"fmt"
+	"time"
+)
+
+func bar() {
+	fmt.Println(time.Now())
+}`
+
+	// Parse src but stop after processing the imports.
+	f, err := parser.ParseFile(fset, "", src, parser.ImportsOnly)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Print the imports from the file's AST.
+	for _, s := range f.Imports {
+		fmt.Println(s.Path.Value)
+	}
+
+	// output:
+	//
+	// "fmt"
+	// "time"
+}
diff --git a/internal/backport/go/parser/interface.go b/internal/backport/go/parser/interface.go
new file mode 100644
index 0000000..d0fc6e8
--- /dev/null
+++ b/internal/backport/go/parser/interface.go
@@ -0,0 +1,232 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the exported entry points for invoking the parser.
+
+package parser
+
+import (
+	"bytes"
+	"errors"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// If src != nil, readSource converts src to a []byte if possible;
+// otherwise it returns an error. If src == nil, readSource returns
+// the result of reading the file specified by filename.
+// Accepted src types: string, []byte, *bytes.Buffer, io.Reader.
+func readSource(filename string, src interface{}) ([]byte, error) {
+	if src != nil {
+		switch s := src.(type) {
+		case string:
+			return []byte(s), nil
+		case []byte:
+			return s, nil
+		case *bytes.Buffer:
+			// is io.Reader, but src is already available in []byte form
+			if s != nil {
+				return s.Bytes(), nil
+			}
+		case io.Reader:
+			return io.ReadAll(s)
+		}
+		// A typed-nil *bytes.Buffer also falls through to here.
+		return nil, errors.New("invalid source")
+	}
+	return os.ReadFile(filename)
+}
+
+// A Mode value is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+type Mode uint
+
+// Note that AllErrors is an alias for SpuriousErrors, kept for
+// backward compatibility.
+const (
+	PackageClauseOnly    Mode             = 1 << iota // stop parsing after package clause
+	ImportsOnly                                       // stop parsing after import declarations
+	ParseComments                                     // parse comments and add them to AST
+	Trace                                             // print a trace of parsed productions
+	DeclarationErrors                                 // report declaration errors
+	SpuriousErrors                                    // same as AllErrors, for backward-compatibility
+	SkipObjectResolution                              // don't resolve identifiers to objects - see ParseFile
+	AllErrors            = SpuriousErrors             // report all errors (not just the first 10 on different lines)
+)
+
+// ParseFile parses the source code of a single Go source file and returns
+// the corresponding ast.File node. The source code may be provided via
+// the filename of the source file, or via the src parameter.
+//
+// If src != nil, ParseFile parses the source from src and the filename is
+// only used when recording position information. The type of the argument
+// for the src parameter must be string, []byte, or io.Reader.
+// If src == nil, ParseFile parses the file specified by filename.
+//
+// The mode parameter controls the amount of source text parsed and other
+// optional parser functionality. If the SkipObjectResolution mode bit is set,
+// the object resolution phase of parsing will be skipped, causing File.Scope,
+// File.Unresolved, and all Ident.Obj fields to be nil.
+//
+// Position information is recorded in the file set fset, which must not be
+// nil.
+//
+// If the source couldn't be read, the returned AST is nil and the error
+// indicates the specific failure. If the source was read but syntax
+// errors were found, the result is a partial AST (with ast.Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors
+// are returned via a scanner.ErrorList which is sorted by source position.
+func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) (f *ast.File, err error) {
+	if fset == nil {
+		panic("parser.ParseFile: no token.FileSet provided (fset == nil)")
+	}
+
+	// get source
+	text, err := readSource(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	var p parser
+	defer func() {
+		if e := recover(); e != nil {
+			// resume same panic if it's not a bailout
+			// (bailout panics are the parser's internal abort
+			// mechanism — presumably defined in parser.go; verify there)
+			if _, ok := e.(bailout); !ok {
+				panic(e)
+			}
+		}
+
+		// set result values
+		if f == nil {
+			// source is not a valid Go source file - satisfy
+			// ParseFile API and return a valid (but) empty
+			// *ast.File
+			f = &ast.File{
+				Name:  new(ast.Ident),
+				Scope: ast.NewScope(nil),
+			}
+		}
+
+		// Report errors sorted by source position.
+		p.errors.Sort()
+		err = p.errors.Err()
+	}()
+
+	// parse source
+	p.init(fset, filename, text, mode)
+	f = p.parseFile()
+
+	return
+}
+
+// ParseDir calls ParseFile for all files with names ending in ".go" in the
+// directory specified by path and returns a map of package name -> package
+// AST with all the packages found.
+//
+// If filter != nil, only the files with fs.FileInfo entries passing through
+// the filter (and ending in ".go") are considered. The mode bits are passed
+// to ParseFile unchanged. Position information is recorded in fset, which
+// must not be nil.
+//
+// If the directory couldn't be read, a nil map and the respective error are
+// returned. If a parse error occurred, a non-nil but incomplete map and the
+// first error encountered are returned.
+func ParseDir(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
+	list, err := os.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+
+	pkgs = make(map[string]*ast.Package)
+	for _, d := range list {
+		if d.IsDir() || !strings.HasSuffix(d.Name(), ".go") {
+			continue
+		}
+		if filter != nil {
+			info, err := d.Info()
+			if err != nil {
+				return nil, err
+			}
+			if !filter(info) {
+				continue
+			}
+		}
+		filename := filepath.Join(path, d.Name())
+		if src, err := ParseFile(fset, filename, nil, mode); err == nil {
+			// Group files by their declared package name.
+			name := src.Name.Name
+			pkg, found := pkgs[name]
+			if !found {
+				pkg = &ast.Package{
+					Name:  name,
+					Files: make(map[string]*ast.File),
+				}
+				pkgs[name] = pkg
+			}
+			pkg.Files[filename] = src
+		} else if first == nil {
+			// Remember only the first parse error; keep going.
+			first = err
+		}
+	}
+
+	return
+}
+
+// ParseExprFrom is a convenience function for parsing an expression.
+// The arguments have the same meaning as for ParseFile, but the source must
+// be a valid Go (type or value) expression. Specifically, fset must not
+// be nil.
+//
+// If the source couldn't be read, the returned AST is nil and the error
+// indicates the specific failure. If the source was read but syntax
+// errors were found, the result is a partial AST (with ast.Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors
+// are returned via a scanner.ErrorList which is sorted by source position.
+func ParseExprFrom(fset *token.FileSet, filename string, src interface{}, mode Mode) (expr ast.Expr, err error) {
+	if fset == nil {
+		panic("parser.ParseExprFrom: no token.FileSet provided (fset == nil)")
+	}
+
+	// get source
+	text, err := readSource(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	var p parser
+	defer func() {
+		if e := recover(); e != nil {
+			// resume same panic if it's not a bailout
+			if _, ok := e.(bailout); !ok {
+				panic(e)
+			}
+		}
+		p.errors.Sort()
+		err = p.errors.Err()
+	}()
+
+	// parse expr
+	p.init(fset, filename, text, mode)
+	expr = p.parseRhsOrType()
+
+	// If a semicolon was inserted, consume it;
+	// report an error if there's more tokens.
+	if p.tok == token.SEMICOLON && p.lit == "\n" {
+		p.next()
+	}
+	p.expect(token.EOF)
+
+	return
+}
+
+// ParseExpr is a convenience function for obtaining the AST of an expression x.
+// The position information recorded in the AST is undefined. The filename used
+// in error messages is the empty string.
+//
+// If syntax errors were found, the result is a partial AST (with ast.Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors are
+// returned via a scanner.ErrorList which is sorted by source position.
+func ParseExpr(x string) (ast.Expr, error) {
+	// Mode 0: default parsing (no comments, no tracing).
+	return ParseExprFrom(token.NewFileSet(), "", []byte(x), 0)
+}
diff --git a/internal/backport/go/parser/parser.go b/internal/backport/go/parser/parser.go
new file mode 100644
index 0000000..b258c0b
--- /dev/null
+++ b/internal/backport/go/parser/parser.go
@@ -0,0 +1,2894 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parser implements a parser for Go source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the
+// output is an abstract syntax tree (AST) representing the Go source. The
+// parser is invoked through one of the Parse* functions.
+//
+// The parser accepts a larger language than is syntactically permitted by
+// the Go spec, for simplicity, and for improved robustness in the presence
+// of syntax errors. For instance, in method declarations, the receiver is
+// treated like an ordinary parameter list and thus may contain multiple
+// entries where the spec permits exactly one. Consequently, the corresponding
+// field in the AST (ast.FuncDecl.Recv) field is not restricted to one entry.
+package parser
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/internal/typeparams"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+	file    *token.File
+	errors  scanner.ErrorList
+	scanner scanner.Scanner
+
+	// Tracing/debugging
+	mode   Mode // parsing mode
+	trace  bool // == (mode&Trace != 0)
+	indent int  // indentation used for tracing output
+
+	// Comments
+	comments    []*ast.CommentGroup
+	leadComment *ast.CommentGroup // last lead comment
+	lineComment *ast.CommentGroup // last line comment
+
+	// Next token
+	pos token.Pos   // token position
+	tok token.Token // one token look-ahead
+	lit string      // token literal
+
+	// Error recovery
+	// (used to limit the number of calls to parser.advance
+	// w/o making scanning progress - avoids potential endless
+	// loops across multiple parser functions during error recovery)
+	syncPos token.Pos // last synchronization position
+	syncCnt int       // number of parser.advance calls without progress
+
+	// Non-syntactic parser control
+	exprLev int  // < 0: in control clause, >= 0: in expression
+	inRhs   bool // if set, the parser is parsing a rhs expression
+
+	imports []*ast.ImportSpec // list of imports
+}
+
+// init sets up the scanner for the given file and source and primes the
+// one-token look-ahead by calling p.next.
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
+	p.file = fset.AddFile(filename, -1, len(src))
+	var m scanner.Mode
+	if mode&ParseComments != 0 {
+		m = scanner.ScanComments
+	}
+	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
+	p.scanner.Init(p.file, src, eh, m)
+
+	p.mode = mode
+	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+	p.next()
+}
+
+// allowGenerics and allowTypeSets report whether generic code (type
+// parameters) and type-set notation, respectively, may be parsed under
+// the current mode.
+func (p *parser) allowGenerics() bool { return p.mode&typeparams.DisallowParsing == 0 }
+func (p *parser) allowTypeSets() bool { return p.mode&typeparams.DisallowTypeSets == 0 }
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+// printTrace prints one line of tracing output, prefixed with the current
+// source position and indented by 2*p.indent dot-columns.
+func (p *parser) printTrace(a ...interface{}) {
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	pos := p.file.Position(p.pos)
+	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+// trace prints msg and increases the trace indentation; it returns p so it
+// composes with un in the defer pattern below.
+func trace(p *parser, msg string) *parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *parser) {
+	p.indent--
+	p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+	// Because of one-token look-ahead, print the previous token
+	// when tracing as it provides a more readable output. The
+	// very first token (!p.pos.IsValid()) is not initialized
+	// (it is token.ILLEGAL), so don't print it.
+	if p.trace && p.pos.IsValid() {
+		s := p.tok.String()
+		switch {
+		case p.tok.IsLiteral():
+			p.printTrace(s, p.lit)
+		case p.tok.IsOperator(), p.tok.IsKeyword():
+			p.printTrace("\"" + s + "\"")
+		default:
+			p.printTrace(s)
+		}
+	}
+
+	p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+	// /*-style comments may end on a different line than where they start.
+	// Scan the comment for '\n' chars and adjust endline accordingly.
+	// (p.lit[1] == '*' distinguishes a /*-style comment from a //-style one.)
+	endline = p.file.Line(p.pos)
+	if p.lit[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.lit); i++ {
+			if p.lit[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
+	p.next0()
+
+	return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. A non-comment token or n
+// empty lines terminate a comment group.
+func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.file.Line(p.pos)
+	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add comment group to the comments list
+	comments = &ast.CommentGroup{List: list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+func (p *parser) next() {
+	p.leadComment = nil
+	p.lineComment = nil
+	prev := p.pos // position of the token preceding any comments
+	p.next0()
+
+	if p.tok == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		if p.file.Line(p.pos) == p.file.Line(prev) {
+			// The comment is on same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup(0)
+			if p.file.Line(p.pos) != endline || p.tok == token.EOF {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				p.lineComment = comment
+			}
+		}
+
+		// consume successor comments, if any
+		// (endline == -1 if no successor group is consumed below)
+		endline = -1
+		for p.tok == token.COMMENT {
+			comment, endline = p.consumeCommentGroup(1)
+		}
+
+		if endline+1 == p.file.Line(p.pos) {
+			// The next token is following on the line immediately after the
+			// comment group, thus the last comment group is a lead comment.
+			p.leadComment = comment
+		}
+	}
+}
+
+// A bailout panic is raised to indicate early termination.
+type bailout struct{}
+
+// error records a parse error at pos, subject to the error-limiting
+// rules below; it may panic with bailout to stop parsing early.
+func (p *parser) error(pos token.Pos, msg string) {
+	if p.trace {
+		defer un(trace(p, "error: "+msg))
+	}
+
+	epos := p.file.Position(pos)
+
+	// If AllErrors is not set, discard errors reported on the same line
+	// as the last recorded error and stop parsing if there are more than
+	// 10 errors.
+	if p.mode&AllErrors == 0 {
+		n := len(p.errors)
+		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
+			return // discard - likely a spurious error
+		}
+		if n > 10 {
+			panic(bailout{})
+		}
+	}
+
+	p.errors.Add(epos, msg)
+}
+
+// errorExpected reports an "expected ..." error at pos, adding detail
+// about the token actually found when pos is the current position.
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+	msg = "expected " + msg
+	if pos == p.pos {
+		// the error happened at the current position;
+		// make the error message more specific
+		switch {
+		case p.tok == token.SEMICOLON && p.lit == "\n":
+			msg += ", found newline"
+		case p.tok.IsLiteral():
+			// print 123 rather than 'INT', etc.
+			msg += ", found " + p.lit
+		default:
+			msg += ", found '" + p.tok.String() + "'"
+		}
+	}
+	p.error(pos, msg)
+}
+
+// expect checks that the current token is tok and advances past it,
+// reporting an error (but still advancing) if it is not.
+// It returns the position where tok was expected.
+func (p *parser) expect(tok token.Token) token.Pos {
+	pos := p.pos
+	if p.tok != tok {
+		p.errorExpected(pos, "'"+tok.String()+"'")
+	}
+	p.next() // make progress
+	return pos
+}
+
+// expect2 is like expect, but it returns an invalid position
+// if the expected token is not found.
+func (p *parser) expect2(tok token.Token) (pos token.Pos) {
+	if p.tok == tok {
+		pos = p.pos
+	} else {
+		p.errorExpected(p.pos, "'"+tok.String()+"'")
+	}
+	p.next() // make progress
+	return
+}
+
+// expectClosing is like expect but provides a better error message
+// for the common case of a missing comma before a newline.
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
+	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
+		p.error(p.pos, "missing ',' before newline in "+context)
+		p.next()
+	}
+	return p.expect(tok)
+}
+
+// expectSemi consumes a semicolon, tolerating (with an error) a ','
+// in its place and permitting omission before a closing ')' or '}'.
+func (p *parser) expectSemi() {
+	// semicolon is optional before a closing ')' or '}'
+	if p.tok != token.RPAREN && p.tok != token.RBRACE {
+		switch p.tok {
+		case token.COMMA:
+			// permit a ',' instead of a ';' but complain
+			p.errorExpected(p.pos, "';'")
+			fallthrough
+		case token.SEMICOLON:
+			p.next()
+		default:
+			p.errorExpected(p.pos, "';'")
+			p.advance(stmtStart)
+		}
+	}
+}
+
+// atComma reports whether parsing of a comma-separated list should
+// continue: true at an actual ',' and also (after reporting a missing
+// comma) at any token other than the expected closing token.
+func (p *parser) atComma(context string, follow token.Token) bool {
+	if p.tok == token.COMMA {
+		return true
+	}
+	if p.tok != follow {
+		msg := "missing ','"
+		if p.tok == token.SEMICOLON && p.lit == "\n" {
+			msg += " before newline"
+		}
+		p.error(p.pos, msg+" in "+context)
+		return true // "insert" comma and continue
+	}
+	return false
+}
+
+// assert panics with an internal-error message if cond is false.
+func assert(cond bool, msg string) {
+	if !cond {
+		panic("golang.org/x/website/internal/backport/go/parser internal error: " + msg)
+	}
+}
+
+// advance consumes tokens until the current token p.tok
+// is in the 'to' set, or token.EOF. For error recovery.
+func (p *parser) advance(to map[token.Token]bool) {
+	for ; p.tok != token.EOF; p.next() {
+		if to[p.tok] {
+			// Return only if parser made some progress since last
+			// sync or if it has not reached 10 advance calls without
+			// progress. Otherwise consume at least one token to
+			// avoid an endless parser loop (it is possible that
+			// both parseOperand and parseStmt call advance and
+			// correctly do not advance, thus the need for the
+			// invocation limit p.syncCnt).
+			if p.pos == p.syncPos && p.syncCnt < 10 {
+				p.syncCnt++
+				return
+			}
+			if p.pos > p.syncPos {
+				p.syncPos = p.pos
+				p.syncCnt = 0
+				return
+			}
+			// Reaching here indicates a parser bug, likely an
+			// incorrect token list in this function, but it only
+			// leads to skipping of possibly correct code if a
+			// previous error is present, and thus is preferred
+			// over a non-terminating parse.
+		}
+	}
+}
+
+// stmtStart is the set of tokens that may begin a statement;
+// it serves as a synchronization set for advance.
+var stmtStart = map[token.Token]bool{
+	token.BREAK:       true,
+	token.CONST:       true,
+	token.CONTINUE:    true,
+	token.DEFER:       true,
+	token.FALLTHROUGH: true,
+	token.FOR:         true,
+	token.GO:          true,
+	token.GOTO:        true,
+	token.IF:          true,
+	token.RETURN:      true,
+	token.SELECT:      true,
+	token.SWITCH:      true,
+	token.TYPE:        true,
+	token.VAR:         true,
+}
+
+// declStart is the set of tokens that may begin a declaration;
+// it serves as a synchronization set for advance.
+var declStart = map[token.Token]bool{
+	token.CONST: true,
+	token.TYPE:  true,
+	token.VAR:   true,
+}
+
+// exprEnd is the set of tokens that may follow an expression;
+// it serves as a synchronization set for advance.
+var exprEnd = map[token.Token]bool{
+	token.COMMA:     true,
+	token.COLON:     true,
+	token.SEMICOLON: true,
+	token.RPAREN:    true,
+	token.RBRACK:    true,
+	token.RBRACE:    true,
+}
+
+// safePos returns a valid file position for a given position: If pos
+// is valid to begin with, safePos returns pos. If pos is out-of-range,
+// safePos returns the EOF position.
+//
+// This is hack to work around "artificial" end positions in the AST which
+// are computed by adding 1 to (presumably valid) token positions. If the
+// token positions are invalid due to parse errors, the resulting end position
+// may be past the file's EOF position, which would lead to panics if used
+// later on.
+func (p *parser) safePos(pos token.Pos) (res token.Pos) {
+	defer func() {
+		if recover() != nil {
+			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
+		}
+	}()
+	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
+	return pos
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+// parseIdent parses an identifier, substituting "_" (after reporting an
+// error via expect) if the current token is not an IDENT.
+func (p *parser) parseIdent() *ast.Ident {
+	pos := p.pos
+	name := "_"
+	if p.tok == token.IDENT {
+		name = p.lit
+		p.next()
+	} else {
+		p.expect(token.IDENT) // use expect() error handling
+	}
+	return &ast.Ident{NamePos: pos, Name: name}
+}
+
+// parseIdentList parses a non-empty comma-separated list of identifiers.
+func (p *parser) parseIdentList() (list []*ast.Ident) {
+	if p.trace {
+		defer un(trace(p, "IdentList"))
+	}
+
+	list = append(list, p.parseIdent())
+	for p.tok == token.COMMA {
+		p.next()
+		list = append(list, p.parseIdent())
+	}
+
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList() (list []ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ExpressionList"))
+	}
+
+	list = append(list, p.checkExpr(p.parseExpr()))
+	for p.tok == token.COMMA {
+		p.next()
+		list = append(list, p.checkExpr(p.parseExpr()))
+	}
+
+	return
+}
+
+// parseList parses an expression list with p.inRhs temporarily set to inRhs.
+func (p *parser) parseList(inRhs bool) []ast.Expr {
+	old := p.inRhs
+	p.inRhs = inRhs
+	list := p.parseExprList()
+	p.inRhs = old
+	return list
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// parseType parses a type, producing an ast.BadExpr (and recovering via
+// advance) if no type is present at the current token.
+func (p *parser) parseType() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Type"))
+	}
+
+	typ := p.tryIdentOrType()
+
+	if typ == nil {
+		pos := p.pos
+		p.errorExpected(pos, "type")
+		p.advance(exprEnd)
+		return &ast.BadExpr{From: pos, To: p.pos}
+	}
+
+	return typ
+}
+
+// parseQualifiedIdent parses a (possibly package-qualified) type name,
+// followed by an optional type-argument list when generics are allowed.
+func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "QualifiedIdent"))
+	}
+
+	typ := p.parseTypeName(ident)
+	if p.tok == token.LBRACK && p.allowGenerics() {
+		typ = p.parseTypeInstance(typ)
+	}
+
+	return typ
+}
+
+// If the result is an identifier, it is not resolved.
+// ident, if non-nil, is the already-parsed leading identifier.
+func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "TypeName"))
+	}
+
+	if ident == nil {
+		ident = p.parseIdent()
+	}
+
+	if p.tok == token.PERIOD {
+		// ident is a package name
+		p.next()
+		sel := p.parseIdent()
+		return &ast.SelectorExpr{X: ident, Sel: sel}
+	}
+
+	return ident
+}
+
+// "[" has already been consumed, and lbrack is its position.
+// If len != nil it is the already consumed array length.
+func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType {
+	if p.trace {
+		defer un(trace(p, "ArrayType"))
+	}
+
+	if len == nil {
+		p.exprLev++
+		// always permit ellipsis for more fault-tolerant parsing
+		if p.tok == token.ELLIPSIS {
+			len = &ast.Ellipsis{Ellipsis: p.pos}
+			p.next()
+		} else if p.tok != token.RBRACK {
+			len = p.parseRhs()
+		}
+		p.exprLev--
+	}
+	if p.tok == token.COMMA {
+		// Trailing commas are accepted in type parameter
+		// lists but not in array type declarations.
+		// Accept for better error handling but complain.
+		p.error(p.pos, "unexpected comma; expecting ]")
+		p.next()
+	}
+	p.expect(token.RBRACK)
+	elt := p.parseType()
+	return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
+}
+
+// parseArrayFieldOrTypeInstance disambiguates, after an identifier x, a
+// struct field of array/slice type (x []E, x [P]E) from a generic type
+// instantiation (x[P1, P2, ...]); for an instantiation the returned
+// identifier is nil.
+func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ArrayFieldOrTypeInstance"))
+	}
+
+	// TODO(gri) Should we allow a trailing comma in a type argument
+	//           list such as T[P,]? (We do in parseTypeInstance).
+	lbrack := p.expect(token.LBRACK)
+	var args []ast.Expr
+	var firstComma token.Pos
+	// TODO(rfindley): consider changing parseRhsOrType so that this function variable
+	// is not needed.
+	argparser := p.parseRhsOrType
+	if !p.allowGenerics() {
+		argparser = p.parseRhs
+	}
+	if p.tok != token.RBRACK {
+		p.exprLev++
+		args = append(args, argparser())
+		for p.tok == token.COMMA {
+			if !firstComma.IsValid() {
+				firstComma = p.pos
+			}
+			p.next()
+			args = append(args, argparser())
+		}
+		p.exprLev--
+	}
+	rbrack := p.expect(token.RBRACK)
+
+	if len(args) == 0 {
+		// x []E
+		elt := p.parseType()
+		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
+	}
+
+	// x [P]E or x[P]
+	if len(args) == 1 {
+		elt := p.tryIdentOrType()
+		if elt != nil {
+			// x [P]E
+			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
+		}
+		if !p.allowGenerics() {
+			p.error(rbrack, "missing element type in array type expression")
+			return nil, &ast.BadExpr{From: args[0].Pos(), To: args[0].End()}
+		}
+	}
+
+	if !p.allowGenerics() {
+		p.error(firstComma, "expected ']', found ','")
+		return x, &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
+	}
+
+	// x[P], x[P1, P2], ...
+	return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack)
+}
+
+// parseFieldDecl parses a single struct field declaration — named fields,
+// an embedded (possibly qualified or instantiated) type — including an
+// optional tag and the terminating semicolon.
+func (p *parser) parseFieldDecl() *ast.Field {
+	if p.trace {
+		defer un(trace(p, "FieldDecl"))
+	}
+
+	doc := p.leadComment
+
+	var names []*ast.Ident
+	var typ ast.Expr
+	if p.tok == token.IDENT {
+		name := p.parseIdent()
+		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
+			// embedded type
+			typ = name
+			if p.tok == token.PERIOD {
+				typ = p.parseQualifiedIdent(name)
+			}
+		} else {
+			// name1, name2, ... T
+			names = []*ast.Ident{name}
+			for p.tok == token.COMMA {
+				p.next()
+				names = append(names, p.parseIdent())
+			}
+			// Careful dance: We don't know if we have an embedded instantiated
+			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
+			if len(names) == 1 && p.tok == token.LBRACK {
+				name, typ = p.parseArrayFieldOrTypeInstance(name)
+				if name == nil {
+					names = nil
+				}
+			} else {
+				// T P
+				typ = p.parseType()
+			}
+		}
+	} else {
+		// embedded, possibly generic type
+		// (using the enclosing parentheses to distinguish it from a named field declaration)
+		// TODO(rFindley) confirm that this doesn't allow parenthesized embedded type
+		typ = p.parseType()
+	}
+
+	var tag *ast.BasicLit
+	if p.tok == token.STRING {
+		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+		p.next()
+	}
+
+	p.expectSemi() // call before accessing p.linecomment
+
+	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: p.lineComment}
+	return field
+}
+
+// parseStructType parses a struct type: "struct" "{" fields "}".
+func (p *parser) parseStructType() *ast.StructType {
+	if p.trace {
+		defer un(trace(p, "StructType"))
+	}
+
+	pos := p.expect(token.STRUCT)
+	lbrace := p.expect(token.LBRACE)
+	var list []*ast.Field
+	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
+		// a field declaration cannot start with a '(' but we accept
+		// it here for more robust parsing and better error messages
+		// (parseFieldDecl will check and complain if necessary)
+		list = append(list, p.parseFieldDecl())
+	}
+	rbrace := p.expect(token.RBRACE)
+
+	return &ast.StructType{
+		Struct: pos,
+		Fields: &ast.FieldList{
+			Opening: lbrace,
+			List:    list,
+			Closing: rbrace,
+		},
+	}
+}
+
+// parsePointerType parses a pointer type: "*" BaseType.
+func (p *parser) parsePointerType() *ast.StarExpr {
+	if p.trace {
+		defer un(trace(p, "PointerType"))
+	}
+
+	star := p.expect(token.MUL)
+	base := p.parseType()
+
+	return &ast.StarExpr{Star: star, X: base}
+}
+
+// parseDotsType parses a variadic parameter type: "..." Type.
+func (p *parser) parseDotsType() *ast.Ellipsis {
+	if p.trace {
+		defer un(trace(p, "DotsType"))
+	}
+
+	pos := p.expect(token.ELLIPSIS)
+	elt := p.parseType()
+
+	return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
+}
+
+// field is a (name, type) pair collected while parsing parameter and
+// type-parameter declarations, before distribution into ast.Fields.
+type field struct {
+	name *ast.Ident
+	typ  ast.Expr
+}
+
+// parseParamDecl parses one declaration in a parameter or type-parameter
+// list. name, if non-nil, is the already-consumed first identifier;
+// typeSetsOK permits type-set notation (~T, A|B) in type-parameter lists.
+func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
+	// TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax
+	// package
+	if p.trace {
+		defer un(trace(p, "ParamDeclOrNil"))
+	}
+
+	ptok := p.tok
+	if name != nil {
+		p.tok = token.IDENT // force token.IDENT case in switch below
+	} else if typeSetsOK && p.tok == token.TILDE {
+		// "~" ...
+		return field{nil, p.embeddedElem(nil)}
+	}
+
+	switch p.tok {
+	case token.IDENT:
+		// name
+		if name != nil {
+			f.name = name
+			p.tok = ptok // restore the token saved above
+		} else {
+			f.name = p.parseIdent()
+		}
+		switch p.tok {
+		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
+			// name type
+			f.typ = p.parseType()
+
+		case token.LBRACK:
+			// name "[" type1, ..., typeN "]" or name "[" n "]" type
+			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)
+
+		case token.ELLIPSIS:
+			// name "..." type
+			f.typ = p.parseDotsType()
+			return // don't allow ...type "|" ...
+
+		case token.PERIOD:
+			// name "." ...
+			f.typ = p.parseQualifiedIdent(f.name)
+			f.name = nil
+
+		case token.TILDE:
+			if typeSetsOK {
+				f.typ = p.embeddedElem(nil)
+				return
+			}
+
+		case token.OR:
+			if typeSetsOK {
+				// name "|" typeset
+				f.typ = p.embeddedElem(f.name)
+				f.name = nil
+				return
+			}
+		}
+
+	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
+		// type
+		f.typ = p.parseType()
+
+	case token.ELLIPSIS:
+		// "..." type
+		// (always accepted)
+		f.typ = p.parseDotsType()
+		return // don't allow ...type "|" ...
+
+	default:
+		// TODO(rfindley): this looks incorrect in the case of type parameter
+		// lists.
+		p.errorExpected(p.pos, ")")
+		p.advance(exprEnd)
+	}
+
+	// [name] type "|"
+	if typeSetsOK && p.tok == token.OR && f.typ != nil {
+		f.typ = p.embeddedElem(f.typ)
+	}
+
+	return
+}
+
+// parseParameterList parses a comma-separated list of parameter (or, when
+// closing == token.RBRACK, type-parameter) declarations up to, but not
+// consuming, the closing token. name0 and typ0, if non-nil, are an
+// already-consumed first name and type.
+func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) {
+	if p.trace {
+		defer un(trace(p, "ParameterList"))
+	}
+
+	// Type parameters are the only parameter list closed by ']'.
+	tparams := closing == token.RBRACK
+	// Type set notation is ok in type parameter lists.
+	typeSetsOK := tparams && p.allowTypeSets()
+
+	pos := p.pos
+	if name0 != nil {
+		pos = name0.Pos()
+	}
+
+	var list []field
+	var named int // number of parameters that have an explicit name and type
+
+	for name0 != nil || p.tok != closing && p.tok != token.EOF {
+		var par field
+		if typ0 != nil {
+			if typeSetsOK {
+				typ0 = p.embeddedElem(typ0)
+			}
+			par = field{name0, typ0}
+		} else {
+			par = p.parseParamDecl(name0, typeSetsOK)
+		}
+		name0 = nil // 1st name was consumed if present
+		typ0 = nil  // 1st typ was consumed if present
+		if par.name != nil || par.typ != nil {
+			list = append(list, par)
+			if par.name != nil && par.typ != nil {
+				named++
+			}
+		}
+		if !p.atComma("parameter list", closing) {
+			break
+		}
+		p.next()
+	}
+
+	if len(list) == 0 {
+		return // not uncommon
+	}
+
+	// TODO(gri) parameter distribution and conversion to []*ast.Field
+	//           can be combined and made more efficient
+
+	// distribute parameter types
+	if named == 0 {
+		// all unnamed => found names are type names
+		for i := 0; i < len(list); i++ {
+			par := &list[i]
+			if typ := par.name; typ != nil {
+				par.typ = typ
+				par.name = nil
+			}
+		}
+		if tparams {
+			p.error(pos, "all type parameters must be named")
+		}
+	} else if named != len(list) {
+		// some named => all must be named
+		ok := true
+		var typ ast.Expr
+		missingName := pos
+		// Walk backwards so a trailing type can be distributed to the
+		// preceding names that lack one.
+		for i := len(list) - 1; i >= 0; i-- {
+			if par := &list[i]; par.typ != nil {
+				typ = par.typ
+				if par.name == nil {
+					ok = false
+					missingName = par.typ.Pos()
+					n := ast.NewIdent("_")
+					n.NamePos = typ.Pos() // correct position
+					par.name = n
+				}
+			} else if typ != nil {
+				par.typ = typ
+			} else {
+				// par.typ == nil && typ == nil => we only have a par.name
+				ok = false
+				missingName = par.name.Pos()
+				par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
+			}
+		}
+		if !ok {
+			if tparams {
+				p.error(missingName, "all type parameters must be named")
+			} else {
+				p.error(pos, "mixed named and unnamed parameters")
+			}
+		}
+	}
+
+	// convert list []*ast.Field
+	if named == 0 {
+		// parameter list consists of types only
+		for _, par := range list {
+			assert(par.typ != nil, "nil type in unnamed parameter list")
+			params = append(params, &ast.Field{Type: par.typ})
+		}
+		return
+	}
+
+	// parameter list consists of named parameters with types
+	var names []*ast.Ident
+	var typ ast.Expr
+	addParams := func() {
+		assert(typ != nil, "nil type in named parameter list")
+		field := &ast.Field{Names: names, Type: typ}
+		params = append(params, field)
+		names = nil
+	}
+	for _, par := range list {
+		if par.typ != typ {
+			if len(names) > 0 {
+				addParams()
+			}
+			typ = par.typ
+		}
+		names = append(names, par.name)
+	}
+	if len(names) > 0 {
+		addParams()
+	}
+	return
+}
+
+// parseParameters parses an optional bracketed type-parameter list (when
+// acceptTParams is set and generics are allowed) followed by a mandatory
+// parenthesized parameter list.
+func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
+	if p.trace {
+		defer un(trace(p, "Parameters"))
+	}
+
+	if p.allowGenerics() && acceptTParams && p.tok == token.LBRACK {
+		opening := p.pos
+		p.next()
+		// [T any](params) syntax
+		list := p.parseParameterList(nil, nil, token.RBRACK)
+		rbrack := p.expect(token.RBRACK)
+		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
+		// Type parameter lists must not be empty.
+		if tparams.NumFields() == 0 {
+			p.error(tparams.Closing, "empty type parameter list")
+			tparams = nil // avoid follow-on errors
+		}
+	}
+
+	opening := p.expect(token.LPAREN)
+
+	var fields []*ast.Field
+	if p.tok != token.RPAREN {
+		fields = p.parseParameterList(nil, nil, token.RPAREN)
+	}
+
+	rparen := p.expect(token.RPAREN)
+	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}
+
+	return
+}
+
+// parseResult parses a function result: a parenthesized parameter list,
+// a single unparenthesized type, or nothing (nil).
+func (p *parser) parseResult() *ast.FieldList {
+	if p.trace {
+		defer un(trace(p, "Result"))
+	}
+
+	if p.tok == token.LPAREN {
+		_, results := p.parseParameters(false)
+		return results
+	}
+
+	typ := p.tryIdentOrType()
+	if typ != nil {
+		list := make([]*ast.Field, 1)
+		list[0] = &ast.Field{Type: typ}
+		return &ast.FieldList{List: list}
+	}
+
+	return nil
+}
+
+// parseFuncType parses a function type: "func" Parameters [Result].
+// Type parameters are rejected with an error.
+func (p *parser) parseFuncType() *ast.FuncType {
+	if p.trace {
+		defer un(trace(p, "FuncType"))
+	}
+
+	pos := p.expect(token.FUNC)
+	tparams, params := p.parseParameters(true)
+	if tparams != nil {
+		p.error(tparams.Pos(), "function type must have no type parameters")
+	}
+	results := p.parseResult()
+
+	return &ast.FuncType{Func: pos, Params: params, Results: results}
+}
+
+// parseMethodSpec parses one interface element: a method specification or
+// an embedded (possibly qualified or instantiated) type.
+func (p *parser) parseMethodSpec() *ast.Field {
+	if p.trace {
+		defer un(trace(p, "MethodSpec"))
+	}
+
+	doc := p.leadComment
+	var idents []*ast.Ident
+	var typ ast.Expr
+	x := p.parseTypeName(nil)
+	if ident, _ := x.(*ast.Ident); ident != nil {
+		switch {
+		case p.tok == token.LBRACK && p.allowGenerics():
+			// generic method or embedded instantiated type
+			lbrack := p.pos
+			p.next()
+			p.exprLev++
+			x := p.parseExpr()
+			p.exprLev--
+			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
+				// generic method m[T any]
+				//
+				// Interface methods do not have type parameters. We parse them for a
+				// better error message and improved error recovery.
+				_ = p.parseParameterList(name0, nil, token.RBRACK)
+				_ = p.expect(token.RBRACK)
+				p.error(lbrack, "interface method must have no type parameters")
+
+				// TODO(rfindley) refactor to share code with parseFuncType.
+				_, params := p.parseParameters(false)
+				results := p.parseResult()
+				idents = []*ast.Ident{ident}
+				typ = &ast.FuncType{
+					Func:    token.NoPos,
+					Params:  params,
+					Results: results,
+				}
+			} else {
+				// embedded instantiated type
+				// TODO(rfindley) should resolve all identifiers in x.
+				list := []ast.Expr{x}
+				if p.atComma("type argument list", token.RBRACK) {
+					p.exprLev++
+					p.next()
+					for p.tok != token.RBRACK && p.tok != token.EOF {
+						list = append(list, p.parseType())
+						if !p.atComma("type argument list", token.RBRACK) {
+							break
+						}
+						p.next()
+					}
+					p.exprLev--
+				}
+				rbrack := p.expectClosing(token.RBRACK, "type argument list")
+				typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
+			}
+		case p.tok == token.LPAREN:
+			// ordinary method
+			// TODO(rfindley) refactor to share code with parseFuncType.
+			_, params := p.parseParameters(false)
+			results := p.parseResult()
+			idents = []*ast.Ident{ident}
+			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
+		default:
+			// embedded type
+			typ = x
+		}
+	} else {
+		// embedded, possibly instantiated type
+		typ = x
+		if p.tok == token.LBRACK && p.allowGenerics() {
+			// embedded instantiated interface
+			typ = p.parseTypeInstance(typ)
+		}
+	}
+
+	// Comment is added at the callsite: the field below may joined with
+	// additional type specs using '|'.
+	// TODO(rfindley) this should be refactored.
+	// TODO(rfindley) add more tests for comment handling.
+	return &ast.Field{Doc: doc, Names: idents, Type: typ}
+}
+
+// embeddedElem parses a union of embedded terms: t1 | t2 | ... .
+// x, if non-nil, is the already-parsed first term.
+func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "EmbeddedElem"))
+	}
+	if x == nil {
+		x = p.embeddedTerm()
+	}
+	// Unions are built left-associatively as nested BinaryExprs with Op '|'.
+	for p.tok == token.OR {
+		t := new(ast.BinaryExpr)
+		t.OpPos = p.pos
+		t.Op = token.OR
+		p.next()
+		t.X = x
+		t.Y = p.embeddedTerm()
+		x = t
+	}
+	return x
+}
+
+// embeddedTerm parses one term of a union: a type or an underlying-type
+// term "~" T (represented as a UnaryExpr with Op TILDE).
+func (p *parser) embeddedTerm() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "EmbeddedTerm"))
+	}
+	if p.tok == token.TILDE {
+		t := new(ast.UnaryExpr)
+		t.OpPos = p.pos
+		t.Op = token.TILDE
+		p.next()
+		t.X = p.parseType()
+		return t
+	}
+
+	t := p.tryIdentOrType()
+	if t == nil {
+		pos := p.pos
+		p.errorExpected(pos, "~ term or type")
+		p.advance(exprEnd)
+		return &ast.BadExpr{From: pos, To: p.pos}
+	}
+
+	return t
+}
+
+// parseInterfaceType parses an interface type whose elements may be
+// methods, embedded types, or (with generics) type-set unions.
+func (p *parser) parseInterfaceType() *ast.InterfaceType {
+	if p.trace {
+		defer un(trace(p, "InterfaceType"))
+	}
+
+	pos := p.expect(token.INTERFACE)
+	lbrace := p.expect(token.LBRACE)
+
+	var list []*ast.Field
+
+parseElements:
+	for {
+		switch {
+		case p.tok == token.IDENT:
+			f := p.parseMethodSpec()
+			if f.Names == nil && p.allowGenerics() {
+				f.Type = p.embeddedElem(f.Type)
+			}
+			p.expectSemi()
+			f.Comment = p.lineComment
+			list = append(list, f)
+		case p.tok == token.TILDE && p.allowGenerics():
+			typ := p.embeddedElem(nil)
+			p.expectSemi()
+			comment := p.lineComment
+			list = append(list, &ast.Field{Type: typ, Comment: comment})
+		case p.allowGenerics():
+			if t := p.tryIdentOrType(); t != nil {
+				typ := p.embeddedElem(t)
+				p.expectSemi()
+				comment := p.lineComment
+				list = append(list, &ast.Field{Type: typ, Comment: comment})
+			} else {
+				break parseElements
+			}
+		default:
+			break parseElements
+		}
+	}
+
+	// TODO(rfindley): the error produced here could be improved, since we could
+	// accept a identifier, 'type', or a '}' at this point.
+	rbrace := p.expect(token.RBRACE)
+
+	return &ast.InterfaceType{
+		Interface: pos,
+		Methods: &ast.FieldList{
+			Opening: lbrace,
+			List:    list,
+			Closing: rbrace,
+		},
+	}
+}
+
+// parseMapType parses a map type: "map" "[" KeyType "]" ValueType.
+func (p *parser) parseMapType() *ast.MapType {
+	if p.trace {
+		defer un(trace(p, "MapType"))
+	}
+
+	pos := p.expect(token.MAP)
+	p.expect(token.LBRACK)
+	key := p.parseType()
+	p.expect(token.RBRACK)
+	value := p.parseType()
+
+	return &ast.MapType{Map: pos, Key: key, Value: value}
+}
+
+// parseChanType parses a channel type: "chan" T, "chan" "<-" T
+// (send-only), or "<-" "chan" T (receive-only).
+func (p *parser) parseChanType() *ast.ChanType {
+	if p.trace {
+		defer un(trace(p, "ChanType"))
+	}
+
+	pos := p.pos
+	dir := ast.SEND | ast.RECV
+	var arrow token.Pos
+	if p.tok == token.CHAN {
+		p.next()
+		if p.tok == token.ARROW {
+			arrow = p.pos
+			p.next()
+			dir = ast.SEND
+		}
+	} else {
+		arrow = p.expect(token.ARROW)
+		p.expect(token.CHAN)
+		dir = ast.RECV
+	}
+	value := p.parseType()
+
+	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
+}
+
+// parseTypeInstance parses a generic type instantiation typ[T1, T2, ...].
+// The opening '[' has not been consumed yet. An empty type argument list
+// is an error, but a placeholder IndexExpr is returned for tolerance.
+func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
+	assert(p.allowGenerics(), "parseTypeInstance while not parsing type params")
+	if p.trace {
+		defer un(trace(p, "TypeInstance"))
+	}
+
+	opening := p.expect(token.LBRACK)
+	p.exprLev++
+	var list []ast.Expr
+	for p.tok != token.RBRACK && p.tok != token.EOF {
+		list = append(list, p.parseType())
+		if !p.atComma("type argument list", token.RBRACK) {
+			break
+		}
+		p.next()
+	}
+	p.exprLev--
+
+	closing := p.expectClosing(token.RBRACK, "type argument list")
+
+	if len(list) == 0 {
+		p.errorExpected(closing, "type argument list")
+		return &ast.IndexExpr{
+			X:      typ,
+			Lbrack: opening,
+			Index:  &ast.BadExpr{From: opening + 1, To: closing},
+			Rbrack: closing,
+		}
+	}
+
+	return typeparams.PackIndexExpr(typ, opening, list, closing)
+}
+
+// tryIdentOrType parses a type if one starts at the current token and
+// returns nil, without consuming anything, if no type is present.
+func (p *parser) tryIdentOrType() ast.Expr {
+	switch p.tok {
+	case token.IDENT:
+		typ := p.parseTypeName(nil)
+		// A following '[' may be a generic instantiation.
+		if p.tok == token.LBRACK && p.allowGenerics() {
+			typ = p.parseTypeInstance(typ)
+		}
+		return typ
+	case token.LBRACK:
+		lbrack := p.expect(token.LBRACK)
+		return p.parseArrayType(lbrack, nil)
+	case token.STRUCT:
+		return p.parseStructType()
+	case token.MUL:
+		return p.parsePointerType()
+	case token.FUNC:
+		typ := p.parseFuncType()
+		return typ
+	case token.INTERFACE:
+		return p.parseInterfaceType()
+	case token.MAP:
+		return p.parseMapType()
+	case token.CHAN, token.ARROW:
+		return p.parseChanType()
+	case token.LPAREN:
+		lparen := p.pos
+		p.next()
+		typ := p.parseType()
+		rparen := p.expect(token.RPAREN)
+		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
+	}
+
+	// no type found
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Blocks
+
+// parseStmtList parses statements until a token that terminates a
+// statement list (case, default, a closing brace, or EOF) is reached.
+func (p *parser) parseStmtList() (list []ast.Stmt) {
+	if p.trace {
+		defer un(trace(p, "StatementList"))
+	}
+
+	for {
+		switch p.tok {
+		case token.CASE, token.DEFAULT, token.RBRACE, token.EOF:
+			return
+		}
+		list = append(list, p.parseStmt())
+	}
+}
+
+// parseBody parses a brace-delimited statement list used as a function
+// body (see parseFuncTypeOrLit). It is textually identical to
+// parseBlockStmt but kept separate, matching upstream structure.
+func (p *parser) parseBody() *ast.BlockStmt {
+	if p.trace {
+		defer un(trace(p, "Body"))
+	}
+
+	lbrace := p.expect(token.LBRACE)
+	list := p.parseStmtList()
+	rbrace := p.expect2(token.RBRACE)
+
+	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+}
+
+// parseBlockStmt parses a brace-delimited statement list appearing as a
+// block statement (if/for/switch bodies, bare blocks).
+func (p *parser) parseBlockStmt() *ast.BlockStmt {
+	if p.trace {
+		defer un(trace(p, "BlockStmt"))
+	}
+
+	lbrace := p.expect(token.LBRACE)
+	list := p.parseStmtList()
+	rbrace := p.expect2(token.RBRACE)
+
+	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+// parseFuncTypeOrLit parses a function type and, if a '{' follows,
+// the body that turns it into a function literal.
+func (p *parser) parseFuncTypeOrLit() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "FuncTypeOrLit"))
+	}
+
+	typ := p.parseFuncType()
+	if p.tok != token.LBRACE {
+		// function type only
+		return typ
+	}
+
+	p.exprLev++
+	body := p.parseBody()
+	p.exprLev--
+
+	return &ast.FuncLit{Type: typ, Body: body}
+}
+
+// parseOperand may return an expression or a raw type (incl. array
+// types of the form [...]T). Callers must verify the result.
+func (p *parser) parseOperand() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Operand"))
+	}
+
+	switch p.tok {
+	case token.IDENT:
+		x := p.parseIdent()
+		return x
+
+	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
+		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+		p.next()
+		return x
+
+	case token.LPAREN:
+		lparen := p.pos
+		p.next()
+		p.exprLev++
+		x := p.parseRhsOrType() // types may be parenthesized: (some type)
+		p.exprLev--
+		rparen := p.expect(token.RPAREN)
+		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
+
+	case token.FUNC:
+		return p.parseFuncTypeOrLit()
+	}
+
+	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
+		// could be type for composite literal or conversion
+		_, isIdent := typ.(*ast.Ident)
+		assert(!isIdent, "type cannot be identifier")
+		return typ
+	}
+
+	// we have an error
+	pos := p.pos
+	p.errorExpected(pos, "operand")
+	p.advance(stmtStart)
+	return &ast.BadExpr{From: pos, To: p.pos}
+}
+
+// parseSelector parses the identifier following a '.' and wraps the
+// operand x in a selector expression.
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Selector"))
+	}
+
+	return &ast.SelectorExpr{X: x, Sel: p.parseIdent()}
+}
+
+// parseTypeAssertion parses x.(T), or the type-switch guard x.(type);
+// in the latter case the returned node's Type field is nil.
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "TypeAssertion"))
+	}
+
+	lparen := p.expect(token.LPAREN)
+	var typ ast.Expr
+	if p.tok == token.TYPE {
+		// type switch: typ == nil
+		p.next()
+	} else {
+		typ = p.parseType()
+	}
+	rparen := p.expect(token.RPAREN)
+
+	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
+}
+
+// parseIndexOrSliceOrInstance parses, after the operand x, one of an
+// index expression x[i], a slice expression x[i:j] or x[i:j:k], or a
+// generic instantiation x[T1, T2, ...]. Which form applies is only
+// known after examining the tokens between the brackets: colons mean a
+// slice, commas mean an instantiation, otherwise it is a plain index.
+func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "parseIndexOrSliceOrInstance"))
+	}
+
+	lbrack := p.expect(token.LBRACK)
+	if p.tok == token.RBRACK {
+		// empty index, slice or index expressions are not permitted;
+		// accept them for parsing tolerance, but complain
+		p.errorExpected(p.pos, "operand")
+		rbrack := p.pos
+		p.next()
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
+			Rbrack: rbrack,
+		}
+	}
+	p.exprLev++
+
+	const N = 3 // change the 3 to 2 to disable 3-index slices
+	var args []ast.Expr
+	var index [N]ast.Expr
+	var colons [N - 1]token.Pos
+	var firstComma token.Pos
+	if p.tok != token.COLON {
+		// We can't know if we have an index expression or a type instantiation;
+		// so even if we see a (named) type we are not going to be in type context.
+		index[0] = p.parseRhsOrType()
+	}
+	ncolons := 0
+	switch p.tok {
+	case token.COLON:
+		// slice expression
+		for p.tok == token.COLON && ncolons < len(colons) {
+			colons[ncolons] = p.pos
+			ncolons++
+			p.next()
+			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
+				index[ncolons] = p.parseRhs()
+			}
+		}
+	case token.COMMA:
+		firstComma = p.pos
+		// instance expression
+		args = append(args, index[0])
+		for p.tok == token.COMMA {
+			p.next()
+			if p.tok != token.RBRACK && p.tok != token.EOF {
+				args = append(args, p.parseType())
+			}
+		}
+	}
+
+	p.exprLev--
+	rbrack := p.expect(token.RBRACK)
+
+	if ncolons > 0 {
+		// slice expression
+		slice3 := false
+		if ncolons == 2 {
+			slice3 = true
+			// Check presence of 2nd and 3rd index here rather than during type-checking
+			// to prevent erroneous programs from passing through gofmt (was issue 7305).
+			if index[1] == nil {
+				p.error(colons[0], "2nd index required in 3-index slice")
+				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
+			}
+			if index[2] == nil {
+				p.error(colons[1], "3rd index required in 3-index slice")
+				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
+			}
+		}
+		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
+	}
+
+	if len(args) == 0 {
+		// index expression
+		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
+	}
+
+	if !p.allowGenerics() {
+		p.error(firstComma, "expected ']' or ':', found ','")
+		return &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
+	}
+
+	// instance expression
+	return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
+}
+
+// parseCallOrConversion parses the parenthesized argument list of a
+// call or conversion fun(...), including a trailing "..." argument.
+func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
+	if p.trace {
+		defer un(trace(p, "CallOrConversion"))
+	}
+
+	lparen := p.expect(token.LPAREN)
+	p.exprLev++
+	var list []ast.Expr
+	var ellipsis token.Pos
+	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
+		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
+		if p.tok == token.ELLIPSIS {
+			ellipsis = p.pos
+			p.next()
+		}
+		if !p.atComma("argument list", token.RPAREN) {
+			break
+		}
+		p.next()
+	}
+	p.exprLev--
+	rparen := p.expectClosing(token.RPAREN, "argument list")
+
+	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
+}
+
+// parseValue parses one value of a composite-literal element: either a
+// nested literal value (when a '{' follows) or an expression.
+func (p *parser) parseValue() ast.Expr {
+	if p.trace {
+		// The trace label was "Element" upstream, duplicating the label
+		// used by parseElement; "Value" matches this function.
+		defer un(trace(p, "Value"))
+	}
+
+	if p.tok == token.LBRACE {
+		return p.parseLiteralValue(nil)
+	}
+
+	x := p.checkExpr(p.parseExpr())
+
+	return x
+}
+
+// parseElement parses a composite-literal element: a value, optionally
+// preceded by a key and a colon.
+func (p *parser) parseElement() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Element"))
+	}
+
+	x := p.parseValue()
+	if p.tok == token.COLON {
+		colon := p.pos
+		p.next()
+		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
+	}
+
+	return x
+}
+
+// parseElementList parses the comma-separated elements of a composite
+// literal up to (but not including) the closing brace.
+func (p *parser) parseElementList() (list []ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ElementList"))
+	}
+
+	for p.tok != token.RBRACE && p.tok != token.EOF {
+		list = append(list, p.parseElement())
+		if !p.atComma("composite literal", token.RBRACE) {
+			break
+		}
+		p.next()
+	}
+
+	return
+}
+
+// parseLiteralValue parses the braced element list of a composite
+// literal; typ is the (possibly nil) literal type parsed by the caller.
+func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "LiteralValue"))
+	}
+
+	lbrace := p.expect(token.LBRACE)
+	var elts []ast.Expr
+	p.exprLev++
+	if p.tok != token.RBRACE {
+		elts = p.parseElementList()
+	}
+	p.exprLev--
+	rbrace := p.expectClosing(token.RBRACE, "composite literal")
+	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
+}
+
+// checkExpr checks that x is an expression (and not a type).
+// On failure it reports an error and substitutes a BadExpr.
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+	// The empty cases enumerate the node kinds that are valid expressions.
+	switch unparen(x).(type) {
+	case *ast.BadExpr:
+	case *ast.Ident:
+	case *ast.BasicLit:
+	case *ast.FuncLit:
+	case *ast.CompositeLit:
+	case *ast.ParenExpr:
+		// unparen has already removed all parentheses.
+		panic("unreachable")
+	case *ast.SelectorExpr:
+	case *ast.IndexExpr:
+	case *ast.IndexListExpr:
+	case *ast.SliceExpr:
+	case *ast.TypeAssertExpr:
+		// If t.Type == nil we have a type assertion of the form
+		// y.(type), which is only allowed in type switch expressions.
+		// It's hard to exclude those but for the case where we are in
+		// a type switch. Instead be lenient and test this in the type
+		// checker.
+	case *ast.CallExpr:
+	case *ast.StarExpr:
+	case *ast.UnaryExpr:
+	case *ast.BinaryExpr:
+	default:
+		// all other nodes are not proper expressions
+		p.errorExpected(x.Pos(), "expression")
+		x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
+	}
+	return x
+}
+
+// unparen strips any enclosing parentheses: for x of the form (T),
+// possibly nested, it returns the innermost T; otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+	for {
+		paren, ok := x.(*ast.ParenExpr)
+		if !ok {
+			return x
+		}
+		x = paren.X
+	}
+}
+
+// checkExprOrType checks that x is an expression or a type
+// (and not a raw type such as [...]T).
+func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
+	switch t := unparen(x).(type) {
+	case *ast.ParenExpr:
+		// unparen has already removed all parentheses.
+		panic("unreachable")
+	case *ast.ArrayType:
+		// An "..." array length is only valid in a composite literal
+		// type, which is handled elsewhere; reject it here.
+		// (The local was named "len" upstream, shadowing the builtin.)
+		if ell, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
+			p.error(ell.Pos(), "expected array length, found '...'")
+			x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
+		}
+	}
+
+	// all other nodes are expressions or types
+	return x
+}
+
+// parsePrimaryExpr parses a primary expression: an operand (x, or a
+// freshly parsed one if x is nil) followed by any number of selector,
+// type-assertion, index/slice/instantiation, call, or composite-literal
+// suffixes.
+func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "PrimaryExpr"))
+	}
+
+	if x == nil {
+		x = p.parseOperand()
+	}
+	for {
+		switch p.tok {
+		case token.PERIOD:
+			p.next()
+			switch p.tok {
+			case token.IDENT:
+				x = p.parseSelector(p.checkExprOrType(x))
+			case token.LPAREN:
+				x = p.parseTypeAssertion(p.checkExpr(x))
+			default:
+				pos := p.pos
+				p.errorExpected(pos, "selector or type assertion")
+				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
+				//                to error recovery sufficient to make the x/tools tests to
+				//                pass with the new parsing logic introduced for type
+				//                parameters. Remove this once error recovery has been
+				//                more generally reconsidered.
+				if p.tok != token.RBRACE {
+					p.next() // make progress
+				}
+				sel := &ast.Ident{NamePos: pos, Name: "_"}
+				x = &ast.SelectorExpr{X: x, Sel: sel}
+			}
+		case token.LBRACK:
+			x = p.parseIndexOrSliceOrInstance(p.checkExpr(x))
+		case token.LPAREN:
+			x = p.parseCallOrConversion(p.checkExprOrType(x))
+		case token.LBRACE:
+			// operand may have returned a parenthesized complit
+			// type; accept it but complain if we have a complit
+			t := unparen(x)
+			// determine if '{' belongs to a composite literal or a block statement
+			switch t.(type) {
+			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
+				if p.exprLev < 0 {
+					return x
+				}
+				// x is possibly a composite literal type
+			case *ast.IndexExpr, *ast.IndexListExpr:
+				if p.exprLev < 0 {
+					return x
+				}
+				// x is possibly a composite literal type
+			case *ast.ArrayType, *ast.StructType, *ast.MapType:
+				// x is a composite literal type
+			default:
+				return x
+			}
+			if t != x {
+				p.error(t.Pos(), "cannot parenthesize type in composite literal")
+				// already progressed, no need to advance
+			}
+			x = p.parseLiteralValue(x)
+		default:
+			return x
+		}
+	}
+}
+
+// parseUnaryExpr parses a unary expression, including the '<-' case
+// where the arrow may need to be re-associated with a following channel
+// type (see the inline comments).
+func (p *parser) parseUnaryExpr() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "UnaryExpr"))
+	}
+
+	switch p.tok {
+	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
+		pos, op := p.pos, p.tok
+		p.next()
+		x := p.parseUnaryExpr()
+		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
+
+	case token.ARROW:
+		// channel type or receive expression
+		arrow := p.pos
+		p.next()
+
+		// If the next token is token.CHAN we still don't know if it
+		// is a channel type or a receive operation - we only know
+		// once we have found the end of the unary expression. There
+		// are two cases:
+		//
+		//   <- type  => (<-type) must be channel type
+		//   <- expr  => <-(expr) is a receive from an expression
+		//
+		// In the first case, the arrow must be re-associated with
+		// the channel type parsed already:
+		//
+		//   <- (chan type)    =>  (<-chan type)
+		//   <- (chan<- type)  =>  (<-chan (<-type))
+
+		x := p.parseUnaryExpr()
+
+		// determine which case we have
+		if typ, ok := x.(*ast.ChanType); ok {
+			// (<-type)
+
+			// re-associate position info and <-
+			dir := ast.SEND
+			for ok && dir == ast.SEND {
+				if typ.Dir == ast.RECV {
+					// error: (<-type) is (<-(<-chan T))
+					p.errorExpected(typ.Arrow, "'chan'")
+				}
+				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
+				dir, typ.Dir = typ.Dir, ast.RECV
+				typ, ok = typ.Value.(*ast.ChanType)
+			}
+			if dir == ast.SEND {
+				p.errorExpected(arrow, "channel type")
+			}
+
+			return x
+		}
+
+		// <-(expr)
+		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
+
+	case token.MUL:
+		// pointer type or unary "*" expression
+		pos := p.pos
+		p.next()
+		x := p.parseUnaryExpr()
+		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
+	}
+
+	return p.parsePrimaryExpr(nil)
+}
+
+// tokPrec returns the current token and its operator precedence.
+// In right-hand-side context, '=' is treated as '==' so a mistaken
+// assignment in expression position parses as a comparison.
+func (p *parser) tokPrec() (token.Token, int) {
+	tok := p.tok
+	if tok == token.ASSIGN && p.inRhs {
+		tok = token.EQL
+	}
+	return tok, tok.Precedence()
+}
+
+// parseBinaryExpr parses a (possibly) binary expression using
+// precedence climbing: operators with precedence >= prec1 are consumed.
+// If x is non-nil, it is used as the left operand.
+// If check is true, operands are checked to be valid expressions.
+//
+// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
+func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int, check bool) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "BinaryExpr"))
+	}
+
+	if x == nil {
+		x = p.parseUnaryExpr()
+	}
+	for {
+		op, oprec := p.tokPrec()
+		if oprec < prec1 {
+			return x
+		}
+		pos := p.expect(op)
+		// oprec+1 makes same-precedence operators left-associative.
+		y := p.parseBinaryExpr(nil, oprec+1, check)
+		if check {
+			x = p.checkExpr(x)
+			y = p.checkExpr(y)
+		}
+		x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
+	}
+}
+
+// checkBinaryExpr checks binary expressions that were not already checked by
+// parseBinaryExpr, because the latter was called with check=false.
+// Non-binary expressions are left untouched.
+func (p *parser) checkBinaryExpr(x ast.Expr) {
+	bx, ok := x.(*ast.BinaryExpr)
+	if !ok {
+		return
+	}
+
+	bx.X = p.checkExpr(bx.X)
+	bx.Y = p.checkExpr(bx.Y)
+
+	// parseBinaryExpr checks x and y for each binary expr in a tree, so we
+	// traverse the tree of binary exprs starting from x.
+	p.checkBinaryExpr(bx.X)
+	p.checkBinaryExpr(bx.Y)
+}
+
+// parseExpr parses an expression.
+// The result may be a type or even a raw type ([...]int). Callers must
+// check the result (using checkExpr or checkExprOrType), depending on
+// context.
+func (p *parser) parseExpr() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Expression"))
+	}
+
+	return p.parseBinaryExpr(nil, token.LowestPrec+1, true)
+}
+
+// parseRhs parses an expression in right-hand-side context (inRhs set)
+// and checks that it is an expression rather than a type.
+func (p *parser) parseRhs() ast.Expr {
+	defer func(old bool) { p.inRhs = old }(p.inRhs)
+	p.inRhs = true
+	return p.checkExpr(p.parseExpr())
+}
+
+// parseRhsOrType parses an expression or type in right-hand-side
+// context (inRhs set), rejecting only raw types such as [...]T.
+func (p *parser) parseRhsOrType() ast.Expr {
+	defer func(old bool) { p.inRhs = old }(p.inRhs)
+	p.inRhs = true
+	return p.checkExprOrType(p.parseExpr())
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// Parsing modes for parseSimpleStmt.
+const (
+	// basic permits neither labeled statements nor range clauses.
+	basic = iota
+	// labelOk additionally permits a labeled statement.
+	labelOk
+	// rangeOk additionally permits a range clause.
+	rangeOk
+)
+
+// parseSimpleStmt parses a simple statement (assignment, labeled
+// statement, send, inc/dec, or expression statement); mode is one of
+// basic, labelOk, or rangeOk and controls which forms are permitted.
+// It returns true as 2nd result if it parsed the assignment
+// of a range clause (with mode == rangeOk). The returned statement is an
+// assignment with a right-hand side that is a single unary expression of
+// the form "range x". No guarantees are given for the left-hand side.
+func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
+	if p.trace {
+		defer un(trace(p, "SimpleStmt"))
+	}
+
+	x := p.parseList(false)
+
+	switch p.tok {
+	case
+		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
+		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
+		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
+		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
+		// assignment statement, possibly part of a range clause
+		pos, tok := p.pos, p.tok
+		p.next()
+		var y []ast.Expr
+		isRange := false
+		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
+			pos := p.pos
+			p.next()
+			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
+			isRange = true
+		} else {
+			y = p.parseList(true)
+		}
+		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
+		if tok == token.DEFINE {
+			p.checkAssignStmt(as)
+		}
+		return as, isRange
+	}
+
+	if len(x) > 1 {
+		p.errorExpected(x[0].Pos(), "1 expression")
+		// continue with first expression
+	}
+
+	switch p.tok {
+	case token.COLON:
+		// labeled statement
+		colon := p.pos
+		p.next()
+		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
+			// Go spec: The scope of a label is the body of the function
+			// in which it is declared and excludes the body of any nested
+			// function.
+			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
+			return stmt, false
+		}
+		// The label declaration typically starts at x[0].Pos(), but the label
+		// declaration may be erroneous due to a token after that position (and
+		// before the ':'). If SpuriousErrors is not set, the (only) error
+		// reported for the line is the illegal label error instead of the token
+		// before the ':' that caused the problem. Thus, use the (latest) colon
+		// position for error reporting.
+		p.error(colon, "illegal label declaration")
+		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
+
+	case token.ARROW:
+		// send statement
+		arrow := p.pos
+		p.next()
+		y := p.parseRhs()
+		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
+
+	case token.INC, token.DEC:
+		// increment or decrement
+		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
+		p.next()
+		return s, false
+	}
+
+	// expression
+	return &ast.ExprStmt{X: x[0]}, false
+}
+
+func (p *parser) checkAssignStmt(as *ast.AssignStmt) {
+	for _, x := range as.Lhs {
+		if _, isIdent := x.(*ast.Ident); !isIdent {
+			p.errorExpected(x.Pos(), "identifier on left side of :=")
+		}
+	}
+}
+
+// parseCallExpr parses an expression that must be a call, for use in
+// go/defer statements (callType is "go" or "defer" for error messages).
+// It returns nil when the expression is not a call, reporting an error
+// unless the expression was already bad.
+func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
+	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
+	if call, isCall := x.(*ast.CallExpr); isCall {
+		return call
+	}
+	if _, isBad := x.(*ast.BadExpr); !isBad {
+		// only report error if it's a new one
+		p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
+	}
+	return nil
+}
+
+// parseGoStmt parses a go statement; a non-call operand yields a BadStmt.
+func (p *parser) parseGoStmt() ast.Stmt {
+	if p.trace {
+		defer un(trace(p, "GoStmt"))
+	}
+
+	pos := p.expect(token.GO)
+	call := p.parseCallExpr("go")
+	p.expectSemi()
+	if call == nil {
+		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
+	}
+
+	return &ast.GoStmt{Go: pos, Call: call}
+}
+
+// parseDeferStmt parses a defer statement; a non-call operand yields a
+// BadStmt.
+func (p *parser) parseDeferStmt() ast.Stmt {
+	if p.trace {
+		defer un(trace(p, "DeferStmt"))
+	}
+
+	pos := p.expect(token.DEFER)
+	call := p.parseCallExpr("defer")
+	p.expectSemi()
+	if call == nil {
+		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
+	}
+
+	return &ast.DeferStmt{Defer: pos, Call: call}
+}
+
+// parseReturnStmt parses a return statement with an optional result list.
+func (p *parser) parseReturnStmt() *ast.ReturnStmt {
+	if p.trace {
+		defer un(trace(p, "ReturnStmt"))
+	}
+
+	pos := p.pos
+	p.expect(token.RETURN)
+	var x []ast.Expr
+	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
+		x = p.parseList(true)
+	}
+	p.expectSemi()
+
+	return &ast.ReturnStmt{Return: pos, Results: x}
+}
+
+// parseBranchStmt parses a break, continue, goto, or fallthrough
+// statement (tok selects which); all but fallthrough accept an optional
+// label.
+func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
+	if p.trace {
+		defer un(trace(p, "BranchStmt"))
+	}
+
+	pos := p.expect(tok)
+	var label *ast.Ident
+	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
+		label = p.parseIdent()
+	}
+	p.expectSemi()
+
+	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
+}
+
+// makeExpr extracts the expression carried by the simple statement s;
+// want describes the expected construct for error messages. A nil s
+// yields nil; a non-expression statement yields a BadExpr plus an error
+// hinting at missing parentheses around a composite literal.
+func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
+	if s == nil {
+		return nil
+	}
+	if es, isExpr := s.(*ast.ExprStmt); isExpr {
+		return p.checkExpr(es.X)
+	}
+	found := "simple statement"
+	if _, isAss := s.(*ast.AssignStmt); isAss {
+		found = "assignment"
+	}
+	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
+	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
+}
+
+// parseIfHeader parses the optional init statement and the condition of
+// an if statement. It is an adjusted version of parser.header
+// in cmd/compile/internal/syntax/parser.go, which has
+// been tuned for better error handling. A missing condition is reported
+// and replaced by a BadExpr so the returned cond is never nil.
+func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
+	if p.tok == token.LBRACE {
+		p.error(p.pos, "missing condition in if statement")
+		cond = &ast.BadExpr{From: p.pos, To: p.pos}
+		return
+	}
+	// p.tok != token.LBRACE
+
+	prevLev := p.exprLev
+	p.exprLev = -1
+
+	if p.tok != token.SEMICOLON {
+		// accept potential variable declaration but complain
+		if p.tok == token.VAR {
+			p.next()
+			p.error(p.pos, "var declaration not allowed in 'IF' initializer")
+		}
+		init, _ = p.parseSimpleStmt(basic)
+	}
+
+	var condStmt ast.Stmt
+	var semi struct {
+		pos token.Pos
+		lit string // ";" or "\n"; valid if pos.IsValid()
+	}
+	if p.tok != token.LBRACE {
+		if p.tok == token.SEMICOLON {
+			semi.pos = p.pos
+			semi.lit = p.lit
+			p.next()
+		} else {
+			p.expect(token.SEMICOLON)
+		}
+		if p.tok != token.LBRACE {
+			condStmt, _ = p.parseSimpleStmt(basic)
+		}
+	} else {
+		// No semicolon: what we parsed as init was really the condition.
+		condStmt = init
+		init = nil
+	}
+
+	if condStmt != nil {
+		cond = p.makeExpr(condStmt, "boolean expression")
+	} else if semi.pos.IsValid() {
+		if semi.lit == "\n" {
+			p.error(semi.pos, "unexpected newline, expecting { after if clause")
+		} else {
+			p.error(semi.pos, "missing condition in if statement")
+		}
+	}
+
+	// make sure we have a valid AST
+	if cond == nil {
+		cond = &ast.BadExpr{From: p.pos, To: p.pos}
+	}
+
+	p.exprLev = prevLev
+	return
+}
+
+// parseIfStmt parses an if statement, including any chained "else if"
+// (parsed recursively) or "else" block.
+func (p *parser) parseIfStmt() *ast.IfStmt {
+	if p.trace {
+		defer un(trace(p, "IfStmt"))
+	}
+
+	pos := p.expect(token.IF)
+
+	init, cond := p.parseIfHeader()
+	body := p.parseBlockStmt()
+
+	var else_ ast.Stmt
+	if p.tok == token.ELSE {
+		p.next()
+		switch p.tok {
+		case token.IF:
+			else_ = p.parseIfStmt()
+		case token.LBRACE:
+			else_ = p.parseBlockStmt()
+			p.expectSemi()
+		default:
+			p.errorExpected(p.pos, "if statement or block")
+			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
+		}
+	} else {
+		p.expectSemi()
+	}
+
+	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
+}
+
+// parseTypeList parses a comma-separated list of one or more types.
+func (p *parser) parseTypeList() (list []ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "TypeList"))
+	}
+
+	for {
+		list = append(list, p.parseType())
+		if p.tok != token.COMMA {
+			return
+		}
+		p.next()
+	}
+}
+
+// parseCaseClause parses one "case x, y:" or "default:" clause of a
+// switch statement; in a type switch (typeSwitch true) the case list
+// holds types rather than expressions.
+func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
+	if p.trace {
+		defer un(trace(p, "CaseClause"))
+	}
+
+	pos := p.pos
+	var list []ast.Expr
+	if p.tok == token.CASE {
+		p.next()
+		if typeSwitch {
+			list = p.parseTypeList()
+		} else {
+			list = p.parseList(true)
+		}
+	} else {
+		p.expect(token.DEFAULT)
+	}
+
+	colon := p.expect(token.COLON)
+	body := p.parseStmtList()
+
+	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
+}
+
+// isTypeSwitchAssert reports whether x is a type assertion of the form
+// x.(type), i.e. a TypeAssertExpr whose Type field is nil.
+func isTypeSwitchAssert(x ast.Expr) bool {
+	if a, ok := x.(*ast.TypeAssertExpr); ok {
+		return a.Type == nil
+	}
+	return false
+}
+
+// isTypeSwitchGuard reports whether s is a type switch guard: either
+// x.(type) or v := x.(type). Using '=' instead of ':=' is reported as
+// an error but still treated as a guard.
+func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
+	switch t := s.(type) {
+	case *ast.ExprStmt:
+		// x.(type)
+		return isTypeSwitchAssert(t.X)
+	case *ast.AssignStmt:
+		// v := x.(type)
+		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
+			switch t.Tok {
+			case token.ASSIGN:
+				// permit v = x.(type) but complain
+				p.error(t.TokPos, "expected ':=', found '='")
+				fallthrough
+			case token.DEFINE:
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// parseSwitchStmt parses a switch statement, distinguishing expression
+// switches from type switches by inspecting the parsed header (s1 is
+// the optional init statement, s2 the tag expression or guard).
+func (p *parser) parseSwitchStmt() ast.Stmt {
+	if p.trace {
+		defer un(trace(p, "SwitchStmt"))
+	}
+
+	pos := p.expect(token.SWITCH)
+
+	var s1, s2 ast.Stmt
+	if p.tok != token.LBRACE {
+		prevLev := p.exprLev
+		p.exprLev = -1
+		if p.tok != token.SEMICOLON {
+			s2, _ = p.parseSimpleStmt(basic)
+		}
+		if p.tok == token.SEMICOLON {
+			p.next()
+			s1 = s2
+			s2 = nil
+			if p.tok != token.LBRACE {
+				// A TypeSwitchGuard may declare a variable in addition
+				// to the variable declared in the initial SimpleStmt.
+				// Introduce extra scope to avoid redeclaration errors:
+				//
+				//	switch t := 0; t := x.(T) { ... }
+				//
+				// (this code is not valid Go because the first t
+				// cannot be accessed and thus is never used, the extra
+				// scope is needed for the correct error message).
+				//
+				// If we don't have a type switch, s2 must be an expression.
+				// Having the extra nested but empty scope won't affect it.
+				s2, _ = p.parseSimpleStmt(basic)
+			}
+		}
+		p.exprLev = prevLev
+	}
+
+	typeSwitch := p.isTypeSwitchGuard(s2)
+	lbrace := p.expect(token.LBRACE)
+	var list []ast.Stmt
+	for p.tok == token.CASE || p.tok == token.DEFAULT {
+		list = append(list, p.parseCaseClause(typeSwitch))
+	}
+	rbrace := p.expect(token.RBRACE)
+	p.expectSemi()
+	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+
+	if typeSwitch {
+		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
+	}
+
+	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
+}
+
+// parseCommClause parses one communication clause of a select
+// statement: "case" followed by a send statement or a (possibly
+// assigned) receive, or "default", then the clause body.
+func (p *parser) parseCommClause() *ast.CommClause {
+	if p.trace {
+		defer un(trace(p, "CommClause"))
+	}
+
+	pos := p.pos
+	var comm ast.Stmt
+	if p.tok == token.CASE {
+		p.next()
+		lhs := p.parseList(false)
+		if p.tok == token.ARROW {
+			// SendStmt
+			if len(lhs) > 1 {
+				p.errorExpected(lhs[0].Pos(), "1 expression")
+				// continue with first expression
+			}
+			arrow := p.pos
+			p.next()
+			rhs := p.parseRhs()
+			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
+		} else {
+			// RecvStmt
+			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
+				// RecvStmt with assignment
+				if len(lhs) > 2 {
+					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
+					// continue with first two expressions
+					lhs = lhs[0:2]
+				}
+				pos := p.pos
+				p.next()
+				rhs := p.parseRhs()
+				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
+				if tok == token.DEFINE {
+					p.checkAssignStmt(as)
+				}
+				comm = as
+			} else {
+				// lhs must be single receive operation
+				if len(lhs) > 1 {
+					p.errorExpected(lhs[0].Pos(), "1 expression")
+					// continue with first expression
+				}
+				comm = &ast.ExprStmt{X: lhs[0]}
+			}
+		}
+	} else {
+		p.expect(token.DEFAULT)
+	}
+
+	colon := p.expect(token.COLON)
+	body := p.parseStmtList()
+
+	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
+}
+
+// parseSelectStmt parses a select statement: a brace-delimited list of
+// communication clauses.
+func (p *parser) parseSelectStmt() *ast.SelectStmt {
+	if p.trace {
+		defer un(trace(p, "SelectStmt"))
+	}
+
+	pos := p.expect(token.SELECT)
+	lbrace := p.expect(token.LBRACE)
+	var list []ast.Stmt
+	for p.tok == token.CASE || p.tok == token.DEFAULT {
+		list = append(list, p.parseCommClause())
+	}
+	rbrace := p.expect(token.RBRACE)
+	p.expectSemi()
+	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+
+	return &ast.SelectStmt{Select: pos, Body: body}
+}
+
+// parseForStmt parses a for statement in any of its forms; a range
+// clause detected by parseSimpleStmt (or a bare "for range x") yields
+// an ast.RangeStmt, otherwise an ast.ForStmt.
+func (p *parser) parseForStmt() ast.Stmt {
+	if p.trace {
+		defer un(trace(p, "ForStmt"))
+	}
+
+	pos := p.expect(token.FOR)
+
+	var s1, s2, s3 ast.Stmt
+	var isRange bool
+	if p.tok != token.LBRACE {
+		prevLev := p.exprLev
+		p.exprLev = -1
+		if p.tok != token.SEMICOLON {
+			if p.tok == token.RANGE {
+				// "for range x" (nil lhs in assignment)
+				pos := p.pos
+				p.next()
+				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
+				s2 = &ast.AssignStmt{Rhs: y}
+				isRange = true
+			} else {
+				s2, isRange = p.parseSimpleStmt(rangeOk)
+			}
+		}
+		if !isRange && p.tok == token.SEMICOLON {
+			p.next()
+			s1 = s2
+			s2 = nil
+			if p.tok != token.SEMICOLON {
+				s2, _ = p.parseSimpleStmt(basic)
+			}
+			p.expectSemi()
+			if p.tok != token.LBRACE {
+				s3, _ = p.parseSimpleStmt(basic)
+			}
+		}
+		p.exprLev = prevLev
+	}
+
+	body := p.parseBlockStmt()
+	p.expectSemi()
+
+	if isRange {
+		as := s2.(*ast.AssignStmt)
+		// check lhs
+		var key, value ast.Expr
+		switch len(as.Lhs) {
+		case 0:
+			// nothing to do
+		case 1:
+			key = as.Lhs[0]
+		case 2:
+			key, value = as.Lhs[0], as.Lhs[1]
+		default:
+			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
+			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
+		}
+		// parseSimpleStmt returned a right-hand side that
+		// is a single unary expression of the form "range x"
+		x := as.Rhs[0].(*ast.UnaryExpr).X
+		return &ast.RangeStmt{
+			For:    pos,
+			Key:    key,
+			Value:  value,
+			TokPos: as.TokPos,
+			Tok:    as.Tok,
+			X:      x,
+			Body:   body,
+		}
+	}
+
+	// regular for statement
+	return &ast.ForStmt{
+		For:  pos,
+		Init: s1,
+		Cond: p.makeExpr(s2, "boolean or range expression"),
+		Post: s3,
+		Body: body,
+	}
+}
+
+// parseStmt parses a single statement, dispatching on the current
+// token; when no statement can start here it reports an error and
+// advances to the next likely statement start.
+func (p *parser) parseStmt() (s ast.Stmt) {
+	if p.trace {
+		defer un(trace(p, "Statement"))
+	}
+
+	switch p.tok {
+	case token.CONST, token.TYPE, token.VAR:
+		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
+	case
+		// tokens that may start an expression
+		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
+		token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
+		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
+		s, _ = p.parseSimpleStmt(labelOk)
+		// because of the required look-ahead, labeled statements are
+		// parsed by parseSimpleStmt - don't expect a semicolon after
+		// them
+		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
+			p.expectSemi()
+		}
+	case token.GO:
+		s = p.parseGoStmt()
+	case token.DEFER:
+		s = p.parseDeferStmt()
+	case token.RETURN:
+		s = p.parseReturnStmt()
+	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
+		s = p.parseBranchStmt(p.tok)
+	case token.LBRACE:
+		s = p.parseBlockStmt()
+		p.expectSemi()
+	case token.IF:
+		s = p.parseIfStmt()
+	case token.SWITCH:
+		s = p.parseSwitchStmt()
+	case token.SELECT:
+		s = p.parseSelectStmt()
+	case token.FOR:
+		s = p.parseForStmt()
+	case token.SEMICOLON:
+		// Is it ever possible to have an implicit semicolon
+		// producing an empty statement in a valid program?
+		// (handle correctly anyway)
+		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
+		p.next()
+	case token.RBRACE:
+		// a semicolon may be omitted before a closing "}"
+		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
+	default:
+		// no statement found
+		pos := p.pos
+		p.errorExpected(pos, "statement")
+		p.advance(stmtStart)
+		s = &ast.BadStmt{From: pos, To: p.pos}
+	}
+
+	return
+}
+
// ----------------------------------------------------------------------------
// Declarations

// parseSpecFunction parses one declaration spec (import, const, type, or
// var). pos and keyword identify the enclosing declaration; iota is the
// index of the spec within a parenthesized group (used by const specs to
// decide whether a missing value may repeat the previous spec).
type parseSpecFunction func(doc *ast.CommentGroup, pos token.Pos, keyword token.Token, iota int) ast.Spec
+
// isValidImport reports whether lit — a syntactically legal string
// literal, as delivered by go/scanner — denotes a valid import path:
// a non-empty string of graphic, non-space characters none of which
// appear in the disallowed set below.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	path, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	allowed := func(r rune) bool {
		return unicode.IsGraphic(r) && !unicode.IsSpace(r) && !strings.ContainsRune(illegalChars, r)
	}
	for _, r := range path {
		if !allowed(r) {
			return false
		}
	}
	return path != ""
}
+
// parseImportSpec parses a single import spec: an optional local name
// ("." or an identifier) followed by a quoted import path. The unused
// parameters exist only to satisfy the parseSpecFunction signature.
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	// optional local package name: "." or an identifier
	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		ident = p.parseIdent()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		if !isValidImport(path) {
			p.error(pos, "invalid import path: "+path)
		}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.linecomment

	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)

	return spec
}
+
// parseValueSpec parses a single const or var spec (names, optional type,
// optional "=" value list). keyword distinguishes const from var; iota is
// the spec's index within its parenthesized group.
func (p *parser) parseValueSpec(doc *ast.CommentGroup, _ token.Pos, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}

	pos := p.pos
	idents := p.parseIdentList()
	typ := p.tryIdentOrType()
	var values []ast.Expr
	// always permit optional initialization for more tolerant parsing
	if p.tok == token.ASSIGN {
		p.next()
		values = p.parseList(true)
	}
	p.expectSemi() // call before accessing p.linecomment

	switch keyword {
	case token.VAR:
		if typ == nil && values == nil {
			p.error(pos, "missing variable type or initialization")
		}
	case token.CONST:
		// a const spec may omit its values only when it repeats the
		// previous spec (iota > 0) and introduces no new type
		if values == nil && (iota == 0 || typ != nil) {
			p.error(pos, "missing constant value")
		}
	}

	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	return spec
}
+
// parseGenericType finishes parsing a parameterized type declaration.
// openPos is the position of the already-consumed "["; name0 and typ0 are
// the first type parameter's name and (possibly nil) constraint, parsed by
// the caller during disambiguation in parseTypeSpec.
func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) {
	if p.trace {
		defer un(trace(p, "parseGenericType"))
	}

	list := p.parseParameterList(name0, typ0, token.RBRACK)
	closePos := p.expect(token.RBRACK)
	spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
	// Let the type checker decide whether to accept type parameters on aliases:
	// see issue #46477.
	if p.tok == token.ASSIGN {
		// type alias
		spec.Assign = p.pos
		p.next()
	}
	spec.Type = p.parseType()
}
+
// parseTypeSpec parses a single type spec. A "[" after the type name is
// ambiguous: it may open an array/slice type or a type parameter list;
// the analysis below resolves the ambiguity without unbounded look-ahead.
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}

	ident := p.parseIdent()
	spec := &ast.TypeSpec{Doc: doc, Name: ident}

	if p.tok == token.LBRACK && p.allowGenerics() {
		lbrack := p.pos
		p.next()
		if p.tok == token.IDENT {
			// We may have an array type or a type parameter list.
			// In either case we expect an expression x (which may
			// just be a name, or a more complex expression) which
			// we can analyze further.
			//
			// A type parameter list may have a type bound starting
			// with a "[" as in: P []E. In that case, simply parsing
			// an expression would lead to an error: P[] is invalid.
			// But since index or slice expressions are never constant
			// and thus invalid array length expressions, if we see a
			// "[" following a name it must be the start of an array
			// or slice constraint. Only if we don't see a "[" do we
			// need to parse a full expression.

			// Index or slice expressions are never constant and thus invalid
			// array length expressions. Thus, if we see a "[" following name
			// we can safely assume that "[" name starts a type parameter list.
			var x ast.Expr = p.parseIdent()
			if p.tok != token.LBRACK {
				// To parse the expression starting with name, expand
				// the call sequence we would get by passing in name
				// to parser.expr, and pass in name to parsePrimaryExpr.
				p.exprLev++
				lhs := p.parsePrimaryExpr(x)
				x = p.parseBinaryExpr(lhs, token.LowestPrec+1, false)
				p.exprLev--
			}

			// analyze the cases
			var pname *ast.Ident // pname != nil means pname is the type parameter name
			var ptype ast.Expr   // ptype != nil means ptype is the type parameter type; pname != nil in this case

			switch t := x.(type) {
			case *ast.Ident:
				// Unless we see a "]", we are at the start of a type parameter list.
				if p.tok != token.RBRACK {
					// d.Name "[" name ...
					pname = t
					// no ptype
				}
			case *ast.BinaryExpr:
				// If we have an expression of the form name*T, and T is a (possibly
				// parenthesized) type literal or the next token is a comma, we are
				// at the start of a type parameter list.
				if name, _ := t.X.(*ast.Ident); name != nil {
					if t.Op == token.MUL && (isTypeLit(t.Y) || p.tok == token.COMMA) {
						// d.Name "[" name "*" t.Y
						// d.Name "[" name "*" t.Y ","
						// convert t into unary *t.Y
						pname = name
						ptype = &ast.StarExpr{Star: t.OpPos, X: t.Y}
					}
				}
				if pname == nil {
					// A normal binary expression. Since we passed check=false, we must
					// now check its operands.
					p.checkBinaryExpr(t)
				}
			case *ast.CallExpr:
				// If we have an expression of the form name(T), and T is a (possibly
				// parenthesized) type literal or the next token is a comma, we are
				// at the start of a type parameter list.
				if name, _ := t.Fun.(*ast.Ident); name != nil {
					if len(t.Args) == 1 && !t.Ellipsis.IsValid() && (isTypeLit(t.Args[0]) || p.tok == token.COMMA) {
						// d.Name "[" name "(" t.ArgList[0] ")"
						// d.Name "[" name "(" t.ArgList[0] ")" ","
						pname = name
						ptype = t.Args[0]
					}
				}
			}

			if pname != nil {
				// d.Name "[" pname ...
				// d.Name "[" pname ptype ...
				// d.Name "[" pname ptype "," ...
				p.parseGenericType(spec, lbrack, pname, ptype)
			} else {
				// d.Name "[" x ...
				spec.Type = p.parseArrayType(lbrack, x)
			}
		} else {
			// array type
			spec.Type = p.parseArrayType(lbrack, nil)
		}
	} else {
		// no type parameters
		if p.tok == token.ASSIGN {
			// type alias
			spec.Assign = p.pos
			p.next()
		}
		spec.Type = p.parseType()
	}

	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment

	return spec
}
+
+// isTypeLit reports whether x is a (possibly parenthesized) type literal.
+func isTypeLit(x ast.Expr) bool {
+	switch x := x.(type) {
+	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
+		return true
+	case *ast.StarExpr:
+		// *T may be a pointer dereferenciation.
+		// Only consider *T as type literal if T is a type literal.
+		return isTypeLit(x.X)
+	case *ast.ParenExpr:
+		return isTypeLit(x.X)
+	}
+	return false
+}
+
// parseGenDecl parses a general declaration for keyword (import, const,
// type, or var): either a single spec, or a parenthesized group of specs,
// calling f once per spec with a running iota index.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}

	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p.leadComment, pos, keyword, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// single, unparenthesized spec
		list = append(list, f(nil, pos, keyword, 0))
	}

	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
+
// parseFuncDecl parses a function or method declaration. Type parameters
// are parsed even for methods (which may not have them) so that a specific
// error can be reported and parsing can recover.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}

	doc := p.leadComment
	pos := p.expect(token.FUNC)

	// optional receiver makes this a method declaration
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		_, recv = p.parseParameters(false)
	}

	ident := p.parseIdent()

	tparams, params := p.parseParameters(true)
	if recv != nil && tparams != nil {
		// Method declarations do not have type parameters. We parse them for a
		// better error message and improved error recovery.
		p.error(tparams.Opening, "method must have no type parameters")
		tparams = nil
	}
	results := p.parseResult()

	// the body is optional (e.g. assembly-backed declarations)
	var body *ast.BlockStmt
	switch p.tok {
	case token.LBRACE:
		body = p.parseBody()
		p.expectSemi()
	case token.SEMICOLON:
		p.next()
		if p.tok == token.LBRACE {
			// opening { of function declaration on next line
			p.error(p.pos, "unexpected semicolon or newline before {")
			body = p.parseBody()
			p.expectSemi()
		}
	default:
		p.expectSemi()
	}

	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:       pos,
			TypeParams: tparams,
			Params:     params,
			Results:    results,
		},
		Body: body,
	}
	return decl
}
+
// parseDecl parses a single top-level declaration. On an unexpected token
// it records an error and advances to the next token in sync so that
// parsing can continue.
func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}

	var f parseSpecFunction
	switch p.tok {
	case token.CONST, token.VAR:
		f = p.parseValueSpec

	case token.TYPE:
		f = p.parseTypeSpec

	case token.FUNC:
		return p.parseFuncDecl()

	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		p.advance(sync)
		return &ast.BadDecl{From: pos, To: p.pos}
	}

	return p.parseGenDecl(p.tok, f)
}
+
+// ----------------------------------------------------------------------------
+// Source files
+
// parseFile parses a complete source file: package clause, import
// declarations, then the remaining top-level declarations (subject to the
// PackageClauseOnly and ImportsOnly modes). Unless SkipObjectResolution is
// set, identifiers are then resolved to objects within the file.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()

	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}

		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(declStart))
			}
		}
	}

	f := &ast.File{
		Doc:      doc,
		Package:  pos,
		Name:     ident,
		Decls:    decls,
		Imports:  p.imports,
		Comments: p.comments,
	}
	var declErr func(token.Pos, string)
	if p.mode&DeclarationErrors != 0 {
		declErr = p.error
	}
	if p.mode&SkipObjectResolution == 0 {
		resolveFile(f, p.file, declErr)
	}

	return f
}
diff --git a/internal/backport/go/parser/parser_test.go b/internal/backport/go/parser/parser_test.go
new file mode 100644
index 0000000..feb05ea
--- /dev/null
+++ b/internal/backport/go/parser/parser_test.go
@@ -0,0 +1,579 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+	"io/fs"
+	"strings"
+	"testing"
+)
+
// validFiles lists source files in this package that must parse without
// error in TestParse.
var validFiles = []string{
	"parser.go",
	"parser_test.go",
	"error_test.go",
	"short_test.go",
}
+
+func TestParse(t *testing.T) {
+	for _, filename := range validFiles {
+		_, err := ParseFile(token.NewFileSet(), filename, nil, DeclarationErrors)
+		if err != nil {
+			t.Fatalf("ParseFile(%s): %v", filename, err)
+		}
+	}
+}
+
// nameFilter reports whether filename is one of the files TestParseDir
// expects ParseDir to consider. "parser.go.orig" is accepted by the
// filter but must nevertheless be ignored by ParseDir itself.
func nameFilter(filename string) bool {
	switch filename {
	case "parser.go", "interface.go", "parser_test.go",
		"parser.go.orig": // permit but should be ignored by ParseDir
		return true
	}
	return false
}

// dirFilter adapts nameFilter to the fs.FileInfo-based filter that
// ParseDir expects.
func dirFilter(f fs.FileInfo) bool { return nameFilter(f.Name()) }
+
+func TestParseFile(t *testing.T) {
+	src := "package p\nvar _=s[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]"
+	_, err := ParseFile(token.NewFileSet(), "", src, 0)
+	if err == nil {
+		t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
+	}
+}
+
+func TestParseExprFrom(t *testing.T) {
+	src := "s[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]+\ns[::]"
+	_, err := ParseExprFrom(token.NewFileSet(), "", src, 0)
+	if err == nil {
+		t.Errorf("ParseExprFrom(%s) succeeded unexpectedly", src)
+	}
+}
+
// TestParseDir parses the current directory through dirFilter and checks
// that exactly one package ("parser") with the three expected .go files
// is returned; "parser.go.orig" passes the filter but must be skipped.
func TestParseDir(t *testing.T) {
	path := "."
	pkgs, err := ParseDir(token.NewFileSet(), path, dirFilter, 0)
	if err != nil {
		t.Fatalf("ParseDir(%s): %v", path, err)
	}
	if n := len(pkgs); n != 1 {
		t.Errorf("got %d packages; want 1", n)
	}
	pkg := pkgs["parser"]
	if pkg == nil {
		t.Errorf(`package "parser" not found`)
		return
	}
	if n := len(pkg.Files); n != 3 {
		t.Errorf("got %d package files; want 3", n)
	}
	for filename := range pkg.Files {
		if !nameFilter(filename) {
			t.Errorf("unexpected package file: %s", filename)
		}
	}
}
+
+func TestIssue42951(t *testing.T) {
+	path := "./testdata/issue42951"
+	_, err := ParseDir(token.NewFileSet(), path, nil, 0)
+	if err != nil {
+		t.Errorf("ParseDir(%s): %v", path, err)
+	}
+}
+
// TestParseExpr exercises the ParseExpr convenience function with a valid
// arithmetic expression, a valid type expression, invalid input (which
// must still yield a partial AST), trailing-garbage inputs, semicolon
// handling, and finally every entry of the shared valids corpus (which
// must not crash the parser).
func TestParseExpr(t *testing.T) {
	// just kicking the tires:
	// a valid arithmetic expression
	src := "a + b"
	x, err := ParseExpr(src)
	if err != nil {
		t.Errorf("ParseExpr(%q): %v", src, err)
	}
	// sanity check
	if _, ok := x.(*ast.BinaryExpr); !ok {
		t.Errorf("ParseExpr(%q): got %T, want *ast.BinaryExpr", src, x)
	}

	// a valid type expression
	src = "struct{x *int}"
	x, err = ParseExpr(src)
	if err != nil {
		t.Errorf("ParseExpr(%q): %v", src, err)
	}
	// sanity check
	if _, ok := x.(*ast.StructType); !ok {
		t.Errorf("ParseExpr(%q): got %T, want *ast.StructType", src, x)
	}

	// an invalid expression
	src = "a + *"
	x, err = ParseExpr(src)
	if err == nil {
		t.Errorf("ParseExpr(%q): got no error", src)
	}
	if x == nil {
		t.Errorf("ParseExpr(%q): got no (partial) result", src)
	}
	if _, ok := x.(*ast.BinaryExpr); !ok {
		t.Errorf("ParseExpr(%q): got %T, want *ast.BinaryExpr", src, x)
	}

	// a valid expression followed by extra tokens is invalid
	src = "a[i] := x"
	if _, err := ParseExpr(src); err == nil {
		t.Errorf("ParseExpr(%q): got no error", src)
	}

	// a semicolon is not permitted unless automatically inserted
	src = "a + b\n"
	if _, err := ParseExpr(src); err != nil {
		t.Errorf("ParseExpr(%q): got error %s", src, err)
	}
	src = "a + b;"
	if _, err := ParseExpr(src); err == nil {
		t.Errorf("ParseExpr(%q): got no error", src)
	}

	// various other stuff following a valid expression
	const validExpr = "a + b"
	const anything = "dh3*#D)#_"
	for _, c := range "!)]};," {
		src := validExpr + string(c) + anything
		if _, err := ParseExpr(src); err == nil {
			t.Errorf("ParseExpr(%q): got no error", src)
		}
	}

	// ParseExpr must not crash
	for _, src := range valids {
		ParseExpr(src)
	}
}
+
// TestColonEqualsScope checks that in "x, y, z := x, y, z" the right-hand
// names resolve to (undefined) globals — no Obj — while the left-hand
// names declare new objects in the function scope.
func TestColonEqualsScope(t *testing.T) {
	f, err := ParseFile(token.NewFileSet(), "", `package p; func f() { x, y, z := x, y, z }`, 0)
	if err != nil {
		t.Fatal(err)
	}

	// RHS refers to undefined globals; LHS does not.
	as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt)
	for _, v := range as.Rhs {
		id := v.(*ast.Ident)
		if id.Obj != nil {
			t.Errorf("rhs %s has Obj, should not", id.Name)
		}
	}
	for _, v := range as.Lhs {
		id := v.(*ast.Ident)
		if id.Obj == nil {
			t.Errorf("lhs %s does not have Obj, should", id.Name)
		}
	}
}
+
// TestVarScope is the "var" analogue of TestColonEqualsScope: the
// initializer names resolve to undefined globals (no Obj) while the
// declared names receive objects.
func TestVarScope(t *testing.T) {
	f, err := ParseFile(token.NewFileSet(), "", `package p; func f() { var x, y, z = x, y, z }`, 0)
	if err != nil {
		t.Fatal(err)
	}

	// RHS refers to undefined globals; LHS does not.
	as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.DeclStmt).Decl.(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
	for _, v := range as.Values {
		id := v.(*ast.Ident)
		if id.Obj != nil {
			t.Errorf("rhs %s has Obj, should not", id.Name)
		}
	}
	for _, id := range as.Names {
		if id.Obj == nil {
			t.Errorf("lhs %s does not have Obj, should", id.Name)
		}
	}
}
+
// TestObjects checks the ObjKind recorded on each declared identifier
// after parsing; identifiers mapped to ast.Bad are expected to have no
// object at all (unscoped or unresolved names).
func TestObjects(t *testing.T) {
	const src = `
package p
import fmt "fmt"
const pi = 3.14
type T struct{}
var x int
func f() { L: }
`

	f, err := ParseFile(token.NewFileSet(), "", src, 0)
	if err != nil {
		t.Fatal(err)
	}

	objects := map[string]ast.ObjKind{
		"p":   ast.Bad, // not in a scope
		"fmt": ast.Bad, // not resolved yet
		"pi":  ast.Con,
		"T":   ast.Typ,
		"x":   ast.Var,
		"int": ast.Bad, // not resolved yet
		"f":   ast.Fun,
		"L":   ast.Lbl,
	}

	ast.Inspect(f, func(n ast.Node) bool {
		if ident, ok := n.(*ast.Ident); ok {
			obj := ident.Obj
			if obj == nil {
				if objects[ident.Name] != ast.Bad {
					t.Errorf("no object for %s", ident.Name)
				}
				return true
			}
			if obj.Name != ident.Name {
				t.Errorf("names don't match: obj.Name = %s, ident.Name = %s", obj.Name, ident.Name)
			}
			kind := objects[ident.Name]
			if obj.Kind != kind {
				t.Errorf("%s: obj.Kind = %s; want %s", ident.Name, obj.Kind, kind)
			}
		}
		return true
	})
}
+
// TestUnresolved checks exactly which identifiers remain in f.Unresolved
// after file-local resolution: predeclared-looking names (int, byte,
// float, complex) stay unresolved, while names declared in the file
// (s1a, s3b, ...) do not.
func TestUnresolved(t *testing.T) {
	f, err := ParseFile(token.NewFileSet(), "", `
package p
//
func f1a(int)
func f2a(byte, int, float)
func f3a(a, b int, c float)
func f4a(...complex)
func f5a(a s1a, b ...complex)
//
func f1b(*int)
func f2b([]byte, (int), *float)
func f3b(a, b *int, c []float)
func f4b(...*complex)
func f5b(a s1a, b ...[]complex)
//
type s1a struct { int }
type s2a struct { byte; int; s1a }
type s3a struct { a, b int; c float }
//
type s1b struct { *int }
type s2b struct { byte; int; *float }
type s3b struct { a, b *s3b; c []float }
`, 0)
	if err != nil {
		t.Fatal(err)
	}

	want := "int " + // f1a
		"byte int float " + // f2a
		"int float " + // f3a
		"complex " + // f4a
		"complex " + // f5a
		//
		"int " + // f1b
		"byte int float " + // f2b
		"int float " + // f3b
		"complex " + // f4b
		"complex " + // f5b
		//
		"int " + // s1a
		"byte int " + // s2a
		"int float " + // s3a
		//
		"int " + // s1b
		"byte int float " + // s2b
		"float " // s3b

	// collect unresolved identifiers
	var buf bytes.Buffer
	for _, u := range f.Unresolved {
		buf.WriteString(u.Name)
		buf.WriteByte(' ')
	}
	got := buf.String()

	if got != want {
		t.Errorf("\ngot:  %s\nwant: %s", got, want)
	}
}
+
// imports maps candidate import path literals — exactly as they would
// appear in source, quotes or backquotes included — to whether the parser
// must accept them.
var imports = map[string]bool{
	`"a"`:        true,
	"`a`":        true,
	`"a/b"`:      true,
	`"a.b"`:      true,
	`"m\x61th"`:  true,
	`"greek/αβ"`: true,
	`""`:         false,

	// Each of these pairs tests both `` vs "" strings
	// and also use of invalid characters spelled out as
	// escape sequences and written directly.
	// For example `"\x00"` tests import "\x00"
	// while "`\x00`" tests import `<actual-NUL-byte>`.
	`"\x00"`:     false,
	"`\x00`":     false,
	`"\x7f"`:     false,
	"`\x7f`":     false,
	`"a!"`:       false,
	"`a!`":       false,
	`"a b"`:      false,
	"`a b`":      false,
	`"a\\b"`:     false,
	"`a\\b`":     false,
	"\"`a`\"":    false,
	"`\"a\"`":    false,
	`"\x80\x80"`: false,
	"`\x80\x80`": false,
	`"\xFFFD"`:   false,
	"`\xFFFD`":   false,
}
+
+func TestImports(t *testing.T) {
+	for path, isValid := range imports {
+		src := fmt.Sprintf("package p; import %s", path)
+		_, err := ParseFile(token.NewFileSet(), "", src, 0)
+		switch {
+		case err != nil && isValid:
+			t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
+		case err == nil && !isValid:
+			t.Errorf("ParseFile(%s): got no error; expected one", src)
+		}
+	}
+}
+
// TestCommentGroups checks how adjacent comments are grouped: comments on
// the same line or on consecutive lines form one group; a blank line or
// intervening code starts a new one.
func TestCommentGroups(t *testing.T) {
	f, err := ParseFile(token.NewFileSet(), "", `
package p /* 1a */ /* 1b */      /* 1c */ // 1d
/* 2a
*/
// 2b
const pi = 3.1415
/* 3a */ // 3b
/* 3c */ const e = 2.7182

// Example from issue 3139
func ExampleCount() {
	fmt.Println(strings.Count("cheese", "e"))
	fmt.Println(strings.Count("five", "")) // before & after each rune
	// Output:
	// 3
	// 5
}
`, ParseComments)
	if err != nil {
		t.Fatal(err)
	}
	expected := [][]string{
		{"/* 1a */", "/* 1b */", "/* 1c */", "// 1d"},
		{"/* 2a\n*/", "// 2b"},
		{"/* 3a */", "// 3b", "/* 3c */"},
		{"// Example from issue 3139"},
		{"// before & after each rune"},
		{"// Output:", "// 3", "// 5"},
	}
	if len(f.Comments) != len(expected) {
		t.Fatalf("got %d comment groups; expected %d", len(f.Comments), len(expected))
	}
	for i, exp := range expected {
		got := f.Comments[i].List
		if len(got) != len(exp) {
			t.Errorf("got %d comments in group %d; expected %d", len(got), i, len(exp))
			continue
		}
		for j, exp := range exp {
			got := got[j].Text
			if got != exp {
				t.Errorf("got %q in group %d; expected %q", got, i, exp)
			}
		}
	}
}
+
// getField returns the struct field named by fieldname ("Type.Field")
// in file, or nil if no such type or field exists.
func getField(file *ast.File, fieldname string) *ast.Field {
	parts := strings.Split(fieldname, ".")
	for _, d := range file.Decls {
		if d, ok := d.(*ast.GenDecl); ok && d.Tok == token.TYPE {
			for _, s := range d.Specs {
				if s, ok := s.(*ast.TypeSpec); ok && s.Name.Name == parts[0] {
					if s, ok := s.Type.(*ast.StructType); ok {
						for _, f := range s.Fields.List {
							for _, name := range f.Names {
								if name.Name == parts[1] {
									return f
								}
							}
						}
					}
				}
			}
		}
	}
	return nil
}
+
+// Don't use ast.CommentGroup.Text() - we want to see exact comment text.
+func commentText(c *ast.CommentGroup) string {
+	var buf bytes.Buffer
+	if c != nil {
+		for _, c := range c.List {
+			buf.WriteString(c.Text)
+		}
+	}
+	return buf.String()
+}
+
// checkFieldComments asserts that the field named fieldname ("Type.Field")
// in file carries exactly the given lead (Doc) and line (Comment) text.
func checkFieldComments(t *testing.T, file *ast.File, fieldname, lead, line string) {
	f := getField(file, fieldname)
	if f == nil {
		t.Fatalf("field not found: %s", fieldname)
	}
	if got := commentText(f.Doc); got != lead {
		t.Errorf("got lead comment %q; expected %q", got, lead)
	}
	if got := commentText(f.Comment); got != line {
		t.Errorf("got line comment %q; expected %q", got, line)
	}
}
+
// TestLeadAndLineComments checks the attachment of Doc (lead) and Comment
// (line) groups to struct fields, and that ast.FileExports removes
// unexported fields without disturbing the remaining comments.
func TestLeadAndLineComments(t *testing.T) {
	f, err := ParseFile(token.NewFileSet(), "", `
package p
type T struct {
	/* F1 lead comment */
	//
	F1 int  /* F1 */ // line comment
	// F2 lead
	// comment
	F2 int  // F2 line comment
	// f3 lead comment
	f3 int  // f3 line comment
}
`, ParseComments)
	if err != nil {
		t.Fatal(err)
	}
	checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
	checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
	checkFieldComments(t, f, "T.f3", "// f3 lead comment", "// f3 line comment")
	ast.FileExports(f)
	checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
	checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
	if getField(f, "T.f3") != nil {
		t.Error("not expected to find T.f3")
	}
}
+
// TestIssue9979 verifies that empty statements are contained within their enclosing blocks.
// It also checks that each empty statement's Implicit flag agrees with the
// source: explicit empty statements have a real ';' at their position,
// implicit ones do not.
func TestIssue9979(t *testing.T) {
	for _, src := range []string{
		"package p; func f() {;}",
		"package p; func f() {L:}",
		"package p; func f() {L:;}",
		"package p; func f() {L:\n}",
		"package p; func f() {L:\n;}",
		"package p; func f() { ; }",
		"package p; func f() { L: }",
		"package p; func f() { L: ; }",
		"package p; func f() { L: \n}",
		"package p; func f() { L: \n; }",
	} {
		fset := token.NewFileSet()
		f, err := ParseFile(fset, "", src, 0)
		if err != nil {
			t.Fatal(err)
		}

		var pos, end token.Pos
		ast.Inspect(f, func(x ast.Node) bool {
			switch s := x.(type) {
			case *ast.BlockStmt:
				pos, end = s.Pos()+1, s.End()-1 // exclude "{", "}"
			case *ast.LabeledStmt:
				pos, end = s.Pos()+2, s.End() // exclude "L:"
			case *ast.EmptyStmt:
				// check containment
				if s.Pos() < pos || s.End() > end {
					t.Errorf("%s: %T[%d, %d] not inside [%d, %d]", src, s, s.Pos(), s.End(), pos, end)
				}
				// check semicolon: the source has ';' at the statement's
				// position exactly when the statement is not implicit
				offs := fset.Position(s.Pos()).Offset
				if ch := src[offs]; ch != ';' != s.Implicit {
					want := "want ';'"
					if s.Implicit {
						want = "but ';' is implicit"
					}
					t.Errorf("%s: found %q at offset %d; %s", src, ch, offs, want)
				}
			}
			return true
		})
	}
}
+
// TestIncompleteSelection ensures that an incomplete selector
// expression is parsed as a (blank) *ast.SelectorExpr, not a
// *ast.BadExpr. The parse must fail, but the partial AST must still
// contain a selector with a blank Sel field.
func TestIncompleteSelection(t *testing.T) {
	for _, src := range []string{
		"package p; var _ = fmt.",             // at EOF
		"package p; var _ = fmt.\ntype X int", // not at EOF
	} {
		fset := token.NewFileSet()
		f, err := ParseFile(fset, "", src, 0)
		if err == nil {
			t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
			continue
		}

		const wantErr = "expected selector or type assertion"
		if !strings.Contains(err.Error(), wantErr) {
			t.Errorf("ParseFile returned wrong error %q, want %q", err, wantErr)
		}

		var sel *ast.SelectorExpr
		ast.Inspect(f, func(n ast.Node) bool {
			if n, ok := n.(*ast.SelectorExpr); ok {
				sel = n
			}
			return true
		})
		if sel == nil {
			t.Error("found no *ast.SelectorExpr")
			continue
		}
		const wantSel = "&{fmt _}"
		if fmt.Sprint(sel) != wantSel {
			t.Errorf("found selector %s, want %s", sel, wantSel)
			continue
		}
	}
}
+
+func TestLastLineComment(t *testing.T) {
+	const src = `package main
+type x int // comment
+`
+	fset := token.NewFileSet()
+	f, err := ParseFile(fset, "", src, ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	comment := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.TypeSpec).Comment.List[0].Text
+	if comment != "// comment" {
+		t.Errorf("got %q, want %q", comment, "// comment")
+	}
+}
diff --git a/internal/backport/go/parser/performance_test.go b/internal/backport/go/parser/performance_test.go
new file mode 100644
index 0000000..6e7243f
--- /dev/null
+++ b/internal/backport/go/parser/performance_test.go
@@ -0,0 +1,57 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+	"golang.org/x/website/internal/backport/go/token"
+	"os"
+	"testing"
+)
+
// src is the raw source text used as benchmark input.
//
// TODO(rFindley): use a testdata file or file from another package here,
// to avoid a moving target.
var src = readFile("parser.go")
+
// readFile returns the contents of filename, panicking on any error —
// acceptable here, since a failure means the benchmark environment
// itself is broken.
func readFile(filename string) []byte {
	data, err := os.ReadFile(filename)
	if err == nil {
		return data
	}
	panic(err)
}
+
// BenchmarkParse measures a full ParseFile of src, including object
// resolution.
func BenchmarkParse(b *testing.B) {
	b.SetBytes(int64(len(src)))
	for i := 0; i < b.N; i++ {
		if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments); err != nil {
			b.Fatalf("benchmark failed due to parse error: %s", err)
		}
	}
}

// BenchmarkParseOnly measures ParseFile of src with object resolution
// skipped, isolating the pure parsing cost.
func BenchmarkParseOnly(b *testing.B) {
	b.SetBytes(int64(len(src)))
	for i := 0; i < b.N; i++ {
		if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments|SkipObjectResolution); err != nil {
			b.Fatalf("benchmark failed due to parse error: %s", err)
		}
	}
}

// BenchmarkResolve measures object resolution alone, re-parsing src
// (untimed) each iteration to obtain a fresh, unresolved AST.
func BenchmarkResolve(b *testing.B) {
	b.SetBytes(int64(len(src)))
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		fset := token.NewFileSet()
		file, err := ParseFile(fset, "", src, SkipObjectResolution)
		if err != nil {
			b.Fatalf("benchmark failed due to parse error: %s", err)
		}
		b.StartTimer()
		handle := fset.File(file.Package)
		resolveFile(file, handle, nil)
	}
}
diff --git a/internal/backport/go/parser/resolver.go b/internal/backport/go/parser/resolver.go
new file mode 100644
index 0000000..b648836
--- /dev/null
+++ b/internal/backport/go/parser/resolver.go
@@ -0,0 +1,607 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+	"strings"
+)
+
+const debugResolve = false
+
+// resolveFile walks the given file to resolve identifiers within the file
+// scope, updating ast.Ident.Obj fields with declaration information.
+//
+// If declErr is non-nil, it is used to report declaration errors during
+// resolution. tok is used to format position in error messages.
+func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, string)) {
+	pkgScope := ast.NewScope(nil)
+	r := &resolver{
+		handle:   handle,
+		declErr:  declErr,
+		topScope: pkgScope,
+		pkgScope: pkgScope,
+		depth:    1,
+	}
+
+	for _, decl := range file.Decls {
+		ast.Walk(r, decl)
+	}
+
+	r.closeScope()
+	assert(r.topScope == nil, "unbalanced scopes")
+	assert(r.labelScope == nil, "unbalanced label scopes")
+
+	// resolve global identifiers within the same file
+	i := 0
+	for _, ident := range r.unresolved {
+		// i <= index for current ident
+		assert(ident.Obj == unresolved, "object already resolved")
+		ident.Obj = r.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
+		if ident.Obj == nil {
+			r.unresolved[i] = ident
+			i++
+		} else if debugResolve {
+			pos := ident.Obj.Decl.(interface{ Pos() token.Pos }).Pos()
+			r.trace("resolved %s@%v to package object %v", ident.Name, ident.Pos(), pos)
+		}
+	}
+	file.Scope = r.pkgScope
+	file.Unresolved = r.unresolved[0:i]
+}
+
+type resolver struct {
+	handle  *token.File
+	declErr func(token.Pos, string)
+
+	// Ordinary identifier scopes
+	pkgScope   *ast.Scope   // pkgScope.Outer == nil
+	topScope   *ast.Scope   // top-most scope; may be pkgScope
+	unresolved []*ast.Ident // unresolved identifiers
+	depth      int          // scope depth
+
+	// Label scopes
+	// (maintained by open/close LabelScope)
+	labelScope  *ast.Scope     // label scope for current function
+	targetStack [][]*ast.Ident // stack of unresolved labels
+}
+
+func (r *resolver) trace(format string, args ...interface{}) {
+	fmt.Println(strings.Repeat(". ", r.depth) + r.sprintf(format, args...))
+}
+
+func (r *resolver) sprintf(format string, args ...interface{}) string {
+	for i, arg := range args {
+		switch arg := arg.(type) {
+		case token.Pos:
+			args[i] = r.handle.Position(arg)
+		}
+	}
+	return fmt.Sprintf(format, args...)
+}
+
+func (r *resolver) openScope(pos token.Pos) {
+	if debugResolve {
+		r.trace("opening scope @%v", pos)
+		r.depth++
+	}
+	r.topScope = ast.NewScope(r.topScope)
+}
+
+func (r *resolver) closeScope() {
+	if debugResolve {
+		r.depth--
+		r.trace("closing scope")
+	}
+	r.topScope = r.topScope.Outer
+}
+
+func (r *resolver) openLabelScope() {
+	r.labelScope = ast.NewScope(r.labelScope)
+	r.targetStack = append(r.targetStack, nil)
+}
+
+func (r *resolver) closeLabelScope() {
+	// resolve labels
+	n := len(r.targetStack) - 1
+	scope := r.labelScope
+	for _, ident := range r.targetStack[n] {
+		ident.Obj = scope.Lookup(ident.Name)
+		if ident.Obj == nil && r.declErr != nil {
+			r.declErr(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+		}
+	}
+	// pop label scope
+	r.targetStack = r.targetStack[0:n]
+	r.labelScope = r.labelScope.Outer
+}
+
+func (r *resolver) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+	for _, ident := range idents {
+		if ident.Obj != nil {
+			panic(fmt.Sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name))
+		}
+		obj := ast.NewObj(kind, ident.Name)
+		// remember the corresponding declaration for redeclaration
+		// errors and global variable resolution/typechecking phase
+		obj.Decl = decl
+		obj.Data = data
+		// Identifiers (for receiver type parameters) are written to the scope, but
+		// never set as the resolved object. See issue #50956.
+		if _, ok := decl.(*ast.Ident); !ok {
+			ident.Obj = obj
+		}
+		if ident.Name != "_" {
+			if debugResolve {
+				r.trace("declaring %s@%v", ident.Name, ident.Pos())
+			}
+			if alt := scope.Insert(obj); alt != nil && r.declErr != nil {
+				prevDecl := ""
+				if pos := alt.Pos(); pos.IsValid() {
+					prevDecl = r.sprintf("\n\tprevious declaration at %v", pos)
+				}
+				r.declErr(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+			}
+		}
+	}
+}
+
+func (r *resolver) shortVarDecl(decl *ast.AssignStmt) {
+	// Go spec: A short variable declaration may redeclare variables
+	// provided they were originally declared in the same block with
+	// the same type, and at least one of the non-blank variables is new.
+	n := 0 // number of new variables
+	for _, x := range decl.Lhs {
+		if ident, isIdent := x.(*ast.Ident); isIdent {
+			assert(ident.Obj == nil, "identifier already declared or resolved")
+			obj := ast.NewObj(ast.Var, ident.Name)
+			// remember corresponding assignment for other tools
+			obj.Decl = decl
+			ident.Obj = obj
+			if ident.Name != "_" {
+				if debugResolve {
+					r.trace("declaring %s@%v", ident.Name, ident.Pos())
+				}
+				if alt := r.topScope.Insert(obj); alt != nil {
+					ident.Obj = alt // redeclaration
+				} else {
+					n++ // new declaration
+				}
+			}
+		}
+	}
+	if n == 0 && r.declErr != nil {
+		r.declErr(decl.Lhs[0].Pos(), "no new variables on left side of :=")
+	}
+}
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+// If x is an identifier, resolve attempts to resolve x by looking up
+// the object it denotes. If no object is found and collectUnresolved is
+// set, x is marked as unresolved and collected in the list of unresolved
+// identifiers.
+func (r *resolver) resolve(ident *ast.Ident, collectUnresolved bool) {
+	if ident.Obj != nil {
+		panic(r.sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name))
+	}
+	// '_' should never refer to existing declarations, because it has special
+	// handling in the spec.
+	if ident.Name == "_" {
+		return
+	}
+	for s := r.topScope; s != nil; s = s.Outer {
+		if obj := s.Lookup(ident.Name); obj != nil {
+			if debugResolve {
+				r.trace("resolved %v:%s to %v", ident.Pos(), ident.Name, obj)
+			}
+			assert(obj.Name != "", "obj with no name")
+			// Identifiers (for receiver type parameters) are written to the scope,
+			// but never set as the resolved object. See issue #50956.
+			if _, ok := obj.Decl.(*ast.Ident); !ok {
+				ident.Obj = obj
+			}
+			return
+		}
+	}
+	// all local scopes are known, so any unresolved identifier
+	// must be found either in the file scope, package scope
+	// (perhaps in another file), or universe scope --- collect
+	// them so that they can be resolved later
+	if collectUnresolved {
+		ident.Obj = unresolved
+		r.unresolved = append(r.unresolved, ident)
+	}
+}
+
+func (r *resolver) walkExprs(list []ast.Expr) {
+	for _, node := range list {
+		ast.Walk(r, node)
+	}
+}
+
+func (r *resolver) walkLHS(list []ast.Expr) {
+	for _, expr := range list {
+		expr := unparen(expr)
+		if _, ok := expr.(*ast.Ident); !ok && expr != nil {
+			ast.Walk(r, expr)
+		}
+	}
+}
+
+func (r *resolver) walkStmts(list []ast.Stmt) {
+	for _, stmt := range list {
+		ast.Walk(r, stmt)
+	}
+}
+
+func (r *resolver) Visit(node ast.Node) ast.Visitor {
+	if debugResolve && node != nil {
+		r.trace("node %T@%v", node, node.Pos())
+	}
+
+	switch n := node.(type) {
+
+	// Expressions.
+	case *ast.Ident:
+		r.resolve(n, true)
+
+	case *ast.FuncLit:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		r.walkFuncType(n.Type)
+		r.walkBody(n.Body)
+
+	case *ast.SelectorExpr:
+		ast.Walk(r, n.X)
+		// Note: don't try to resolve n.Sel, as we don't support qualified
+		// resolution.
+
+	case *ast.StructType:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		r.walkFieldList(n.Fields, ast.Var)
+
+	case *ast.FuncType:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		r.walkFuncType(n)
+
+	case *ast.CompositeLit:
+		if n.Type != nil {
+			ast.Walk(r, n.Type)
+		}
+		for _, e := range n.Elts {
+			if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
+				// See issue #45160: try to resolve composite lit keys, but don't
+				// collect them as unresolved if resolution failed. This replicates
+				// existing behavior when resolving during parsing.
+				if ident, _ := kv.Key.(*ast.Ident); ident != nil {
+					r.resolve(ident, false)
+				} else {
+					ast.Walk(r, kv.Key)
+				}
+				ast.Walk(r, kv.Value)
+			} else {
+				ast.Walk(r, e)
+			}
+		}
+
+	case *ast.InterfaceType:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		r.walkFieldList(n.Methods, ast.Fun)
+
+	// Statements
+	case *ast.LabeledStmt:
+		r.declare(n, nil, r.labelScope, ast.Lbl, n.Label)
+		ast.Walk(r, n.Stmt)
+
+	case *ast.AssignStmt:
+		r.walkExprs(n.Rhs)
+		if n.Tok == token.DEFINE {
+			r.shortVarDecl(n)
+		} else {
+			r.walkExprs(n.Lhs)
+		}
+
+	case *ast.BranchStmt:
+		// add to list of unresolved targets
+		if n.Tok != token.FALLTHROUGH && n.Label != nil {
+			depth := len(r.targetStack) - 1
+			r.targetStack[depth] = append(r.targetStack[depth], n.Label)
+		}
+
+	case *ast.BlockStmt:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		r.walkStmts(n.List)
+
+	case *ast.IfStmt:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		if n.Init != nil {
+			ast.Walk(r, n.Init)
+		}
+		ast.Walk(r, n.Cond)
+		ast.Walk(r, n.Body)
+		if n.Else != nil {
+			ast.Walk(r, n.Else)
+		}
+
+	case *ast.CaseClause:
+		r.walkExprs(n.List)
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		r.walkStmts(n.Body)
+
+	case *ast.SwitchStmt:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		if n.Init != nil {
+			ast.Walk(r, n.Init)
+		}
+		if n.Tag != nil {
+			// The scope below reproduces some unnecessary behavior of the parser,
+			// opening an extra scope in case this is a type switch. It's not needed
+			// for expression switches.
+			// TODO: remove this once we've matched the parser resolution exactly.
+			if n.Init != nil {
+				r.openScope(n.Tag.Pos())
+				defer r.closeScope()
+			}
+			ast.Walk(r, n.Tag)
+		}
+		if n.Body != nil {
+			r.walkStmts(n.Body.List)
+		}
+
+	case *ast.TypeSwitchStmt:
+		if n.Init != nil {
+			r.openScope(n.Pos())
+			defer r.closeScope()
+			ast.Walk(r, n.Init)
+		}
+		r.openScope(n.Assign.Pos())
+		defer r.closeScope()
+		ast.Walk(r, n.Assign)
+		// s.Body consists only of case clauses, so does not get its own
+		// scope.
+		if n.Body != nil {
+			r.walkStmts(n.Body.List)
+		}
+
+	case *ast.CommClause:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		if n.Comm != nil {
+			ast.Walk(r, n.Comm)
+		}
+		r.walkStmts(n.Body)
+
+	case *ast.SelectStmt:
+		// as for switch statements, select statement bodies don't get their own
+		// scope.
+		if n.Body != nil {
+			r.walkStmts(n.Body.List)
+		}
+
+	case *ast.ForStmt:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		if n.Init != nil {
+			ast.Walk(r, n.Init)
+		}
+		if n.Cond != nil {
+			ast.Walk(r, n.Cond)
+		}
+		if n.Post != nil {
+			ast.Walk(r, n.Post)
+		}
+		ast.Walk(r, n.Body)
+
+	case *ast.RangeStmt:
+		r.openScope(n.Pos())
+		defer r.closeScope()
+		ast.Walk(r, n.X)
+		var lhs []ast.Expr
+		if n.Key != nil {
+			lhs = append(lhs, n.Key)
+		}
+		if n.Value != nil {
+			lhs = append(lhs, n.Value)
+		}
+		if len(lhs) > 0 {
+			if n.Tok == token.DEFINE {
+				// Note: we can't exactly match the behavior of object resolution
+				// during the parsing pass here, as it uses the position of the RANGE
+				// token for the RHS OpPos. That information is not contained within
+				// the AST.
+				as := &ast.AssignStmt{
+					Lhs:    lhs,
+					Tok:    token.DEFINE,
+					TokPos: n.TokPos,
+					Rhs:    []ast.Expr{&ast.UnaryExpr{Op: token.RANGE, X: n.X}},
+				}
+				// TODO(rFindley): this walkLHS reproduced the parser resolution, but
+				// is it necessary? By comparison, for a normal AssignStmt we don't
+				// walk the LHS in case there is an invalid identifier list.
+				r.walkLHS(lhs)
+				r.shortVarDecl(as)
+			} else {
+				r.walkExprs(lhs)
+			}
+		}
+		ast.Walk(r, n.Body)
+
+	// Declarations
+	case *ast.GenDecl:
+		switch n.Tok {
+		case token.CONST, token.VAR:
+			for i, spec := range n.Specs {
+				spec := spec.(*ast.ValueSpec)
+				kind := ast.Con
+				if n.Tok == token.VAR {
+					kind = ast.Var
+				}
+				r.walkExprs(spec.Values)
+				if spec.Type != nil {
+					ast.Walk(r, spec.Type)
+				}
+				r.declare(spec, i, r.topScope, kind, spec.Names...)
+			}
+		case token.TYPE:
+			for _, spec := range n.Specs {
+				spec := spec.(*ast.TypeSpec)
+				// Go spec: The scope of a type identifier declared inside a function begins
+				// at the identifier in the TypeSpec and ends at the end of the innermost
+				// containing block.
+				r.declare(spec, nil, r.topScope, ast.Typ, spec.Name)
+				if spec.TypeParams != nil {
+					r.openScope(spec.Pos())
+					defer r.closeScope()
+					r.walkTParams(spec.TypeParams)
+				}
+				ast.Walk(r, spec.Type)
+			}
+		}
+
+	case *ast.FuncDecl:
+		// Open the function scope.
+		r.openScope(n.Pos())
+		defer r.closeScope()
+
+		r.walkRecv(n.Recv)
+
+		// Type parameters are walked normally: they can reference each other, and
+		// can be referenced by normal parameters.
+		if n.Type.TypeParams != nil {
+			r.walkTParams(n.Type.TypeParams)
+			// TODO(rFindley): need to address receiver type parameters.
+		}
+
+		// Resolve and declare parameters in a specific order to get duplicate
+		// declaration errors in the correct location.
+		r.resolveList(n.Type.Params)
+		r.resolveList(n.Type.Results)
+		r.declareList(n.Recv, ast.Var)
+		r.declareList(n.Type.Params, ast.Var)
+		r.declareList(n.Type.Results, ast.Var)
+
+		r.walkBody(n.Body)
+		if n.Recv == nil && n.Name.Name != "init" {
+			r.declare(n, nil, r.pkgScope, ast.Fun, n.Name)
+		}
+
+	default:
+		return r
+	}
+
+	return nil
+}
+
+func (r *resolver) walkFuncType(typ *ast.FuncType) {
+	// typ.TypeParams must be walked separately for FuncDecls.
+	r.resolveList(typ.Params)
+	r.resolveList(typ.Results)
+	r.declareList(typ.Params, ast.Var)
+	r.declareList(typ.Results, ast.Var)
+}
+
+func (r *resolver) resolveList(list *ast.FieldList) {
+	if list == nil {
+		return
+	}
+	for _, f := range list.List {
+		if f.Type != nil {
+			ast.Walk(r, f.Type)
+		}
+	}
+}
+
+func (r *resolver) declareList(list *ast.FieldList, kind ast.ObjKind) {
+	if list == nil {
+		return
+	}
+	for _, f := range list.List {
+		r.declare(f, nil, r.topScope, kind, f.Names...)
+	}
+}
+
+func (r *resolver) walkRecv(recv *ast.FieldList) {
+	// If our receiver has receiver type parameters, we must declare them before
+	// trying to resolve the rest of the receiver, and avoid re-resolving the
+	// type parameter identifiers.
+	if recv == nil || len(recv.List) == 0 {
+		return // nothing to do
+	}
+	typ := recv.List[0].Type
+	if ptr, ok := typ.(*ast.StarExpr); ok {
+		typ = ptr.X
+	}
+
+	var declareExprs []ast.Expr // exprs to declare
+	var resolveExprs []ast.Expr // exprs to resolve
+	switch typ := typ.(type) {
+	case *ast.IndexExpr:
+		declareExprs = []ast.Expr{typ.Index}
+		resolveExprs = append(resolveExprs, typ.X)
+	case *ast.IndexListExpr:
+		declareExprs = typ.Indices
+		resolveExprs = append(resolveExprs, typ.X)
+	default:
+		resolveExprs = append(resolveExprs, typ)
+	}
+	for _, expr := range declareExprs {
+		if id, _ := expr.(*ast.Ident); id != nil {
+			r.declare(expr, nil, r.topScope, ast.Typ, id)
+		} else {
+			// The receiver type parameter expression is invalid, but try to resolve
+			// it anyway for consistency.
+			resolveExprs = append(resolveExprs, expr)
+		}
+	}
+	for _, expr := range resolveExprs {
+		if expr != nil {
+			ast.Walk(r, expr)
+		}
+	}
+	// The receiver is invalid, but try to resolve it anyway for consistency.
+	for _, f := range recv.List[1:] {
+		if f.Type != nil {
+			ast.Walk(r, f.Type)
+		}
+	}
+}
+
+func (r *resolver) walkFieldList(list *ast.FieldList, kind ast.ObjKind) {
+	if list == nil {
+		return
+	}
+	r.resolveList(list)
+	r.declareList(list, kind)
+}
+
+// walkTParams is like walkFieldList, but declares type parameters eagerly so
+// that they may be resolved in the constraint expressions held in the field
+// Type.
+func (r *resolver) walkTParams(list *ast.FieldList) {
+	r.declareList(list, ast.Typ)
+	r.resolveList(list)
+}
+
+func (r *resolver) walkBody(body *ast.BlockStmt) {
+	if body == nil {
+		return
+	}
+	r.openLabelScope()
+	defer r.closeLabelScope()
+	r.walkStmts(body.List)
+}
diff --git a/internal/backport/go/parser/resolver_test.go b/internal/backport/go/parser/resolver_test.go
new file mode 100644
index 0000000..8dd9172
--- /dev/null
+++ b/internal/backport/go/parser/resolver_test.go
@@ -0,0 +1,176 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/internal/typeparams"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+// TestResolution checks that identifiers are resolved to the declarations
+// annotated in the source, by comparing the positions of the resulting
+// Ident.Obj.Decl to positions marked in the source via special comments.
+//
+// In the test source, any comment prefixed with '=' or '@' (or both) marks the
+// previous token position as the declaration ('=') or a use ('@') of an
+// identifier. The text following '=' and '@' in the comment string is the
+// label to use for the location.  Declaration labels must be unique within the
+// file, and use labels must refer to an existing declaration label. It's OK
+// for a comment to denote both the declaration and use of a label (e.g.
+// '=@foo'). Leading and trailing whitespace is ignored. Any comment not
+// beginning with '=' or '@' is ignored.
+func TestResolution(t *testing.T) {
+	dir := filepath.Join("testdata", "resolution")
+	fis, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, fi := range fis {
+		t.Run(fi.Name(), func(t *testing.T) {
+			fset := token.NewFileSet()
+			path := filepath.Join(dir, fi.Name())
+			src := readFile(path) // panics on failure
+			var mode Mode
+			if !strings.HasSuffix(path, ".go2") {
+				mode |= typeparams.DisallowParsing
+			}
+			file, err := ParseFile(fset, path, src, mode)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// Compare the positions of objects resolved during parsing (fromParser)
+			// to those annotated in source comments (fromComments).
+
+			handle := fset.File(file.Package)
+			fromParser := declsFromParser(file)
+			fromComments := declsFromComments(handle, src)
+
+			pos := func(pos token.Pos) token.Position {
+				p := handle.Position(pos)
+				// The file name is implied by the subtest, so remove it to avoid
+				// clutter in error messages.
+				p.Filename = ""
+				return p
+			}
+			for k, want := range fromComments {
+				if got := fromParser[k]; got != want {
+					t.Errorf("%s resolved to %s, want %s", pos(k), pos(got), pos(want))
+				}
+				delete(fromParser, k)
+			}
+			// What remains in fromParser are unexpected resolutions.
+			for k, got := range fromParser {
+				t.Errorf("%s resolved to %s, want no object", pos(k), pos(got))
+			}
+		})
+	}
+}
+
+// declsFromParser walks the file and collects the map associating an
+// identifier position with its declaration position.
+func declsFromParser(file *ast.File) map[token.Pos]token.Pos {
+	objmap := map[token.Pos]token.Pos{}
+	ast.Inspect(file, func(node ast.Node) bool {
+		// Ignore blank identifiers to reduce noise.
+		if ident, _ := node.(*ast.Ident); ident != nil && ident.Obj != nil && ident.Name != "_" {
+			objmap[ident.Pos()] = ident.Obj.Pos()
+		}
+		return true
+	})
+	return objmap
+}
+
+// declsFromComments looks at comments annotating uses and declarations, and
+// maps each identifier use to its corresponding declaration. See the
+// description of these annotations in the documentation for TestResolution.
+func declsFromComments(handle *token.File, src []byte) map[token.Pos]token.Pos {
+	decls, uses := positionMarkers(handle, src)
+
+	objmap := make(map[token.Pos]token.Pos)
+	// Join decls and uses on name, to build the map of use->decl.
+	for name, posns := range uses {
+		declpos, ok := decls[name]
+		if !ok {
+			panic(fmt.Sprintf("missing declaration for %s", name))
+		}
+		for _, pos := range posns {
+			objmap[pos] = declpos
+		}
+	}
+	return objmap
+}
+
+// positionMarkers extracts named positions from the source denoted by comments
+// prefixed with '=' (declarations) and '@' (uses): for example '@foo' or
+// '=@bar'. It returns a map of name->position for declarations, and
+// name->position(s) for uses.
+func positionMarkers(handle *token.File, src []byte) (decls map[string]token.Pos, uses map[string][]token.Pos) {
+	var s scanner.Scanner
+	s.Init(handle, src, nil, scanner.ScanComments)
+	decls = make(map[string]token.Pos)
+	uses = make(map[string][]token.Pos)
+	var prev token.Pos // position of last non-comment, non-semicolon token
+
+scanFile:
+	for {
+		pos, tok, lit := s.Scan()
+		switch tok {
+		case token.EOF:
+			break scanFile
+		case token.COMMENT:
+			name, decl, use := annotatedObj(lit)
+			if len(name) > 0 {
+				if decl {
+					if _, ok := decls[name]; ok {
+						panic(fmt.Sprintf("duplicate declaration markers for %s", name))
+					}
+					decls[name] = prev
+				}
+				if use {
+					uses[name] = append(uses[name], prev)
+				}
+			}
+		case token.SEMICOLON:
+			// ignore automatically inserted semicolon
+			if lit == "\n" {
+				continue scanFile
+			}
+			fallthrough
+		default:
+			prev = pos
+		}
+	}
+	return decls, uses
+}
+
+func annotatedObj(lit string) (name string, decl, use bool) {
+	if lit[1] == '*' {
+		lit = lit[:len(lit)-2] // strip trailing */
+	}
+	lit = strings.TrimSpace(lit[2:])
+
+scanLit:
+	for idx, r := range lit {
+		switch r {
+		case '=':
+			decl = true
+		case '@':
+			use = true
+		default:
+			name = lit[idx:]
+			break scanLit
+		}
+	}
+	return
+}
diff --git a/internal/backport/go/parser/short_test.go b/internal/backport/go/parser/short_test.go
new file mode 100644
index 0000000..ada2caa
--- /dev/null
+++ b/internal/backport/go/parser/short_test.go
@@ -0,0 +1,284 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for short valid and invalid programs.
+
+package parser
+
+import (
+	"golang.org/x/website/internal/backport/go/internal/typeparams"
+	"testing"
+)
+
+var valids = []string{
+	"package p\n",
+	`package p;`,
+	`package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
+	`package p; func f() { if f(T{}) {} };`,
+	`package p; func f() { _ = <-chan int(nil) };`,
+	`package p; func f() { _ = (<-chan int)(nil) };`,
+	`package p; func f() { _ = (<-chan <-chan int)(nil) };`,
+	`package p; func f() { _ = <-chan <-chan <-chan <-chan <-int(nil) };`,
+	`package p; func f(func() func() func());`,
+	`package p; func f(...T);`,
+	`package p; func f(float, ...int);`,
+	`package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`,
+	`package p; func f(int,) {};`,
+	`package p; func f(...int,) {};`,
+	`package p; func f(x ...int,) {};`,
+	`package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`,
+	`package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
+	`package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
+	`package p; var a = T{{1, 2}, {3, 4}}`,
+	`package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
+	`package p; func f() { select { case x := (<-c): } };`,
+	`package p; func f() { if ; true {} };`,
+	`package p; func f() { switch ; {} };`,
+	`package p; func f() { for _ = range "foo" + "bar" {} };`,
+	`package p; func f() { var s []int; g(s[:], s[i:], s[:j], s[i:j], s[i:j:k], s[:j:k]) };`,
+	`package p; var ( _ = (struct {*T}).m; _ = (interface {T}).m )`,
+	`package p; func ((T),) m() {}`,
+	`package p; func ((*T),) m() {}`,
+	`package p; func (*(T),) m() {}`,
+	`package p; func _(x []int) { for range x {} }`,
+	`package p; func _() { if [T{}.n]int{} {} }`,
+	`package p; func _() { map[int]int{}[0]++; map[int]int{}[0] += 1 }`,
+	`package p; func _(x interface{f()}) { interface{f()}(x).f() }`,
+	`package p; func _(x chan int) { chan int(x) <- 0 }`,
+	`package p; const (x = 0; y; z)`, // issue 9639
+	`package p; var _ = map[P]int{P{}:0, {}:1}`,
+	`package p; var _ = map[*P]int{&P{}:0, {}:1}`,
+	`package p; type T = int`,
+	`package p; type (T = p.T; _ = struct{}; x = *T)`,
+	`package p; type T (*int)`,
+	`package p; type _ struct{ ((int)) }`,
+	`package p; type _ struct{ (*(int)) }`,
+	`package p; type _ struct{ ([]byte) }`, // disallowed by type-checker
+	`package p; var _ = func()T(nil)`,
+	`package p; func _(T (P))`,
+	`package p; func _(T []E)`,
+	`package p; func _(T [P]E)`,
+	`package p; type _ [A+B]struct{}`,
+	`package p; func (R) _()`,
+	`package p; type _ struct{ f [n]E }`,
+	`package p; type _ struct{ f [a+b+c+d]E }`,
+	`package p; type I1 interface{}; type I2 interface{ I1 }`,
+}
+
+// validWithTParamsOnly holds source code examples that are valid if
+// parseTypeParams is set, but invalid if not. When checking with the
+// parseTypeParams set, errors are ignored.
+var validWithTParamsOnly = []string{
+	`package p; type _ []T[ /* ERROR "expected ';', found '\['" */ int]`,
+	`package p; type T[P any /* ERROR "expected ']', found any" */ ] struct { P }`,
+	`package p; type T[P comparable /* ERROR "expected ']', found comparable" */ ] struct { P }`,
+	`package p; type T[P comparable /* ERROR "expected ']', found comparable" */ [P]] struct { P }`,
+	`package p; type T[P1, /* ERROR "unexpected comma" */ P2 any] struct { P1; f []P2 }`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any]()()`,
+	`package p; func _(T (P))`,
+	`package p; func f[ /* ERROR "expected '\(', found '\['" */ A, B any](); func _() { _ = f[int, int] }`,
+	`package p; func _(x /* ERROR "mixed named and unnamed parameters" */ T[P1, P2, P3])`,
+	`package p; func _(x /* ERROR "mixed named and unnamed parameters" */ p.T[Q])`,
+	`package p; func _(p.T[ /* ERROR "missing ',' in parameter list" */ Q])`,
+	`package p; type _[A interface /* ERROR "expected ']', found 'interface'" */ {},] struct{}`,
+	`package p; type _[A interface /* ERROR "expected ']', found 'interface'" */ {}] struct{}`,
+	`package p; type _[A, /* ERROR "unexpected comma" */  B any,] struct{}`,
+	`package p; type _[A, /* ERROR "unexpected comma" */ B any] struct{}`,
+	`package p; type _[A any /* ERROR "expected ']', found any" */,] struct{}`,
+	`package p; type _[A any /* ERROR "expected ']', found any" */ ]struct{}`,
+	`package p; type _[A any /* ERROR "expected ']', found any" */ ] struct{ A }`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any]()`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T any](x T)`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1, T2 any](x T)`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B any](a A) B`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B C](a A) B`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ A, B C[A, B]](a A) B`,
+
+	`package p; type _[A, /* ERROR "unexpected comma" */ B any] interface { _(a A) B }`,
+	`package p; type _[A, /* ERROR "unexpected comma" */ B C[A, B]] interface { _(a A) B }`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1, T2 interface{}](x T1) T2`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ T1 interface{ m() }, T2, T3 interface{}](x T1, y T3) T2`,
+	`package p; var _ = [ /* ERROR "expected expression" */ ]T[int]{}`,
+	`package p; var _ = [ /* ERROR "expected expression" */ 10]T[int]{}`,
+	`package p; var _ = func /* ERROR "expected expression" */ ()T[int]{}`,
+	`package p; var _ = map /* ERROR "expected expression" */ [T[int]]T[int]{}`,
+	`package p; var _ = chan /* ERROR "expected expression" */ T[int](x)`,
+	`package p; func _(_ T[ /* ERROR "missing ',' in parameter list" */ P], T P) T[P]`,
+	`package p; var _ T[ /* ERROR "expected ';', found '\['" */ chan int]`,
+
+	// TODO(rfindley) this error message could be improved.
+	`package p; func (_ /* ERROR "mixed named and unnamed parameters" */ R[P]) _(x T)`,
+	`package p; func (_ /* ERROR "mixed named and unnamed parameters" */ R[ P, Q]) _(x T)`,
+
+	`package p; func (R[P] /* ERROR "missing element type" */ ) _()`,
+	`package p; func _(T[P] /* ERROR "missing element type" */ )`,
+	`package p; func _(T[P1, /* ERROR "expected ']', found ','" */ P2, P3 ])`,
+	`package p; func _(T[P] /* ERROR "missing element type" */ ) T[P]`,
+	`package p; type _ struct{ T[P] /* ERROR "missing element type" */ }`,
+	`package p; type _ struct{ T[struct /* ERROR "expected expression" */ {a, b, c int}] }`,
+	`package p; type _ interface{int| /* ERROR "expected ';'" */ float32; bool; m(); string;}`,
+	`package p; type I1[T any /* ERROR "expected ']', found any" */ ] interface{}; type I2 interface{ I1[int] }`,
+	`package p; type I1[T any /* ERROR "expected ']', found any" */ ] interface{}; type I2[T any] interface{ I1[T] }`,
+	`package p; type _ interface { N[ /* ERROR "expected ';', found '\['" */ T] }`,
+	`package p; type T[P any /* ERROR "expected ']'" */ ] = T0`,
+}
+
+func TestValid(t *testing.T) {
+	t.Run("no tparams", func(t *testing.T) {
+		for _, src := range valids {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
+		}
+	})
+	t.Run("tparams", func(t *testing.T) {
+		for _, src := range valids {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
+		}
+		for _, src := range validWithTParamsOnly {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
+		}
+	})
+}
+
+// TestSingle is useful to track down a problem with a single short test program.
+func TestSingle(t *testing.T) {
+	const src = `package p; var _ = T{}`
+	checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
+}
+
+var invalids = []string{
+	`foo /* ERROR "expected 'package'" */ !`,
+	`package p; func f() { if { /* ERROR "missing condition" */ } };`,
+	`package p; func f() { if ; /* ERROR "missing condition" */ {} };`,
+	`package p; func f() { if f(); /* ERROR "missing condition" */ {} };`,
+	`package p; func f() { if _ = range /* ERROR "expected operand" */ x; true {} };`,
+	`package p; func f() { switch _ /* ERROR "expected switch expression" */ = range x; true {} };`,
+	`package p; func f() { for _ = range x ; /* ERROR "expected '{'" */ ; {} };`,
+	`package p; func f() { for ; ; _ = range /* ERROR "expected operand" */ x {} };`,
+	`package p; func f() { for ; _ /* ERROR "expected boolean or range expression" */ = range x ; {} };`,
+	`package p; func f() { switch t = /* ERROR "expected ':=', found '='" */ t.(type) {} };`,
+	`package p; func f() { switch t /* ERROR "expected switch expression" */ , t = t.(type) {} };`,
+	`package p; func f() { switch t /* ERROR "expected switch expression" */ = t.(type), t {} };`,
+	`package p; var a = [ /* ERROR "expected expression" */ 1]int;`,
+	`package p; var a = [ /* ERROR "expected expression" */ ...]int;`,
+	`package p; var a = struct /* ERROR "expected expression" */ {}`,
+	`package p; var a = func /* ERROR "expected expression" */ ();`,
+	`package p; var a = interface /* ERROR "expected expression" */ {}`,
+	`package p; var a = [ /* ERROR "expected expression" */ ]int`,
+	`package p; var a = map /* ERROR "expected expression" */ [int]int`,
+	`package p; var a = chan /* ERROR "expected expression" */ int;`,
+	`package p; var a = []int{[ /* ERROR "expected expression" */ ]int};`,
+	`package p; var a = ( /* ERROR "expected expression" */ []int);`,
+	`package p; var a = <- /* ERROR "expected expression" */ chan int;`,
+	`package p; func f() { select { case _ <- chan /* ERROR "expected expression" */ int: } };`,
+	`package p; func f() { _ = (<-<- /* ERROR "expected 'chan'" */ chan int)(nil) };`,
+	`package p; func f() { _ = (<-chan<-chan<-chan<-chan<-chan<- /* ERROR "expected channel type" */ int)(nil) };`,
+	`package p; func f() { var t []int; t /* ERROR "expected identifier on left side of :=" */ [0] := 0 };`,
+	`package p; func f() { if x := g(); x /* ERROR "expected boolean expression" */ = 0 {}};`,
+	`package p; func f() { _ = x = /* ERROR "expected '=='" */ 0 {}};`,
+	`package p; func f() { _ = 1 == func()int { var x bool; x = x = /* ERROR "expected '=='" */ true; return x }() };`,
+	`package p; func f() { var s []int; _ = s[] /* ERROR "expected operand" */ };`,
+	`package p; func f() { var s []int; _ = s[i:j: /* ERROR "3rd index required" */ ] };`,
+	`package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :k] };`,
+	`package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :] };`,
+	`package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ :] };`,
+	`package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ ::] };`,
+	`package p; func f() { var s []int; _ = s[i:j:k: /* ERROR "expected ']'" */ l] };`,
+	`package p; func f() { for x /* ERROR "boolean or range expression" */ = []string {} }`,
+	`package p; func f() { for x /* ERROR "boolean or range expression" */ := []string {} }`,
+	`package p; func f() { for i /* ERROR "boolean or range expression" */ , x = []string {} }`,
+	`package p; func f() { for i /* ERROR "boolean or range expression" */ , x := []string {} }`,
+	`package p; func f() { go f /* ERROR HERE "function must be invoked" */ }`,
+	`package p; func f() { defer func() {} /* ERROR HERE "function must be invoked" */ }`,
+	`package p; func f() { go func() { func() { f(x func /* ERROR "missing ','" */ (){}) } } }`,
+	`package p; func _() (type /* ERROR "found 'type'" */ T)(T)`,
+	`package p; func (type /* ERROR "found 'type'" */ T)(T) _()`,
+	`package p; type _[A+B, /* ERROR "unexpected comma" */ ] int`,
+
+	// TODO(rfindley): this error should be positioned on the ':'
+	`package p; var a = a[[]int:[ /* ERROR "expected expression" */ ]int];`,
+
+	// TODO(rfindley): the compiler error is better here: "cannot parenthesize embedded type"
+	// TODO(rfindley): confirm that parenthesized types should now be accepted.
+	// `package p; type I1 interface{}; type I2 interface{ (/* ERROR "expected '}', found '\('" */ I1) }`,
+
+	// issue 8656
+	`package p; func f() (a b string /* ERROR "missing ','" */ , ok bool)`,
+
+	// issue 9639
+	`package p; var x /* ERROR "missing variable type or initialization" */ , y, z;`,
+	`package p; const x /* ERROR "missing constant value" */ ;`,
+	`package p; const x /* ERROR "missing constant value" */ int;`,
+	`package p; const (x = 0; y; z /* ERROR "missing constant value" */ int);`,
+
+	// issue 12437
+	`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ }{};`,
+	`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ y float }{};`,
+
+	// issue 11611
+	`package p; type _ struct { int, } /* ERROR "expected 'IDENT', found '}'" */ ;`,
+	`package p; type _ struct { int, float } /* ERROR "expected type, found '}'" */ ;`,
+
+	// issue 13475
+	`package p; func f() { if true {} else ; /* ERROR "expected if statement or block" */ }`,
+	`package p; func f() { if true {} else defer /* ERROR "expected if statement or block" */ f() }`,
+}
+
+// invalidNoTParamErrs holds invalid source code examples annotated with the
+// error messages produced when ParseTypeParams is not set.
+var invalidNoTParamErrs = []string{
+	`package p; type _[_ any /* ERROR "expected ']', found any" */ ] int; var _ = T[]{}`,
+	`package p; type T[P any /* ERROR "expected ']', found any" */ ] = T0`,
+	`package p; var _ func[ /* ERROR "expected '\(', found '\['" */ T any](T)`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ ]()`,
+	`package p; type _[A, /* ERROR "unexpected comma" */] struct{ A }`,
+	`package p; func _[ /* ERROR "expected '\(', found '\['" */ type P, *Q interface{}]()`,
+
+	`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B any](a A) B`,
+	`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B C](a A) B`,
+	`package p; func (T) _[ /* ERROR "expected '\(', found '\['" */ A, B C[A, B]](a A) B`,
+
+	`package p; func(*T[ /* ERROR "missing ',' in parameter list" */ e, e]) _()`,
+}
+
+// invalidTParamErrs holds invalid source code examples annotated with the
+// error messages produced when ParseTypeParams is set.
+var invalidTParamErrs = []string{
+	`package p; type _[_ any] int; var _ = T[] /* ERROR "expected operand" */ {}`,
+	`package p; var _ func[ /* ERROR "must have no type parameters" */ T any](T)`,
+	`package p; func _[]/* ERROR "empty type parameter list" */()`,
+
+	// TODO(rfindley) a better location would be after the ']'
+	`package p; type _[A /* ERROR "all type parameters must be named" */ ,] struct{ A }`,
+
+	// TODO(rfindley) this error is confusing.
+	`package p; func _[type /* ERROR "all type parameters must be named" */ P, *Q interface{}]()`,
+
+	`package p; func (T) _[ /* ERROR "must have no type parameters" */ A, B any](a A) B`,
+	`package p; func (T) _[ /* ERROR "must have no type parameters" */ A, B C](a A) B`,
+	`package p; func (T) _[ /* ERROR "must have no type parameters" */ A, B C[A, B]](a A) B`,
+
+	`package p; func(*T[e, e /* ERROR "e redeclared" */ ]) _()`,
+}
+
+func TestInvalid(t *testing.T) {
+	t.Run("no tparams", func(t *testing.T) {
+		for _, src := range invalids {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors|typeparams.DisallowParsing, true)
+		}
+		for _, src := range validWithTParamsOnly {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors|typeparams.DisallowParsing, true)
+		}
+		for _, src := range invalidNoTParamErrs {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors|typeparams.DisallowParsing, true)
+		}
+	})
+	t.Run("tparams", func(t *testing.T) {
+		for _, src := range invalids {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
+		}
+		for _, src := range invalidTParamErrs {
+			checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
+		}
+	})
+}
diff --git a/internal/backport/go/parser/testdata/chans.go2 b/internal/backport/go/parser/testdata/chans.go2
new file mode 100644
index 0000000..fad2bce
--- /dev/null
+++ b/internal/backport/go/parser/testdata/chans.go2
@@ -0,0 +1,62 @@
+package chans
+
+import "runtime"
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[T any]() (*Sender[T], *Receiver[T]) {
+	c := make(chan T)
+	d := make(chan bool)
+	s := &Sender[T]{values: c, done: d}
+	r := &Receiver[T]{values: c, done: d}
+	runtime.SetFinalizer(r, r.finalize)
+	return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[T any] struct {
+	values chan<- T
+	done <-chan bool
+}
+
+// Send sends a value to the receiver. It returns whether any more
+// values may be sent; if it returns false the value was not sent.
+func (s *Sender[T]) Send(v T) bool {
+	select {
+	case s.values <- v:
+		return true
+	case <-s.done:
+		return false
+	}
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[T]) Close() {
+	close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[T any] struct {
+	values <-chan T
+	done chan<- bool
+}
+
+// Next returns the next value from the channel. The bool result
+// indicates whether the value is valid, or whether the Sender has
+// been closed and no more values will be received.
+func (r *Receiver[T]) Next() (T, bool) {
+	v, ok := <-r.values
+	return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[T]) finalize() {
+	close(r.done)
+}
diff --git a/internal/backport/go/parser/testdata/commas.src b/internal/backport/go/parser/testdata/commas.src
new file mode 100644
index 0000000..e0603cf
--- /dev/null
+++ b/internal/backport/go/parser/testdata/commas.src
@@ -0,0 +1,19 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for error messages/parser synchronization
+// after missing commas.
+
+package p
+
+var _ = []int{
+	0/* ERROR HERE "missing ','" */
+}
+
+var _ = []int{
+	0,
+	1,
+	2,
+	3/* ERROR HERE "missing ','" */
+}
diff --git a/internal/backport/go/parser/testdata/interface.go2 b/internal/backport/go/parser/testdata/interface.go2
new file mode 100644
index 0000000..2ed9339
--- /dev/null
+++ b/internal/backport/go/parser/testdata/interface.go2
@@ -0,0 +1,76 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for interfaces containing
+// constraint elements.
+
+package p
+
+type _ interface {
+	m()
+	~int
+	~int|string
+	E
+}
+
+type _ interface {
+	m()
+	~int
+	int | string
+	int | ~string
+	~int | ~string
+}
+
+type _ interface {
+	m()
+	~int
+	T[int, string] | string
+	int | ~T[string, struct{}]
+	~int | ~string
+}
+
+type _ interface {
+	int
+	[]byte
+	[10]int
+	struct{}
+	*int
+	func()
+	interface{}
+	map[string]int
+	chan T
+	chan<- T
+	<-chan T
+	T[int]
+}
+
+type _ interface {
+	int | string
+	[]byte | string
+	[10]int | string
+	struct{} | string
+	*int | string
+	func() | string
+	interface{} | string
+	map[string]int | string
+	chan T | string
+	chan<- T | string
+	<-chan T | string
+	T[int] | string
+}
+
+type _ interface {
+	~int | string
+	~[]byte | string
+	~[10]int | string
+	~struct{} | string
+	~*int | string
+	~func() | string
+	~interface{} | string
+	~map[string]int | string
+	~chan T | string
+	~chan<- T | string
+	~<-chan T | string
+	~T[int] | string
+}
diff --git a/internal/backport/go/parser/testdata/issue11377.src b/internal/backport/go/parser/testdata/issue11377.src
new file mode 100644
index 0000000..1c43800
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue11377.src
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 11377: Better synchronization of
+// parser after certain syntax errors.
+
+package p
+
+func bad1() {
+    if f()) /* ERROR "expected ';', found '\)'" */ {
+        return
+    }
+}
+
+// There shouldn't be any errors down below.
+
+func F1() {}
+func F2() {}
+func F3() {}
+func F4() {}
+func F5() {}
+func F6() {}
+func F7() {}
+func F8() {}
+func F9() {}
+func F10() {}
diff --git a/internal/backport/go/parser/testdata/issue23434.src b/internal/backport/go/parser/testdata/issue23434.src
new file mode 100644
index 0000000..24a0832
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue23434.src
@@ -0,0 +1,25 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 23434: Better synchronization of
+// parser after missing type. There should be exactly
+// one error each time, with no follow-on errors.
+
+package p
+
+func g() {
+	m := make(map[string]! /* ERROR "expected type, found '!'" */ )
+	for {
+		x := 1
+		print(x)
+	}
+}
+
+func f() {
+	m := make(map[string]) /* ERROR "expected type, found '\)'" */
+	for {
+		x := 1
+		print(x)
+	}
+}
diff --git a/internal/backport/go/parser/testdata/issue3106.src b/internal/backport/go/parser/testdata/issue3106.src
new file mode 100644
index 0000000..2db10be
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue3106.src
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 3106: Better synchronization of
+// parser after certain syntax errors.
+
+package main
+
+func f() {
+	var m Mutex
+	c := MakeCond(&m)
+	percent := 0
+	const step = 10
+	for i := 0; i < 5; i++ {
+		go func() {
+			for {
+				// Emulates some useful work.
+				time.Sleep(1e8)
+				m.Lock()
+				defer
+				if /* ERROR "expected ';', found 'if'" */ percent == 100 {
+					m.Unlock()
+					break
+				}
+				percent++
+				if percent % step == 0 {
+					//c.Signal()
+				}
+				m.Unlock()
+			}
+		}()
+	}
+	for {
+		m.Lock()
+		if percent == 0 || percent % step != 0 {
+			c.Wait()
+		}
+		fmt.Print(",")
+		if percent == 100 {
+			m.Unlock()
+			break
+		}
+		m.Unlock()
+	}
+}
diff --git a/internal/backport/go/parser/testdata/issue34946.src b/internal/backport/go/parser/testdata/issue34946.src
new file mode 100644
index 0000000..6bb15e1
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue34946.src
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 34946: Better synchronization of
+// parser for function declarations that start their
+// body's opening { on a new line.
+
+package p
+
+// accept Allman/BSD-style declaration but complain
+// (implicit semicolon between signature and body)
+func _() int
+{ /* ERROR "unexpected semicolon or newline before {" */
+	{ return 0 }
+}
+
+func _() {}
+
+func _(); { /* ERROR "unexpected semicolon or newline before {" */ }
+
+func _() {}
diff --git a/internal/backport/go/parser/testdata/issue42951/not_a_file.go/invalid.go b/internal/backport/go/parser/testdata/issue42951/not_a_file.go/invalid.go
new file mode 100644
index 0000000..bb698be
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue42951/not_a_file.go/invalid.go
@@ -0,0 +1 @@
+This file should not be parsed by ParseDir.
diff --git a/internal/backport/go/parser/testdata/issue44504.src b/internal/backport/go/parser/testdata/issue44504.src
new file mode 100644
index 0000000..7791f4a
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue44504.src
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 44504: panic due to duplicate resolution of slice/index
+// operands. We should not try to resolve a LHS expression with invalid syntax.
+
+package p
+
+func _() {
+  var items []bool
+  items[] /* ERROR "operand" */ = false
+}
diff --git a/internal/backport/go/parser/testdata/issue49174.go2 b/internal/backport/go/parser/testdata/issue49174.go2
new file mode 100644
index 0000000..77c1950
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue49174.go2
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[_ []int | int]() {}
+func _[_ int | []int]() {}
diff --git a/internal/backport/go/parser/testdata/issue49175.go2 b/internal/backport/go/parser/testdata/issue49175.go2
new file mode 100644
index 0000000..a5ad30f
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue49175.go2
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type _[_ []t]t
+type _[_ [1]t]t
+
+func _[_ []t]() {}
+func _[_ [1]t]() {}
+
+type t [t /* ERROR "all type parameters must be named" */ [0]]t
diff --git a/internal/backport/go/parser/testdata/issue49482.go2 b/internal/backport/go/parser/testdata/issue49482.go2
new file mode 100644
index 0000000..50de651
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue49482.go2
@@ -0,0 +1,35 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+        // these need a comma to disambiguate
+        _[P *T,] struct{}
+        _[P *T, _ any] struct{}
+        _[P (*T),] struct{}
+        _[P (*T), _ any] struct{}
+        _[P (T),] struct{}
+        _[P (T), _ any] struct{}
+
+        // these parse as name followed by type
+        _[P *struct{}] struct{}
+        _[P (*struct{})] struct{}
+        _[P ([]int)] struct{}
+
+        // array declarations
+        _ [P(T)]struct{}
+        _ [P((T))]struct{}
+        _ [P * *T]struct{}
+        _ [P * T]struct{}
+        _ [P(*T)]struct{}
+        _ [P(**T)]struct{}
+        _ [P * T - T]struct{}
+        _ [P*T-T, /* ERROR "unexpected comma" */ ]struct{}
+        _ [10, /* ERROR "unexpected comma" */ ]struct{}
+
+        // These should be parsed as generic type declarations.
+        _[P *struct /* ERROR "expected expression" */ {}|int] struct{}
+        _[P *struct /* ERROR "expected expression" */ {}|int|string] struct{}
+)
diff --git a/internal/backport/go/parser/testdata/issue50427.go2 b/internal/backport/go/parser/testdata/issue50427.go2
new file mode 100644
index 0000000..1521459
--- /dev/null
+++ b/internal/backport/go/parser/testdata/issue50427.go2
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T interface{ m[ /* ERROR "must have no type parameters" */ P any]() }
+
+func _(t T) {
+	var _ interface{ m[ /* ERROR "must have no type parameters" */ P any](); n() } = t
+}
+
+type S struct{}
+
+func (S) m[ /* ERROR "must have no type parameters" */ P any]() {}
+
+func _(s S) {
+	var _ interface{ m[ /* ERROR "must have no type parameters" */ P any](); n() } = s
+}
diff --git a/internal/backport/go/parser/testdata/linalg.go2 b/internal/backport/go/parser/testdata/linalg.go2
new file mode 100644
index 0000000..7ccb19c
--- /dev/null
+++ b/internal/backport/go/parser/testdata/linalg.go2
@@ -0,0 +1,83 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linalg
+
+import "math"
+
+// Numeric is a type bound that matches any numeric type.
+// It would likely be in a constraints package in the standard library.
+type Numeric interface {
+	~int|~int8|~int16|~int32|~int64|
+		~uint|~uint8|~uint16|~uint32|~uint64|~uintptr|
+		~float32|~float64|
+		~complex64|~complex128
+}
+
+func DotProduct[T Numeric](s1, s2 []T) T {
+	if len(s1) != len(s2) {
+		panic("DotProduct: slices of unequal length")
+	}
+	var r T
+	for i := range s1 {
+		r += s1[i] * s2[i]
+	}
+	return r
+}
+
+// NumericAbs matches numeric types with an Abs method.
+type NumericAbs[T any] interface {
+	Numeric
+
+	Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func AbsDifference[T NumericAbs](a, b T) T {
+	d := a - b
+	return d.Abs()
+}
+
+// OrderedNumeric is a type bound that matches numeric types that support the < operator.
+type OrderedNumeric interface {
+	~int|~int8|~int16|~int32|~int64|
+		~uint|~uint8|~uint16|~uint32|~uint64|~uintptr|
+		~float32|~float64
+}
+
+// Complex is a type bound that matches the two complex types, which do not have a < operator.
+type Complex interface {
+	~complex64|~complex128
+}
+
+// OrderedAbs is a helper type that defines an Abs method for
+// ordered numeric types.
+type OrderedAbs[T OrderedNumeric] T
+
+func (a OrderedAbs[T]) Abs() OrderedAbs[T] {
+	if a < 0 {
+		return -a
+	}
+	return a
+}
+
+// ComplexAbs is a helper type that defines an Abs method for
+// complex types.
+type ComplexAbs[T Complex] T
+
+func (a ComplexAbs[T]) Abs() ComplexAbs[T] {
+	r := float64(real(a))
+	i := float64(imag(a))
+	d := math.Sqrt(r * r + i * i)
+	return ComplexAbs[T](complex(d, 0))
+}
+
+func OrderedAbsDifference[T OrderedNumeric](a, b T) T {
+	return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b)))
+}
+
+func ComplexAbsDifference[T Complex](a, b T) T {
+	return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b)))
+}
diff --git a/internal/backport/go/parser/testdata/map.go2 b/internal/backport/go/parser/testdata/map.go2
new file mode 100644
index 0000000..74c79ae
--- /dev/null
+++ b/internal/backport/go/parser/testdata/map.go2
@@ -0,0 +1,109 @@
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+import "chans"
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+	root    *node[K, V]
+	compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+	key         K
+	val         V
+	left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+        return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+	pn := &m.root
+	for *pn != nil {
+		switch cmp := m.compare(key, (*pn).key); {
+		case cmp < 0:
+			pn = &(*pn).left
+		case cmp > 0:
+			pn = &(*pn).right
+		default:
+			return pn
+		}
+	}
+	return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+	pn := m.find(key)
+	if *pn != nil {
+		(*pn).val = val
+		return false
+	}
+        *pn = &node[K, V]{key: key, val: val}
+	return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+	pn := m.find(key)
+	if *pn == nil {
+		var zero V // see the discussion of zero values, above
+		return zero, false
+	}
+	return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+	key K
+	val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+	sender, receiver := chans.Ranger[keyValue[K, V]]()
+	var f func(*node[K, V]) bool
+	f = func(n *node[K, V]) bool {
+		if n == nil {
+			return true
+		}
+		// Stop sending values if sender.Send returns false,
+		// meaning that nothing is listening at the receiver end.
+		return f(n.left) &&
+                        // TODO
+			// sender.Send(keyValue[K, V]{n.key, n.val}) &&
+			f(n.right)
+	}
+	go func() {
+		f(m.root)
+		sender.Close()
+	}()
+	return &Iterator{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+	r *chans.Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+	keyval, ok := it.r.Next()
+	if !ok {
+		var zerok K
+		var zerov V
+		return zerok, zerov, false
+	}
+	return keyval.key, keyval.val, true
+}
diff --git a/internal/backport/go/parser/testdata/metrics.go2 b/internal/backport/go/parser/testdata/metrics.go2
new file mode 100644
index 0000000..ef1c66b
--- /dev/null
+++ b/internal/backport/go/parser/testdata/metrics.go2
@@ -0,0 +1,58 @@
+package metrics
+
+import "sync"
+
+type Metric1[T comparable] struct {
+	mu sync.Mutex
+	m  map[T]int
+}
+
+func (m *Metric1[T]) Add(v T) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.m == nil {
+		m.m = make(map[T]int)
+	}
+	m[v]++
+}
+
+type key2[T1, T2 comparable] struct {
+	f1 T1
+	f2 T2
+}
+
+type Metric2[T1, T2 cmp2] struct {
+	mu sync.Mutex
+	m  map[key2[T1, T2]]int
+}
+
+func (m *Metric2[T1, T2]) Add(v1 T1, v2 T2) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.m == nil {
+		m.m = make(map[key2[T1, T2]]int)
+	}
+	m[key[T1, T2]{v1, v2}]++
+}
+
+type key3[T1, T2, T3 comparable] struct {
+	f1 T1
+	f2 T2
+	f3 T3
+}
+
+type Metric3[T1, T2, T3 comparable] struct {
+	mu sync.Mutex
+	m  map[key3[T1, T2, T3]]int
+}
+
+func (m *Metric3[T1, T2, T3]) Add(v1 T1, v2 T2, v3 T3) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.m == nil {
+		m.m = make(map[key3]int)
+	}
+	m[key[T1, T2, T3]{v1, v2, v3}]++
+}
+
+// Repeat for the maximum number of permitted arguments.
diff --git a/internal/backport/go/parser/testdata/resolution/issue45136.src b/internal/backport/go/parser/testdata/resolution/issue45136.src
new file mode 100644
index 0000000..e1d63d8
--- /dev/null
+++ b/internal/backport/go/parser/testdata/resolution/issue45136.src
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue45136
+
+type obj /* =@obj */ struct {
+	name /*=@name */ string
+}
+
+func _() {
+	var foo /* =@foo */ = "foo"
+	obj /* @obj */ ["foo"]
+	obj /* @obj */ .run()
+	obj /* @obj */ {
+		name: foo /* @foo */,
+	}
+	obj /* @obj */ {
+		name: "bar",
+	}.run()
+
+	var _ = File{key: obj /* @obj */ {}}
+	var _ = File{obj /* @obj */ {}}
+
+	[]obj /* @obj */ {foo /* @foo */}
+	x /* =@x1 */ := obj /* @obj */{}
+}
diff --git a/internal/backport/go/parser/testdata/resolution/issue45160.src b/internal/backport/go/parser/testdata/resolution/issue45160.src
new file mode 100644
index 0000000..6be933b
--- /dev/null
+++ b/internal/backport/go/parser/testdata/resolution/issue45160.src
@@ -0,0 +1,25 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue45160
+
+func mklink1 /* =@mklink1func */() {}
+
+func _() {
+	var tests /* =@tests */ = []dirLinkTest /* @dirLinkTest */ {
+		{
+			mklink1 /* @mklink1func */: func() {},
+			mklink2: func(link /* =@link */, target /* =@target */ string) error {
+				return nil
+			},
+		},
+	}
+}
+
+type dirLinkTest /* =@dirLinkTest */ struct {
+	mklink1 /* =@mklink1field */ func(string, string) error
+	mklink2 /* =@mklink2field */ func(string, string) error
+}
+
+func mklink2 /* =@mklink2func */() {}
diff --git a/internal/backport/go/parser/testdata/resolution/resolution.src b/internal/backport/go/parser/testdata/resolution/resolution.src
new file mode 100644
index 0000000..a880dd1
--- /dev/null
+++ b/internal/backport/go/parser/testdata/resolution/resolution.src
@@ -0,0 +1,63 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package resolution
+
+func f /* =@fdecl */(n /* =@narg */ ast.Node) bool {
+		if n /* =@ninit */, ok /* =@ok */ := n /* @narg */ .(*ast.SelectorExpr); ok /* @ok */ {
+			sel = n /* @ninit */
+	}
+}
+
+type c /* =@cdecl */ map[token.Pos]resolvedObj
+
+func (v /* =@vdecl */ c /* @cdecl */) Visit(node /* =@nodearg */ ast.Node) (w /* =@w */ ast.Visitor) {}
+
+const (
+	basic /* =@basic */ = iota
+	labelOk // =@labelOk
+)
+
+type T /* =@T */ int
+
+func _(count /* =@count */ T /* @T */) {
+	x /* =@x1 */ := c /* @cdecl */{}
+	switch x /* =@x2 */ := x /* @x1 */; x /* =@x3 */ := x /* @x2 */.(type) {
+	case c /* @cdecl */:
+	default:
+	}
+loop /* =@loop */:
+	for {
+		if true {
+			break loop /* @loop */
+		}
+	}
+	select {
+	case err /* =@err1 */ := <-_:
+		return err /* @err1 */
+	case err /* =@err2 */ := <-_:
+		return err /* @err2 */
+	}
+
+	_ = func(p1 /* =@p1 */ int, p2 /* =@p2 */ p1) {
+		closed /* =@closed */ := p1 // @p1
+		shadowed /* =@shadowed1 */ := p2 // @p2
+		_ = func(shadowed /* =@shadowed2 */ p2 /* @p2 */) {
+			closed /* @closed */ = 1
+			shadowed /* @shadowed2 */ = 2
+		}
+	}
+}
+
+func (r /* =@r */ c /* @cdecl */) m(_ r) c /* @cdecl */ { return r /* @r */ }
+
+var cycle /* =@cycle */ = cycle /* @cycle */ + 1
+
+type chain /* =@chain */ struct {
+	next /* =@next */ *chain /* @chain */
+}
+
+func recursive /* =@recursive */() {
+	recursive /* @recursive */ ()
+}
diff --git a/internal/backport/go/parser/testdata/resolution/typeparams.go2 b/internal/backport/go/parser/testdata/resolution/typeparams.go2
new file mode 100644
index 0000000..7395ca2
--- /dev/null
+++ b/internal/backport/go/parser/testdata/resolution/typeparams.go2
@@ -0,0 +1,51 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package resolution
+
+type List /* =@List */ [E /* =@E */ any] []E // @E
+
+type Pair /* =@Pair */ [L /* =@L */, R /* =@R */ any] struct {
+	Left /* =@Left */ L // @L
+	Right /* =@Right */ R // @R
+	L /* =@Lfield */ int
+}
+
+var _ = Pair /* @Pair */ [int, string]{}
+
+type Addable /* =@Addable */ interface {
+	~int64|~float64
+}
+
+func Add /* =@AddDecl */[T /* =@T */ Addable /* @Addable */](l /* =@l */, r /* =@r */ T /* @T */) T /* @T */ {
+	var t /* =@t */ T /* @T */
+	return l /* @l */ + r /* @r */ + t /* @t */
+}
+
+type Receiver /* =@Receiver */[P /* =@P */ any] struct {}
+
+type RP /* =@RP1 */ struct{}
+
+// TODO(rFindley): make a decision on how/whether to resolve identifiers that
+// refer to receiver type parameters, as is the case for the 'P' result
+// parameter below.
+//
+// For now, we ensure that types are not incorrectly resolved when receiver
+// type parameters are in scope.
+func (r /* =@recv */ Receiver /* @Receiver */ [RP]) m(RP) RP {}
+
+func f /* =@f */[T1 /* =@T1 */ interface{~[]T2 /* @T2 */}, T2 /* =@T2 */ any](
+  x /* =@x */ T1 /* @T1 */, T1 /* =@T1_duplicate */ y,  // Note that this is a bug:
+                                                        // the duplicate T1 should
+							// not be allowed.
+  ){
+  // Note that duplicate short var declarations resolve to their alt declaration.
+  x /* @x */ := 0
+  y /* =@y */ := 0
+  T1 /* @T1 */ := 0
+  var t1var /* =@t1var */ T1 /* @T1 */
+}
+
+// From issue #39634
+func(*ph1[e, e])h(d)
diff --git a/internal/backport/go/parser/testdata/set.go2 b/internal/backport/go/parser/testdata/set.go2
new file mode 100644
index 0000000..0da6377
--- /dev/null
+++ b/internal/backport/go/parser/testdata/set.go2
@@ -0,0 +1,31 @@
+// Package set implements sets of any type.
+package set
+
+type Set[Elem comparable] map[Elem]struct{}
+
+func Make[Elem comparable]() Set[Elem] {
+	return make(Set(Elem))
+}
+
+func (s Set[Elem]) Add(v Elem) {
+	s[v] = struct{}{}
+}
+
+func (s Set[Elem]) Delete(v Elem) {
+	delete(s, v)
+}
+
+func (s Set[Elem]) Contains(v Elem) bool {
+	_, ok := s[v]
+	return ok
+}
+
+func (s Set[Elem]) Len() int {
+	return len(s)
+}
+
+func (s Set[Elem]) Iterate(f func(Elem)) {
+	for v := range s {
+		f(v)
+	}
+}
diff --git a/internal/backport/go/parser/testdata/slices.go2 b/internal/backport/go/parser/testdata/slices.go2
new file mode 100644
index 0000000..e060212
--- /dev/null
+++ b/internal/backport/go/parser/testdata/slices.go2
@@ -0,0 +1,31 @@
+// Package slices implements various slice algorithms.
+package slices
+
+// Map turns a []T1 to a []T2 using a mapping function.
+func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
+	r := make([]T2, len(s))
+	for i, v := range s {
+		r[i] = f(v)
+	}
+	return r
+}
+
+// Reduce reduces a []T1 to a single value using a reduction function.
+func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
+	r := initializer
+	for _, v := range s {
+		r = f(r, v)
+	}
+	return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[T any](s []T, f func(T) bool) []T {
+	var r []T
+	for _, v := range s {
+		if f(v) {
+			r = append(r, v)
+		}
+	}
+	return r
+}
diff --git a/internal/backport/go/parser/testdata/sort.go2 b/internal/backport/go/parser/testdata/sort.go2
new file mode 100644
index 0000000..88be79f
--- /dev/null
+++ b/internal/backport/go/parser/testdata/sort.go2
@@ -0,0 +1,27 @@
+package sort
+
+type orderedSlice[Elem comparable] []Elem
+
+func (s orderedSlice[Elem]) Len() int           { return len(s) }
+func (s orderedSlice[Elem]) Less(i, j int) bool { return s[i] < s[j] }
+func (s orderedSlice[Elem]) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// OrderedSlice sorts the slice s in ascending order.
+// The elements of s must be ordered using the < operator.
+func OrderedSlice[Elem comparable](s []Elem) {
+	sort.Sort(orderedSlice[Elem](s))
+}
+
+type sliceFn[Elem any] struct {
+	s []Elem
+	f func(Elem, Elem) bool
+}
+
+func (s sliceFn[Elem]) Len() int           { return len(s.s) }
+func (s sliceFn[Elem]) Less(i, j int) bool { return s.f(s.s[i], s.s[j]) }
+func (s sliceFn[Elem]) Swap(i, j int)      { s.s[i], s.s[j] = s.s[j], s.s[i] }
+
+// SliceFn sorts the slice s according to the function f.
+func SliceFn[Elem any](s []Elem, f func(Elem, Elem) bool) {
+	Sort(sliceFn[Elem]{s, f})
+}
diff --git a/internal/backport/go/parser/testdata/typeparams.src b/internal/backport/go/parser/testdata/typeparams.src
new file mode 100644
index 0000000..479cb96
--- /dev/null
+++ b/internal/backport/go/parser/testdata/typeparams.src
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for error messages produced while parsing code that uses type
+// parameters, without ParseTypeParams being enabled.
+
+package p
+
+type List[E any /* ERROR "expected ']', found any" */ ] []E
+
+type Pair[L, /* ERROR "unexpected comma" */ R any] struct {
+	Left L
+	Right R
+}
+
+var _ = Pair[int, /* ERROR "expected ']' or ':', found ','" */ string]{}
diff --git a/internal/backport/go/parser/testdata/typeset.go2 b/internal/backport/go/parser/testdata/typeset.go2
new file mode 100644
index 0000000..aa18e8c
--- /dev/null
+++ b/internal/backport/go/parser/testdata/typeset.go2
@@ -0,0 +1,75 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for typeset-only constraint elements.
+// TODO(gri) gofmt once/if gofmt supports this notation.
+
+package p
+
+type (
+        _[_ t] t
+        _[_ ~t] t
+        _[_ t|t] t
+        _[_ ~t|t] t
+        _[_ t|~t] t
+        _[_ ~t|~t] t
+
+        _[_ t, _, _ t|t] t
+        _[_ t, _, _ ~t|t] t
+        _[_ t, _, _ t|~t] t
+        _[_ t, _, _ ~t|~t] t
+
+        _[_ t.t] t
+        _[_ ~t.t] t
+        _[_ t.t|t.t] t
+        _[_ ~t.t|t.t] t
+        _[_ t.t|~t.t] t
+        _[_ ~t.t|~t.t] t
+
+        _[_ t, _, _ t.t|t.t] t
+        _[_ t, _, _ ~t.t|t.t] t
+        _[_ t, _, _ t.t|~t.t] t
+        _[_ t, _, _ ~t.t|~t.t] t
+
+        _[_ struct{}] t
+        _[_ ~struct{}] t
+
+        _[_ struct{}|t] t
+        _[_ ~struct{}|t] t
+        _[_ struct{}|~t] t
+        _[_ ~struct{}|~t] t
+
+        _[_ t|struct{}] t
+        _[_ ~t|struct{}] t
+        _[_ t|~struct{}] t
+        _[_ ~t|~struct{}] t
+)
+
+// Single-expression type parameter lists and those that don't start
+// with a (type parameter) name are considered array sizes.
+// The term must be a valid expression (it could be a type - and then
+// a type-checker will complain - but we don't allow ~ in the expr).
+// TODO(rfindley): Improve error recover here. In these cases go/parser error
+// recovery is worse than cmd/compile/internal/syntax, and unnecessary type
+// declarations had to be inserted to force synchronization.
+type _[t] t
+type _[~ /* ERROR "expected operand" */ t] t
+type /* ERROR "expected ']'" */ Sync int  // placeholder to synchronize the parser
+type _[t|t] t
+type _[~ /* ERROR "expected operand" */ t|t] t
+type /* ERROR "expected ']'" */ Sync int  // placeholder to synchronize the parser
+type _[t| ~ /* ERROR "expected operand" */ t] t
+type /* ERROR "expected ']'" */ Sync int  // placeholder to synchronize the parser
+type _[~ /* ERROR "expected operand" */ t|~t] t
+type /* ERROR "expected ']'" */ Sync int  // placeholder to synchronize the parser
+
+type _[_ t, t /* ERROR "type parameters must be named" */ ] t
+type _[_ ~t, t /* ERROR "type parameters must be named" */ ] t
+type _[_ t, ~ /* ERROR "type parameters must be named" */ t] t
+type _[_ ~t, ~ /* ERROR "type parameters must be named" */ t] t
+
+type _[_ t|t, t /* ERROR "type parameters must be named" */ |t] t
+type _[_ ~t|t, t /* ERROR "type parameters must be named" */ |t] t
+type _[_ t|t, ~ /* ERROR "type parameters must be named" */ t|t] t
+type _[_ ~t|t, ~ /* ERROR "type parameters must be named" */ t|t] t
diff --git a/internal/backport/go/printer/example_test.go b/internal/backport/go/printer/example_test.go
new file mode 100644
index 0000000..0df644d
--- /dev/null
+++ b/internal/backport/go/printer/example_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer_test
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/printer"
+	"golang.org/x/website/internal/backport/go/token"
+	"strings"
+	"testing"
+)
+
+// Dummy test function so that godoc does not use the entire file as example.
+func Test(*testing.T) {}
+
+func parseFunc(filename, functionname string) (fun *ast.FuncDecl, fset *token.FileSet) {
+	fset = token.NewFileSet()
+	if file, err := parser.ParseFile(fset, filename, nil, 0); err == nil {
+		for _, d := range file.Decls {
+			if f, ok := d.(*ast.FuncDecl); ok && f.Name.Name == functionname {
+				fun = f
+				return
+			}
+		}
+	}
+	panic("function not found")
+}
+
+func ExampleFprint() {
+	// Parse source file and extract the AST without comments for
+	// this function, with position information referring to the
+	// file set fset.
+	funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
+
+	// Print the function body into buffer buf.
+	// The file set is provided to the printer so that it knows
+	// about the original source formatting and can add additional
+	// line breaks where they were present in the source.
+	var buf bytes.Buffer
+	printer.Fprint(&buf, fset, funcAST.Body)
+
+	// Remove braces {} enclosing the function body, unindent,
+	// and trim leading and trailing white space.
+	s := buf.String()
+	s = s[1 : len(s)-1]
+	s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
+
+	// Print the cleaned-up body text to stdout.
+	fmt.Println(s)
+
+	// output:
+	// funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
+	//
+	// var buf bytes.Buffer
+	// printer.Fprint(&buf, fset, funcAST.Body)
+	//
+	// s := buf.String()
+	// s = s[1 : len(s)-1]
+	// s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
+	//
+	// fmt.Println(s)
+}
diff --git a/internal/backport/go/printer/gobuild.go b/internal/backport/go/printer/gobuild.go
new file mode 100644
index 0000000..f00492d
--- /dev/null
+++ b/internal/backport/go/printer/gobuild.go
@@ -0,0 +1,170 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer
+
+import (
+	"golang.org/x/website/internal/backport/go/build/constraint"
+	"sort"
+	"text/tabwriter"
+)
+
+func (p *printer) fixGoBuildLines() {
+	if len(p.goBuild)+len(p.plusBuild) == 0 {
+		return
+	}
+
+	// Find latest possible placement of //go:build and // +build comments.
+	// That's just after the last blank line before we find a non-comment.
+	// (We'll add another blank line after our comment block.)
+	// When we start dropping // +build comments, we can skip over /* */ comments too.
+	// Note that we are processing tabwriter input, so every comment
+	// begins and ends with a tabwriter.Escape byte.
+	// And some newlines have turned into \f bytes.
+	insert := 0
+	for pos := 0; ; {
+		// Skip leading space at beginning of line.
+		blank := true
+		for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') {
+			pos++
+		}
+		// Skip over // comment if any.
+		if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' {
+			blank = false
+			for pos < len(p.output) && !isNL(p.output[pos]) {
+				pos++
+			}
+		}
+		// Skip over \n at end of line.
+		if pos >= len(p.output) || !isNL(p.output[pos]) {
+			break
+		}
+		pos++
+
+		if blank {
+			insert = pos
+		}
+	}
+
+	// If there is a //go:build comment before the place we identified,
+	// use that point instead. (Earlier in the file is always fine.)
+	if len(p.goBuild) > 0 && p.goBuild[0] < insert {
+		insert = p.goBuild[0]
+	} else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert {
+		insert = p.plusBuild[0]
+	}
+
+	var x constraint.Expr
+	switch len(p.goBuild) {
+	case 0:
+		// Synthesize //go:build expression from // +build lines.
+		for _, pos := range p.plusBuild {
+			y, err := constraint.Parse(p.commentTextAt(pos))
+			if err != nil {
+				x = nil
+				break
+			}
+			if x == nil {
+				x = y
+			} else {
+				x = &constraint.AndExpr{X: x, Y: y}
+			}
+		}
+	case 1:
+		// Parse //go:build expression.
+		x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0]))
+	}
+
+	var block []byte
+	if x == nil {
+		// Don't have a valid //go:build expression to treat as truth.
+		// Bring all the lines together but leave them alone.
+		// Note that these are already tabwriter-escaped.
+		for _, pos := range p.goBuild {
+			block = append(block, p.lineAt(pos)...)
+		}
+		for _, pos := range p.plusBuild {
+			block = append(block, p.lineAt(pos)...)
+		}
+	} else {
+		block = append(block, tabwriter.Escape)
+		block = append(block, "//go:build "...)
+		block = append(block, x.String()...)
+		block = append(block, tabwriter.Escape, '\n')
+		if len(p.plusBuild) > 0 {
+			lines, err := constraint.PlusBuildLines(x)
+			if err != nil {
+				lines = []string{"// +build error: " + err.Error()}
+			}
+			for _, line := range lines {
+				block = append(block, tabwriter.Escape)
+				block = append(block, line...)
+				block = append(block, tabwriter.Escape, '\n')
+			}
+		}
+	}
+	block = append(block, '\n')
+
+	// Build sorted list of lines to delete from remainder of output.
+	toDelete := append(p.goBuild, p.plusBuild...)
+	sort.Ints(toDelete)
+
+	// Collect output after insertion point, with lines deleted, into after.
+	var after []byte
+	start := insert
+	for _, end := range toDelete {
+		if end < start {
+			continue
+		}
+		after = appendLines(after, p.output[start:end])
+		start = end + len(p.lineAt(end))
+	}
+	after = appendLines(after, p.output[start:])
+	if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) {
+		after = after[:n-1]
+	}
+
+	p.output = p.output[:insert]
+	p.output = append(p.output, block...)
+	p.output = append(p.output, after...)
+}
+
+// appendLines is like append(x, y...)
+// but it avoids creating doubled blank lines,
+// which would not be gofmt-standard output.
+// It assumes that only whole blocks of lines are being appended,
+// not line fragments.
+func appendLines(x, y []byte) []byte {
+	if len(y) > 0 && isNL(y[0]) && // y starts in blank line
+		(len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line
+		y = y[1:] // delete y's leading blank line
+	}
+	return append(x, y...)
+}
+
+func (p *printer) lineAt(start int) []byte {
+	pos := start
+	for pos < len(p.output) && !isNL(p.output[pos]) {
+		pos++
+	}
+	if pos < len(p.output) {
+		pos++
+	}
+	return p.output[start:pos]
+}
+
+func (p *printer) commentTextAt(start int) string {
+	if start < len(p.output) && p.output[start] == tabwriter.Escape {
+		start++
+	}
+	pos := start
+	for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) {
+		pos++
+	}
+	return string(p.output[start:pos])
+}
+
+func isNL(b byte) bool {
+	return b == '\n' || b == '\f'
+}
diff --git a/internal/backport/go/printer/nodes.go b/internal/backport/go/printer/nodes.go
new file mode 100644
index 0000000..82b6aed
--- /dev/null
+++ b/internal/backport/go/printer/nodes.go
@@ -0,0 +1,1904 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of AST nodes; specifically
+// expressions, statements, declarations, and files. It uses
+// the print functionality implemented in printer.go.
+
+package printer
+
+import (
+	"bytes"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Formatting issues:
+// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration)
+//   when the comment spans multiple lines; if such a comment is just two lines, formatting is
+//   not idempotent
+// - formatting of expression lists
+// - should use blank instead of tab to separate one-line function bodies from
+//   the function header unless there is a group of consecutive one-liners
+
+// ----------------------------------------------------------------------------
+// Common AST nodes.
+
+// Print as many newlines as necessary (but at least min newlines) to get to
+// the current line. ws is printed before the first line break. If newSection
+// is set, the first line break is printed as formfeed. Returns 0 if no line
+// breaks were printed, returns 1 if there was exactly one newline printed,
+// and returns a value > 1 if there was a formfeed or more than one newline
+// printed.
+//
+// TODO(gri): linebreak may add too many lines if the next statement at "line"
+//
+//	is preceded by comments because the computation of n assumes
+//	the current position before the comment and the target position
+//	after the comment. Thus, after interspersing such comments, the
+//	space taken up by them is not considered to reduce the number of
+//	linebreaks. At the moment there is no easy way to know about
+//	future (not yet interspersed) comments in this function.
+func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) {
+	n := nlimit(line - p.pos.Line)
+	if n < min {
+		n = min
+	}
+	if n > 0 {
+		p.print(ws)
+		if newSection {
+			p.print(formfeed)
+			n--
+			nbreaks = 2
+		}
+		nbreaks += n
+		for ; n > 0; n-- {
+			p.print(newline)
+		}
+	}
+	return
+}
+
+// setComment sets g as the next comment if g != nil and if node comments
+// are enabled - this mode is used when printing source code fragments such
+// as exports only. It assumes that there is no pending comment in p.comments
+// and at most one pending comment in the p.comment cache.
+func (p *printer) setComment(g *ast.CommentGroup) {
+	if g == nil || !p.useNodeComments {
+		return
+	}
+	if p.comments == nil {
+		// initialize p.comments lazily
+		p.comments = make([]*ast.CommentGroup, 1)
+	} else if p.cindex < len(p.comments) {
+		// for some reason there are pending comments; this
+		// should never happen - handle gracefully and flush
+		// all comments up to g, ignore anything after that
+		p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL)
+		p.comments = p.comments[0:1]
+		// in debug mode, report error
+		p.internalError("setComment found pending comments")
+	}
+	p.comments[0] = g
+	p.cindex = 0
+	// don't overwrite any pending comment in the p.comment cache
+	// (there may be a pending comment when a line comment is
+	// immediately followed by a lead comment with no other
+	// tokens between)
+	if p.commentOffset == infinity {
+		p.nextComment() // get comment ready for use
+	}
+}
+
+type exprListMode uint
+
+const (
+	commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma
+	noIndent                           // no extra indentation in multi-line lists
+)
+
+// If indent is set, a multi-line identifier list is indented after the
+// first linebreak encountered.
+func (p *printer) identList(list []*ast.Ident, indent bool) {
+	// convert into an expression list so we can re-use exprList formatting
+	xlist := make([]ast.Expr, len(list))
+	for i, x := range list {
+		xlist[i] = x
+	}
+	var mode exprListMode
+	if !indent {
+		mode = noIndent
+	}
+	p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false)
+}
+
+const filteredMsg = "contains filtered or unexported fields"
+
+// Print a list of expressions. If the list spans multiple
+// source lines, the original line breaks are respected between
+// expressions.
+//
+// TODO(gri) Consider rewriting this to be independent of []ast.Expr
+//
+//	so that we can use the algorithm for any kind of list
+//	(e.g., pass list via a channel over which to range).
+func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) {
+	if len(list) == 0 {
+		if isIncomplete {
+			prev := p.posFor(prev0)
+			next := p.posFor(next0)
+			if prev.IsValid() && prev.Line == next.Line {
+				p.print("/* " + filteredMsg + " */")
+			} else {
+				p.print(newline)
+				p.print(indent, "// "+filteredMsg, unindent, newline)
+			}
+		}
+		return
+	}
+
+	prev := p.posFor(prev0)
+	next := p.posFor(next0)
+	line := p.lineFor(list[0].Pos())
+	endLine := p.lineFor(list[len(list)-1].End())
+
+	if prev.IsValid() && prev.Line == line && line == endLine {
+		// all list entries on a single line
+		for i, x := range list {
+			if i > 0 {
+				// use position of expression following the comma as
+				// comma position for correct comment placement
+				p.print(x.Pos(), token.COMMA, blank)
+			}
+			p.expr0(x, depth)
+		}
+		if isIncomplete {
+			p.print(token.COMMA, blank, "/* "+filteredMsg+" */")
+		}
+		return
+	}
+
+	// list entries span multiple lines;
+	// use source code positions to guide line breaks
+
+	// Don't add extra indentation if noIndent is set;
+	// i.e., pretend that the first line is already indented.
+	ws := ignore
+	if mode&noIndent == 0 {
+		ws = indent
+	}
+
+	// The first linebreak is always a formfeed since this section must not
+	// depend on any previous formatting.
+	prevBreak := -1 // index of last expression that was followed by a linebreak
+	if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 {
+		ws = ignore
+		prevBreak = 0
+	}
+
+	// initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line
+	size := 0
+
+	// We use the ratio between the geometric mean of the previous key sizes and
+	// the current size to determine if there should be a break in the alignment.
+	// To compute the geometric mean we accumulate the ln(size) values (lnsum)
+	// and the number of sizes included (count).
+	lnsum := 0.0
+	count := 0
+
+	// print all list elements
+	prevLine := prev.Line
+	for i, x := range list {
+		line = p.lineFor(x.Pos())
+
+		// Determine if the next linebreak, if any, needs to use formfeed:
+		// in general, use the entire node size to make the decision; for
+		// key:value expressions, use the key size.
+		// TODO(gri) for a better result, should probably incorporate both
+		//           the key and the node size into the decision process
+		useFF := true
+
+		// Determine element size: All bets are off if we don't have
+		// position information for the previous and next token (likely
+		// generated code - simply ignore the size in this case by setting
+		// it to 0).
+		prevSize := size
+		const infinity = 1e6 // larger than any source line
+		size = p.nodeSize(x, infinity)
+		pair, isPair := x.(*ast.KeyValueExpr)
+		if size <= infinity && prev.IsValid() && next.IsValid() {
+			// x fits on a single line
+			if isPair {
+				size = p.nodeSize(pair.Key, infinity) // size <= infinity
+			}
+		} else {
+			// size too large or we don't have good layout information
+			size = 0
+		}
+
+		// If the previous line and the current line had single-
+		// line-expressions and the key sizes are small or the
+		// ratio between the current key and the geometric mean
+		// of the previous key sizes does not exceed a threshold,
+		// align columns and do not use formfeed.
+		if prevSize > 0 && size > 0 {
+			const smallSize = 40
+			if count == 0 || prevSize <= smallSize && size <= smallSize {
+				useFF = false
+			} else {
+				const r = 2.5                               // threshold
+				geomean := math.Exp(lnsum / float64(count)) // count > 0
+				ratio := float64(size) / geomean
+				useFF = r*ratio <= 1 || r <= ratio
+			}
+		}
+
+		needsLinebreak := 0 < prevLine && prevLine < line
+		if i > 0 {
+			// Use position of expression following the comma as
+			// comma position for correct comment placement, but
+			// only if the expression is on the same line.
+			if !needsLinebreak {
+				p.print(x.Pos())
+			}
+			p.print(token.COMMA)
+			needsBlank := true
+			if needsLinebreak {
+				// Lines are broken using newlines so comments remain aligned
+				// unless useFF is set or there are multiple expressions on
+				// the same line in which case formfeed is used.
+				nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i)
+				if nbreaks > 0 {
+					ws = ignore
+					prevBreak = i
+					needsBlank = false // we got a line break instead
+				}
+				// If there was a new section or more than one new line
+				// (which means that the tabwriter will implicitly break
+				// the section), reset the geomean variables since we are
+				// starting a new group of elements with the next element.
+				if nbreaks > 1 {
+					lnsum = 0
+					count = 0
+				}
+			}
+			if needsBlank {
+				p.print(blank)
+			}
+		}
+
+		if len(list) > 1 && isPair && size > 0 && needsLinebreak {
+			// We have a key:value expression that fits onto one line
+			// and it's not on the same line as the prior expression:
+			// Use a column for the key such that consecutive entries
+			// can align if possible.
+			// (needsLinebreak is set if we started a new line before)
+			p.expr(pair.Key)
+			p.print(pair.Colon, token.COLON, vtab)
+			p.expr(pair.Value)
+		} else {
+			p.expr0(x, depth)
+		}
+
+		if size > 0 {
+			lnsum += math.Log(float64(size))
+			count++
+		}
+
+		prevLine = line
+	}
+
+	if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line {
+		// Print a terminating comma if the next token is on a new line.
+		p.print(token.COMMA)
+		if isIncomplete {
+			p.print(newline)
+			p.print("// " + filteredMsg)
+		}
+		if ws == ignore && mode&noIndent == 0 {
+			// unindent if we indented
+			p.print(unindent)
+		}
+		p.print(formfeed) // terminating comma needs a line break to look good
+		return
+	}
+
+	if isIncomplete {
+		p.print(token.COMMA, newline)
+		p.print("// "+filteredMsg, newline)
+	}
+
+	if ws == ignore && mode&noIndent == 0 {
+		// unindent if we indented
+		p.print(unindent)
+	}
+}
+
+type paramMode int
+
+const (
+	funcParam paramMode = iota
+	funcTParam
+	typeTParam
+)
+
+func (p *printer) parameters(fields *ast.FieldList, mode paramMode) {
+	openTok, closeTok := token.LPAREN, token.RPAREN
+	if mode != funcParam {
+		openTok, closeTok = token.LBRACK, token.RBRACK
+	}
+	p.print(fields.Opening, openTok)
+	if len(fields.List) > 0 {
+		prevLine := p.lineFor(fields.Opening)
+		ws := indent
+		for i, par := range fields.List {
+			// determine par begin and end line (may be different
+			// if there are multiple parameter names for this par
+			// or the type is on a separate line)
+			parLineBeg := p.lineFor(par.Pos())
+			parLineEnd := p.lineFor(par.End())
+			// separating "," if needed
+			needsLinebreak := 0 < prevLine && prevLine < parLineBeg
+			if i > 0 {
+				// use position of parameter following the comma as
+				// comma position for correct comma placement, but
+				// only if the next parameter is on the same line
+				if !needsLinebreak {
+					p.print(par.Pos())
+				}
+				p.print(token.COMMA)
+			}
+			// separator if needed (linebreak or blank)
+			if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 {
+				// break line if the opening "(" or previous parameter ended on a different line
+				ws = ignore
+			} else if i > 0 {
+				p.print(blank)
+			}
+			// parameter names
+			if len(par.Names) > 0 {
+				// Very subtle: If we indented before (ws == ignore), identList
+				// won't indent again. If we didn't (ws == indent), identList will
+				// indent if the identList spans multiple lines, and it will outdent
+				// again at the end (and still ws == indent). Thus, a subsequent indent
+				// by a linebreak call after a type, or in the next multi-line identList
+				// will do the right thing.
+				p.identList(par.Names, ws == indent)
+				p.print(blank)
+			}
+			// parameter type
+			p.expr(stripParensAlways(par.Type))
+			prevLine = parLineEnd
+		}
+
+		// if the closing ")" is on a separate line from the last parameter,
+		// print an additional "," and line break
+		if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
+			p.print(token.COMMA)
+			p.linebreak(closing, 0, ignore, true)
+		} else if mode == typeTParam && fields.NumFields() == 1 {
+			// Otherwise, if we are in a type parameter list that could be confused
+			// with the constant array length expression [P*C], print a comma so that
+			// parsing is unambiguous.
+			//
+			// Note that while ParenExprs can also be ambiguous (issue #49482), the
+			// printed type is never parenthesized (stripParensAlways is used above).
+			if t, _ := fields.List[0].Type.(*ast.StarExpr); t != nil && !isTypeLit(t.X) {
+				p.print(token.COMMA)
+			}
+		}
+
+		// unindent if we indented
+		if ws == ignore {
+			p.print(unindent)
+		}
+	}
+
+	p.print(fields.Closing, closeTok)
+}
+
+// isTypeLit reports whether x is a (possibly parenthesized) type literal.
+func isTypeLit(x ast.Expr) bool {
+	switch x := x.(type) {
+	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
+		return true
+	case *ast.StarExpr:
+		// *T may be a pointer dereference.
+		// Only consider *T as type literal if T is a type literal.
+		return isTypeLit(x.X)
+	case *ast.ParenExpr:
+		return isTypeLit(x.X)
+	}
+	return false
+}
+
+func (p *printer) signature(sig *ast.FuncType) {
+	if sig.TypeParams != nil {
+		p.parameters(sig.TypeParams, funcTParam)
+	}
+	if sig.Params != nil {
+		p.parameters(sig.Params, funcParam)
+	} else {
+		p.print(token.LPAREN, token.RPAREN)
+	}
+	res := sig.Results
+	n := res.NumFields()
+	if n > 0 {
+		// res != nil
+		p.print(blank)
+		if n == 1 && res.List[0].Names == nil {
+			// single anonymous res; no ()'s
+			p.expr(stripParensAlways(res.List[0].Type))
+			return
+		}
+		p.parameters(res, funcParam)
+	}
+}
+
+func identListSize(list []*ast.Ident, maxSize int) (size int) {
+	for i, x := range list {
+		if i > 0 {
+			size += len(", ")
+		}
+		size += utf8.RuneCountInString(x.Name)
+		if size >= maxSize {
+			break
+		}
+	}
+	return
+}
+
+func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
+	if len(list) != 1 {
+		return false // allow only one field
+	}
+	f := list[0]
+	if f.Tag != nil || f.Comment != nil {
+		return false // don't allow tags or comments
+	}
+	// only name(s) and type
+	const maxSize = 30 // adjust as appropriate, this is an approximate value
+	namesSize := identListSize(f.Names, maxSize)
+	if namesSize > 0 {
+		namesSize = 1 // blank between names and types
+	}
+	typeSize := p.nodeSize(f.Type, maxSize)
+	return namesSize+typeSize <= maxSize
+}
+
+func (p *printer) setLineComment(text string) {
+	p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
+}
+
+func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
+	lbrace := fields.Opening
+	list := fields.List
+	rbrace := fields.Closing
+	hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace))
+	srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace)
+
+	if !hasComments && srcIsOneLine {
+		// possibly a one-line struct/interface
+		if len(list) == 0 {
+			// no blank between keyword and {} in this case
+			p.print(lbrace, token.LBRACE, rbrace, token.RBRACE)
+			return
+		} else if p.isOneLineFieldList(list) {
+			// small enough - print on one line
+			// (don't use identList and ignore source line breaks)
+			p.print(lbrace, token.LBRACE, blank)
+			f := list[0]
+			if isStruct {
+				for i, x := range f.Names {
+					if i > 0 {
+						// no comments so no need for comma position
+						p.print(token.COMMA, blank)
+					}
+					p.expr(x)
+				}
+				if len(f.Names) > 0 {
+					p.print(blank)
+				}
+				p.expr(f.Type)
+			} else { // interface
+				if len(f.Names) > 0 {
+					name := f.Names[0] // method name
+					p.expr(name)
+					p.signature(f.Type.(*ast.FuncType)) // don't print "func"
+				} else {
+					// embedded interface
+					p.expr(f.Type)
+				}
+			}
+			p.print(blank, rbrace, token.RBRACE)
+			return
+		}
+	}
+	// hasComments || !srcIsOneLine
+
+	p.print(blank, lbrace, token.LBRACE, indent)
+	if hasComments || len(list) > 0 {
+		p.print(formfeed)
+	}
+
+	if isStruct {
+
+		sep := vtab
+		if len(list) == 1 {
+			sep = blank
+		}
+		var line int
+		for i, f := range list {
+			if i > 0 {
+				p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0)
+			}
+			extraTabs := 0
+			p.setComment(f.Doc)
+			p.recordLine(&line)
+			if len(f.Names) > 0 {
+				// named fields
+				p.identList(f.Names, false)
+				p.print(sep)
+				p.expr(f.Type)
+				extraTabs = 1
+			} else {
+				// anonymous field
+				p.expr(f.Type)
+				extraTabs = 2
+			}
+			if f.Tag != nil {
+				if len(f.Names) > 0 && sep == vtab {
+					p.print(sep)
+				}
+				p.print(sep)
+				p.expr(f.Tag)
+				extraTabs = 0
+			}
+			if f.Comment != nil {
+				for ; extraTabs > 0; extraTabs-- {
+					p.print(sep)
+				}
+				p.setComment(f.Comment)
+			}
+		}
+		if isIncomplete {
+			if len(list) > 0 {
+				p.print(formfeed)
+			}
+			p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+			p.setLineComment("// " + filteredMsg)
+		}
+
+	} else { // interface
+
+		var line int
+		var prev *ast.Ident // previous "type" identifier
+		for i, f := range list {
+			var name *ast.Ident // first name, or nil
+			if len(f.Names) > 0 {
+				name = f.Names[0]
+			}
+			if i > 0 {
+				// don't do a line break (min == 0) if we are printing a list of types
+				// TODO(gri) this doesn't work quite right if the list of types is
+				//           spread across multiple lines
+				min := 1
+				if prev != nil && name == prev {
+					min = 0
+				}
+				p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0)
+			}
+			p.setComment(f.Doc)
+			p.recordLine(&line)
+			if name != nil {
+				// method
+				p.expr(name)
+				p.signature(f.Type.(*ast.FuncType)) // don't print "func"
+				prev = nil
+			} else {
+				// embedded interface
+				p.expr(f.Type)
+				prev = nil
+			}
+			p.setComment(f.Comment)
+		}
+		if isIncomplete {
+			if len(list) > 0 {
+				p.print(formfeed)
+			}
+			p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+			p.setLineComment("// contains filtered or unexported methods")
+		}
+
+	}
+	p.print(unindent, formfeed, rbrace, token.RBRACE)
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+// walkBinary descends the binary expression tree rooted at e, skipping
+// subtrees where parentheses will be inserted anyway, and reports whether
+// operators of precedence 4 (+ - | ^) or precedence 5 (* / % << >> & &^)
+// occur, together with the largest cutoff (maxProblem) needed to keep
+// adjacent operator pairs from clashing without a blank (e.g. '/' followed
+// by '*' would form the comment opener "/*").
+func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
+	switch e.Op.Precedence() {
+	case 4:
+		has4 = true
+	case 5:
+		has5 = true
+	}
+
+	switch l := e.X.(type) {
+	case *ast.BinaryExpr:
+		if l.Op.Precedence() < e.Op.Precedence() {
+			// parens will be inserted.
+			// pretend this is an *ast.ParenExpr and do nothing.
+			break
+		}
+		h4, h5, mp := walkBinary(l)
+		has4 = has4 || h4
+		has5 = has5 || h5
+		if maxProblem < mp {
+			maxProblem = mp
+		}
+	}
+
+	switch r := e.Y.(type) {
+	case *ast.BinaryExpr:
+		if r.Op.Precedence() <= e.Op.Precedence() {
+			// parens will be inserted.
+			// pretend this is an *ast.ParenExpr and do nothing.
+			break
+		}
+		h4, h5, mp := walkBinary(r)
+		has4 = has4 || h4
+		has5 = has5 || h5
+		if maxProblem < mp {
+			maxProblem = mp
+		}
+
+	case *ast.StarExpr:
+		if e.Op == token.QUO { // `*/`
+			maxProblem = 5
+		}
+
+	case *ast.UnaryExpr:
+		switch e.Op.String() + r.Op.String() {
+		case "/*", "&&", "&^":
+			maxProblem = 5
+		case "++", "--":
+			if maxProblem < 4 {
+				maxProblem = 4
+			}
+		}
+	}
+	return
+}
+
+// cutoff computes the operator precedence cutoff for printing e at the
+// given expression depth: binary operators with precedence below the
+// cutoff are surrounded by blanks. See the comment on binaryExpr for
+// the full set of rules.
+func cutoff(e *ast.BinaryExpr, depth int) int {
+	has4, has5, maxProblem := walkBinary(e)
+	if maxProblem > 0 {
+		return maxProblem + 1
+	}
+	if has4 && has5 {
+		if depth == 1 {
+			return 5
+		}
+		return 4
+	}
+	if depth == 1 {
+		return 6
+	}
+	return 4
+}
+
+// diffPrec returns 1 if expr is not a binary expression with operator
+// precedence prec (the nesting depth increases), and 0 otherwise.
+func diffPrec(expr ast.Expr, prec int) int {
+	x, ok := expr.(*ast.BinaryExpr)
+	if !ok || prec != x.Op.Precedence() {
+		return 1
+	}
+	return 0
+}
+
+// reduceDepth decrements depth, but never below 1: parentheses undo one
+// level of expression depth.
+func reduceDepth(depth int) int {
+	depth--
+	if depth < 1 {
+		depth = 1
+	}
+	return depth
+}
+
+// Format the binary expression: decide the cutoff and then format.
+// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
+// (Algorithm suggestion by Russ Cox.)
+//
+// The precedences are:
+//
+//	5             *  /  %  <<  >>  &  &^
+//	4             +  -  |  ^
+//	3             ==  !=  <  <=  >  >=
+//	2             &&
+//	1             ||
+//
+// The only decision is whether there will be spaces around levels 4 and 5.
+// There are never spaces at level 6 (unary), and always spaces at levels 3 and below.
+//
+// To choose the cutoff, look at the whole expression but excluding primary
+// expressions (function calls, parenthesized exprs), and apply these rules:
+//
+//  1. If there is a binary operator with a right side unary operand
+//     that would clash without a space, the cutoff must be (in order):
+//
+//     /*	6
+//     &&	6
+//     &^	6
+//     ++	5
+//     --	5
+//
+//     (Comparison operators always have spaces around them.)
+//
+//  2. If there is a mix of level 5 and level 4 operators, then the cutoff
+//     is 5 (use spaces to distinguish precedence) in Normal mode
+//     and 4 (never use spaces) in Compact mode.
+//
+//  3. If there are no level 4 operators or no level 5 operators, then the
+//     cutoff is 6 (always use spaces) in Normal mode
+//     and 4 (never use spaces) in Compact mode.
+func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
+	prec := x.Op.Precedence()
+	if prec < prec1 {
+		// parenthesis needed
+		// Note: The parser inserts an ast.ParenExpr node; thus this case
+		//       can only occur if the AST is created in a different way.
+		p.print(token.LPAREN)
+		p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
+		p.print(token.RPAREN)
+		return
+	}
+
+	printBlank := prec < cutoff
+
+	// ws tracks whether a line break with indentation has happened yet:
+	// after the first break it is set to ignore, and a matching unindent
+	// is printed at the end of the expression.
+	ws := indent
+	p.expr1(x.X, prec, depth+diffPrec(x.X, prec))
+	if printBlank {
+		p.print(blank)
+	}
+	xline := p.pos.Line // before the operator (it may be on the next line!)
+	yline := p.lineFor(x.Y.Pos())
+	p.print(x.OpPos, x.Op)
+	if xline != yline && xline > 0 && yline > 0 {
+		// at least one line break, but respect an extra empty line
+		// in the source
+		if p.linebreak(yline, 1, ws, true) > 0 {
+			ws = ignore
+			printBlank = false // no blank after line break
+		}
+	}
+	if printBlank {
+		p.print(blank)
+	}
+	p.expr1(x.Y, prec+1, depth+1)
+	if ws == ignore {
+		p.print(unindent)
+	}
+}
+
+// isBinary reports whether expr is an *ast.BinaryExpr.
+func isBinary(expr ast.Expr) bool {
+	_, ok := expr.(*ast.BinaryExpr)
+	return ok
+}
+
+// expr1 prints the expression expr. prec1 is the minimum operator
+// precedence expr must have to be printed without parentheses, and depth
+// is the expression nesting level used by cutoff to decide the spacing
+// around binary operators (depth == 1 is Normal mode, depth > 1 Compact
+// mode; see binaryExpr).
+func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
+	p.print(expr.Pos())
+
+	switch x := expr.(type) {
+	case *ast.BadExpr:
+		p.print("BadExpr")
+
+	case *ast.Ident:
+		p.print(x)
+
+	case *ast.BinaryExpr:
+		if depth < 1 {
+			p.internalError("depth < 1:", depth)
+			depth = 1
+		}
+		p.binaryExpr(x, prec1, cutoff(x, depth), depth)
+
+	case *ast.KeyValueExpr:
+		p.expr(x.Key)
+		p.print(x.Colon, token.COLON, blank)
+		p.expr(x.Value)
+
+	case *ast.StarExpr:
+		const prec = token.UnaryPrec
+		if prec < prec1 {
+			// parenthesis needed
+			p.print(token.LPAREN)
+			p.print(token.MUL)
+			p.expr(x.X)
+			p.print(token.RPAREN)
+		} else {
+			// no parenthesis needed
+			p.print(token.MUL)
+			p.expr(x.X)
+		}
+
+	case *ast.UnaryExpr:
+		const prec = token.UnaryPrec
+		if prec < prec1 {
+			// parenthesis needed
+			p.print(token.LPAREN)
+			p.expr(x)
+			p.print(token.RPAREN)
+		} else {
+			// no parenthesis needed
+			p.print(x.Op)
+			if x.Op == token.RANGE {
+				// TODO(gri) Remove this code if it cannot be reached.
+				p.print(blank)
+			}
+			p.expr1(x.X, prec, depth)
+		}
+
+	case *ast.BasicLit:
+		if p.Config.Mode&normalizeNumbers != 0 {
+			x = normalizedNumber(x)
+		}
+		p.print(x)
+
+	case *ast.FuncLit:
+		p.print(x.Type.Pos(), token.FUNC)
+		// See the comment in funcDecl about how the header size is computed.
+		startCol := p.out.Column - len("func")
+		p.signature(x.Type)
+		p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body)
+
+	case *ast.ParenExpr:
+		if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
+			// don't print parentheses around an already parenthesized expression
+			// TODO(gri) consider making this more general and incorporate precedence levels
+			p.expr0(x.X, depth)
+		} else {
+			p.print(token.LPAREN)
+			p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
+			p.print(x.Rparen, token.RPAREN)
+		}
+
+	case *ast.SelectorExpr:
+		p.selectorExpr(x, depth, false)
+
+	case *ast.TypeAssertExpr:
+		p.expr1(x.X, token.HighestPrec, depth)
+		p.print(token.PERIOD, x.Lparen, token.LPAREN)
+		if x.Type != nil {
+			p.expr(x.Type)
+		} else {
+			p.print(token.TYPE)
+		}
+		p.print(x.Rparen, token.RPAREN)
+
+	case *ast.IndexExpr:
+		// TODO(gri): should treat[] like parentheses and undo one level of depth
+		p.expr1(x.X, token.HighestPrec, 1)
+		p.print(x.Lbrack, token.LBRACK)
+		p.expr0(x.Index, depth+1)
+		p.print(x.Rbrack, token.RBRACK)
+
+	case *ast.IndexListExpr:
+		// TODO(gri): as for IndexExpr, should treat [] like parentheses and undo
+		// one level of depth
+		p.expr1(x.X, token.HighestPrec, 1)
+		p.print(x.Lbrack, token.LBRACK)
+		p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false)
+		p.print(x.Rbrack, token.RBRACK)
+
+	case *ast.SliceExpr:
+		// TODO(gri): should treat[] like parentheses and undo one level of depth
+		p.expr1(x.X, token.HighestPrec, 1)
+		p.print(x.Lbrack, token.LBRACK)
+		indices := []ast.Expr{x.Low, x.High}
+		if x.Max != nil {
+			indices = append(indices, x.Max)
+		}
+		// determine if we need extra blanks around ':'
+		var needsBlanks bool
+		if depth <= 1 {
+			var indexCount int
+			var hasBinaries bool
+			for _, x := range indices {
+				if x != nil {
+					indexCount++
+					if isBinary(x) {
+						hasBinaries = true
+					}
+				}
+			}
+			if indexCount > 1 && hasBinaries {
+				needsBlanks = true
+			}
+		}
+		for i, x := range indices {
+			if i > 0 {
+				if indices[i-1] != nil && needsBlanks {
+					p.print(blank)
+				}
+				p.print(token.COLON)
+				if x != nil && needsBlanks {
+					p.print(blank)
+				}
+			}
+			if x != nil {
+				p.expr0(x, depth+1)
+			}
+		}
+		p.print(x.Rbrack, token.RBRACK)
+
+	case *ast.CallExpr:
+		if len(x.Args) > 1 {
+			depth++
+		}
+		var wasIndented bool
+		if _, ok := x.Fun.(*ast.FuncType); ok {
+			// conversions to literal function types require parentheses around the type
+			p.print(token.LPAREN)
+			wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+			p.print(token.RPAREN)
+		} else {
+			wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+		}
+		p.print(x.Lparen, token.LPAREN)
+		if x.Ellipsis.IsValid() {
+			p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis, false)
+			p.print(x.Ellipsis, token.ELLIPSIS)
+			if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) {
+				p.print(token.COMMA, formfeed)
+			}
+		} else {
+			p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen, false)
+		}
+		p.print(x.Rparen, token.RPAREN)
+		if wasIndented {
+			p.print(unindent)
+		}
+
+	case *ast.CompositeLit:
+		// composite literal elements that are composite literals themselves may have the type omitted
+		if x.Type != nil {
+			p.expr1(x.Type, token.HighestPrec, depth)
+		}
+		p.level++
+		p.print(x.Lbrace, token.LBRACE)
+		p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete)
+		// do not insert extra line break following a /*-style comment
+		// before the closing '}' as it might break the code if there
+		// is no trailing ','
+		mode := noExtraLinebreak
+		// do not insert extra blank following a /*-style comment
+		// before the closing '}' unless the literal is empty
+		if len(x.Elts) > 0 {
+			mode |= noExtraBlank
+		}
+		// need the initial indent to print lone comments with
+		// the proper level of indentation
+		p.print(indent, unindent, mode, x.Rbrace, token.RBRACE, mode)
+		p.level--
+
+	case *ast.Ellipsis:
+		p.print(token.ELLIPSIS)
+		if x.Elt != nil {
+			p.expr(x.Elt)
+		}
+
+	case *ast.ArrayType:
+		p.print(token.LBRACK)
+		if x.Len != nil {
+			p.expr(x.Len)
+		}
+		p.print(token.RBRACK)
+		p.expr(x.Elt)
+
+	case *ast.StructType:
+		p.print(token.STRUCT)
+		p.fieldList(x.Fields, true, x.Incomplete)
+
+	case *ast.FuncType:
+		p.print(token.FUNC)
+		p.signature(x)
+
+	case *ast.InterfaceType:
+		p.print(token.INTERFACE)
+		p.fieldList(x.Methods, false, x.Incomplete)
+
+	case *ast.MapType:
+		p.print(token.MAP, token.LBRACK)
+		p.expr(x.Key)
+		p.print(token.RBRACK)
+		p.expr(x.Value)
+
+	case *ast.ChanType:
+		switch x.Dir {
+		case ast.SEND | ast.RECV:
+			p.print(token.CHAN)
+		case ast.RECV:
+			p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same
+		case ast.SEND:
+			p.print(token.CHAN, x.Arrow, token.ARROW)
+		}
+		p.print(blank)
+		p.expr(x.Value)
+
+	default:
+		panic("unreachable")
+	}
+}
+
+// normalizedNumber rewrites base prefixes and exponents
+// of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3),
+// and removes leading 0's from integer imaginary literals (0765i to 765i).
+// It leaves hexadecimal digits alone.
+//
+// normalizedNumber doesn't modify the ast.BasicLit value lit points to.
+// If lit is not a number or a number in canonical format already,
+// lit is returned as is. Otherwise a new ast.BasicLit is created.
+func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit {
+	if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG {
+		return lit // not a number - nothing to do
+	}
+	if len(lit.Value) < 2 {
+		return lit // only one digit (common case) - nothing to do
+	}
+	// len(lit.Value) >= 2
+
+	// We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer
+	// or floating-point value, decimal or not. Instead, just consider the literal pattern.
+	x := lit.Value
+	// Dispatch on the first two bytes, which determine the base prefix (if any).
+	switch x[:2] {
+	default:
+		// 0-prefix octal, decimal int, or float (possibly with 'i' suffix)
+		if i := strings.LastIndexByte(x, 'E'); i >= 0 {
+			x = x[:i] + "e" + x[i+1:]
+			break
+		}
+		// remove leading 0's from integer (but not floating-point) imaginary literals
+		if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") {
+			x = strings.TrimLeft(x, "0_")
+			if x == "i" {
+				x = "0i"
+			}
+		}
+	case "0X":
+		x = "0x" + x[2:]
+		// possibly a hexadecimal float
+		if i := strings.LastIndexByte(x, 'P'); i >= 0 {
+			x = x[:i] + "p" + x[i+1:]
+		}
+	case "0x":
+		// possibly a hexadecimal float
+		i := strings.LastIndexByte(x, 'P')
+		if i == -1 {
+			return lit // nothing to do
+		}
+		x = x[:i] + "p" + x[i+1:]
+	case "0O":
+		x = "0o" + x[2:]
+	case "0o":
+		return lit // nothing to do
+	case "0B":
+		x = "0b" + x[2:]
+	case "0b":
+		return lit // nothing to do
+	}
+
+	return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x}
+}
+
+// possibleSelectorExpr prints expr, handling the *ast.SelectorExpr case
+// specially, and reports whether the selector was printed across multiple
+// lines — in which case the caller is responsible for the closing unindent.
+func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool {
+	if x, ok := expr.(*ast.SelectorExpr); ok {
+		return p.selectorExpr(x, depth, true)
+	}
+	p.expr1(expr, prec1, depth)
+	return false
+}
+
+// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans
+// multiple lines.
+func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool {
+	p.expr1(x.X, token.HighestPrec, depth)
+	p.print(token.PERIOD)
+	if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
+		p.print(indent, newline, x.Sel.Pos(), x.Sel)
+		if !isMethod {
+			p.print(unindent)
+		}
+		return true
+	}
+	p.print(x.Sel.Pos(), x.Sel)
+	return false
+}
+
+// expr0 prints x at the lowest precedence level with the given depth.
+func (p *printer) expr0(x ast.Expr, depth int) {
+	p.expr1(x, token.LowestPrec, depth)
+}
+
+// expr prints x at the lowest precedence level and depth 1 (Normal mode).
+func (p *printer) expr(x ast.Expr) {
+	const depth = 1
+	p.expr1(x, token.LowestPrec, depth)
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// Print the statement list indented, but without a newline after the last statement.
+// Extra line breaks between statements in the source are respected but at most one
+// empty line is printed between statements.
+// If nindent > 0 the list is printed with one extra level of indentation;
+// nextIsRBrace is forwarded to the last statement in the list.
+func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) {
+	if nindent > 0 {
+		p.print(indent)
+	}
+	var line int
+	i := 0
+	for _, s := range list {
+		// ignore empty statements (was issue 3466)
+		if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty {
+			// nindent == 0 only for lists of switch/select case clauses;
+			// in those cases each clause is a new section
+			if len(p.output) > 0 {
+				// only print line break if we are not at the beginning of the output
+				// (i.e., we are not printing only a partial program)
+				p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0)
+			}
+			p.recordLine(&line)
+			p.stmt(s, nextIsRBrace && i == len(list)-1)
+			// labeled statements put labels on a separate line, but here
+			// we only care about the start line of the actual statement
+			// without label - correct line for each label
+			for t := s; ; {
+				lt, _ := t.(*ast.LabeledStmt)
+				if lt == nil {
+					break
+				}
+				line++
+				t = lt.Stmt
+			}
+			i++
+		}
+	}
+	if nindent > 0 {
+		p.print(unindent)
+	}
+}
+
+// block prints an *ast.BlockStmt; it always spans at least two lines.
+func (p *printer) block(b *ast.BlockStmt, nindent int) {
+	p.print(b.Lbrace, token.LBRACE)
+	p.stmtList(b.List, nindent, true)
+	p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true)
+	p.print(b.Rbrace, token.RBRACE)
+}
+
+// isTypeName reports whether x is a (possibly qualified) identifier,
+// i.e. syntactically a type name.
+func isTypeName(x ast.Expr) bool {
+	switch t := x.(type) {
+	case *ast.Ident:
+		return true
+	case *ast.SelectorExpr:
+		return isTypeName(t.X)
+	}
+	return false
+}
+
+// stripParens removes outer parentheses from x, except when the parentheses
+// must be kept because they protect an unparenthesized composite literal
+// starting with a type name.
+func stripParens(x ast.Expr) ast.Expr {
+	if px, strip := x.(*ast.ParenExpr); strip {
+		// parentheses must not be stripped if there are any
+		// unparenthesized composite literals starting with
+		// a type name
+		ast.Inspect(px.X, func(node ast.Node) bool {
+			switch x := node.(type) {
+			case *ast.ParenExpr:
+				// parentheses protect enclosed composite literals
+				return false
+			case *ast.CompositeLit:
+				if isTypeName(x.Type) {
+					strip = false // do not strip parentheses
+				}
+				return false
+			}
+			// in all other cases, keep inspecting
+			return true
+		})
+		if strip {
+			return stripParens(px.X)
+		}
+	}
+	return x
+}
+
+// stripParensAlways unconditionally removes all outer parentheses from x.
+func stripParensAlways(x ast.Expr) ast.Expr {
+	if x, ok := x.(*ast.ParenExpr); ok {
+		return stripParensAlways(x.X)
+	}
+	return x
+}
+
+// controlClause prints the init/expr/post parts of an if, switch, or for
+// control clause. Semicolons are printed only when an init or post
+// statement is present; the post statement is printed only for for
+// statements (isForStmt == true).
+func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
+	p.print(blank)
+	needsBlank := false
+	if init == nil && post == nil {
+		// no semicolons required
+		if expr != nil {
+			p.expr(stripParens(expr))
+			needsBlank = true
+		}
+	} else {
+		// all semicolons required
+		// (they are not separators, print them explicitly)
+		if init != nil {
+			p.stmt(init, false)
+		}
+		p.print(token.SEMICOLON, blank)
+		if expr != nil {
+			p.expr(stripParens(expr))
+			needsBlank = true
+		}
+		if isForStmt {
+			p.print(token.SEMICOLON, blank)
+			needsBlank = false
+			if post != nil {
+				p.stmt(post, false)
+				needsBlank = true
+			}
+		}
+	}
+	if needsBlank {
+		p.print(blank)
+	}
+}
+
+// indentList reports whether an expression list would look better if it
+// were indented wholesale (starting with the very first element, rather
+// than starting at the first line break).
+func (p *printer) indentList(list []ast.Expr) bool {
+	// Heuristic: indentList reports whether there are more than one multi-
+	// line element in the list, or if there is any element that is not
+	// starting on the same line as the previous one ends.
+	if len(list) >= 2 {
+		var b = p.lineFor(list[0].Pos())
+		var e = p.lineFor(list[len(list)-1].End())
+		if 0 < b && b < e {
+			// list spans multiple lines
+			n := 0 // multi-line element count
+			line := b
+			for _, x := range list {
+				xb := p.lineFor(x.Pos())
+				xe := p.lineFor(x.End())
+				if line < xb {
+					// x is not starting on the same
+					// line as the previous one ended
+					return true
+				}
+				if xb < xe {
+					// x is a multi-line element
+					n++
+				}
+				line = xe
+			}
+			return n > 1
+		}
+	}
+	return false
+}
+
+// stmt prints the statement stmt. nextIsRBrace reports whether the
+// statement is immediately followed by a closing brace; it is used to
+// avoid printing a semicolon after a trailing empty labeled statement.
+func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) {
+	p.print(stmt.Pos())
+
+	switch s := stmt.(type) {
+	case *ast.BadStmt:
+		p.print("BadStmt")
+
+	case *ast.DeclStmt:
+		p.decl(s.Decl)
+
+	case *ast.EmptyStmt:
+		// nothing to do
+
+	case *ast.LabeledStmt:
+		// a "correcting" unindent immediately following a line break
+		// is applied before the line break if there is no comment
+		// between (see writeWhitespace)
+		p.print(unindent)
+		p.expr(s.Label)
+		p.print(s.Colon, token.COLON, indent)
+		if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty {
+			if !nextIsRBrace {
+				p.print(newline, e.Pos(), token.SEMICOLON)
+				break
+			}
+		} else {
+			p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true)
+		}
+		p.stmt(s.Stmt, nextIsRBrace)
+
+	case *ast.ExprStmt:
+		const depth = 1
+		p.expr0(s.X, depth)
+
+	case *ast.SendStmt:
+		const depth = 1
+		p.expr0(s.Chan, depth)
+		p.print(blank, s.Arrow, token.ARROW, blank)
+		p.expr0(s.Value, depth)
+
+	case *ast.IncDecStmt:
+		const depth = 1
+		p.expr0(s.X, depth+1)
+		p.print(s.TokPos, s.Tok)
+
+	case *ast.AssignStmt:
+		var depth = 1
+		if len(s.Lhs) > 1 && len(s.Rhs) > 1 {
+			depth++
+		}
+		p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false)
+		p.print(blank, s.TokPos, s.Tok, blank)
+		p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false)
+
+	case *ast.GoStmt:
+		p.print(token.GO, blank)
+		p.expr(s.Call)
+
+	case *ast.DeferStmt:
+		p.print(token.DEFER, blank)
+		p.expr(s.Call)
+
+	case *ast.ReturnStmt:
+		p.print(token.RETURN)
+		if s.Results != nil {
+			p.print(blank)
+			// Use indentList heuristic to make corner cases look
+			// better (issue 1207). A more systematic approach would
+			// always indent, but this would cause significant
+			// reformatting of the code base and not necessarily
+			// lead to more nicely formatted code in general.
+			if p.indentList(s.Results) {
+				p.print(indent)
+				// Use NoPos so that a newline never goes before
+				// the results (see issue #32854).
+				p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false)
+				p.print(unindent)
+			} else {
+				p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false)
+			}
+		}
+
+	case *ast.BranchStmt:
+		p.print(s.Tok)
+		if s.Label != nil {
+			p.print(blank)
+			p.expr(s.Label)
+		}
+
+	case *ast.BlockStmt:
+		p.block(s, 1)
+
+	case *ast.IfStmt:
+		p.print(token.IF)
+		p.controlClause(false, s.Init, s.Cond, nil)
+		p.block(s.Body, 1)
+		if s.Else != nil {
+			p.print(blank, token.ELSE, blank)
+			switch s.Else.(type) {
+			case *ast.BlockStmt, *ast.IfStmt:
+				p.stmt(s.Else, nextIsRBrace)
+			default:
+				// This can only happen with an incorrectly
+				// constructed AST. Permit it but print so
+				// that it can be parsed without errors.
+				p.print(token.LBRACE, indent, formfeed)
+				p.stmt(s.Else, true)
+				p.print(unindent, formfeed, token.RBRACE)
+			}
+		}
+
+	case *ast.CaseClause:
+		if s.List != nil {
+			p.print(token.CASE, blank)
+			p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false)
+		} else {
+			p.print(token.DEFAULT)
+		}
+		p.print(s.Colon, token.COLON)
+		p.stmtList(s.Body, 1, nextIsRBrace)
+
+	case *ast.SwitchStmt:
+		p.print(token.SWITCH)
+		p.controlClause(false, s.Init, s.Tag, nil)
+		p.block(s.Body, 0)
+
+	case *ast.TypeSwitchStmt:
+		p.print(token.SWITCH)
+		if s.Init != nil {
+			p.print(blank)
+			p.stmt(s.Init, false)
+			p.print(token.SEMICOLON)
+		}
+		p.print(blank)
+		p.stmt(s.Assign, false)
+		p.print(blank)
+		p.block(s.Body, 0)
+
+	case *ast.CommClause:
+		if s.Comm != nil {
+			p.print(token.CASE, blank)
+			p.stmt(s.Comm, false)
+		} else {
+			p.print(token.DEFAULT)
+		}
+		p.print(s.Colon, token.COLON)
+		p.stmtList(s.Body, 1, nextIsRBrace)
+
+	case *ast.SelectStmt:
+		p.print(token.SELECT, blank)
+		body := s.Body
+		if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) {
+			// print empty select statement w/o comments on one line
+			p.print(body.Lbrace, token.LBRACE, body.Rbrace, token.RBRACE)
+		} else {
+			p.block(body, 0)
+		}
+
+	case *ast.ForStmt:
+		p.print(token.FOR)
+		p.controlClause(true, s.Init, s.Cond, s.Post)
+		p.block(s.Body, 1)
+
+	case *ast.RangeStmt:
+		p.print(token.FOR, blank)
+		if s.Key != nil {
+			p.expr(s.Key)
+			if s.Value != nil {
+				// use position of value following the comma as
+				// comma position for correct comment placement
+				p.print(s.Value.Pos(), token.COMMA, blank)
+				p.expr(s.Value)
+			}
+			p.print(blank, s.TokPos, s.Tok, blank)
+		}
+		p.print(token.RANGE, blank)
+		p.expr(stripParens(s.X))
+		p.print(blank)
+		p.block(s.Body, 1)
+
+	default:
+		panic("unreachable")
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// The keepTypeColumn function determines if the type column of a series of
+// consecutive const or var declarations must be kept, or if initialization
+// values (V) can be placed in the type column (T) instead. The i'th entry
+// in the result slice is true if the type column in spec[i] must be kept.
+//
+// For example, the declaration:
+//
+//		const (
+//			foobar int = 42 // comment
+//			x          = 7  // comment
+//			foo
+//	             bar = 991
+//		)
+//
+// leads to the type/values matrix below. A run of value columns (V) can
+// be moved into the type column if there is no type for any of the values
+// in that column (we only move entire columns so that they align properly).
+//
+//		matrix        formatted     result
+//	                   matrix
+//		T  V    ->    T  V     ->   true      there is a T and so the type
+//		-  V          -  V          true      column must be kept
+//		-  -          -  -          false
+//		-  V          V  -          false     V is moved into T column
+func keepTypeColumn(specs []ast.Spec) []bool {
+	m := make([]bool, len(specs))
+
+	// populate marks specs[i:j] as needing the type column kept.
+	populate := func(i, j int, keepType bool) {
+		if keepType {
+			for ; i < j; i++ {
+				m[i] = true
+			}
+		}
+	}
+
+	i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run
+	var keepType bool
+	for i, s := range specs {
+		t := s.(*ast.ValueSpec)
+		if t.Values != nil {
+			if i0 < 0 {
+				// start of a run of ValueSpecs with non-nil Values
+				i0 = i
+				keepType = false
+			}
+		} else {
+			if i0 >= 0 {
+				// end of a run
+				populate(i0, i, keepType)
+				i0 = -1
+			}
+		}
+		if t.Type != nil {
+			keepType = true
+		}
+	}
+	if i0 >= 0 {
+		// end of a run
+		populate(i0, len(specs), keepType)
+	}
+
+	return m
+}
+
+// valueSpec prints a ValueSpec that is part of a group of const or var
+// declarations; keepType reports whether the type column must be kept
+// for alignment (see keepTypeColumn).
+func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) {
+	p.setComment(s.Doc)
+	p.identList(s.Names, false) // always present
+	extraTabs := 3
+	if s.Type != nil || keepType {
+		p.print(vtab)
+		extraTabs--
+	}
+	if s.Type != nil {
+		p.expr(s.Type)
+	}
+	if s.Values != nil {
+		p.print(vtab, token.ASSIGN, blank)
+		p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false)
+		extraTabs--
+	}
+	if s.Comment != nil {
+		for ; extraTabs > 0; extraTabs-- {
+			p.print(vtab)
+		}
+		p.setComment(s.Comment)
+	}
+}
+
+// sanitizeImportPath returns lit with a canonical, double-quoted import
+// path, or lit unchanged if the path cannot be sanitized.
+func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit {
+	// Note: An unmodified AST generated by go/parser will already
+	// contain a backward- or double-quoted path string that does
+	// not contain any invalid characters, and most of the work
+	// here is not needed. However, a modified or generated AST
+	// may possibly contain non-canonical paths. Do the work in
+	// all cases since it's not too hard and not speed-critical.
+
+	// if we don't have a proper string, be conservative and return whatever we have
+	if lit.Kind != token.STRING {
+		return lit
+	}
+	s, err := strconv.Unquote(lit.Value)
+	if err != nil {
+		return lit
+	}
+
+	// if the string is an invalid path, return whatever we have
+	//
+	// spec: "Implementation restriction: A compiler may restrict
+	// ImportPaths to non-empty strings using only characters belonging
+	// to Unicode's L, M, N, P, and S general categories (the Graphic
+	// characters without spaces) and may also exclude the characters
+	// !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character
+	// U+FFFD."
+	if s == "" {
+		return lit
+	}
+	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+	for _, r := range s {
+		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+			return lit
+		}
+	}
+
+	// otherwise, return the double-quoted path
+	s = strconv.Quote(s)
+	if s == lit.Value {
+		return lit // nothing wrong with lit
+	}
+	return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s}
+}
+
+// The parameter n is the number of specs in the group. If doIndent is set,
+// multi-line identifier lists in the spec are indented when the first
+// linebreak is encountered.
+func (p *printer) spec(spec ast.Spec, n int, doIndent bool) {
+	switch s := spec.(type) {
+	case *ast.ImportSpec:
+		p.setComment(s.Doc)
+		if s.Name != nil {
+			p.expr(s.Name)
+			p.print(blank)
+		}
+		p.expr(sanitizeImportPath(s.Path))
+		p.setComment(s.Comment)
+		p.print(s.EndPos)
+
+	case *ast.ValueSpec:
+		if n != 1 {
+			p.internalError("expected n = 1; got", n)
+		}
+		p.setComment(s.Doc)
+		p.identList(s.Names, doIndent) // always present
+		if s.Type != nil {
+			p.print(blank)
+			p.expr(s.Type)
+		}
+		if s.Values != nil {
+			p.print(blank, token.ASSIGN, blank)
+			p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false)
+		}
+		p.setComment(s.Comment)
+
+	case *ast.TypeSpec:
+		p.setComment(s.Doc)
+		p.expr(s.Name)
+		if s.TypeParams != nil {
+			p.parameters(s.TypeParams, typeTParam)
+		}
+		if n == 1 {
+			p.print(blank)
+		} else {
+			p.print(vtab)
+		}
+		if s.Assign.IsValid() {
+			p.print(token.ASSIGN, blank)
+		}
+		p.expr(s.Type)
+		p.setComment(s.Comment)
+
+	default:
+		panic("unreachable")
+	}
+}
+
+// genDecl prints a generic declaration (import, const, type, or var),
+// either as a single spec or as a parenthesized group of specs.
+func (p *printer) genDecl(d *ast.GenDecl) {
+	p.setComment(d.Doc)
+	p.print(d.Pos(), d.Tok, blank)
+
+	if d.Lparen.IsValid() || len(d.Specs) > 1 {
+		// group of parenthesized declarations
+		p.print(d.Lparen, token.LPAREN)
+		if n := len(d.Specs); n > 0 {
+			p.print(indent, formfeed)
+			if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) {
+				// two or more grouped const/var declarations:
+				// determine if the type column must be kept
+				keepType := keepTypeColumn(d.Specs)
+				var line int
+				for i, s := range d.Specs {
+					if i > 0 {
+						p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
+					}
+					p.recordLine(&line)
+					p.valueSpec(s.(*ast.ValueSpec), keepType[i])
+				}
+			} else {
+				var line int
+				for i, s := range d.Specs {
+					if i > 0 {
+						p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
+					}
+					p.recordLine(&line)
+					p.spec(s, n, false)
+				}
+			}
+			p.print(unindent, formfeed)
+		}
+		p.print(d.Rparen, token.RPAREN)
+
+	} else if len(d.Specs) > 0 {
+		// single declaration
+		p.spec(d.Specs[0], 1, true)
+	}
+}
+
+// nodeSize determines the size of n in chars after formatting.
+// The result is <= maxSize if the node fits on one line with at
+// most maxSize chars and the formatted output doesn't contain
+// any control chars. Otherwise, the result is > maxSize.
+func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
+	// nodeSize invokes the printer, which may invoke nodeSize
+	// recursively. For deep composite literal nests, this can
+	// lead to an exponential algorithm. Remember previous
+	// results to prune the recursion (was issue 1628).
+	if size, found := p.nodeSizes[n]; found {
+		return size
+	}
+
+	size = maxSize + 1 // assume n doesn't fit
+	// record the pessimistic result up front so recursive calls are pruned
+	// even when the early returns below are taken
+	p.nodeSizes[n] = size
+
+	// nodeSize computation must be independent of particular
+	// style so that we always get the same decision; print
+	// in RawFormat
+	cfg := Config{Mode: RawFormat}
+	var buf bytes.Buffer
+	if err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
+		return
+	}
+	if buf.Len() <= maxSize {
+		for _, ch := range buf.Bytes() {
+			if ch < ' ' {
+				return
+			}
+		}
+		size = buf.Len() // n fits
+		p.nodeSizes[n] = size
+	}
+	return
+}
+
+// numLines returns the number of lines spanned by node n in the original source.
+func (p *printer) numLines(n ast.Node) int {
+	if from := n.Pos(); from.IsValid() {
+		if to := n.End(); to.IsValid() {
+			return p.lineFor(to) - p.lineFor(from) + 1
+		}
+	}
+	return infinity
+}
+
+// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's.
+func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int {
+	pos1 := b.Pos()
+	pos2 := b.Rbrace
+	if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
+		// opening and closing brace are on different lines - don't make it a one-liner
+		return maxSize + 1
+	}
+	if len(b.List) > 5 {
+		// too many statements - don't make it a one-liner
+		return maxSize + 1
+	}
+	// otherwise, estimate body size
+	bodySize := p.commentSizeBefore(p.posFor(pos2))
+	for i, s := range b.List {
+		if bodySize > maxSize {
+			break // no need to continue
+		}
+		if i > 0 {
+			bodySize += 2 // space for a semicolon and blank
+		}
+		bodySize += p.nodeSize(s, maxSize)
+	}
+	return bodySize
+}
+
+// funcBody prints a function body following a function header of given headerSize.
+// If the header's and block's size are "small enough" and the block is "simple enough",
+// the block is printed on the current line, without line breaks, spaced from the header
+// by sep. Otherwise the block's opening "{" is printed on the current line, followed by
+// lines for the block's statements and its closing "}".
+func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
+	if b == nil {
+		return
+	}
+
+	// save/restore composite literal nesting level
+	defer func(level int) {
+		p.level = level
+	}(p.level)
+	p.level = 0
+
+	const maxSize = 100
+	if headerSize+p.bodySize(b, maxSize) <= maxSize {
+		// one-line function body
+		p.print(sep, b.Lbrace, token.LBRACE)
+		if len(b.List) > 0 {
+			p.print(blank)
+			for i, s := range b.List {
+				if i > 0 {
+					p.print(token.SEMICOLON, blank)
+				}
+				p.stmt(s, i == len(b.List)-1)
+			}
+			p.print(blank)
+		}
+		p.print(noExtraLinebreak, b.Rbrace, token.RBRACE, noExtraLinebreak)
+		return
+	}
+
+	if sep != ignore {
+		p.print(blank) // always use blank
+	}
+	p.block(b, 1)
+}
+
+// distanceFrom returns the column difference between p.out (the current output
+// position) and startOutCol. If the start position is on a different line from
+// the current position (or either is unknown), the result is infinity.
+func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int {
+	if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line {
+		return p.out.Column - startOutCol
+	}
+	return infinity
+}
+
+// funcDecl prints a function or method declaration, including its body.
+func (p *printer) funcDecl(d *ast.FuncDecl) {
+	p.setComment(d.Doc)
+	p.print(d.Pos(), token.FUNC, blank)
+	// We have to save startCol only after emitting FUNC; otherwise it can be on a
+	// different line (all whitespace preceding the FUNC is emitted only when the
+	// FUNC is emitted).
+	startCol := p.out.Column - len("func ")
+	if d.Recv != nil {
+		p.parameters(d.Recv, funcParam) // method: print receiver
+		p.print(blank)
+	}
+	p.expr(d.Name)
+	p.signature(d.Type)
+	p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body)
+}
+
+// decl prints a single top-level declaration.
+func (p *printer) decl(decl ast.Decl) {
+	switch d := decl.(type) {
+	case *ast.BadDecl:
+		p.print(d.Pos(), "BadDecl")
+	case *ast.GenDecl:
+		p.genDecl(d)
+	case *ast.FuncDecl:
+		p.funcDecl(d)
+	default:
+		panic("unreachable")
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Files
+
+// declToken returns the declaration token of decl (token.FUNC for a
+// function declaration, d.Tok for a generic declaration), or
+// token.ILLEGAL otherwise.
+func declToken(decl ast.Decl) (tok token.Token) {
+	tok = token.ILLEGAL
+	switch d := decl.(type) {
+	case *ast.GenDecl:
+		tok = d.Tok
+	case *ast.FuncDecl:
+		tok = token.FUNC
+	}
+	return
+}
+
+// declList prints the list of top-level declarations, separating
+// declaration sections with blank lines.
+func (p *printer) declList(list []ast.Decl) {
+	tok := token.ILLEGAL
+	for _, d := range list {
+		prev := tok
+		tok = declToken(d)
+		// If the declaration token changed (e.g., from CONST to TYPE)
+		// or the next declaration has documentation associated with it,
+		// print an empty line between top-level declarations.
+		// (because p.linebreak is called with the position of d, which
+		// is past any documentation, the minimum requirement is satisfied
+		// even w/o the extra getDoc(d) nil-check - leave it in case the
+		// linebreak logic improves - there's already a TODO).
+		if len(p.output) > 0 {
+			// only print line break if we are not at the beginning of the output
+			// (i.e., we are not printing only a partial program)
+			min := 1
+			if prev != tok || getDoc(d) != nil {
+				min = 2
+			}
+			// start a new section if the next declaration is a function
+			// that spans multiple lines (see also issue #19544)
+			p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1)
+		}
+		p.decl(d)
+	}
+}
+
+// file prints the AST for an entire source file, ending with a final newline.
+func (p *printer) file(src *ast.File) {
+	p.setComment(src.Doc)
+	p.print(src.Pos(), token.PACKAGE, blank)
+	p.expr(src.Name)
+	p.declList(src.Decls)
+	p.print(newline)
+}
diff --git a/internal/backport/go/printer/performance_test.go b/internal/backport/go/printer/performance_test.go
new file mode 100644
index 0000000..ea3eac4
--- /dev/null
+++ b/internal/backport/go/printer/performance_test.go
@@ -0,0 +1,58 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a simple printer performance benchmark:
+// go test -bench=BenchmarkPrint
+
+package printer
+
+import (
+	"bytes"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"io"
+	"log"
+	"os"
+	"testing"
+)
+
+var testfile *ast.File
+
// testprint pretty-prints file to out using a fixed benchmark
// configuration and aborts the test binary on error.
func testprint(out io.Writer, file *ast.File) {
	if err := (&Config{TabIndent | UseSpaces | normalizeNumbers, 8, 0}).Fprint(out, fset, file); err != nil {
		log.Fatalf("print error: %s", err)
	}
}
+
// cannot initialize in init because (printer) Fprint launches goroutines.
//
// initialize parses testdata/parser.go, verifies that printing it is
// idempotent (sanity check for the benchmark input), and stores the
// parsed file in testfile for use by BenchmarkPrint.
func initialize() {
	const filename = "testdata/parser.go"

	src, err := os.ReadFile(filename)
	if err != nil {
		log.Fatalf("%s", err)
	}

	file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
	if err != nil {
		log.Fatalf("%s", err)
	}

	// sanity check: printing the parsed file must reproduce the source
	var buf bytes.Buffer
	testprint(&buf, file)
	if !bytes.Equal(buf.Bytes(), src) {
		log.Fatalf("print error: %s not idempotent", filename)
	}

	testfile = file
}
+
// BenchmarkPrint measures the time to pretty-print the parsed
// benchmark file; the parse itself happens once, lazily.
func BenchmarkPrint(b *testing.B) {
	if testfile == nil {
		initialize()
	}
	for i := 0; i < b.N; i++ {
		testprint(io.Discard, testfile)
	}
}
diff --git a/internal/backport/go/printer/printer.go b/internal/backport/go/printer/printer.go
new file mode 100644
index 0000000..a8aa7cf
--- /dev/null
+++ b/internal/backport/go/printer/printer.go
@@ -0,0 +1,1380 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package printer implements printing of AST nodes.
+package printer
+
+import (
+	"fmt"
+	"go/build/constraint"
+	"io"
+	"os"
+	"strings"
+	"text/tabwriter"
+	"unicode"
+
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
const (
	maxNewlines = 2     // max. number of newlines between source text
	debug       = false // enable for debugging
	infinity    = 1 << 30
)

// A whiteSpace value encodes a pending whitespace or indentation
// operation in the printer's whitespace buffer.
type whiteSpace byte

const (
	ignore   = whiteSpace(0)
	blank    = whiteSpace(' ')
	vtab     = whiteSpace('\v')
	newline  = whiteSpace('\n')
	formfeed = whiteSpace('\f')
	indent   = whiteSpace('>')
	unindent = whiteSpace('<')
)

// A pmode value represents the current printer mode.
type pmode int

const (
	noExtraBlank     pmode = 1 << iota // disables extra blank after /*-style comment
	noExtraLinebreak                   // disables extra line break after /*-style comment
)

// commentInfo describes the printer's current comment group;
// it is set up by nextComment.
type commentInfo struct {
	cindex         int               // current comment index
	comment        *ast.CommentGroup // = printer.comments[cindex]; or nil
	commentOffset  int               // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity
	commentNewline bool              // true if the comment group contains newlines
}
+
// A printer holds all state needed to pretty-print an AST;
// its output is post-processed by a tabwriter (see printer.go's
// trimmer/Fprint machinery).
type printer struct {
	// Configuration (does not change after initialization)
	Config
	fset *token.FileSet

	// Current state
	output       []byte       // raw printer result
	indent       int          // current indentation
	level        int          // level == 0: outside composite literal; level > 0: inside composite literal
	mode         pmode        // current printer mode
	endAlignment bool         // if set, terminate alignment immediately
	impliedSemi  bool         // if set, a linebreak implies a semicolon
	lastTok      token.Token  // last token printed (token.ILLEGAL if it's whitespace)
	prevOpen     token.Token  // previous non-brace "open" token (, [, or token.ILLEGAL
	wsbuf        []whiteSpace // delayed white space
	goBuild      []int        // start index of all //go:build comments in output
	plusBuild    []int        // start index of all // +build comments in output

	// Positions
	// The out position differs from the pos position when the result
	// formatting differs from the source formatting (in the amount of
	// white space). If there's a difference and SourcePos is set in
	// ConfigMode, //line directives are used in the output to restore
	// original source positions for a reader.
	pos     token.Position // current position in AST (source) space
	out     token.Position // current position in output space
	last    token.Position // value of pos after calling writeString
	linePtr *int           // if set, record out.Line for the next token in *linePtr

	// The list of all source comments, in order of appearance.
	comments        []*ast.CommentGroup // may be nil
	useNodeComments bool                // if not set, ignore lead and line comments of nodes

	// Information about p.comments[p.cindex]; set up by nextComment.
	commentInfo

	// Cache of already computed node sizes.
	nodeSizes map[ast.Node]int

	// Cache of most recently computed line position.
	cachedPos  token.Pos
	cachedLine int // line corresponding to cachedPos
}
+
// init (re)initializes the printer for a new print operation with the
// given configuration, file set, and shared node size cache.
func (p *printer) init(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
	p.Config = *cfg
	p.fset = fset
	p.pos = token.Position{Line: 1, Column: 1}
	p.out = token.Position{Line: 1, Column: 1}
	p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
	p.nodeSizes = nodeSizes
	p.cachedPos = -1 // invalidate the line cache
}
+
+func (p *printer) internalError(msg ...interface{}) {
+	if debug {
+		fmt.Print(p.pos.String() + ": ")
+		fmt.Println(msg...)
+		panic("golang.org/x/website/internal/backport/go/printer")
+	}
+}
+
+// commentsHaveNewline reports whether a list of comments belonging to
+// an *ast.CommentGroup contains newlines. Because the position information
+// may only be partially correct, we also have to read the comment text.
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
+	// len(list) > 0
+	line := p.lineFor(list[0].Pos())
+	for i, c := range list {
+		if i > 0 && p.lineFor(list[i].Pos()) != line {
+			// not all comments on the same line
+			return true
+		}
+		if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
+			return true
+		}
+	}
+	_ = line
+	return false
+}
+
// nextComment advances to the next non-empty comment group, updating
// the commentInfo fields; when no comments remain, it marks the end
// by setting commentOffset to infinity.
func (p *printer) nextComment() {
	for p.cindex < len(p.comments) {
		c := p.comments[p.cindex]
		p.cindex++
		if list := c.List; len(list) > 0 {
			p.comment = c
			p.commentOffset = p.posFor(list[0].Pos()).Offset
			p.commentNewline = p.commentsHaveNewline(list)
			return
		}
		// we should not reach here (correct ASTs don't have empty
		// ast.CommentGroup nodes), but be conservative and try again
	}
	// no more comments
	p.commentOffset = infinity
}
+
// commentBefore reports whether the current comment group occurs
// before the next position in the source code and printing it does
// not introduce implicit semicolons.
func (p *printer) commentBefore(next token.Position) bool {
	return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
}
+
// commentSizeBefore returns the estimated size of the
// comments on the same line before the next position.
func (p *printer) commentSizeBefore(next token.Position) int {
	// save/restore current p.commentInfo (p.nextComment() modifies it)
	defer func(info commentInfo) {
		p.commentInfo = info
	}(p.commentInfo)

	// sum the text lengths of all comment groups before next
	size := 0
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			size += len(c.Text)
		}
		p.nextComment()
	}
	return size
}
+
// recordLine records the output line number for the next non-whitespace
// token in *linePtr. It is used to compute an accurate line number for a
// formatted construct, independent of pending (not yet emitted) whitespace
// or comments.
func (p *printer) recordLine(linePtr *int) {
	p.linePtr = linePtr
}
+
// linesFrom returns the number of output lines between the current
// output line and the line argument, ignoring any pending (not yet
// emitted) whitespace or comments. It is used to compute an accurate
// size (in number of lines) for a formatted construct.
func (p *printer) linesFrom(line int) int {
	return p.out.Line - line
}
+
// posFor returns the (non-adjusted) source position for pos.
func (p *printer) posFor(pos token.Pos) token.Position {
	// not used frequently enough to cache entire token.Position
	return p.fset.PositionFor(pos, false /* absolute position */)
}
+
// lineFor returns the source line number for pos, memoizing the most
// recent lookup in cachedPos/cachedLine.
func (p *printer) lineFor(pos token.Pos) int {
	if pos != p.cachedPos {
		p.cachedPos = pos
		p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line
	}
	return p.cachedLine
}
+
// writeLineDirective writes a //line directive if necessary
// (i.e., when the output position no longer matches the source
// position for pos).
func (p *printer) writeLineDirective(pos token.Position) {
	if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
		p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
		p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
		p.output = append(p.output, tabwriter.Escape)
		// p.out must match the //line directive
		p.out.Filename = pos.Filename
		p.out.Line = pos.Line
	}
}
+
// writeIndent writes indentation (one hard tab per indentation level)
// and updates the position bookkeeping accordingly.
func (p *printer) writeIndent() {
	// use "hard" htabs - indentation columns
	// must not be discarded by the tabwriter
	n := p.Config.Indent + p.indent // include base indentation
	for i := 0; i < n; i++ {
		p.output = append(p.output, '\t')
	}

	// update positions
	p.pos.Offset += n
	p.pos.Column += n
	p.out.Column += n
}
+
// writeByte writes ch n times to p.output and updates p.pos.
// Only used to write formatting (white space) characters.
func (p *printer) writeByte(ch byte, n int) {
	if p.endAlignment {
		// Ignore any alignment control character;
		// and at the end of the line, break with
		// a formfeed to indicate termination of
		// existing columns.
		switch ch {
		case '\t', '\v':
			ch = ' '
		case '\n', '\f':
			ch = '\f'
			p.endAlignment = false
		}
	}

	if p.out.Column == 1 {
		// no need to write line directives before white space
		p.writeIndent()
	}

	for i := 0; i < n; i++ {
		p.output = append(p.output, ch)
	}

	// update positions
	p.pos.Offset += n
	if ch == '\n' || ch == '\f' {
		// line breaks reset the column and advance the line counters
		p.pos.Line += n
		p.out.Line += n
		p.pos.Column = 1
		p.out.Column = 1
		return
	}
	p.pos.Column += n
	p.out.Column += n
}
+
// writeString writes the string s to p.output and updates p.pos, p.out,
// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
// to protect s from being interpreted by the tabwriter.
//
// Note: writeString is only used to write Go tokens, literals, and
// comments, all of which must be written literally. Thus, it is correct
// to always set isLit = true. However, setting it explicitly only when
// needed (i.e., when we don't know that s contains no tabs or line breaks)
// avoids processing extra escape characters and reduces run time of the
// printer benchmark by up to 10%.
func (p *printer) writeString(pos token.Position, s string, isLit bool) {
	if p.out.Column == 1 {
		// at the beginning of a line: emit //line directive (if enabled)
		// and indentation before the token text
		if p.Config.Mode&SourcePos != 0 {
			p.writeLineDirective(pos)
		}
		p.writeIndent()
	}

	if pos.IsValid() {
		// update p.pos (if pos is invalid, continue with existing p.pos)
		// Note: Must do this after handling line beginnings because
		// writeIndent updates p.pos if there's indentation, but p.pos
		// is the position of s.
		p.pos = pos
	}

	if isLit {
		// Protect s such that is passes through the tabwriter
		// unchanged. Note that valid Go programs cannot contain
		// tabwriter.Escape bytes since they do not appear in legal
		// UTF-8 sequences.
		p.output = append(p.output, tabwriter.Escape)
	}

	if debug {
		p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
	}
	p.output = append(p.output, s...)

	// update positions
	nlines := 0
	var li int // index of last newline; valid if nlines > 0
	for i := 0; i < len(s); i++ {
		// Raw string literals may contain any character except back quote (`).
		if ch := s[i]; ch == '\n' || ch == '\f' {
			// account for line break
			nlines++
			li = i
			// A line break inside a literal will break whatever column
			// formatting is in place; ignore any further alignment through
			// the end of the line.
			p.endAlignment = true
		}
	}
	p.pos.Offset += len(s)
	if nlines > 0 {
		p.pos.Line += nlines
		p.out.Line += nlines
		// column is measured from the character after the last newline
		c := len(s) - li
		p.pos.Column = c
		p.out.Column = c
	} else {
		p.pos.Column += len(s)
		p.out.Column += len(s)
	}

	if isLit {
		p.output = append(p.output, tabwriter.Escape)
	}

	p.last = p.pos
}
+
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and tok is the next token.
//
// NOTE(review): the statement order here is load-bearing — the
// whitespace buffer is partially consumed in place; do not reorder.
func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) {
	if len(p.output) == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines
		p.writeByte('\f', maxNewlines)
		return
	}

	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			sep := byte('\t')
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				sep = ' '
			}
			p.writeByte(sep, 1)
		}

	} else {
		// comment on a different line:
		// separate with at least one line break
		droppedLinebreak := false
		j := 0
		for i, ch := range p.wsbuf {
			switch ch {
			case blank, vtab:
				// ignore any horizontal whitespace before line breaks
				p.wsbuf[i] = ignore
				continue
			case indent:
				// apply pending indentation
				continue
			case unindent:
				// if this is not the last unindent, apply it
				// as it is (likely) belonging to the last
				// construct (e.g., a multi-line expression list)
				// and is not part of closing a block
				if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
					continue
				}
				// if the next token is not a closing }, apply the unindent
				// if it appears that the comment is aligned with the
				// token; otherwise assume the unindent is part of a
				// closing block and stop (this scenario appears with
				// comments before a case label where the comments
				// apply to the next case instead of the current one)
				if tok != token.RBRACE && pos.Column == next.Column {
					continue
				}
			case newline, formfeed:
				p.wsbuf[i] = ignore
				droppedLinebreak = prev == nil // record only if first comment of a group
			}
			j = i
			break
		}
		p.writeWhitespace(j)

		// determine number of linebreaks before the comment
		n := 0
		if pos.IsValid() && p.last.IsValid() {
			n = pos.Line - p.last.Line
			if n < 0 { // should never happen
				n = 0
			}
		}

		// at the package scope level only (p.indent == 0),
		// add an extra newline if we dropped one before:
		// this preserves a blank line before documentation
		// comments at the package scope level (issue 2570)
		if p.indent == 0 && droppedLinebreak {
			n++
		}

		// make sure there is at least one line break
		// if the previous comment was a line comment
		if n == 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}

		if n > 0 {
			// use formfeeds to break columns before a comment;
			// this is analogous to using formfeeds to separate
			// individual lines of /*-style comments
			p.writeByte('\f', nlimit(n))
		}
	}
}
+
// isBlank reports whether s consists entirely of white space
// (only tabs and blanks can appear in the printer's context).
func isBlank(s string) bool {
	for _, b := range []byte(s) {
		if b > ' ' {
			return false
		}
	}
	return true
}
+
// commonPrefix returns the longest shared prefix of a and b that
// consists only of white space and '*' characters.
func commonPrefix(a, b string) string {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	k := 0
	for k < n && a[k] == b[k] && (a[k] <= ' ' || a[k] == '*') {
		k++
	}
	return a[:k]
}
+
// trimRight returns s with trailing whitespace removed
// (Unicode-aware, per unicode.IsSpace).
func trimRight(s string) string {
	return strings.TrimRightFunc(s, unicode.IsSpace)
}
+
// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
// comment line is indented, all but the first line have some form of space prefix).
// The prefix is computed using heuristics such that is likely that the comment
// contents are nicely laid out after re-printing each line using the printer's
// current indentation. lines is modified in place.
func stripCommonPrefix(lines []string) {
	if len(lines) <= 1 {
		return // at most one line - nothing to do
	}
	// len(lines) > 1

	// The heuristic in this function tries to handle a few
	// common patterns of /*-style comments: Comments where
	// the opening /* and closing */ are aligned and the
	// rest of the comment text is aligned and indented with
	// blanks or tabs, cases with a vertical "line of stars"
	// on the left, and cases where the closing */ is on the
	// same line as the last comment text.

	// Compute maximum common white prefix of all but the first,
	// last, and blank lines, and replace blank lines with empty
	// lines (the first line starts with /* and has no prefix).
	// In cases where only the first and last lines are not blank,
	// such as two-line comments, or comments where all inner lines
	// are blank, consider the last line for the prefix computation
	// since otherwise the prefix would be empty.
	//
	// Note that the first and last line are never empty (they
	// contain the opening /* and closing */ respectively) and
	// thus they can be ignored by the blank line check.
	prefix := ""
	prefixSet := false
	if len(lines) > 2 {
		for i, line := range lines[1 : len(lines)-1] {
			if isBlank(line) {
				lines[1+i] = "" // range starts with lines[1]
			} else {
				if !prefixSet {
					prefix = line
					prefixSet = true
				}
				prefix = commonPrefix(prefix, line)
			}

		}
	}
	// If we don't have a prefix yet, consider the last line.
	if !prefixSet {
		line := lines[len(lines)-1]
		prefix = commonPrefix(line, line)
	}

	/*
	 * Check for vertical "line of stars" and correct prefix accordingly.
	 */
	lineOfStars := false
	if p, _, ok := stringsCut(prefix, "*"); ok {
		// remove trailing blank from prefix so stars remain aligned
		prefix = strings.TrimSuffix(p, " ")
		lineOfStars = true
	} else {
		// No line of stars present.
		// Determine the white space on the first line after the /*
		// and before the beginning of the comment text, assume two
		// blanks instead of the /* unless the first character after
		// the /* is a tab. If the first comment line is empty but
		// for the opening /*, assume up to 3 blanks or a tab. This
		// whitespace may be found as suffix in the common prefix.
		first := lines[0]
		if isBlank(first[2:]) {
			// no comment text on the first line:
			// reduce prefix by up to 3 blanks or a tab
			// if present - this keeps comment text indented
			// relative to the /* and */'s if it was indented
			// in the first place
			i := len(prefix)
			for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
				i--
			}
			if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
				i--
			}
			prefix = prefix[0:i]
		} else {
			// comment text on the first line
			suffix := make([]byte, len(first))
			n := 2 // start after opening /*
			for n < len(first) && first[n] <= ' ' {
				suffix[n] = first[n]
				n++
			}
			if n > 2 && suffix[2] == '\t' {
				// assume the '\t' compensates for the /*
				suffix = suffix[2:n]
			} else {
				// otherwise assume two blanks
				suffix[0], suffix[1] = ' ', ' '
				suffix = suffix[0:n]
			}
			// Shorten the computed common prefix by the length of
			// suffix, if it is found as suffix of the prefix.
			prefix = strings.TrimSuffix(prefix, string(suffix))
		}
	}

	// Handle last line: If it only contains a closing */, align it
	// with the opening /*, otherwise align the text with the other
	// lines.
	last := lines[len(lines)-1]
	closing := "*/"
	before, _, _ := stringsCut(last, closing) // closing always present
	if isBlank(before) {
		// last line only contains closing */
		if lineOfStars {
			closing = " */" // add blank to align final star
		}
		lines[len(lines)-1] = prefix + closing
	} else {
		// last line contains more comment text - assume
		// it is aligned like the other lines and include
		// in prefix computation
		prefix = commonPrefix(prefix, last)
	}

	// Remove the common prefix from all but the first and empty lines.
	for i, line := range lines {
		if i > 0 && line != "" {
			lines[i] = line[len(prefix):]
		}
	}
}
+
// writeComment writes a single comment, also recording the output
// offsets of //go:build and // +build lines (used later to fix up
// build constraints) and re-indenting /*-style comments.
func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text
	pos := p.posFor(comment.Pos())

	const linePrefix = "//line "
	if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
		// Possibly a //-style line directive.
		// Suspend indentation temporarily to keep line directive valid.
		defer func(indent int) { p.indent = indent }(p.indent)
		p.indent = 0
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		if constraint.IsGoBuild(text) {
			p.goBuild = append(p.goBuild, len(p.output))
		} else if constraint.IsPlusBuild(text) {
			p.plusBuild = append(p.plusBuild, len(p.output))
		}
		p.writeString(pos, trimRight(text), true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := strings.Split(text, "\n")

	// The comment started in the first column but is going
	// to be indented. For an idempotent result, add indentation
	// to all lines such that they look like they were indented
	// before - this will make sure the common prefix computation
	// is the same independent of how many times formatting is
	// applied (was issue 1835).
	if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
		for i, line := range lines[1:] {
			lines[1+i] = "   " + line
		}
	}

	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f', 1)
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeString(pos, trimRight(line), true)
		}
	}
}
+
// writeCommentSuffix writes a line break after a comment if indicated
// and processes any leftover indentation information. If a line break
// is needed, the kind of break (newline vs formfeed) depends on the
// pending whitespace. The writeCommentSuffix result indicates if a
// newline was written or if a formfeed was dropped from the whitespace
// buffer.
func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
	for i, ch := range p.wsbuf {
		switch ch {
		case blank, vtab:
			// ignore trailing whitespace
			p.wsbuf[i] = ignore
		case indent, unindent:
			// don't lose indentation information
		case newline, formfeed:
			// if we need a line break, keep exactly one
			// but remember if we dropped any formfeeds
			if needsLinebreak {
				needsLinebreak = false
				wroteNewline = true
			} else {
				if ch == formfeed {
					droppedFF = true
				}
				p.wsbuf[i] = ignore
			}
		}
	}
	p.writeWhitespace(len(p.wsbuf))

	// make sure we have a line break
	if needsLinebreak {
		p.writeByte('\n', 1)
		wroteNewline = true
	}

	return
}
+
+// containsLinebreak reports whether the whitespace buffer contains any line breaks.
+func (p *printer) containsLinebreak() bool {
+	for _, ch := range p.wsbuf {
+		if ch == newline || ch == formfeed {
+			return true
+		}
+	}
+	return false
+}
+
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. The intersperseComments result indicates if a
// newline was written or if a formfeed was dropped from the whitespace buffer.
// It must only be called when p.commentBefore(next) is true.
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	var last *ast.Comment
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok)
			p.writeComment(c)
			last = c
		}
		p.nextComment()
	}

	if last != nil {
		// If the last comment is a /*-style comment and the next item
		// follows on the same line but is not a comma, and not a "closing"
		// token immediately following its corresponding "opening" token,
		// add an extra separator unless explicitly disabled. Use a blank
		// as separator unless we have pending linebreaks, they are not
		// disabled, and we are outside a composite literal, in which case
		// we want a linebreak (issue 15137).
		// TODO(gri) This has become overly complicated. We should be able
		// to track whether we're inside an expression or statement and
		// use that information to decide more directly.
		needsLinebreak := false
		if p.mode&noExtraBlank == 0 &&
			last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
			tok != token.COMMA &&
			(tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
			(tok != token.RBRACK || p.prevOpen == token.LBRACK) {
			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 {
				needsLinebreak = true
			} else {
				p.writeByte(' ', 1)
			}
		}
		// Ensure that there is a line break after a //-style comment,
		// before EOF, and before a closing '}' unless explicitly disabled.
		if last.Text[1] == '/' ||
			tok == token.EOF ||
			tok == token.RBRACE && p.mode&noExtraLinebreak == 0 {
			needsLinebreak = true
		}
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return
}
+
// writeWhitespace writes the first n whitespace entries of the
// whitespace buffer and shifts the remaining entries down.
func (p *printer) writeWhitespace(n int) {
	// write entries
	for i := 0; i < n; i++ {
		switch ch := p.wsbuf[i]; ch {
		case ignore:
			// ignore!
		case indent:
			p.indent++
		case unindent:
			p.indent--
			if p.indent < 0 {
				p.internalError("negative indentation:", p.indent)
				p.indent = 0
			}
		case newline, formfeed:
			// A line break immediately followed by a "correcting"
			// unindent is swapped with the unindent - this permits
			// proper label positioning. If a comment is between
			// the line break and the label, the unindent is not
			// part of the comment whitespace prefix and the comment
			// will be positioned correctly indented.
			if i+1 < n && p.wsbuf[i+1] == unindent {
				// Use a formfeed to terminate the current section.
				// Otherwise, a long label name on the next line leading
				// to a wide column may increase the indentation column
				// of lines before the label; effectively leading to wrong
				// indentation.
				p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
				i-- // do it again
				continue
			}
			fallthrough
		default:
			p.writeByte(byte(ch), 1)
		}
	}

	// shift remaining entries down
	l := copy(p.wsbuf, p.wsbuf[n:])
	p.wsbuf = p.wsbuf[:l]
}
+
+// ----------------------------------------------------------------------------
+// Printing interface
+
+// nlimit limits n to maxNewlines.
+func nlimit(n int) int {
+	if n > maxNewlines {
+		n = maxNewlines
+	}
+	return n
+}
+
+func mayCombine(prev token.Token, next byte) (b bool) {
+	switch prev {
+	case token.INT:
+		b = next == '.' // 1.
+	case token.ADD:
+		b = next == '+' // ++
+	case token.SUB:
+		b = next == '-' // --
+	case token.QUO:
+		b = next == '*' // /*
+	case token.LSS:
+		b = next == '-' || next == '<' // <- or <<
+	case token.AND:
+		b = next == '&' || next == '^' // && or &^
+	}
+	return
+}
+
// print prints a list of "items" (roughly corresponding to syntactic
// tokens, but also including whitespace and formatting information).
// It is the only print function that should be called directly from
// any of the AST printing functions in nodes.go.
//
// Whitespace is accumulated until a non-whitespace token appears. Any
// comments that need to appear before that token are printed first,
// taking into account the amount and structure of any pending white-
// space for best comment placement. Then, any leftover whitespace is
// printed, followed by the actual token.
func (p *printer) print(args ...interface{}) {
	for _, arg := range args {
		// information about the current arg
		var data string
		var isLit bool
		var impliedSemi bool // value for p.impliedSemi after this arg

		// record previous opening token, if any
		switch p.lastTok {
		case token.ILLEGAL:
			// ignore (white space)
		case token.LPAREN, token.LBRACK:
			p.prevOpen = p.lastTok
		default:
			// other tokens followed any opening token
			p.prevOpen = token.ILLEGAL
		}

		switch x := arg.(type) {
		case pmode:
			// toggle printer mode
			p.mode ^= x
			continue

		case whiteSpace:
			if x == ignore {
				// don't add ignore's to the buffer; they
				// may screw up "correcting" unindents (see
				// LabeledStmt)
				continue
			}
			i := len(p.wsbuf)
			if i == cap(p.wsbuf) {
				// Whitespace sequences are very short so this should
				// never happen. Handle gracefully (but possibly with
				// bad comment placement) if it does happen.
				p.writeWhitespace(i)
				i = 0
			}
			p.wsbuf = p.wsbuf[0 : i+1]
			p.wsbuf[i] = x
			if x == newline || x == formfeed {
				// newlines affect the current state (p.impliedSemi)
				// and not the state after printing arg (impliedSemi)
				// because comments can be interspersed before the arg
				// in this case
				p.impliedSemi = false
			}
			p.lastTok = token.ILLEGAL
			continue

		case *ast.Ident:
			data = x.Name
			impliedSemi = true
			p.lastTok = token.IDENT

		case *ast.BasicLit:
			data = x.Value
			isLit = true
			impliedSemi = true
			p.lastTok = x.Kind

		case token.Token:
			s := x.String()
			if mayCombine(p.lastTok, s[0]) {
				// the previous and the current token must be
				// separated by a blank otherwise they combine
				// into a different incorrect token sequence
				// (except for token.INT followed by a '.' this
				// should never happen because it is taken care
				// of via binary expression formatting)
				if len(p.wsbuf) != 0 {
					p.internalError("whitespace buffer not empty")
				}
				p.wsbuf = p.wsbuf[0:1]
				p.wsbuf[0] = ' '
			}
			data = s
			// some keywords followed by a newline imply a semicolon
			switch x {
			case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
				token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
				impliedSemi = true
			}
			p.lastTok = x

		case token.Pos:
			if x.IsValid() {
				p.pos = p.posFor(x) // accurate position of next item
			}
			continue

		case string:
			// incorrect AST - print error message
			data = x
			isLit = true
			impliedSemi = true
			p.lastTok = token.STRING

		default:
			fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
			panic("golang.org/x/website/internal/backport/go/printer type")
		}
		// data != ""

		next := p.pos // estimated/accurate position of next item
		wroteNewline, droppedFF := p.flush(next, p.lastTok)

		// intersperse extra newlines if present in the source and
		// if they don't cause extra semicolons (don't do this in
		// flush as it will cause extra newlines at the end of a file)
		if !p.impliedSemi {
			n := nlimit(next.Line - p.pos.Line)
			// don't exceed maxNewlines if we already wrote one
			if wroteNewline && n == maxNewlines {
				n = maxNewlines - 1
			}
			if n > 0 {
				ch := byte('\n')
				if droppedFF {
					ch = '\f' // use formfeed since we dropped one before
				}
				p.writeByte(ch, n)
				impliedSemi = false
			}
		}

		// the next token starts now - record its line number if requested
		if p.linePtr != nil {
			*p.linePtr = p.out.Line
			p.linePtr = nil
		}

		p.writeString(next, data, isLit)
		p.impliedSemi = impliedSemi
	}
}
+
+// flush prints any pending comments and whitespace occurring textually
+// before the position of the next token tok. The flush result indicates
+// if a newline was written or if a formfeed was dropped from the whitespace
+// buffer.
+func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
+	if !p.commentBefore(next) {
+		// no comments pending before next: just drain the whitespace buffer
+		p.writeWhitespace(len(p.wsbuf))
+		return false, false
+	}
+	// interleave the pending comments with the buffered whitespace
+	return p.intersperseComments(next, tok)
+}
+
+// getDoc returns the doc comment (ast.CommentGroup) associated with n, if any.
+func getDoc(n ast.Node) *ast.CommentGroup {
+	switch n := n.(type) {
+	case *ast.Field:
+		return n.Doc
+	case *ast.ImportSpec:
+		return n.Doc
+	case *ast.ValueSpec:
+		return n.Doc
+	case *ast.TypeSpec:
+		return n.Doc
+	case *ast.GenDecl:
+		return n.Doc
+	case *ast.FuncDecl:
+		return n.Doc
+	case *ast.File:
+		return n.Doc
+	}
+	return nil
+}
+
+// getLastComment returns the last trailing comment (ast.CommentGroup)
+// associated with n, if any.
+func getLastComment(n ast.Node) *ast.CommentGroup {
+	switch n := n.(type) {
+	case *ast.Field:
+		return n.Comment
+	case *ast.ImportSpec:
+		return n.Comment
+	case *ast.ValueSpec:
+		return n.Comment
+	case *ast.TypeSpec:
+		return n.Comment
+	case *ast.GenDecl:
+		if len(n.Specs) > 0 {
+			// a declaration group's last comment is that of its last spec
+			return getLastComment(n.Specs[len(n.Specs)-1])
+		}
+	case *ast.File:
+		if len(n.Comments) > 0 {
+			return n.Comments[len(n.Comments)-1]
+		}
+	}
+	return nil
+}
+
+// printNode prints the given node, which must be an *ast.File, *CommentedNode,
+// []ast.Decl, []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl,
+// ast.Spec, or ast.Stmt; it returns an error for any other type.
+func (p *printer) printNode(node interface{}) error {
+	// unpack *CommentedNode, if any
+	var comments []*ast.CommentGroup
+	if cnode, ok := node.(*CommentedNode); ok {
+		node = cnode.Node
+		comments = cnode.Comments
+	}
+
+	if comments != nil {
+		// commented node - restrict comment list to relevant range
+		n, ok := node.(ast.Node)
+		if !ok {
+			goto unsupported
+		}
+		beg := n.Pos()
+		end := n.End()
+		// if the node has associated documentation,
+		// include that commentgroup in the range
+		// (the comment list is sorted in the order
+		// of the comment appearance in the source code)
+		if doc := getDoc(n); doc != nil {
+			beg = doc.Pos()
+		}
+		if com := getLastComment(n); com != nil {
+			if e := com.End(); e > end {
+				end = e
+			}
+		}
+		// token.Pos values are global offsets, we can
+		// compare them directly
+		i := 0
+		for i < len(comments) && comments[i].End() < beg {
+			i++
+		}
+		j := i
+		for j < len(comments) && comments[j].Pos() < end {
+			j++
+		}
+		if i < j {
+			p.comments = comments[i:j]
+		}
+	} else if n, ok := node.(*ast.File); ok {
+		// use ast.File comments, if any
+		p.comments = n.Comments
+	}
+
+	// if there are no comments, use node comments
+	p.useNodeComments = p.comments == nil
+
+	// get comments ready for use
+	p.nextComment()
+
+	p.print(pmode(0))
+
+	// format node
+	switch n := node.(type) {
+	case ast.Expr:
+		p.expr(n)
+	case ast.Stmt:
+		// A labeled statement will un-indent to position the label.
+		// Set p.indent to 1 so we don't get indent "underflow".
+		if _, ok := n.(*ast.LabeledStmt); ok {
+			p.indent = 1
+		}
+		p.stmt(n, false)
+	case ast.Decl:
+		p.decl(n)
+	case ast.Spec:
+		p.spec(n, 1, false)
+	case []ast.Stmt:
+		// A labeled statement will un-indent to position the label.
+		// Set p.indent to 1 so we don't get indent "underflow".
+		for _, s := range n {
+			if _, ok := s.(*ast.LabeledStmt); ok {
+				p.indent = 1
+			}
+		}
+		p.stmtList(n, 0, false)
+	case []ast.Decl:
+		p.declList(n)
+	case *ast.File:
+		p.file(n)
+	default:
+		goto unsupported
+	}
+
+	return nil
+
+unsupported:
+	return fmt.Errorf("golang.org/x/website/internal/backport/go/printer: unsupported node type %T", node)
+}
+
+// ----------------------------------------------------------------------------
+// Trimmer
+
+// A trimmer is an io.Writer filter for stripping tabwriter.Escape
+// characters, trailing blanks and tabs, and for converting formfeed
+// and vtab characters into newlines and htabs (in case no tabwriter
+// is used). Text bracketed by tabwriter.Escape characters is passed
+// through unchanged.
+type trimmer struct {
+	output io.Writer
+	state  int    // current state: inSpace, inEscape, or inText
+	space  []byte // accumulated, not-yet-written space characters
+}
+
+// trimmer is implemented as a state machine.
+// It can be in one of the following states:
+const (
+	inSpace  = iota // inside space
+	inEscape        // inside text bracketed by tabwriter.Escapes
+	inText          // inside text
+)
+
+// resetSpace returns the trimmer to the inSpace state with an empty
+// (pending) space buffer.
+func (p *trimmer) resetSpace() {
+	p.state = inSpace
+	p.space = p.space[0:0]
+}
+
+// Design note: It is tempting to eliminate extra blanks occurring in
+//              whitespace in this function as it could simplify some
+//              of the blanks logic in the node printing functions.
+//              However, this would mess up any formatting done by
+//              the tabwriter.
+
+// aNewline is the single byte written for each newline or formfeed.
+var aNewline = []byte("\n")
+
+// Write implements io.Writer. It filters data through the trimmer state
+// machine: trailing blanks/tabs before a newline are discarded, formfeeds
+// become newlines (vtabs were already mapped to htabs above), and text
+// between tabwriter.Escape bytes is forwarded verbatim.
+func (p *trimmer) Write(data []byte) (n int, err error) {
+	// invariants:
+	// p.state == inSpace:
+	//	p.space is unwritten
+	// p.state == inEscape, inText:
+	//	data[m:n] is unwritten
+	m := 0
+	var b byte
+	for n, b = range data {
+		if b == '\v' {
+			b = '\t' // convert to htab
+		}
+		switch p.state {
+		case inSpace:
+			switch b {
+			case '\t', ' ':
+				p.space = append(p.space, b)
+			case '\n', '\f':
+				p.resetSpace() // discard trailing space
+				_, err = p.output.Write(aNewline)
+			case tabwriter.Escape:
+				_, err = p.output.Write(p.space)
+				p.state = inEscape
+				m = n + 1 // +1: skip tabwriter.Escape
+			default:
+				_, err = p.output.Write(p.space)
+				p.state = inText
+				m = n
+			}
+		case inEscape:
+			if b == tabwriter.Escape {
+				_, err = p.output.Write(data[m:n])
+				p.resetSpace()
+			}
+		case inText:
+			switch b {
+			case '\t', ' ':
+				_, err = p.output.Write(data[m:n])
+				p.resetSpace()
+				p.space = append(p.space, b)
+			case '\n', '\f':
+				_, err = p.output.Write(data[m:n])
+				p.resetSpace()
+				if err == nil {
+					_, err = p.output.Write(aNewline)
+				}
+			case tabwriter.Escape:
+				_, err = p.output.Write(data[m:n])
+				p.state = inEscape
+				m = n + 1 // +1: skip tabwriter.Escape
+			}
+		default:
+			panic("unreachable")
+		}
+		if err != nil {
+			return
+		}
+	}
+	n = len(data)
+
+	// write any remaining (non-space) text at the end of data
+	switch p.state {
+	case inEscape, inText:
+		_, err = p.output.Write(data[m:n])
+		p.resetSpace()
+	}
+
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Public interface
+
+// A Mode value is a set of flags (or 0). They control printing.
+// Flags may be or'ed together.
+type Mode uint
+
+const (
+	RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
+	TabIndent                  // use tabs for indentation independent of UseSpaces
+	UseSpaces                  // use spaces instead of tabs for alignment
+	SourcePos                  // emit //line directives to preserve original source positions
+)
+
+// The mode below is not included in printer's public API because
+// editing code text is deemed out of scope. Because this mode is
+// unexported, it's also possible to modify or remove it based on
+// the evolving needs of go/format and cmd/gofmt without breaking
+// users. See discussion in CL 240683.
+const (
+	// normalizeNumbers means to canonicalize number
+	// literal prefixes and exponents while printing.
+	//
+	// This value is known in and used by go/format and cmd/gofmt.
+	// It is currently more convenient and performant for those
+	// packages to apply number normalization during printing,
+	// rather than by modifying the AST in advance.
+	normalizeNumbers Mode = 1 << 30
+)
+
+// A Config node controls the output of Fprint.
+type Config struct {
+	Mode     Mode // default: 0
+	Tabwidth int  // default: 8
+	Indent   int  // default: 0 (all code is indented at least by this much)
+}
+
+// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (err error) {
+	// print node
+	var p printer
+	p.init(cfg, fset, nodeSizes)
+	if err = p.printNode(node); err != nil {
+		return
+	}
+	// print outstanding comments
+	p.impliedSemi = false // EOF acts like a newline
+	p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
+
+	// output is buffered in p.output now.
+	// fix //go:build and // +build comments if needed.
+	p.fixGoBuildLines()
+
+	// redirect output through a trimmer to eliminate trailing whitespace
+	// (Input to a tabwriter must be untrimmed since trailing tabs provide
+	// formatting information. The tabwriter could provide trimming
+	// functionality but no tabwriter is used when RawFormat is set.)
+	output = &trimmer{output: output}
+
+	// redirect output through a tabwriter if necessary
+	if cfg.Mode&RawFormat == 0 {
+		minwidth := cfg.Tabwidth
+
+		padchar := byte('\t')
+		if cfg.Mode&UseSpaces != 0 {
+			padchar = ' '
+		}
+
+		twmode := tabwriter.DiscardEmptyColumns
+		if cfg.Mode&TabIndent != 0 {
+			minwidth = 0
+			twmode |= tabwriter.TabIndent
+		}
+
+		output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
+	}
+
+	// write printer result via tabwriter/trimmer to output
+	if _, err = output.Write(p.output); err != nil {
+		return
+	}
+
+	// flush tabwriter, if any
+	if tw, _ := output.(*tabwriter.Writer); tw != nil {
+		err = tw.Flush()
+	}
+
+	return
+}
+
+// A CommentedNode bundles an AST node and corresponding comments.
+// It may be provided as argument to any of the Fprint functions.
+type CommentedNode struct {
+	Node     interface{} // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
+	Comments []*ast.CommentGroup
+}
+
+// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
+// Position information is interpreted relative to the file set fset.
+// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
+// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
+// Each call starts with a fresh (empty) node-size cache.
+func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
+	return cfg.fprint(output, fset, node, make(map[ast.Node]int))
+}
+
+// Fprint "pretty-prints" an AST node to output.
+// It calls Config.Fprint with default settings (Tabwidth 8).
+// Note that gofmt uses tabs for indentation but spaces for alignment;
+// use format.Node (package go/format) for output that matches gofmt.
+func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
+	return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
+}
+
+// stringsCut slices s around the first instance of sep, returning the text
+// before and after sep. The found result reports whether sep appears in s.
+// If sep does not appear in s, stringsCut returns s, "", false.
+// (Local substitute for strings.Cut, unavailable before Go 1.18.)
+func stringsCut(s, sep string) (before, after string, found bool) {
+	i := strings.Index(s, sep)
+	if i < 0 {
+		// no separator: everything is "before"
+		return s, "", false
+	}
+	return s[:i], s[i+len(sep):], true
+}
diff --git a/internal/backport/go/printer/printer_test.go b/internal/backport/go/printer/printer_test.go
new file mode 100644
index 0000000..aed4ee6
--- /dev/null
+++ b/internal/backport/go/printer/printer_test.go
@@ -0,0 +1,823 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+)
+
+const (
+	dataDir  = "testdata"
+	tabwidth = 8
+)
+
+var update = flag.Bool("update", false, "update golden files")
+
+var fset = token.NewFileSet()
+
+// A checkMode controls the details of a golden-file check.
+type checkMode uint
+
+const (
+	export checkMode = 1 << iota // print exported declarations only (ast.FileExports)
+	rawFormat                    // print with printer mode RawFormat
+	normNumber                   // print with printer mode normalizeNumbers
+	idempotent                   // formatting the golden file must reproduce it
+	allowTypeParams
+)
+
+// format parses src, prints the corresponding AST, verifies the resulting
+// src is syntactically correct, and returns the resulting src or an error
+// if any. Positions are recorded in the package-level fset.
+func format(src []byte, mode checkMode) ([]byte, error) {
+	// parse src
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		return nil, fmt.Errorf("parse: %s\n%s", err, src)
+	}
+
+	// filter exports if necessary
+	if mode&export != 0 {
+		ast.FileExports(f) // ignore result
+		f.Comments = nil   // don't print comments that are not in AST
+	}
+
+	// determine printer configuration
+	cfg := Config{Tabwidth: tabwidth}
+	if mode&rawFormat != 0 {
+		cfg.Mode |= RawFormat
+	}
+	if mode&normNumber != 0 {
+		cfg.Mode |= normalizeNumbers
+	}
+
+	// print AST
+	var buf bytes.Buffer
+	if err := cfg.Fprint(&buf, fset, f); err != nil {
+		return nil, fmt.Errorf("print: %s", err)
+	}
+
+	// make sure formatted output is syntactically correct
+	res := buf.Bytes()
+	if _, err := parser.ParseFile(fset, "", res, parser.ParseComments); err != nil {
+		return nil, fmt.Errorf("re-parse: %s\n%s", err, buf.Bytes())
+	}
+
+	return res, nil
+}
+
+// lineAt returns the line in text starting at offset offs,
+// excluding any terminating newline.
+func lineAt(text []byte, offs int) []byte {
+	rest := text[offs:]
+	if i := bytes.IndexByte(rest, '\n'); i >= 0 {
+		return rest[:i]
+	}
+	return rest
+}
+
+// diff compares a and b and returns nil if they are equal; otherwise it
+// returns an error whose message reports the first differing line and
+// includes both complete texts.
+func diff(aname, bname string, a, b []byte) error {
+	if bytes.Equal(a, b) {
+		return nil
+	}
+
+	var msg bytes.Buffer // holds the long error message
+
+	// report a length change, if any
+	if len(a) != len(b) {
+		fmt.Fprintf(&msg, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+	}
+
+	// scan for the first differing byte, tracking line/column as we go
+	line := 1 // current 1-based line number
+	offs := 0 // offset of the start of the current line
+	for i := 0; i < len(a) && i < len(b); i++ {
+		ch := a[i]
+		if ch != b[i] {
+			fmt.Fprintf(&msg, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
+			fmt.Fprintf(&msg, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+			fmt.Fprintf(&msg, "\n\n")
+			break
+		}
+		if ch == '\n' {
+			line++
+			offs = i + 1
+		}
+	}
+
+	fmt.Fprintf(&msg, "\n%s:\n%s\n%s:\n%s", aname, a, bname, b)
+	return errors.New(msg.String())
+}
+
+// runcheck reads source, formats it according to mode, and compares the
+// result against the golden file (or rewrites the golden file if -update
+// is set). With the idempotent flag, it also re-formats the golden file
+// and requires the output to be unchanged.
+func runcheck(t *testing.T, source, golden string, mode checkMode) {
+	src, err := os.ReadFile(source)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	res, err := format(src, mode)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// update golden files if necessary
+	if *update {
+		if err := os.WriteFile(golden, res, 0644); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+
+	// get golden
+	gld, err := os.ReadFile(golden)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// formatted source and golden must be the same
+	if err := diff(source, golden, res, gld); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if mode&idempotent != 0 {
+		// formatting golden must be idempotent
+		// (This is very difficult to achieve in general and for now
+		// it is only checked for files explicitly marked as such.)
+		res, err = format(gld, mode)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		if err := diff(golden, fmt.Sprintf("format(%s)", golden), gld, res); err != nil {
+			t.Errorf("golden is not idempotent: %s", err)
+		}
+	}
+}
+
+// check runs runcheck in a goroutine with a 10-second watchdog and fails
+// the test if the check does not finish in time.
+func check(t *testing.T, source, golden string, mode checkMode) {
+	// run the test
+	cc := make(chan int, 1)
+	go func() {
+		// NOTE(review): this goroutine is not cancelled on timeout and may
+		// keep running (and reporting) after the timeout fires — acceptable
+		// in a test binary, but worth confirming if flakes appear.
+		runcheck(t, source, golden, mode)
+		cc <- 0
+	}()
+
+	// wait with timeout
+	select {
+	case <-time.After(10 * time.Second): // plenty of a safety margin, even for very slow machines
+		// test running past time out
+		t.Errorf("%s: running too slowly", source)
+	case <-cc:
+		// test finished within allotted time margin
+	}
+}
+
+// An entry describes one source/golden file pair and the mode to check it with.
+type entry struct {
+	source, golden string
+	mode           checkMode
+}
+
+// Use go test -update to create/update the respective golden files.
+var data = []entry{
+	{"empty.input", "empty.golden", idempotent},
+	{"comments.input", "comments.golden", 0},
+	{"comments.input", "comments.x", export},
+	{"comments2.input", "comments2.golden", idempotent},
+	{"alignment.input", "alignment.golden", idempotent},
+	{"linebreaks.input", "linebreaks.golden", idempotent},
+	{"expressions.input", "expressions.golden", idempotent},
+	{"expressions.input", "expressions.raw", rawFormat | idempotent},
+	{"declarations.input", "declarations.golden", 0},
+	{"statements.input", "statements.golden", 0},
+	{"slow.input", "slow.golden", idempotent},
+	{"complit.input", "complit.x", export},
+	{"go2numbers.input", "go2numbers.golden", idempotent},
+	{"go2numbers.input", "go2numbers.norm", normNumber | idempotent},
+	{"generics.input", "generics.golden", idempotent | allowTypeParams},
+	{"gobuild1.input", "gobuild1.golden", idempotent},
+	{"gobuild2.input", "gobuild2.golden", idempotent},
+	{"gobuild3.input", "gobuild3.golden", idempotent},
+	{"gobuild4.input", "gobuild4.golden", idempotent},
+	{"gobuild5.input", "gobuild5.golden", idempotent},
+	{"gobuild6.input", "gobuild6.golden", idempotent},
+	{"gobuild7.input", "gobuild7.golden", idempotent},
+}
+
+// TestFiles runs the golden-file checks listed in data, in parallel.
+func TestFiles(t *testing.T) {
+	t.Parallel()
+	for _, e := range data {
+		source := filepath.Join(dataDir, e.source)
+		golden := filepath.Join(dataDir, e.golden)
+		mode := e.mode
+		t.Run(e.source, func(t *testing.T) {
+			t.Parallel()
+			check(t, source, golden, mode)
+			// TODO(gri) check that golden is idempotent
+			//check(t, golden, golden, e.mode)
+		})
+	}
+}
+
+// TestLineComments, using a simple test case, checks that consecutive line
+// comments are properly terminated with a newline even if the AST position
+// information is incorrect.
+func TestLineComments(t *testing.T) {
+	const src = `// comment 1
+	// comment 2
+	// comment 3
+	package main
+	`
+
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		panic(err) // error in test
+	}
+
+	var buf bytes.Buffer
+	fset = token.NewFileSet() // use the wrong file set
+	Fprint(&buf, fset, f)
+
+	nlines := 0
+	for _, ch := range buf.Bytes() {
+		if ch == '\n' {
+			nlines++
+		}
+	}
+
+	const expected = 3 // at least one newline per line comment
+	if nlines < expected {
+		t.Errorf("got %d, expected %d\n", nlines, expected)
+		t.Errorf("result:\n%s", buf.Bytes())
+	}
+}
+
+// Verify that the printer can be invoked during initialization.
+func init() {
+	const name = "foobar" // arbitrary identifier to print
+	var buf bytes.Buffer
+	if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
+		panic(err) // error in test
+	}
+	// in debug mode, the result contains additional information;
+	// ignore it
+	if s := buf.String(); !debug && s != name {
+		panic("got " + s + ", want " + name)
+	}
+}
+
+// Verify that the printer doesn't crash if the AST contains BadXXX nodes.
+func TestBadNodes(t *testing.T) {
+	const src = "package p\n("
+	const res = "package p\nBadDecl\n" // printer renders a BadDecl placeholder
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err == nil {
+		t.Error("expected illegal program") // error in test
+	}
+	var buf bytes.Buffer
+	Fprint(&buf, fset, f)
+	if buf.String() != res {
+		t.Errorf("got %q, expected %q", buf.String(), res)
+	}
+}
+
+// testComment verifies that f can be parsed again after printing it
+// with its first comment set to comment at any possible source offset.
+func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
+	f.Comments[0].List[0] = comment
+	var buf bytes.Buffer
+	// try every comment offset from 0 through srclen
+	for offs := 0; offs <= srclen; offs++ {
+		buf.Reset()
+		// Printing f should result in a correct program no
+		// matter what the (incorrect) comment position is.
+		if err := Fprint(&buf, fset, f); err != nil {
+			t.Error(err)
+		}
+		if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
+			t.Fatalf("incorrect program for pos = %d:\n%s", comment.Slash, buf.String())
+		}
+		// Position information is just an offset.
+		// Move comment one byte down in the source.
+		comment.Slash++
+	}
+}
+
+// Verify that the printer produces a correct program
+// even if the position information of comments introducing newlines
+// is incorrect.
+func TestBadComments(t *testing.T) {
+	t.Parallel()
+	const src = `
+// first comment - text and position changed by test
+package p
+import "fmt"
+const pi = 3.14 // rough circle
+var (
+	x, y, z int = 1, 2, 3
+	u, v float64
+)
+func fibo(n int) {
+	if n < 2 {
+		return n /* seed values */
+	}
+	return fibo(n-1) + fibo(n-2)
+}
+`
+
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		t.Error(err) // error in test
+	}
+
+	comment := f.Comments[0].List[0]
+	pos := comment.Pos()
+	if fset.PositionFor(pos, false /* absolute position */).Offset != 1 {
+		t.Error("expected offset 1") // error in test
+	}
+
+	// exercise //-style and /*-style replacement comments of varying shapes
+	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "//-style comment"})
+	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment */"})
+	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style \n comment */"})
+	testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment \n\n\n */"})
+}
+
+// A visitor sends every *ast.Ident it visits on the channel.
+type visitor chan *ast.Ident
+
+func (v visitor) Visit(n ast.Node) (w ast.Visitor) {
+	if ident, ok := n.(*ast.Ident); ok {
+		v <- ident
+	}
+	return v
+}
+
+// idents is an iterator that returns all idents in f via the result channel.
+func idents(f *ast.File) <-chan *ast.Ident {
+	v := make(visitor)
+	go func() {
+		ast.Walk(v, f)
+		close(v)
+	}()
+	return v
+}
+
+// identCount returns the number of identifiers found in f.
+func identCount(f *ast.File) int {
+	n := 0
+	for range idents(f) {
+		n++
+	}
+	return n
+}
+
+// Verify that the SourcePos mode emits correct //line directives
+// by testing that position information for matching identifiers
+// is maintained.
+func TestSourcePos(t *testing.T) {
+	const src = `
+package p
+import ( "golang.org/x/website/internal/backport/go/printer"; "math" )
+const pi = 3.14; var x = 0
+type t struct{ x, y, z int; u, v, w float32 }
+func (t *t) foo(a, b, c int) int {
+	return a*t.x + b*t.y +
+		// two extra lines here
+		// ...
+		c*t.z
+}
+`
+
+	// parse original
+	f1, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// pretty-print original
+	var buf bytes.Buffer
+	err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// parse pretty printed original
+	// (//line directives must be interpreted even w/o parser.ParseComments set)
+	f2, err := parser.ParseFile(fset, "", buf.Bytes(), 0)
+	if err != nil {
+		t.Fatalf("%s\n%s", err, buf.Bytes())
+	}
+
+	// At this point the position information of identifiers in f2 should
+	// match the position information of corresponding identifiers in f1.
+
+	// number of identifiers must be > 0 (test should run) and must match
+	n1 := identCount(f1)
+	n2 := identCount(f2)
+	if n1 == 0 {
+		t.Fatal("got no idents")
+	}
+	if n2 != n1 {
+		t.Errorf("got %d idents; want %d", n2, n1)
+	}
+
+	// verify that all identifiers have correct line information
+	// (identifiers are compared pairwise, in traversal order)
+	i2range := idents(f2)
+	for i1 := range idents(f1) {
+		i2 := <-i2range
+
+		if i2.Name != i1.Name {
+			t.Errorf("got ident %s; want %s", i2.Name, i1.Name)
+		}
+
+		// here we care about the relative (line-directive adjusted) positions
+		l1 := fset.Position(i1.Pos()).Line
+		l2 := fset.Position(i2.Pos()).Line
+		if l2 != l1 {
+			t.Errorf("got line %d; want %d for %s", l2, l1, i1.Name)
+		}
+	}
+
+	if t.Failed() {
+		t.Logf("\n%s", buf.Bytes())
+	}
+}
+
+// Verify that the SourcePos mode doesn't emit unnecessary //line directives
+// before empty lines.
+func TestIssue5945(t *testing.T) {
+	const orig = `
+package p   // line 2
+func f() {} // line 3
+
+var x, y, z int
+
+
+func g() { // line 8
+}
+`
+
+	// note: no //line directive before the blank-line-separated var group
+	const want = `//line src.go:2
+package p
+
+//line src.go:3
+func f() {}
+
+var x, y, z int
+
+//line src.go:8
+func g() {
+}
+`
+
+	// parse original
+	f1, err := parser.ParseFile(fset, "src.go", orig, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// pretty-print original
+	var buf bytes.Buffer
+	err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	got := buf.String()
+
+	// compare original with desired output
+	if got != want {
+		t.Errorf("got:\n%s\nwant:\n%s\n", got, want)
+	}
+}
+
+// decls lists declaration sources that must round-trip through Fprint unchanged.
+var decls = []string{
+	`import "fmt"`,
+	"const pi = 3.1415\nconst e = 2.71828\n\nvar x = pi",
+	"func sum(x, y int) int\t{ return x + y }",
+}
+
+// TestDeclLists prints only the declarations of each parsed file and
+// requires the output to equal the input source.
+func TestDeclLists(t *testing.T) {
+	for _, src := range decls {
+		file, err := parser.ParseFile(fset, "", "package p;"+src, parser.ParseComments)
+		if err != nil {
+			panic(err) // error in test
+		}
+
+		var buf bytes.Buffer
+		err = Fprint(&buf, fset, file.Decls) // only print declarations
+		if err != nil {
+			panic(err) // error in test
+		}
+
+		out := buf.String()
+		if out != src {
+			t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+		}
+	}
+}
+
+// stmts lists statement sources that must round-trip through Fprint unchanged.
+var stmts = []string{
+	"i := 0",
+	"select {}\nvar a, b = 1, 2\nreturn a + b",
+	"go f()\ndefer func() {}()",
+}
+
+// TestStmtLists prints only the body statements of each parsed function
+// and requires the output to equal the input source.
+func TestStmtLists(t *testing.T) {
+	for _, src := range stmts {
+		file, err := parser.ParseFile(fset, "", "package p; func _() {"+src+"}", parser.ParseComments)
+		if err != nil {
+			panic(err) // error in test
+		}
+
+		var buf bytes.Buffer
+		err = Fprint(&buf, fset, file.Decls[0].(*ast.FuncDecl).Body.List) // only print statements
+		if err != nil {
+			panic(err) // error in test
+		}
+
+		out := buf.String()
+		if out != src {
+			t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+		}
+	}
+}
+
+// TestBaseIndent verifies that Config.Indent prefixes every non-empty
+// output line with at least that many tabs.
+func TestBaseIndent(t *testing.T) {
+	t.Parallel()
+	// The testfile must not contain multi-line raw strings since those
+	// are not indented (because their values must not change) and make
+	// this test fail.
+	const filename = "printer.go"
+	src, err := os.ReadFile(filename)
+	if err != nil {
+		panic(err) // error in test
+	}
+
+	file, err := parser.ParseFile(fset, filename, src, 0)
+	if err != nil {
+		panic(err) // error in test
+	}
+
+	for indent := 0; indent < 4; indent++ {
+		indent := indent // capture loop variable for the parallel subtest
+		t.Run(fmt.Sprint(indent), func(t *testing.T) {
+			t.Parallel()
+			var buf bytes.Buffer
+			(&Config{Tabwidth: tabwidth, Indent: indent}).Fprint(&buf, fset, file)
+			// all code must be indented by at least 'indent' tabs
+			lines := bytes.Split(buf.Bytes(), []byte{'\n'})
+			for i, line := range lines {
+				if len(line) == 0 {
+					continue // empty lines don't have indentation
+				}
+				n := 0
+				for j, b := range line {
+					if b != '\t' {
+						// end of indentation
+						n = j
+						break
+					}
+				}
+				if n < indent {
+					t.Errorf("line %d: got only %d tabs; want at least %d: %q", i, n, indent, line)
+				}
+			}
+		})
+	}
+}
+
+// TestFuncType tests that an ast.FuncType with a nil Params field
+// can be printed (per go/ast specification). Test case for issue 3870.
+func TestFuncType(t *testing.T) {
+	src := &ast.File{
+		Name: &ast.Ident{Name: "p"},
+		Decls: []ast.Decl{
+			&ast.FuncDecl{
+				Name: &ast.Ident{Name: "f"},
+				Type: &ast.FuncType{}, // Params is nil
+			},
+		},
+	}
+
+	var buf bytes.Buffer
+	if err := Fprint(&buf, fset, src); err != nil {
+		t.Fatal(err)
+	}
+	got := buf.String()
+
+	const want = `package p
+
+func f()
+`
+
+	if got != want {
+		t.Fatalf("got:\n%s\nwant:\n%s\n", got, want)
+	}
+}
+
+// A limitWriter accepts at most remaining bytes; once the limit is
+// reached it returns io.EOF and counts the failed write in errCount.
+type limitWriter struct {
+	remaining int // bytes still accepted before returning io.EOF
+	errCount  int // number of writes that returned io.EOF
+}
+
+// Write implements io.Writer, truncating writes at the remaining byte budget.
+func (l *limitWriter) Write(buf []byte) (n int, err error) {
+	if len(buf) < l.remaining {
+		// within budget: accept everything
+		l.remaining -= len(buf)
+		return len(buf), nil
+	}
+	// budget exhausted by this write: accept only what fits
+	n = l.remaining
+	l.remaining = 0
+	l.errCount++
+	return n, io.EOF
+}
+
+// Test whether the printer stops writing after the first error.
+func TestWriteErrors(t *testing.T) {
+	t.Parallel()
+	const filename = "printer.go"
+	src, err := os.ReadFile(filename)
+	if err != nil {
+		panic(err) // error in test
+	}
+	file, err := parser.ParseFile(fset, filename, src, 0)
+	if err != nil {
+		panic(err) // error in test
+	}
+	// try progressively larger write budgets; each must fail at most once
+	for i := 0; i < 20; i++ {
+		lw := &limitWriter{remaining: i}
+		err := (&Config{Mode: RawFormat}).Fprint(lw, fset, file)
+		if lw.errCount > 1 {
+			t.Fatal("Writes continued after first error returned")
+		}
+		// We expect errCount be 1 iff err is set
+		if (lw.errCount != 0) != (err != nil) {
+			t.Fatal("Expected err when errCount != 0")
+		}
+	}
+}
+
+// TestX is a skeleton test that can be filled in for debugging one-off cases.
+// Do not remove.
+func TestX(t *testing.T) {
+	const src = `
+package p
+func _() {}
+`
+	_, err := format([]byte(src), 0)
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+// TestCommentedNode checks that printing a CommentedNode emits only the
+// comments that fall within the node's source range.
+func TestCommentedNode(t *testing.T) {
+	const (
+		input = `package main
+
+func foo() {
+	// comment inside func
+}
+
+// leading comment
+type bar int // comment2
+
+`
+
+		foo = `func foo() {
+	// comment inside func
+}`
+
+		bar = `// leading comment
+type bar int	// comment2
+`
+	)
+
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "input.go", input, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+
+	// printing the func decl must include only its interior comment
+	err = Fprint(&buf, fset, &CommentedNode{Node: f.Decls[0], Comments: f.Comments})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if buf.String() != foo {
+		t.Errorf("got %q, want %q", buf.String(), foo)
+	}
+
+	buf.Reset()
+
+	// printing the type decl must include its doc and trailing comments
+	err = Fprint(&buf, fset, &CommentedNode{Node: f.Decls[1], Comments: f.Comments})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if buf.String() != bar {
+		t.Errorf("got %q, want %q", buf.String(), bar)
+	}
+}
+
+// TestIssue11151 checks that carriage returns inside comments are stripped
+// so the printed program remains parseable.
+func TestIssue11151(t *testing.T) {
+	const src = "package p\t/*\r/1\r*\r/2*\r\r\r\r/3*\r\r+\r\r/4*/\n"
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var buf bytes.Buffer
+	Fprint(&buf, fset, f)
+	got := buf.String()
+	const want = "package p\t/*/1*\r/2*\r/3*+/4*/\n" // \r following opening /* should be stripped
+	if got != want {
+		t.Errorf("\ngot : %q\nwant: %q", got, want)
+	}
+
+	// the resulting program must be valid
+	_, err = parser.ParseFile(fset, "", got, 0)
+	if err != nil {
+		t.Errorf("%v\norig: %q\ngot : %q", err, src, got)
+	}
+}
+
+// If a declaration has multiple specifications, a parenthesized
+// declaration must be printed even if Lparen is token.NoPos.
+func TestParenthesizedDecl(t *testing.T) {
+	// a package with multiple specs in a single declaration
+	const src = "package p; var ( a float64; b int )"
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", src, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// print the original package
+	var buf bytes.Buffer
+	err = Fprint(&buf, fset, f)
+	if err != nil {
+		t.Fatal(err)
+	}
+	original := buf.String()
+
+	// now remove parentheses from the declaration
+	for i := 0; i != len(f.Decls); i++ {
+		f.Decls[i].(*ast.GenDecl).Lparen = token.NoPos
+	}
+	buf.Reset()
+	err = Fprint(&buf, fset, f)
+	if err != nil {
+		t.Fatal(err)
+	}
+	noparen := buf.String()
+
+	// both printings must agree: the printer re-adds the parentheses
+	if noparen != original {
+		t.Errorf("got %q, want %q", noparen, original)
+	}
+}
+
+// Verify that we don't print a newline between "return" and its results, as
+// that would incorrectly cause a naked return.
+func TestIssue32854(t *testing.T) {
+	src := `package foo
+
+func f() {
+        return Composite{
+                call(),
+        }
+}`
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "", src, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	// Replace the result with call(), which is on the next line.
+	fd := file.Decls[0].(*ast.FuncDecl)
+	ret := fd.Body.List[0].(*ast.ReturnStmt)
+	ret.Results[0] = ret.Results[0].(*ast.CompositeLit).Elts[0]
+
+	var buf bytes.Buffer
+	if err := Fprint(&buf, fset, ret); err != nil {
+		t.Fatal(err)
+	}
+	want := "return call()" // single line despite the result's position
+	if got := buf.String(); got != want {
+		t.Fatalf("got %q, want %q", got, want)
+	}
+}
diff --git a/internal/backport/go/printer/testdata/alignment.golden b/internal/backport/go/printer/testdata/alignment.golden
new file mode 100644
index 0000000..96086ed
--- /dev/null
+++ b/internal/backport/go/printer/testdata/alignment.golden
@@ -0,0 +1,172 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package alignment
+
+// ----------------------------------------------------------------------------
+// Examples from issue #7335.
+
+func main() {
+	z := MyStruct{
+		Foo:		"foo",
+		Bar:		"bar",
+		Name:		"name",
+		LongName:	"longname",
+		Baz:		"baz",
+	}
+	y := MyStruct{
+		Foo:			"foo",
+		Bar:			"bar",
+		NameXX:			"name",
+		LongNameXXXXXXXXXXXXX:	"longname",
+		Baz:			"baz",
+	}
+	z := MyStruct{
+		Foo:			"foo",
+		Bar:			"bar",
+		Name:			"name",
+		LongNameXXXXXXXXXXXXX:	"longname",
+		Baz:			"baz",
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #10392.
+
+var kcfg = KubeletConfig{
+	Address:			s.Address,
+	AllowPrivileged:		s.AllowPrivileged,
+	HostNetworkSources:		hostNetworkSources,
+	HostnameOverride:		s.HostnameOverride,
+	RootDirectory:			s.RootDirectory,
+	ConfigFile:			s.Config,
+	ManifestURL:			s.ManifestURL,
+	FileCheckFrequency:		s.FileCheckFrequency,
+	HTTPCheckFrequency:		s.HTTPCheckFrequency,
+	PodInfraContainerImage:		s.PodInfraContainerImage,
+	SyncFrequency:			s.SyncFrequency,
+	RegistryPullQPS:		s.RegistryPullQPS,
+	RegistryBurst:			s.RegistryBurst,
+	MinimumGCAge:			s.MinimumGCAge,
+	MaxPerPodContainerCount:	s.MaxPerPodContainerCount,
+	MaxContainerCount:		s.MaxContainerCount,
+	ClusterDomain:			s.ClusterDomain,
+	ClusterDNS:			s.ClusterDNS,
+	Runonce:			s.RunOnce,
+	Port:				s.Port,
+	ReadOnlyPort:			s.ReadOnlyPort,
+	CadvisorInterface:		cadvisorInterface,
+	EnableServer:			s.EnableServer,
+	EnableDebuggingHandlers:	s.EnableDebuggingHandlers,
+	DockerClient:			dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
+	KubeClient:			client,
+	MasterServiceNamespace:		s.MasterServiceNamespace,
+	VolumePlugins:			ProbeVolumePlugins(),
+	NetworkPlugins:			ProbeNetworkPlugins(),
+	NetworkPluginName:		s.NetworkPluginName,
+	StreamingConnectionIdleTimeout:	s.StreamingConnectionIdleTimeout,
+	TLSOptions:			tlsOptions,
+	ImageGCPolicy:			imageGCPolicy, imageGCPolicy,
+	Cloud:				cloud,
+	NodeStatusUpdateFrequency:	s.NodeStatusUpdateFrequency,
+}
+
+var a = A{
+	Long:					1,
+	LongLong:				1,
+	LongLongLong:				1,
+	LongLongLongLong:			1,
+	LongLongLongLongLong:			1,
+	LongLongLongLongLongLong:		1,
+	LongLongLongLongLongLongLong:		1,
+	LongLongLongLongLongLongLongLong:	1,
+	Short:					1,
+	LongLongLongLongLongLongLongLongLong:	3,
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #22852.
+
+var fmtMap = map[string]string{
+	"1":					"123",
+	"12":					"123",
+	"123":					"123",
+	"1234":					"123",
+	"12345":				"123",
+	"123456":				"123",
+	"12345678901234567890123456789":	"123",
+	"abcde":				"123",
+	"123456789012345678901234567890":	"123",
+	"1234567":				"123",
+	"abcdefghijklmnopqrstuvwxyzabcd":	"123",
+	"abcd":					"123",
+}
+
+type Fmt struct {
+	abcdefghijklmnopqrstuvwx	string
+	abcdefghijklmnopqrstuvwxy	string
+	abcdefghijklmnopqrstuvwxyz	string
+	abcdefghijklmnopqrstuvwxyza	string
+	abcdefghijklmnopqrstuvwxyzab	string
+	abcdefghijklmnopqrstuvwxyzabc	string
+	abcde				string
+	abcdefghijklmnopqrstuvwxyzabcde	string
+	abcdefg				string
+}
+
+func main() {
+	_ := Fmt{
+		abcdefghijklmnopqrstuvwx:		"foo",
+		abcdefghijklmnopqrstuvwxyza:		"foo",
+		abcdefghijklmnopqrstuvwxyzab:		"foo",
+		abcdefghijklmnopqrstuvwxyzabc:		"foo",
+		abcde:					"foo",
+		abcdefghijklmnopqrstuvwxyzabcde:	"foo",
+		abcdefg:				"foo",
+		abcdefghijklmnopqrstuvwxy:		"foo",
+		abcdefghijklmnopqrstuvwxyz:		"foo",
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26352.
+
+var _ = map[int]string{
+	1:	"",
+
+	12345678901234567890123456789:		"",
+	12345678901234567890123456789012345678:	"",
+}
+
+func f() {
+	_ = map[int]string{
+		1:	"",
+
+		12345678901234567:				"",
+		12345678901234567890123456789012345678901:	"",
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26930.
+
+var _ = S{
+	F1:	[]string{},
+	F2____:	[]string{},
+}
+
+var _ = S{
+	F1:	[]string{},
+	F2____:	[]string{},
+}
+
+var _ = S{
+	F1____:	[]string{},
+	F2:	[]string{},
+}
+
+var _ = S{
+	F1____:	[]string{},
+	F2:	[]string{},
+}
diff --git a/internal/backport/go/printer/testdata/alignment.input b/internal/backport/go/printer/testdata/alignment.input
new file mode 100644
index 0000000..323d268
--- /dev/null
+++ b/internal/backport/go/printer/testdata/alignment.input
@@ -0,0 +1,179 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package alignment
+
+// ----------------------------------------------------------------------------
+// Examples from issue #7335.
+
+func main() {
+    z := MyStruct{
+        Foo:      "foo",
+        Bar:      "bar",
+        Name:     "name",
+        LongName: "longname",
+        Baz:      "baz",
+    }
+    y := MyStruct{
+        Foo:                   "foo",
+        Bar:                   "bar",
+        NameXX:                "name",
+        LongNameXXXXXXXXXXXXX: "longname",
+        Baz: "baz",
+    }
+    z := MyStruct{
+        Foo:  "foo",
+        Bar:  "bar",
+        Name: "name",
+        LongNameXXXXXXXXXXXXX: "longname",
+        Baz: "baz",
+    }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #10392.
+
+var kcfg = KubeletConfig{
+    Address:                        s.Address,
+    AllowPrivileged:                s.AllowPrivileged,
+    HostNetworkSources:             hostNetworkSources,
+    HostnameOverride:               s.HostnameOverride,
+    RootDirectory:                  s.RootDirectory,
+    ConfigFile:                     s.Config,
+    ManifestURL:                    s.ManifestURL,
+    FileCheckFrequency:             s.FileCheckFrequency,
+    HTTPCheckFrequency:             s.HTTPCheckFrequency,
+    PodInfraContainerImage:         s.PodInfraContainerImage,
+    SyncFrequency:                  s.SyncFrequency,
+    RegistryPullQPS:                s.RegistryPullQPS,
+    RegistryBurst:                  s.RegistryBurst,
+    MinimumGCAge:                   s.MinimumGCAge,
+    MaxPerPodContainerCount:        s.MaxPerPodContainerCount,
+    MaxContainerCount:              s.MaxContainerCount,
+    ClusterDomain:                  s.ClusterDomain,
+    ClusterDNS:                     s.ClusterDNS,
+    Runonce:                        s.RunOnce,
+    Port:                           s.Port,
+    ReadOnlyPort:                   s.ReadOnlyPort,
+    CadvisorInterface:              cadvisorInterface,
+    EnableServer:                   s.EnableServer,
+    EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
+    DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
+    KubeClient:                     client,
+    MasterServiceNamespace:         s.MasterServiceNamespace,
+    VolumePlugins:                  ProbeVolumePlugins(),
+    NetworkPlugins:                 ProbeNetworkPlugins(),
+    NetworkPluginName:              s.NetworkPluginName,
+    StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
+    TLSOptions:                     tlsOptions,
+    ImageGCPolicy:                  imageGCPolicy,imageGCPolicy,
+    Cloud:                          cloud,
+    NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
+}
+
+var a = A{
+    Long:                             1,
+    LongLong:                         1,
+    LongLongLong:                     1,
+    LongLongLongLong:                 1,
+    LongLongLongLongLong:             1,
+    LongLongLongLongLongLong:         1,
+    LongLongLongLongLongLongLong:     1,
+    LongLongLongLongLongLongLongLong: 1,
+    Short: 1,
+    LongLongLongLongLongLongLongLongLong: 3,
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #22852.
+
+var fmtMap = map[string]string{
+	"1": "123",
+	"12": "123",
+	"123": "123",
+	"1234": "123",
+	"12345": "123",
+	"123456": "123",
+	"12345678901234567890123456789": "123",
+	"abcde": "123",
+	"123456789012345678901234567890": "123",
+	"1234567": "123",
+	"abcdefghijklmnopqrstuvwxyzabcd": "123",
+	"abcd": "123",
+}
+
+type Fmt struct {
+	abcdefghijklmnopqrstuvwx string
+	abcdefghijklmnopqrstuvwxy string
+	abcdefghijklmnopqrstuvwxyz string
+	abcdefghijklmnopqrstuvwxyza string
+	abcdefghijklmnopqrstuvwxyzab string
+	abcdefghijklmnopqrstuvwxyzabc string
+	abcde string
+	abcdefghijklmnopqrstuvwxyzabcde string
+	abcdefg string
+}
+
+func main() {
+	_ := Fmt{
+		abcdefghijklmnopqrstuvwx: "foo",
+		abcdefghijklmnopqrstuvwxyza: "foo",
+		abcdefghijklmnopqrstuvwxyzab: "foo",
+		abcdefghijklmnopqrstuvwxyzabc: "foo",
+		abcde: "foo",
+		abcdefghijklmnopqrstuvwxyzabcde: "foo",
+		abcdefg: "foo",
+		abcdefghijklmnopqrstuvwxy: "foo",
+		abcdefghijklmnopqrstuvwxyz: "foo",
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26352.
+
+var _ = map[int]string{
+	1: "",
+
+	12345678901234567890123456789: "",
+	12345678901234567890123456789012345678: "",
+}
+
+func f() {
+	_ = map[int]string{
+		1: "",
+
+		12345678901234567: "",
+		12345678901234567890123456789012345678901: "",
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26930.
+
+var _ = S{
+	F1: []string{
+	},
+	F2____: []string{},
+}
+
+var _ = S{
+	F1: []string{
+
+
+	},
+	F2____: []string{},
+}
+
+var _ = S{
+	F1____: []string{
+	},
+	F2: []string{},
+}
+
+var _ = S{
+	F1____: []string{
+
+	},
+	F2: []string{},
+}
diff --git a/internal/backport/go/printer/testdata/comments.golden b/internal/backport/go/printer/testdata/comments.golden
new file mode 100644
index 0000000..1a21fff
--- /dev/null
+++ b/internal/backport/go/printer/testdata/comments.golden
@@ -0,0 +1,759 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+import "fmt"	// fmt
+
+const c0 = 0	// zero
+const (
+	c1	= iota	// c1
+	c2		// c2
+)
+
+// Alignment of comments in declarations>
+const (
+	_	T	= iota	// comment
+	_			// comment
+	_			// comment
+	_	= iota + 10
+	_	// comments
+
+	_		= 10	// comment
+	_	T	= 20	// comment
+)
+
+const (
+	_____	= iota	// foo
+	_		// bar
+	_	= 0	// bal
+	_		// bat
+)
+
+const (
+	_	T	= iota	// comment
+	_			// comment
+	_			// comment
+	_	= iota + 10
+	_		// comment
+	_		= 10
+	_		= 20	// comment
+	_	T	= 0	// comment
+)
+
+// The SZ struct; it is empty.
+type SZ struct{}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+	int
+	x, y, z	int	// 3 unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+	S0
+	A, B, C	float	// 3 exported fields
+	D, b, c	int	// 2 unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+	S1
+	A, B, C	float	// 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface{}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+	f(x int) int	// unexported method
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+	I0
+	F(x float) float	// exported methods
+	g(x int) int		// unexported method
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+	I0
+	F(x float) float	// exported method
+	G(x float) float	// exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+	// lead comment for F1
+	F1	int	// line comment for F1
+	// lead comment for F2
+	F2	int	// line comment for F2
+	f3	int	// f3 is not exported
+}
+
+// This comment group should be separated
+// with a newline from the next comment
+// group.
+
+// This comment should NOT be associated with the next declaration.
+
+var x int	// x
+var ()
+
+// This comment SHOULD be associated with f0.
+func f0() {
+	const pi = 3.14	// pi
+	var s1 struct{}	/* an empty struct */	/* foo */
+	// a struct constructor
+	// --------------------
+	var s2 struct{} = struct{}{}
+	x := pi
+}
+
+//
+// This comment should be associated with f1, with one blank line before the comment.
+//
+func f1() {
+	f0()
+	/* 1 */
+	// 2
+	/* 3 */
+	/* 4 */
+	f0()
+}
+
+func _() {
+	// this comment should be properly indented
+}
+
+func _(x int) int {
+	if x < 0 {	// the tab printed before this comment's // must not affect the remaining lines
+		return -x	// this statement should be properly indented
+	}
+	if x < 0 {	/* the tab printed before this comment's /* must not affect the remaining lines */
+		return -x	// this statement should be properly indented
+	}
+	return x
+}
+
+func typeswitch(x interface{}) {
+	switch v := x.(type) {
+	case bool, int, float:
+	case string:
+	default:
+	}
+
+	switch x.(type) {
+	}
+
+	switch v0, ok := x.(int); v := x.(type) {
+	}
+
+	switch v0, ok := x.(int); x.(type) {
+	case byte:	// this comment should be on the same line as the keyword
+		// this comment should be normally indented
+		_ = 0
+	case bool, int, float:
+		// this comment should be indented
+	case string:
+	default:
+		// this comment should be indented
+	}
+	// this comment should not be indented
+}
+
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+	s := 1 +
+		2
+	// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2	// comment
+	// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2	// comment
+	// should be indented like s
+	_ = 0
+}
+
+func _() {
+	s := 1 +
+		2
+	// should be indented like s
+	_ = 0
+}
+
+func _() {
+	s := 1 +
+		2
+
+	// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2	// comment
+
+	// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2	// comment
+
+	// should be indented like s
+	_ = 0
+}
+
+func _() {
+	s := 1 +
+		2
+
+	// should be indented like s
+	_ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+	templateText := "a" +	// A
+		"b" +	// B
+		"c"	// C
+
+	// should be aligned with f()
+	f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+	templateText := "a" +	// A
+		"b" +	// B
+		"c"	// C
+
+		// may not be aligned with f() (source is not aligned)
+	f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
+func _() {
+	/* freestanding comment
+	   aligned		line
+	   aligned line
+	*/
+}
+
+func _() {
+	/* freestanding comment
+	   aligned		line
+	   aligned line
+	*/
+}
+
+func _() {
+	/* freestanding comment
+	   aligned		line
+	   aligned line */
+}
+
+func _() {
+	/*	freestanding comment
+		aligned		line
+		aligned line
+	*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned		line
+		aligned line
+	*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned		line
+		aligned line */
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned		line
+	   aligned line
+	*/
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned		line
+	   aligned line
+	*/
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned		line
+	   aligned line */
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned		line
+		aligned line
+	*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned		line
+		aligned line
+	*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned		line
+		aligned line */
+}
+
+func _() {
+	/* freestanding comment
+	   aligned line
+	*/
+}
+
+func _() {
+	/* freestanding comment
+	   aligned line
+	*/
+}
+
+func _() {
+	/* freestanding comment
+	   aligned line */
+}
+
+func _() {
+	/*	freestanding comment
+		aligned line
+	*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned line
+	*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned line */
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned line
+	*/
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned line
+	*/
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned line */
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned line
+	*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned line
+	*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned line */
+}
+
+// Issue 9751.
+func _() {
+	/*a string
+
+	b string*/
+
+	/*A string
+
+
+
+	Z string*/
+
+	/*a string
+
+	b string
+
+	c string*/
+
+	{
+		/*a string
+		b string*/
+
+		/*a string
+
+		b string*/
+
+		/*a string
+
+		b string
+
+		c string*/
+	}
+
+	{
+		/*a string
+		b string*/
+
+		/*a string
+
+		b string*/
+
+		/*a string
+
+		b string
+
+		c string*/
+	}
+
+	/*
+	 */
+
+	/*
+
+	 */
+
+	/*
+
+	 * line
+
+	 */
+}
+
+/*
+ * line
+ * of
+ * stars
+ */
+
+/* another line
+ * of
+ * stars */
+
+/*	and another line
+ *	of
+ *	stars */
+
+/* a line of
+ * stars */
+
+/*	and another line of
+ *	stars */
+
+/* a line of stars
+ */
+
+/*	and another line of
+ */
+
+/* a line of stars
+ */
+
+/*	and another line of
+ */
+
+/*
+aligned in middle
+here
+        not here
+*/
+
+/*
+blank line in middle:
+
+with no leading spaces on blank line.
+*/
+
+/*
+   aligned in middle
+   here
+           not here
+*/
+
+/*
+	blank line in middle:
+
+	with no leading spaces on blank line.
+*/
+
+func _() {
+	/*
+	 * line
+	 * of
+	 * stars
+	 */
+
+	/*
+		aligned in middle
+		here
+			not here
+	*/
+
+	/*
+		blank line in middle:
+
+		with no leading spaces on blank line.
+	*/
+}
+
+// Some interesting interspersed comments.
+// See below for more common cases.
+func _( /* this */ x /* is */ /* an */ int) {
+}
+
+func _( /* no params - extra blank before and after comment */ )	{}
+func _(a, b int /* params - no extra blank after comment */)		{}
+
+func _()	{ f( /* no args - extra blank before and after comment */ ) }
+func _()	{ f(a, b /* args - no extra blank after comment */) }
+
+func _() {
+	f( /* no args - extra blank before and after comment */ )
+	f(a, b /* args - no extra blank after comment */)
+}
+
+func ( /* comment1 */ T /* comment2 */) _()	{}
+
+func _()	{ /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
+func _()	{ x := 0; /* comment */ y = x /* comment */ }
+
+func _() {
+	_ = 0
+	/* closing curly brace should be on new line */
+}
+
+func _() {
+	_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+	var a = []int{1, 2	/*jasldf*/}
+	_ = a
+}
+
+func _() {
+	var a = []int{1, 2}/*jasldf
+	 */
+
+	_ = a
+}
+
+func _() {
+	var a = []int{1, 2}// jasldf
+
+	_ = a
+}
+
+// Test cases from issues 11274, 15137:
+// Semicolon must not be lost when multiple statements are on the same line with a comment.
+func _() {
+	x := 0 /**/
+	y := 1
+}
+
+func _() {
+	f()
+	f()
+	f() /* comment */
+	f()
+	f() /* comment */
+	f()
+	f() /* a */ /* b */
+	f()
+	f() /* a */ /* b */
+	f()
+	f() /* a */ /* b */
+	f()
+}
+
+func _() {
+	f() /* a */ /* b */
+}
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
+func _() {
+	_ = T{
+		1,	// comment after comma
+		2,	/* comment after comma */
+		3,	// comment after comma
+	}
+	_ = T{
+		1,	// comment after comma
+		2,	/* comment after comma */
+		3,	// comment after comma
+	}
+	_ = T{
+		/* comment before literal */ 1,
+		2,	/* comment before comma - ok to move after comma */
+		3,	/* comment before comma - ok to move after comma */
+	}
+
+	for i = 0;	// comment after semicolon
+	i < 9;		/* comment after semicolon */
+	i++ {		// comment after opening curly brace
+	}
+
+	// TODO(gri) the last comment in this example should be aligned */
+	for i = 0;	// comment after semicolon
+	i < 9;		/* comment before semicolon - ok to move after semicolon */
+	i++ /* comment before opening curly brace */ {
+	}
+}
+
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x /* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+	f(x /* comment */, y)
+	f(x,	/* comment */
+		y)
+	f(
+		x,	/* comment */
+	)
+}
+
+func g(
+	x int,	/* comment */
+) {
+}
+
+type _ struct {
+	a, b /* comment */, c int
+}
+
+type _ struct {
+	a, b /* comment */, c int
+}
+
+func _() {
+	for a /* comment */, b := range x {
+	}
+}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+	_ = 0
+	// The following is a legal line directive. It must not be indented:
+//line foo:2
+	_ = 1
+
+	// The following is not a legal line directive (it doesn't start in column 1):
+	//line foo:2
+	_ = 2
+
+	// The following is not a legal line directive (missing colon):
+//line foo -3
+	_ = 3
+}
+
+// Line comments with tabs
+func _() {
+	var finput *bufio.Reader	// input file
+	var stderr *bufio.Writer
+	var ftable *bufio.Writer	// y.go file
+	var foutput *bufio.Writer	// y.output file
+
+	var oflag string	// -o [y.go]		- y.go file
+	var vflag string	// -v [y.output]	- y.output file
+	var lflag bool		// -l			- disable line directives
+}
+
+// Trailing white space in comments should be trimmed
+func _() {
+	// This comment has 4 blanks following that should be trimmed:
+	/* Each line of this comment has blanks or tabs following that should be trimmed:
+	   line 2:
+	   line 3:
+	*/
+}
+
+var _ = []T{ /* lone comment */ }
+
+var _ = []T{
+	/* lone comment */
+}
+
+var _ = []T{
+	// lone comments
+	// in composite lit
+}
+
+var _ = [][]T{
+	{
+		// lone comments
+		// in composite lit
+	},
+}
+
+// TODO: gofmt doesn't add these tabs; make it so that these golden
+// tests run the printer in a way that it's exactly like gofmt.
+
+var _ = []T{	// lone comment
+}
+
+var _ = []T{	// lone comments
+	// in composite lit
+}
+
+/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/internal/backport/go/printer/testdata/comments.input b/internal/backport/go/printer/testdata/comments.input
new file mode 100644
index 0000000..aa428a2
--- /dev/null
+++ b/internal/backport/go/printer/testdata/comments.input
@@ -0,0 +1,756 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+import "fmt"  // fmt
+
+const c0 = 0  // zero
+const (
+	c1 = iota  // c1
+	c2  // c2
+)
+
+// Alignment of comments in declarations>
+const (
+	_ T = iota  // comment
+	_  // comment
+	_  // comment
+	_ = iota+10
+	_  // comments
+
+	_ = 10  // comment
+	_ T = 20  // comment
+)
+
+const (
+	_____ = iota // foo
+	_ // bar
+	_  = 0    // bal
+	_ // bat
+)
+
+const (
+	_ T = iota // comment
+	_ // comment
+	_ // comment
+	_ = iota + 10
+	_ // comment
+	_ = 10
+	_ = 20 // comment
+	_ T = 0 // comment
+)
+
+// The SZ struct; it is empty.
+type SZ struct {}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+	int
+	x, y, z int  // 3 unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+	S0
+	A, B, C float  // 3 exported fields
+	D, b, c int  // 2 unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+	S1
+	A, B, C float  // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface {}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+	f(x int) int  // unexported method
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+	I0
+	F(x float) float  // exported methods
+	g(x int) int  // unexported method
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+	I0
+	F(x float) float  // exported method
+	G(x float) float  // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+	// lead comment for F1
+	F1 int // line comment for F1
+	// lead comment for F2
+	F2 int // line comment for F2
+	f3 int // f3 is not exported
+}
+
+// This comment group should be separated
+// with a newline from the next comment
+// group.
+
+// This comment should NOT be associated with the next declaration.
+
+var x int  // x
+var ()
+
+
+// This comment SHOULD be associated with f0.
+func f0() {
+	const pi = 3.14  // pi
+	var s1 struct {}  /* an empty struct */ /* foo */
+	// a struct constructor
+	// --------------------
+	var s2 struct {} = struct {}{}
+	x := pi
+}
+//
+// This comment should be associated with f1, with one blank line before the comment.
+//
+func f1() {
+	f0()
+	/* 1 */
+	// 2
+	/* 3 */
+	/* 4 */
+	f0()
+}
+
+
+func _() {
+// this comment should be properly indented
+}
+
+
+func _(x int) int {
+	if x < 0 {  // the tab printed before this comment's // must not affect the remaining lines
+		return -x  // this statement should be properly indented
+	}
+	if x < 0 {  /* the tab printed before this comment's /* must not affect the remaining lines */
+		return -x  // this statement should be properly indented
+	}
+	return x
+}
+
+
+func typeswitch(x interface{}) {
+	switch v := x.(type) {
+	case bool, int, float:
+	case string:
+	default:
+	}
+
+	switch x.(type) {
+	}
+
+	switch v0, ok := x.(int); v := x.(type) {
+	}
+
+	switch v0, ok := x.(int); x.(type) {
+	case byte:  // this comment should be on the same line as the keyword
+		// this comment should be normally indented
+		_ = 0
+	case bool, int, float:
+		// this comment should be indented
+	case string:
+	default:
+		// this comment should be indented
+	}
+	// this comment should not be indented
+}
+
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+	s := 1 +
+		2
+// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2 // comment
+		// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2 // comment
+	// should be indented like s
+	_ = 0
+}
+
+func _() {
+	s := 1 +
+		2
+	// should be indented like s
+	_ = 0
+}
+
+func _() {
+	s := 1 +
+		2
+
+// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2 // comment
+
+		// should be indented like s
+}
+
+func _() {
+	s := 1 +
+		2 // comment
+
+	// should be indented like s
+	_ = 0
+}
+
+func _() {
+	s := 1 +
+		2
+
+	// should be indented like s
+	_ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+	templateText := "a" + // A
+		"b" + // B
+		"c" // C
+
+	// should be aligned with f()
+	f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+	templateText := "a" + // A
+		"b" + // B
+		"c" // C
+
+		// may not be aligned with f() (source is not aligned)
+	f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
+func _() {
+	/* freestanding comment
+	   aligned		line
+	   aligned line
+	*/
+}
+
+func _() {
+	/* freestanding comment
+	   aligned		line
+	   aligned line
+	   */
+}
+
+func _() {
+	/* freestanding comment
+	   aligned		line
+	   aligned line */
+}
+
+func _() {
+	/*	freestanding comment
+		aligned		line
+		aligned line
+	*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned		line
+		aligned line
+		*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned		line
+		aligned line */
+}
+
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned		line
+	   aligned line
+	*/
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned		line
+	   aligned line
+	   */
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned		line
+	   aligned line */
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned		line
+		aligned line
+	*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned		line
+		aligned line
+		*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned		line
+		aligned line */
+}
+
+func _() {
+	/* freestanding comment
+	   aligned line
+	*/
+}
+
+func _() {
+	/* freestanding comment
+	   aligned line
+	   */
+}
+
+func _() {
+	/* freestanding comment
+	   aligned line */
+}
+
+func _() {
+	/*	freestanding comment
+		aligned line
+	*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned line
+		*/
+}
+
+func _() {
+	/*	freestanding comment
+		aligned line */
+}
+
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned line
+	*/
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned line
+	   */
+}
+
+func _() {
+	/*
+	   freestanding comment
+	   aligned line */
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned line
+	*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned line
+		*/
+}
+
+func _() {
+	/*
+		freestanding comment
+		aligned line */
+}
+
+// Issue 9751.
+func _() {
+	/*a string
+
+	b string*/
+
+	/*A string
+
+
+
+	Z string*/
+
+	/*a string
+
+	b string
+
+	c string*/
+
+	{
+		/*a string
+b string*/
+
+		/*a string
+
+b string*/
+
+		/*a string
+
+b string
+
+c string*/
+	}
+
+	{
+		/*a string
+				b string*/
+
+		/*a string
+
+				b string*/
+
+		/*a string
+
+				b string
+
+				c string*/
+	}
+
+	/*
+	*/
+
+	/*
+
+	*/
+
+	/*
+
+	 * line
+
+	*/
+}
+
+/*
+ * line
+ * of
+ * stars
+ */
+
+/* another line
+ * of
+ * stars */
+
+/*	and another line
+ *	of
+ *	stars */
+
+/* a line of
+ * stars */
+
+/*	and another line of
+ *	stars */
+
+/* a line of stars
+*/
+
+/*	and another line of
+*/
+
+/* a line of stars
+ */
+
+/*	and another line of
+ */
+
+/*
+aligned in middle
+here
+        not here
+*/
+
+/*
+blank line in middle:
+
+with no leading spaces on blank line.
+*/
+
+/*
+   aligned in middle
+   here
+           not here
+*/
+
+/*
+	blank line in middle:
+
+	with no leading spaces on blank line.
+*/
+
+func _() {
+	/*
+	 * line
+	 * of
+	 * stars
+	 */
+
+	/*
+	aligned in middle
+	here
+		not here
+	*/
+
+	/*
+	blank line in middle:
+
+	with no leading spaces on blank line.
+*/
+}
+
+
+// Some interesting interspersed comments.
+// See below for more common cases.
+func _(/* this */x/* is *//* an */ int) {
+}
+
+func _(/* no params - extra blank before and after comment */) {}
+func _(a, b int /* params - no extra blank after comment */) {}
+
+func _() { f(/* no args - extra blank before and after comment */) }
+func _() { f(a, b /* args - no extra blank after comment */) }
+
+func _() {
+	f(/* no args - extra blank before and after comment */)
+	f(a, b /* args - no extra blank after comment */)
+}
+
+func (/* comment1 */ T /* comment2 */) _() {}
+
+func _() { /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
+func _() { x := 0; /* comment */ y = x /* comment */ }
+
+func _() {
+	_ = 0
+	/* closing curly brace should be on new line */ }
+
+func _() {
+	_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+	var a = []int{1, 2, /*jasldf*/
+	}
+	_ = a
+}
+
+func _() {
+	var a = []int{1, 2, /*jasldf
+						*/
+	}
+	_ = a
+}
+
+func _() {
+	var a = []int{1, 2, // jasldf
+	}
+	_ = a
+}
+
+// Test cases from issues 11274, 15137:
+// Semicolon must not be lost when multiple statements are on the same line with a comment.
+func _() {
+    x := 0 /**/; y := 1
+}
+
+func _() {
+	f(); f()
+	f(); /* comment */ f()
+	f() /* comment */; f()	
+	f(); /* a */ /* b */ f()
+	f() /* a */ /* b */; f()
+	f() /* a */; /* b */ f()
+}
+
+func _() {
+	f() /* a */ /* b */ }
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
+func _() {
+	_ = T{
+		1,    // comment after comma
+		2,    /* comment after comma */
+		3  ,  // comment after comma
+	}
+	_ = T{
+		1  ,// comment after comma
+		2  ,/* comment after comma */
+		3,// comment after comma
+	}
+	_ = T{
+		/* comment before literal */1,
+		2/* comment before comma - ok to move after comma */,
+		3  /* comment before comma - ok to move after comma */  ,
+	}
+
+	for
+		i=0;// comment after semicolon
+		i<9;/* comment after semicolon */
+		i++{// comment after opening curly brace
+	}
+
+	// TODO(gri) the last comment in this example should be aligned */
+	for
+		i=0;// comment after semicolon
+		i<9/* comment before semicolon - ok to move after semicolon */;
+		i++ /* comment before opening curly brace */ {
+	}
+}
+
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x/* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+	f(x /* comment */, y)
+	f(x /* comment */, 
+	y)
+	f(
+		x /* comment */,
+	)
+}
+
+func g(
+	x int /* comment */,
+) {}
+
+type _ struct {
+	a, b /* comment */, c int
+}
+
+type _ struct { a, b /* comment */, c int }
+
+func _() {
+	for a /* comment */, b := range x {
+	}
+}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+	_ = 0
+// The following is a legal line directive. It must not be indented:
+//line foo:2
+	_ = 1
+
+// The following is not a legal line directive (it doesn't start in column 1):
+	//line foo:2
+	_ = 2
+
+// The following is not a legal line directive (missing colon):
+//line foo -3
+	_ = 3
+}
+
+// Line comments with tabs
+func _() {
+var	finput		*bufio.Reader			// input file
+var	stderr		*bufio.Writer
+var	ftable		*bufio.Writer			// y.go file
+var	foutput		*bufio.Writer			// y.output file
+
+var	oflag		string				// -o [y.go]		- y.go file
+var	vflag		string				// -v [y.output]	- y.output file
+var	lflag		bool				// -l			- disable line directives
+}
+
+// Trailing white space in comments should be trimmed
+func _() {
+// This comment has 4 blanks following that should be trimmed:    
+/* Each line of this comment has blanks or tabs following that should be trimmed:	
+   line 2:    
+   line 3:    			
+*/
+}
+
+var _ = []T{/* lone comment */}
+
+var _ = []T{
+/* lone comment */
+}
+
+var _ = []T{
+// lone comments
+// in composite lit
+}
+
+var _ = [][]T{
+	{
+		// lone comments
+		// in composite lit
+	},
+}
+
+// TODO: gofmt doesn't add these tabs; make it so that these golden
+// tests run the printer in a way that it's exactly like gofmt.
+
+var _ = []T{// lone comment
+}
+
+var _ = []T{// lone comments
+// in composite lit
+}
+
+/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/internal/backport/go/printer/testdata/comments.x b/internal/backport/go/printer/testdata/comments.x
new file mode 100644
index 0000000..ae77292
--- /dev/null
+++ b/internal/backport/go/printer/testdata/comments.x
@@ -0,0 +1,56 @@
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// The SZ struct; it is empty.
+type SZ struct{}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+	// contains filtered or unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+	S0
+	A, B, C	float	// 3 exported fields
+	D	int	// 2 unexported fields
+	// contains filtered or unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+	S1
+	A, B, C	float	// 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface{}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+	// contains filtered or unexported methods
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+	I0
+	F(x float) float	// exported methods
+	// contains filtered or unexported methods
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+	I0
+	F(x float) float	// exported method
+	G(x float) float	// exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+	// lead comment for F1
+	F1	int	// line comment for F1
+	// lead comment for F2
+	F2	int	// line comment for F2
+	// contains filtered or unexported fields
+}
diff --git a/internal/backport/go/printer/testdata/comments2.golden b/internal/backport/go/printer/testdata/comments2.golden
new file mode 100644
index 0000000..8b3a94d
--- /dev/null
+++ b/internal/backport/go/printer/testdata/comments2.golden
@@ -0,0 +1,164 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+   c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+   c1d
+*/
+/*
+c1e
+foo */
+/*
+   c1f
+   foo */
+
+func f() {
+	/*
+	   c2a
+	*/
+	/*
+	   c2b
+	*/
+	/* foo
+	   c2c
+	*/
+	/* foo
+	   c2d
+	*/
+	/*
+	   c2e
+	   foo */
+	/*
+	   c2f
+	   foo */
+}
+
+func g() {
+	/*
+	   c3a
+	*/
+	/*
+	   c3b
+	*/
+	/* foo
+	   c3c
+	*/
+	/* foo
+	   c3d
+	*/
+	/*
+	   c3e
+	   foo */
+	/*
+	   c3f
+	   foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+	/*
+	   prints test 5 times
+	*/
+	for i := 0; i < 5; i++ {
+		println("test")
+	}
+}
+
+func issue5623() {
+L:
+	_ = yyyyyyyyyyyyyyyy			// comment - should be aligned
+	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	/* comment */
+
+	_ = yyyyyyyyyyyyyyyy			/* comment - should be aligned */
+	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	// comment
+
+LLLLLLL:
+	_ = yyyyyyyyyyyyyyyy			// comment - should be aligned
+	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	// comment
+
+LL:
+LLLLL:
+	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	/* comment */
+	_ = yyyyyyyyyyyyyyyy			/* comment - should be aligned */
+
+	_ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx	// comment
+	_ = yyyyyyyyyyyyyyyy			// comment - should be aligned
+
+	// test case from issue
+label:
+	mask := uint64(1)<<c - 1		// Allocation mask
+	used := atomic.LoadUint64(&h.used)	// Current allocations
+}
+
+// Test cases for issue 18782
+var _ = [][]int{
+	/*       a, b, c, d, e */
+	/* a */ {0, 0, 0, 0, 0},
+	/* b */ {0, 5, 4, 4, 4},
+	/* c */ {0, 4, 5, 4, 4},
+	/* d */ {0, 4, 4, 5, 4},
+	/* e */ {0, 4, 4, 4, 5},
+}
+
+var _ = T{ /* a */ 0}
+
+var _ = T{ /* a */ /* b */ 0}
+
+var _ = T{	/* a */	/* b */
+	/* c */ 0,
+}
+
+var _ = T{	/* a */	/* b */
+	/* c */
+	/* d */ 0,
+}
+
+var _ = T{
+	/* a */
+	/* b */ 0,
+}
+
+var _ = T{ /* a */ {}}
+
+var _ = T{ /* a */ /* b */ {}}
+
+var _ = T{	/* a */	/* b */
+	/* c */ {},
+}
+
+var _ = T{	/* a */	/* b */
+	/* c */
+	/* d */ {},
+}
+
+var _ = T{
+	/* a */
+	/* b */ {},
+}
+
+var _ = []T{
+	func() {
+		var _ = [][]int{
+			/*       a, b, c, d, e */
+			/* a */ {0, 0, 0, 0, 0},
+			/* b */ {0, 5, 4, 4, 4},
+			/* c */ {0, 4, 5, 4, 4},
+			/* d */ {0, 4, 4, 5, 4},
+			/* e */ {0, 4, 4, 4, 5},
+		}
+	},
+}
diff --git a/internal/backport/go/printer/testdata/comments2.input b/internal/backport/go/printer/testdata/comments2.input
new file mode 100644
index 0000000..8d38c41
--- /dev/null
+++ b/internal/backport/go/printer/testdata/comments2.input
@@ -0,0 +1,168 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+   c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+   c1d
+*/
+/*
+c1e
+foo */
+/*
+   c1f
+   foo */
+
+func f() {
+/*
+c2a
+*/
+/*
+   c2b
+*/
+/* foo
+c2c
+*/
+/* foo
+   c2d
+*/
+/*
+c2e
+foo */
+/*
+   c2f
+   foo */
+}
+
+func g() {
+/*
+c3a
+*/
+/*
+   c3b
+*/
+/* foo
+c3c
+*/
+/* foo
+   c3d
+*/
+/*
+c3e
+foo */
+/*
+   c3f
+   foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+/*
+prints test 5 times
+*/
+   for i := 0; i < 5; i++ {
+      println("test")
+   }
+}
+
+func issue5623() {
+L:
+   _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+
+   _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LLLLLLL:
+   _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LL:
+LLLLL:
+   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+   _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+
+   _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+   _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+
+// test case from issue
+label:
+   mask := uint64(1)<<c - 1 // Allocation mask
+   used := atomic.LoadUint64(&h.used) // Current allocations
+}
+
+// Test cases for issue 18782
+var _ = [][]int{
+   /*       a, b, c, d, e */
+   /* a */ {0, 0, 0, 0, 0},
+   /* b */ {0, 5, 4, 4, 4},
+   /* c */ {0, 4, 5, 4, 4},
+   /* d */ {0, 4, 4, 5, 4},
+   /* e */ {0, 4, 4, 4, 5},
+}
+
+var _ = T{ /* a */ 0,
+}
+
+var _ = T{ /* a */ /* b */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */
+   /* d */ 0,
+}
+
+var _ = T{
+   /* a */
+   /* b */ 0,
+}
+
+var _ = T{ /* a */ {},
+}
+
+var _ = T{ /* a */ /* b */ {},
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */ {},
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */
+   /* d */ {},
+}
+
+var _ = T{
+   /* a */
+   /* b */ {},
+}
+
+var _ = []T{
+   func() {
+      var _ = [][]int{
+         /*       a, b, c, d, e */
+         /* a */ {0, 0, 0, 0, 0},
+         /* b */ {0, 5, 4, 4, 4},
+         /* c */ {0, 4, 5, 4, 4},
+         /* d */ {0, 4, 4, 5, 4},
+         /* e */ {0, 4, 4, 4, 5},
+      }
+   },
+}
diff --git a/internal/backport/go/printer/testdata/complit.input b/internal/backport/go/printer/testdata/complit.input
new file mode 100644
index 0000000..82806a4
--- /dev/null
+++ b/internal/backport/go/printer/testdata/complit.input
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package complit
+
+var (
+	// Multi-line declarations
+	V1 = T{
+		F1: "hello",
+		f2: 1,
+	}
+	V2 = T{
+		f2: 1,
+		F1: "hello",
+	}
+	V3 = T{
+		F1: "hello",
+		F2: T2{
+			A: "world",
+			b: "hidden",
+		},
+		f3: T2{
+			A: "world",
+		},
+	}
+	V4 = T{
+		f2: 1,
+	}
+
+	// Single-line declarations
+	V5 = T{F1: "hello", f2: 1}
+	V6 = T{f2: 1, F1: "hello"}
+	V7 = T{f2: 1}
+
+	// Mixed-mode declarations
+	V8 = T{
+		F1: "hello", f2: 1,
+		F3: "world",
+		f4: 2}
+	V9 = T{
+	f2: 1, F1: "hello",}
+	V10 = T{
+		F1: "hello", f2: 1,
+		f3: 2,
+		F4: "world", f5: 3,
+	}
+
+	// Other miscellaneous declarations
+	V11 = T{
+		t{
+			A: "world",
+			b: "hidden",
+		},
+		f2: t{
+			A: "world",
+			b: "hidden",
+		},
+	}
+	V12 = T{
+		F1: make(chan int),
+		f2: []int{},
+		F3: make(map[int]string), f4: 1,
+	}
+)
\ No newline at end of file
diff --git a/internal/backport/go/printer/testdata/complit.x b/internal/backport/go/printer/testdata/complit.x
new file mode 100644
index 0000000..458ac61
--- /dev/null
+++ b/internal/backport/go/printer/testdata/complit.x
@@ -0,0 +1,62 @@
+package complit
+
+var (
+	// Multi-line declarations
+	V1	= T{
+		F1: "hello",
+		// contains filtered or unexported fields
+	}
+	V2	= T{
+
+		F1: "hello",
+		// contains filtered or unexported fields
+	}
+	V3	= T{
+		F1:	"hello",
+		F2: T2{
+			A: "world",
+			// contains filtered or unexported fields
+		},
+		// contains filtered or unexported fields
+	}
+	V4	= T{
+		// contains filtered or unexported fields
+	}
+
+	// Single-line declarations
+	V5	= T{F1: "hello", /* contains filtered or unexported fields */}
+	V6	= T{F1: "hello", /* contains filtered or unexported fields */}
+	V7	= T{/* contains filtered or unexported fields */}
+
+	// Mixed-mode declarations
+	V8	= T{
+		F1:	"hello",
+		F3:	"world",
+		// contains filtered or unexported fields
+	}
+	V9	= T{
+		F1: "hello",
+		// contains filtered or unexported fields
+	}
+	V10	= T{
+		F1:	"hello",
+
+		F4:	"world",
+		// contains filtered or unexported fields
+	}
+
+	// Other miscellaneous declarations
+	V11	= T{
+		t{
+			A: "world",
+			// contains filtered or unexported fields
+		},
+		// contains filtered or unexported fields
+	}
+	V12	= T{
+		F1:	make(chan int),
+
+		F3:	make(map[int]string),
+		// contains filtered or unexported fields
+	}
+)
diff --git a/internal/backport/go/printer/testdata/declarations.golden b/internal/backport/go/printer/testdata/declarations.golden
new file mode 100644
index 0000000..fe0f783
--- /dev/null
+++ b/internal/backport/go/printer/testdata/declarations.golden
@@ -0,0 +1,1008 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "io"
+
+import (
+	_ "io"
+)
+
+import _ "io"
+
+import (
+	"io"
+	"io"
+	"io"
+)
+
+import (
+	"io"
+	aLongRename "io"
+
+	b "io"
+)
+
+import (
+	"unrenamed"
+	renamed "renameMe"
+	. "io"
+	_ "io"
+	"io"
+	. "os"
+)
+
+// no newlines between consecutive single imports, but
+// respect extra line breaks in the source (at most one empty line)
+import _ "io"
+import _ "io"
+import _ "io"
+
+import _ "os"
+import _ "os"
+import _ "os"
+
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+
+import "foo"	// a comment
+import "bar"	// a comment
+
+import (
+	_ "foo"
+	// a comment
+	"bar"
+	"foo"	// a comment
+	"bar"	// a comment
+)
+
+// comments + renames
+import (
+	"unrenamed"	// a comment
+	renamed "renameMe"
+	. "io"		/* a comment */
+	_ "io/ioutil"	// a comment
+	"io"		// testing alignment
+	. "os"
+	// a comment
+)
+
+// a case that caused problems in the past (comment placement)
+import (
+	. "fmt"
+	"io"
+	"malloc"	// for the malloc count test only
+	"math"
+	"strings"
+	"testing"
+)
+
+// more import examples
+import (
+	"xxx"
+	"much_longer_name"	// comment
+	"short_name"		// comment
+)
+
+import (
+	_ "xxx"
+	"much_longer_name"	// comment
+)
+
+import (
+	mymath "math"
+	"/foo/bar/long_package_path"	// a comment
+)
+
+import (
+	"package_a"	// comment
+	"package_b"
+	my_better_c "package_c"	// comment
+	"package_d"		// comment
+	my_e "package_e"	// comment
+
+	"package_a"	// comment
+	"package_bb"
+	"package_ccc"	// comment
+	"package_dddd"	// comment
+)
+
+// print import paths as double-quoted strings
+// (we would like more test cases but the go/parser
+// already excludes most incorrect paths, and we don't
+// bother setting up test-ASTs manually)
+import (
+	"fmt"
+	"math"
+)
+
+// at least one empty line between declarations of different kind
+import _ "io"
+
+var _ int
+
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+
+// T2 comment
+type T2 struct {
+}	// should be a two-line struct
+
+// T3 comment
+type T2 struct {
+}	// should be a two-line struct
+
+// printing of constant literals
+const (
+	_	= "foobar"
+	_	= "a۰۱۸"
+	_	= "foo६४"
+	_	= "bar9876"
+	_	= 0
+	_	= 1
+	_	= 123456789012345678890
+	_	= 01234567
+	_	= 0xcafebabe
+	_	= 0.
+	_	= .0
+	_	= 3.14159265
+	_	= 1e0
+	_	= 1e+100
+	_	= 1e-100
+	_	= 2.71828e-1000
+	_	= 0i
+	_	= 1i
+	_	= 012345678901234567889i
+	_	= 123456789012345678890i
+	_	= 0.i
+	_	= .0i
+	_	= 3.14159265i
+	_	= 1e0i
+	_	= 1e+100i
+	_	= 1e-100i
+	_	= 2.71828e-1000i
+	_	= 'a'
+	_	= '\000'
+	_	= '\xFF'
+	_	= '\uff16'
+	_	= '\U0000ff16'
+	_	= `foobar`
+	_	= `foo
+---
+---
+bar`
+)
+
+func _() {
+	type _ int
+	type _ *int
+	type _ []int
+	type _ map[string]int
+	type _ chan int
+	type _ func() int
+
+	var _ int
+	var _ *int
+	var _ []int
+	var _ map[string]int
+	var _ chan int
+	var _ func() int
+
+	type _ struct{}
+	type _ *struct{}
+	type _ []struct{}
+	type _ map[string]struct{}
+	type _ chan struct{}
+	type _ func() struct{}
+
+	type _ interface{}
+	type _ *interface{}
+	type _ []interface{}
+	type _ map[string]interface{}
+	type _ chan interface{}
+	type _ func() interface{}
+
+	var _ struct{}
+	var _ *struct{}
+	var _ []struct{}
+	var _ map[string]struct{}
+	var _ chan struct{}
+	var _ func() struct{}
+
+	var _ interface{}
+	var _ *interface{}
+	var _ []interface{}
+	var _ map[string]interface{}
+	var _ chan interface{}
+	var _ func() interface{}
+}
+
+// don't lose blank lines in grouped declarations
+const (
+	_	int	= 0
+	_	float	= 1
+
+	_	string	= "foo"
+
+	_	= iota
+	_
+
+	// a comment
+	_
+
+	_
+)
+
+type (
+	_	int
+	_	struct{}
+
+	_	interface{}
+
+	// a comment
+	_	map[string]int
+)
+
+var (
+	_	int	= 0
+	_	float	= 1
+
+	_	string	= "foo"
+
+	_	bool
+
+	// a comment
+	_	bool
+)
+
+// don't lose blank lines in this struct
+type _ struct {
+	String	struct {
+		Str, Len int
+	}
+	Slice	struct {
+		Array, Len, Cap int
+	}
+	Eface	struct {
+		Typ, Ptr int
+	}
+
+	UncommonType	struct {
+		Name, PkgPath int
+	}
+	CommonType	struct {
+		Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
+	}
+	Type	struct {
+		Typ, Ptr int
+	}
+	StructField	struct {
+		Name, PkgPath, Typ, Tag, Offset int
+	}
+	StructType	struct {
+		Fields int
+	}
+	PtrType	struct {
+		Elem int
+	}
+	SliceType	struct {
+		Elem int
+	}
+	ArrayType	struct {
+		Elem, Len int
+	}
+
+	Stktop	struct {
+		Stackguard, Stackbase, Gobuf int
+	}
+	Gobuf	struct {
+		Sp, Pc, G int
+	}
+	G	struct {
+		Stackbase, Sched, Status, Alllink int
+	}
+}
+
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{}
+type _ struct {
+}
+
+type _ interface{}
+type _ interface {
+}
+
+// no tabs for single or ungrouped decls
+func _() {
+	const xxxxxx = 0
+	type x int
+	var xxx int
+	var yyyy float = 3.14
+	var zzzzz = "bar"
+
+	const (
+		xxxxxx = 0
+	)
+	type (
+		x int
+	)
+	var (
+		xxx int
+	)
+	var (
+		yyyy float = 3.14
+	)
+	var (
+		zzzzz = "bar"
+	)
+}
+
+// tabs for multiple or grouped decls
+func _() {
+	// no entry has a type
+	const (
+		zzzzzz	= 1
+		z	= 2
+		zzz	= 3
+	)
+	// some entries have a type
+	const (
+		xxxxxx			= 1
+		x			= 2
+		xxx			= 3
+		yyyyyyyy	float	= iota
+		yyyy			= "bar"
+		yyy
+		yy	= 2
+	)
+}
+
+func _() {
+	// no entry has a type
+	var (
+		zzzzzz	= 1
+		z	= 2
+		zzz	= 3
+	)
+	// no entry has a value
+	var (
+		_	int
+		_	float
+		_	string
+
+		_	int	// comment
+		_	float	// comment
+		_	string	// comment
+	)
+	// some entries have a type
+	var (
+		xxxxxx		int
+		x		float
+		xxx		string
+		yyyyyyyy	int	= 1234
+		y		float	= 3.14
+		yyyy			= "bar"
+		yyy		string	= "foo"
+	)
+	// mixed entries - all comments should be aligned
+	var (
+		a, b, c			int
+		x			= 10
+		d			int			// comment
+		y				= 20		// comment
+		f, ff, fff, ffff	int	= 0, 1, 2, 3	// comment
+	)
+	// respect original line breaks
+	var _ = []T{
+		T{0x20, "Telugu"},
+	}
+	var _ = []T{
+		// respect original line breaks
+		T{0x20, "Telugu"},
+	}
+}
+
+// use the formatted output rather than the input to decide when to align
+// (was issue 4505)
+const (
+	short		= 2 * (1 + 2)
+	aMuchLongerName	= 3
+)
+
+var (
+	short		= X{}
+	aMuchLongerName	= X{}
+
+	x1	= X{}	// foo
+	x2	= X{}	// foo
+)
+
+func _() {
+	type (
+		xxxxxx	int
+		x	float
+		xxx	string
+		xxxxx	[]x
+		xx	struct{}
+		xxxxxxx	struct {
+			_, _	int
+			_	float
+		}
+		xxxx	chan<- string
+	)
+}
+
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+	umax	uint	= ^uint(0)		// maximum value for a uint
+	bpu		= 1 << (5 + umax>>63)	// bits per uint
+	foo
+	bar	= -1
+)
+
+// typical enum
+const (
+	a	MyType	= iota
+	abcd
+	b
+	c
+	def
+)
+
+// excerpt from godoc.go
+var (
+	goroot		= flag.String("goroot", runtime.GOROOT(), "Go root directory")
+	testDir		= flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+	pkgPath		= flag.String("path", "", "additional package directories (colon-separated)")
+	filter		= flag.String("filter", "", "filter file containing permitted package directory paths")
+	filterMin	= flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+	filterDelay	delayTime	// actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
+// formatting of structs
+type _ struct{}
+
+type _ struct { /* this comment should be visible */
+}
+
+type _ struct {
+	// this comment should be visible and properly indented
+}
+
+type _ struct {	// this comment must not change indentation
+	f			int
+	f, ff, fff, ffff	int
+}
+
+type _ struct {
+	string
+}
+
+type _ struct {
+	string	// comment
+}
+
+type _ struct {
+	string "tag"
+}
+
+type _ struct {
+	string "tag"	// comment
+}
+
+type _ struct {
+	f int
+}
+
+type _ struct {
+	f int	// comment
+}
+
+type _ struct {
+	f int "tag"
+}
+
+type _ struct {
+	f int "tag"	// comment
+}
+
+type _ struct {
+	bool
+	a, b, c			int
+	int			"tag"
+	ES				// comment
+	float			"tag"	// comment
+	f			int	// comment
+	f, ff, fff, ffff	int	// comment
+	g			float	"tag"
+	h			float	"tag"	// comment
+}
+
+type _ struct {
+	a, b,
+	c, d	int	// this line should be indented
+	u, v, w, x	float	// this line should be indented
+	p, q,
+	r, s	float	// this line should be indented
+}
+
+// difficult cases
+type _ struct {
+	bool		// comment
+	text	[]byte	// comment
+}
+
+// formatting of interfaces
+type EI interface{}
+
+type _ interface {
+	EI
+}
+
+type _ interface {
+	f()
+	fffff()
+}
+
+type _ interface {
+	EI
+	f()
+	fffffg()
+}
+
+type _ interface {	// this comment must not change indentation
+	EI				// here's a comment
+	f()				// no blank between identifier and ()
+	fffff()				// no blank between identifier and ()
+	gggggggggggg(x, y, z int)	// hurray
+}
+
+// formatting of variable declarations
+func _() {
+	type day struct {
+		n		int
+		short, long	string
+	}
+	var (
+		Sunday		= day{0, "SUN", "Sunday"}
+		Monday		= day{1, "MON", "Monday"}
+		Tuesday		= day{2, "TUE", "Tuesday"}
+		Wednesday	= day{3, "WED", "Wednesday"}
+		Thursday	= day{4, "THU", "Thursday"}
+		Friday		= day{5, "FRI", "Friday"}
+		Saturday	= day{6, "SAT", "Saturday"}
+	)
+}
+
+// formatting of multi-line variable declarations
+var a1, b1, c1 int	// all on one line
+
+var a2, b2,
+	c2 int	// this line should be indented
+
+var (
+	a3, b3,
+	c3, d3	int	// this line should be indented
+	a4, b4, c4	int	// this line should be indented
+)
+
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+	minRefreshTimeSec	= flag.Int64("min_refresh_time_sec", 604800,
+		"minimum time window between two refreshes for a given user.")
+	x	= flag.Int64("refresh_user_rollout_percent", 100,
+		"temporary flag to ramp up the refresh user rpc")
+	aVeryLongVariableName	= stats.GetVarInt("refresh-user-count")
+)
+
+func _() {
+	var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
+		Headers:	map[string]string{},
+		Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
+			0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
+			0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
+			0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
+		},
+	}
+}
+
+func _() {
+	var Universe = Scope{
+		Names: map[string]*Ident{
+			// basic types
+			"bool":		nil,
+			"byte":		nil,
+			"int8":		nil,
+			"int16":	nil,
+			"int32":	nil,
+			"int64":	nil,
+			"uint8":	nil,
+			"uint16":	nil,
+			"uint32":	nil,
+			"uint64":	nil,
+			"float32":	nil,
+			"float64":	nil,
+			"string":	nil,
+
+			// convenience types
+			"int":		nil,
+			"uint":		nil,
+			"uintptr":	nil,
+			"float":	nil,
+
+			// constants
+			"false":	nil,
+			"true":		nil,
+			"iota":		nil,
+			"nil":		nil,
+
+			// functions
+			"cap":		nil,
+			"len":		nil,
+			"new":		nil,
+			"make":		nil,
+			"panic":	nil,
+			"panicln":	nil,
+			"print":	nil,
+			"println":	nil,
+		},
+	}
+}
+
+// alignment of map composite entries
+var _ = map[int]int{
+	// small key sizes: always align even if size ratios are large
+	a:			a,
+	abcdefghabcdefgh:	a,
+	ab:			a,
+	abc:			a,
+	abcdefgabcdefg:		a,
+	abcd:			a,
+	abcde:			a,
+	abcdef:			a,
+
+	// mixed key sizes: align when key sizes change within accepted ratio
+	abcdefgh:		a,
+	abcdefghabcdefg:	a,
+	abcdefghij:		a,
+	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij:	a,	// outlier - do not align with previous line
+	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij:		a,	// align with previous line
+
+	ab:	a,	// do not align with previous line
+	abcde:	a,	// align with previous line
+}
+
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+	a:			x,
+	b:			y,
+	cccccccccccccccccccc:	z,
+}
+
+// not aligned
+var _ = T2{
+	a:			x,
+	b:			y,
+	ccccccccccccccccccccc:	z,
+}
+
+// aligned
+var _ = T3{
+	aaaaaaaaaaaaaaaaaaaa:	x,
+	b:			y,
+	c:			z,
+}
+
+// not aligned
+var _ = T4{
+	aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:	x,
+	b:						y,
+	c:						z,
+}
+
+// no alignment of map composite entries if they are not the first entry on a line
+var _ = T{0: 0}	// not aligned
+var _ = T{0: 0,	// not aligned
+	1:	1,				// aligned
+	22:	22,				// aligned
+	333:	333, 1234: 12, 12345: 0,	// first on line aligned
+}
+
+// test cases form issue 8685
+// not aligned
+var _ = map[int]string{1: "spring", 2: "summer",
+	3:	"autumn", 4: "winter"}
+
+// not aligned
+var _ = map[string]string{"a": "spring", "b": "summer",
+	"c":	"autumn", "d": "winter"}
+
+// aligned
+var _ = map[string]string{"a": "spring",
+	"b":	"summer",
+	"c":	"autumn",
+	"d":	"winter"}
+
+func _() {
+	var _ = T{
+		a,	// must introduce trailing comma
+	}
+}
+
+// formatting of function results
+func _() func()				{}
+func _() func(int)			{ return nil }
+func _() func(int) int			{ return nil }
+func _() func(int) func(int) func()	{ return nil }
+
+// formatting of consecutive single-line functions
+func _()	{}
+func _()	{}
+func _()	{}
+
+func _()	{}	// an empty line before this function
+func _()	{}
+func _()	{}
+
+func _()		{ f(1, 2, 3) }
+func _(x int) int	{ y := x; return y + 1 }
+func _() int		{ type T struct{}; var x T; return x }
+
+// these must remain multi-line since they are multi-line in the source
+func _() {
+	f(1, 2, 3)
+}
+func _(x int) int {
+	y := x
+	return y + 1
+}
+func _() int {
+	type T struct{}
+	var x T
+	return x
+}
+
+// making function declarations safe for new semicolon rules
+func _()	{ /* single-line function because of "short-ish" comment */ }
+func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */
+}
+
+func _() {
+	/* multi-line func because block is on multiple lines */
+}
+
+// test case for issue #19544
+func _()	{}
+func _longer_name_() {	// this comment must not force the {} from above to alignment
+	// multiple lines
+}
+
+// ellipsis parameters
+func _(...int)
+func _(...*int)
+func _(...[]int)
+func _(...struct{})
+func _(bool, ...interface{})
+func _(bool, ...func())
+func _(bool, ...func(...int))
+func _(bool, ...map[string]int)
+func _(bool, ...chan int)
+
+func _(b bool, x ...int)
+func _(b bool, x ...*int)
+func _(b bool, x ...[]int)
+func _(b bool, x ...struct{})
+func _(x ...interface{})
+func _(x ...func())
+func _(x ...func(...int))
+func _(x ...map[string]int)
+func _(x ...chan int)
+
+// these parameter lists must remain multi-line since they are multi-line in the source
+func _(bool,
+	int) {
+}
+func _(x bool,
+	y int) {
+}
+func _(x,
+	y bool) {
+}
+func _(bool,	// comment
+	int) {
+}
+func _(x bool,	// comment
+	y int) {
+}
+func _(x,	// comment
+	y bool) {
+}
+func _(bool,	// comment
+	// comment
+	int) {
+}
+func _(x bool,	// comment
+	// comment
+	y int) {
+}
+func _(x,	// comment
+	// comment
+	y bool) {
+}
+func _(bool,
+	// comment
+	int) {
+}
+func _(x bool,
+	// comment
+	y int) {
+}
+func _(x,
+	// comment
+	y bool) {
+}
+func _(x,	// comment
+	y,	// comment
+	z bool) {
+}
+func _(x,	// comment
+	y,	// comment
+	z bool) {
+}
+func _(x int,	// comment
+	y float,	// comment
+	z bool) {
+}
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+	stat chan<- *TargetInfo,
+	TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+	a, b, c int,
+) {
+}
+
+func MultiLineSignature1(
+	a, b, c int,
+	u, v, w float,
+) {
+}
+
+func MultiLineSignature2(
+	a, b,
+	c int,
+) {
+}
+
+func MultiLineSignature3(
+	a, b,
+	c int, u, v,
+	w float,
+	x ...int) {
+}
+
+func MultiLineSignature4(
+	a, b, c int,
+	u, v,
+	w float,
+	x ...int) {
+}
+
+func MultiLineSignature5(
+	a, b, c int,
+	u, v, w float,
+	p, q,
+	r string,
+	x ...int) {
+}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+	MultiLineSignature0(
+		a, b, c int,
+	)
+
+	MultiLineSignature1(
+		a, b, c int,
+		u, v, w float,
+	)
+
+	MultiLineSignature2(
+		a, b,
+		c int,
+	)
+
+	MultiLineSignature3(
+		a, b,
+		c int, u, v,
+		w float,
+		x ...int)
+
+	MultiLineSignature4(
+		a, b, c int,
+		u, v,
+		w float,
+		x ...int)
+
+	MultiLineSignature5(
+		a, b, c int,
+		u, v, w float,
+		p, q,
+		r string,
+		x ...int)
+}
+
+// omit superfluous parentheses in parameter lists
+func _(int)
+func _(int)
+func _(x int)
+func _(x int)
+func _(x, y int)
+func _(x, y int)
+
+func _() int
+func _() int
+func _() int
+
+func _() (x int)
+func _() (x int)
+func _() (x int)
+
+// special cases: some channel types require parentheses
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+
+// don't introduce comma after last parameter if the closing ) is on the same line
+// even if the parameter type itself is multi-line (test cases from issue 4533)
+func _(...interface{})
+func _(...interface {
+	m()
+	n()
+})	// no extra comma between } and )
+
+func (t *T) _(...interface{})
+func (t *T) _(...interface {
+	m()
+	n()
+})	// no extra comma between } and )
+
+func _(interface{})
+func _(interface {
+	m()
+})	// no extra comma between } and )
+
+func _(struct{})
+func _(struct {
+	x	int
+	y	int
+})	// no extra comma between } and )
+
+// alias declarations
+
+type c0 struct{}
+type c1 = C
+type c2 = struct{ x int }
+type c3 = p.C
+type (
+	s	struct{}
+	a	= A
+	b	= A
+	c	= foo
+	d	= interface{}
+	ddd	= p.Foo
+)
diff --git a/internal/backport/go/printer/testdata/declarations.input b/internal/backport/go/printer/testdata/declarations.input
new file mode 100644
index 0000000..f34395b
--- /dev/null
+++ b/internal/backport/go/printer/testdata/declarations.input
@@ -0,0 +1,1021 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "io"
+
+import (
+	_ "io"
+)
+
+import _ "io"
+
+import (
+	"io"
+	"io"
+	"io"
+)
+
+import (
+	"io"
+	aLongRename "io"
+
+	b "io"
+)
+
+import (
+       "unrenamed"
+       renamed "renameMe"
+       . "io"
+       _ "io"
+       "io"
+       . "os"
+)
+
+// no newlines between consecutive single imports, but
+// respect extra line breaks in the source (at most one empty line)
+import _ "io"
+import _ "io"
+import _ "io"
+
+import _ "os"
+import _ "os"
+import _ "os"
+
+
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+
+import "foo"  // a comment
+import "bar"  // a comment
+
+import (
+	_ "foo"
+	// a comment
+	"bar"
+	"foo"  // a comment
+	"bar"  // a comment
+)
+
+// comments + renames
+import (
+       "unrenamed" // a comment
+       renamed "renameMe"
+       . "io" /* a comment */
+       _ "io/ioutil" // a comment
+       "io" // testing alignment
+       . "os"
+       // a comment
+)
+
+// a case that caused problems in the past (comment placement)
+import (
+	. "fmt"
+	"io"
+	"malloc"	// for the malloc count test only
+	"math"
+	"strings"
+	"testing"
+)
+
+// more import examples
+import (
+	"xxx"
+	"much_longer_name" // comment
+	"short_name" // comment
+)
+
+import (
+	_ "xxx"
+	"much_longer_name" // comment
+)
+
+import (
+	mymath "math"
+	"/foo/bar/long_package_path" // a comment
+)
+
+import (
+	"package_a" // comment
+	"package_b"
+	my_better_c "package_c" // comment
+	"package_d" // comment
+	my_e "package_e" // comment
+
+	"package_a"    // comment
+	"package_bb"
+	"package_ccc"  // comment
+	"package_dddd" // comment
+)
+
+// print import paths as double-quoted strings
+// (we would like more test cases but the go/parser
+// already excludes most incorrect paths, and we don't
+// bother setting up test-ASTs manually)
+import (
+	`fmt`
+	"math"
+)
+
+// at least one empty line between declarations of different kind
+import _ "io"
+var _ int
+
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+
+// T3 comment
+type T2 struct {
+
+
+} // should be a two-line struct
+
+
+// printing of constant literals
+const (
+	_ = "foobar"
+	_ = "a۰۱۸"
+	_ = "foo६४"
+	_ = "bar9876"
+	_ = 0
+	_ = 1
+	_ = 123456789012345678890
+	_ = 01234567
+	_ = 0xcafebabe
+	_ = 0.
+	_ = .0
+	_ = 3.14159265
+	_ = 1e0
+	_ = 1e+100
+	_ = 1e-100
+	_ = 2.71828e-1000
+	_ = 0i
+	_ = 1i
+	_ = 012345678901234567889i
+	_ = 123456789012345678890i
+	_ = 0.i
+	_ = .0i
+	_ = 3.14159265i
+	_ = 1e0i
+	_ = 1e+100i
+	_ = 1e-100i
+	_ = 2.71828e-1000i
+	_ = 'a'
+	_ = '\000'
+	_ = '\xFF'
+	_ = '\uff16'
+	_ = '\U0000ff16'
+	_ = `foobar`
+	_ = `foo
+---
+---
+bar`
+)
+
+
+func _() {
+	type _ int
+	type _ *int
+	type _ []int
+	type _ map[string]int
+	type _ chan int
+	type _ func() int
+
+	var _ int
+	var _ *int
+	var _ []int
+	var _ map[string]int
+	var _ chan int
+	var _ func() int
+
+	type _ struct{}
+	type _ *struct{}
+	type _ []struct{}
+	type _ map[string]struct{}
+	type _ chan struct{}
+	type _ func() struct{}
+
+	type _ interface{}
+	type _ *interface{}
+	type _ []interface{}
+	type _ map[string]interface{}
+	type _ chan interface{}
+	type _ func() interface{}
+
+	var _ struct{}
+	var _ *struct{}
+	var _ []struct{}
+	var _ map[string]struct{}
+	var _ chan struct{}
+	var _ func() struct{}
+
+	var _ interface{}
+	var _ *interface{}
+	var _ []interface{}
+	var _ map[string]interface{}
+	var _ chan interface{}
+	var _ func() interface{}
+}
+
+
+// don't lose blank lines in grouped declarations
+const (
+	_ int = 0
+	_ float = 1
+
+	_ string = "foo"
+
+	_ = iota
+	_
+	
+	// a comment
+	_
+
+	_
+)
+
+
+type (
+	_ int
+	_ struct {}
+	
+	_ interface{}
+	
+	// a comment
+	_ map[string]int
+)
+
+
+var (
+	_ int = 0
+	_ float = 1
+
+	_ string = "foo"
+
+	_ bool
+	
+	// a comment
+	_ bool
+)
+
+
+// don't lose blank lines in this struct
+type _ struct {
+	String struct {
+		Str, Len int
+	}
+	Slice struct {
+		Array, Len, Cap int
+	}
+	Eface struct {
+		Typ, Ptr int
+	}
+
+	UncommonType struct {
+		Name, PkgPath int
+	}
+	CommonType struct {
+		Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
+	}
+	Type struct {
+		Typ, Ptr int
+	}
+	StructField struct {
+		Name, PkgPath, Typ, Tag, Offset int
+	}
+	StructType struct {
+		Fields int
+	}
+	PtrType struct {
+		Elem int
+	}
+	SliceType struct {
+		Elem int
+	}
+	ArrayType struct {
+		Elem, Len int
+	}
+
+	Stktop struct {
+		Stackguard, Stackbase, Gobuf int
+	}
+	Gobuf struct {
+		Sp, Pc, G int
+	}
+	G struct {
+		Stackbase, Sched, Status, Alllink int
+	}
+}
+
+
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{            }
+type _ struct {
+
+}
+
+type _ interface{            }
+type _ interface {
+
+}
+
+
+// no tabs for single or ungrouped decls
+func _() {
+	const xxxxxx = 0
+	type x int
+	var xxx int
+	var yyyy float = 3.14
+	var zzzzz = "bar"
+
+	const (
+		xxxxxx = 0
+	)
+	type (
+		x int
+	)
+	var (
+		xxx int
+	)
+	var (
+		yyyy float = 3.14
+	)
+	var (
+		zzzzz = "bar"
+	)
+}
+
+// tabs for multiple or grouped decls
+func _() {
+	// no entry has a type
+	const (
+		zzzzzz = 1
+		z = 2
+		zzz = 3
+	)
+	// some entries have a type
+	const (
+		xxxxxx = 1
+		x = 2
+		xxx = 3
+		yyyyyyyy float = iota
+		yyyy = "bar"
+		yyy
+		yy = 2
+	)
+}
+
+func _() {
+	// no entry has a type
+	var (
+		zzzzzz = 1
+		z = 2
+		zzz = 3
+	)
+	// no entry has a value
+	var (
+		_ int
+		_ float
+		_ string
+
+		_ int  // comment
+		_ float  // comment
+		_ string  // comment
+	)
+	// some entries have a type
+	var (
+		xxxxxx int
+		x float
+		xxx string
+		yyyyyyyy int = 1234
+		y float = 3.14
+		yyyy = "bar"
+		yyy string = "foo"
+	)
+	// mixed entries - all comments should be aligned
+	var (
+		a, b, c int
+		x = 10
+		d int  // comment
+		y = 20  // comment
+		f, ff, fff, ffff int = 0, 1, 2, 3  // comment
+	)
+	// respect original line breaks
+	var _ = []T {
+		T{0x20,	"Telugu"},
+	}
+	var _ = []T {
+		// respect original line breaks
+		T{0x20,	"Telugu"},
+	}
+}
+
+// use the formatted output rather than the input to decide when to align
+// (was issue 4505)
+const (
+	short = 2 * (
+	1 + 2)
+	aMuchLongerName = 3
+)
+
+var (
+	short = X{
+	}
+	aMuchLongerName = X{}
+
+	x1 = X{} // foo
+	x2 = X{
+	} // foo
+)
+
+func _() {
+	type (
+		xxxxxx int
+		x float
+		xxx string
+		xxxxx []x
+		xx struct{}
+		xxxxxxx struct {
+			_, _ int
+			_ float
+		}
+		xxxx chan<- string
+	)
+}
+
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+	umax uint                  = ^uint(0) // maximum value for a uint
+	bpu  = 1 << (5 + umax>>63)            // bits per uint
+	foo
+	bar  = -1
+)
+
+// typical enum
+const (
+	a MyType = iota
+	abcd
+	b
+	c
+	def
+)
+
+// excerpt from godoc.go
+var (
+	goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+	testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+	pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+	filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+	filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+	filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
+
+// formatting of structs
+type _ struct{}
+
+type _ struct{ /* this comment should be visible */ }
+
+type _ struct{
+	// this comment should be visible and properly indented
+}
+
+type _ struct {  // this comment must not change indentation
+	f int
+	f, ff, fff, ffff int
+}
+
+type _ struct {
+	string
+}
+
+type _ struct {
+	string  // comment
+}
+
+type _ struct {
+	string "tag"
+}
+
+type _ struct {
+	string "tag"  // comment
+}
+
+type _ struct {
+	f int
+}
+
+type _ struct {
+	f int  // comment
+}
+
+type _ struct {
+	f int "tag"
+}
+
+type _ struct {
+	f int "tag"  // comment
+}
+
+type _ struct {
+	bool
+	a, b, c int
+	int "tag"
+	ES // comment
+	float "tag"  // comment
+	f int  // comment
+	f, ff, fff, ffff int  // comment
+	g float "tag"
+	h float "tag"  // comment
+}
+
+type _ struct { a, b,
+c, d int  // this line should be indented
+u, v, w, x float // this line should be indented
+p, q,
+r, s float // this line should be indented
+}
+
+
+// difficult cases
+type _ struct {
+	bool  // comment
+	text []byte  // comment
+}
+
+
+// formatting of interfaces
+type EI interface{}
+
+type _ interface {
+	EI
+}
+
+type _ interface {
+	f()
+	fffff()
+}
+
+type _ interface {
+	EI
+	f()
+	fffffg()
+}
+
+type _ interface {  // this comment must not change indentation
+	EI  // here's a comment
+	f()  // no blank between identifier and ()
+	fffff()  // no blank between identifier and ()
+	gggggggggggg(x, y, z int) ()  // hurray
+}
+
+
+// formatting of variable declarations
+func _() {
+	type day struct { n int; short, long string }
+	var (
+		Sunday = day{ 0, "SUN", "Sunday" }
+		Monday = day{ 1, "MON", "Monday" }
+		Tuesday = day{ 2, "TUE", "Tuesday" }
+		Wednesday = day{ 3, "WED", "Wednesday" }
+		Thursday = day{ 4, "THU", "Thursday" }
+		Friday = day{ 5, "FRI", "Friday" }
+		Saturday = day{ 6, "SAT", "Saturday" }
+	)
+}
+
+
+// formatting of multi-line variable declarations
+var a1, b1, c1 int  // all on one line
+
+var a2, b2,
+c2 int  // this line should be indented
+
+var (a3, b3,
+c3, d3 int  // this line should be indented
+a4, b4, c4 int  // this line should be indented
+)
+
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+	minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+		"minimum time window between two refreshes for a given user.")
+	x = flag.Int64("refresh_user_rollout_percent", 100,
+		"temporary flag to ramp up the refresh user rpc")
+	aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
+
+func _() {
+	var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
+					Headers: map[string]string{},
+					Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
+			0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
+			0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
+			0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
+		},
+	}
+}
+
+
+func _() {
+	var Universe = Scope {
+		Names: map[string]*Ident {
+			// basic types
+			"bool": nil,
+			"byte": nil,
+			"int8": nil,
+			"int16": nil,
+			"int32": nil,
+			"int64": nil,
+			"uint8": nil,
+			"uint16": nil,
+			"uint32": nil,
+			"uint64": nil,
+			"float32": nil,
+			"float64": nil,
+			"string": nil,
+
+			// convenience types
+			"int": nil,
+			"uint": nil,
+			"uintptr": nil,
+			"float": nil,
+
+			// constants
+			"false": nil,
+			"true": nil,
+			"iota": nil,
+			"nil": nil,
+
+			// functions
+			"cap": nil,
+			"len": nil,
+			"new": nil,
+			"make": nil,
+			"panic": nil,
+			"panicln": nil,
+			"print": nil,
+			"println": nil,
+		},
+	}
+}
+
+
+// alignment of map composite entries
+var _ = map[int]int{
+	// small key sizes: always align even if size ratios are large
+	a: a,
+	abcdefghabcdefgh: a,
+	ab: a,
+	abc: a,
+	abcdefgabcdefg: a,
+	abcd: a,
+	abcde: a,
+	abcdef: a,
+
+	// mixed key sizes: align when key sizes change within accepted ratio
+	abcdefgh: a,
+	abcdefghabcdefg: a,
+	abcdefghij: a,
+	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // outlier - do not align with previous line
+	abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // align with previous line
+
+	ab: a, // do not align with previous line
+	abcde: a, // align with previous line
+}
+
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+	a:                    x,
+	b:                    y,
+	cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+	a: x,
+	b: y,
+	ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+	aaaaaaaaaaaaaaaaaaaa: x,
+	b:                    y,
+	c:                    z,
+}
+
+// not aligned
+var _ = T4{
+	aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+	b:                                       y,
+	c:                                       z,
+}
+
+
+// no alignment of map composite entries if they are not the first entry on a line
+var _ = T{0: 0} // not aligned
+var _ = T{0: 0, // not aligned
+	1: 1, // aligned
+	22: 22, // aligned
+	333: 333, 1234: 12, 12345: 0, // first on line aligned
+}
+
+
+// test cases form issue 8685
+// not aligned
+var _ = map[int]string{1: "spring", 2: "summer",
+					3:             "autumn", 4: "winter"}
+
+// not aligned
+var _ = map[string]string{"a": "spring", "b": "summer",
+	"c": "autumn", "d": "winter"}
+
+// aligned
+var _ = map[string]string{"a": "spring",
+"b": "summer",
+	"c": "autumn",
+"d": "winter"}
+
+
+func _() {
+	var _ = T{
+		a,	// must introduce trailing comma
+	}
+}
+
+
+// formatting of function results
+func _() func() {}
+func _() func(int) { return nil }
+func _() func(int) int { return nil }
+func _() func(int) func(int) func() { return nil }
+
+
+// formatting of consecutive single-line functions
+func _() {}
+func _() {}
+func _() {}
+
+func _() {}  // an empty line before this function
+func _() {}
+func _() {}
+
+func _() { f(1, 2, 3) }
+func _(x int) int { y := x; return y+1 }
+func _() int { type T struct{}; var x T; return x }
+
+// these must remain multi-line since they are multi-line in the source
+func _() {
+	f(1, 2, 3)
+}
+func _(x int) int {
+	y := x; return y+1
+}
+func _() int {
+	type T struct{}; var x T; return x
+}
+
+
+// making function declarations safe for new semicolon rules
+func _() { /* single-line function because of "short-ish" comment */ }
+func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */ }
+
+func _() {
+/* multi-line func because block is on multiple lines */ }
+
+// test case for issue #19544
+func _() {}
+func _longer_name_() { // this comment must not force the {} from above to alignment
+	// multiple lines
+}
+
+// ellipsis parameters
+func _(...int)
+func _(...*int)
+func _(...[]int)
+func _(...struct{})
+func _(bool, ...interface{})
+func _(bool, ...func())
+func _(bool, ...func(...int))
+func _(bool, ...map[string]int)
+func _(bool, ...chan int)
+
+func _(b bool, x ...int)
+func _(b bool, x ...*int)
+func _(b bool, x ...[]int)
+func _(b bool, x ...struct{})
+func _(x ...interface{})
+func _(x ...func())
+func _(x ...func(...int))
+func _(x ...map[string]int)
+func _(x ...chan int)
+
+
+// these parameter lists must remain multi-line since they are multi-line in the source
+func _(bool,
+int) {
+}
+func _(x bool,
+y int) {
+}
+func _(x,
+y bool) {
+}
+func _(bool, // comment
+int) {
+}
+func _(x bool, // comment
+y int) {
+}
+func _(x, // comment
+y bool) {
+}
+func _(bool, // comment
+// comment
+int) {
+}
+func _(x bool, // comment
+// comment
+y int) {
+}
+func _(x, // comment
+// comment
+y bool) {
+}
+func _(bool,
+// comment
+int) {
+}
+func _(x bool,
+// comment
+y int) {
+}
+func _(x,
+// comment
+y bool) {
+}
+func _(x, // comment
+y,// comment
+z bool) {
+}
+func _(x, // comment
+	y,// comment
+	z bool) {
+}
+func _(x int,	// comment
+	y float,	// comment
+	z bool) {
+}
+
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+stat chan<- *TargetInfo,
+TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+a, b, c int,
+) {}
+
+func MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+) {}
+
+func MultiLineSignature2(
+a, b,
+c int,
+) {}
+
+func MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+		x ...int) {}
+
+func MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+		x ...int) {}
+
+func MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+		x ...int) {}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+MultiLineSignature0(
+a, b, c int,
+)
+
+MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+)
+
+MultiLineSignature2(
+a, b,
+c int,
+)
+
+MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+		x ...int)
+
+MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+		x ...int)
+
+MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+		x ...int)
+}
+
+// omit superfluous parentheses in parameter lists
+func _((int))
+func _((((((int))))))
+func _(x (int))
+func _(x (((((int))))))
+func _(x, y (int))
+func _(x, y (((((int))))))
+
+func _() (int)
+func _() ((int))
+func _() ((((((int))))))
+
+func _() (x int)
+func _() (x (int))
+func _() (x (((((int))))))
+
+// special cases: some channel types require parentheses
+func _(x chan(<-chan int))
+func _(x (chan(<-chan int)))
+func _(x ((((chan(<-chan int))))))
+
+func _(x chan<-(chan int))
+func _(x (chan<-(chan int)))
+func _(x ((((chan<-(chan int))))))
+
+// don't introduce comma after last parameter if the closing ) is on the same line
+// even if the parameter type itself is multi-line (test cases from issue 4533)
+func _(...interface{})
+func _(...interface {
+	m()
+	n()
+}) // no extra comma between } and )
+
+func (t *T) _(...interface{})
+func (t *T) _(...interface {
+	m()
+	n()
+}) // no extra comma between } and )
+
+func _(interface{})
+func _(interface {
+	m()
+}) // no extra comma between } and )
+
+func _(struct{})
+func _(struct {
+	x int
+	y int
+}) // no extra comma between } and )
+
+// alias declarations
+
+type c0 struct{}
+type c1 = C
+type c2 = struct{ x int}
+type c3 = p.C
+type (
+	s struct{}
+	a = A
+	b = A
+	c = foo
+	d = interface{}
+	ddd = p.Foo
+)
diff --git a/internal/backport/go/printer/testdata/empty.golden b/internal/backport/go/printer/testdata/empty.golden
new file mode 100644
index 0000000..a055f47
--- /dev/null
+++ b/internal/backport/go/printer/testdata/empty.golden
@@ -0,0 +1,5 @@
+// a comment at the beginning of the file
+
+package empty
+
+// a comment at the end of the file
diff --git a/internal/backport/go/printer/testdata/empty.input b/internal/backport/go/printer/testdata/empty.input
new file mode 100644
index 0000000..a055f47
--- /dev/null
+++ b/internal/backport/go/printer/testdata/empty.input
@@ -0,0 +1,5 @@
+// a comment at the beginning of the file
+
+package empty
+
+// a comment at the end of the file
diff --git a/internal/backport/go/printer/testdata/expressions.golden b/internal/backport/go/printer/testdata/expressions.golden
new file mode 100644
index 0000000..16a68c7
--- /dev/null
+++ b/internal/backport/go/printer/testdata/expressions.golden
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+	x, y, z int
+}
+
+var (
+	a, b, c, d, e						int
+	under_bar						int
+	longIdentifier1, longIdentifier2, longIdentifier3	int
+	t0, t1, t2						T
+	s							string
+	p							*int
+)
+
+func _() {
+	// no spaces around simple or parenthesized expressions
+	_ = (a + 0)
+	_ = a + b
+	_ = a + b + c
+	_ = a + b - c
+	_ = a - b - c
+	_ = a + (b * c)
+	_ = a + (b / c)
+	_ = a - (b % c)
+	_ = 1 + a
+	_ = a + 1
+	_ = a + b + 1
+	_ = s[a]
+	_ = s[a:]
+	_ = s[:b]
+	_ = s[1:2]
+	_ = s[a:b]
+	_ = s[0:len(s)]
+	_ = s[0] << 1
+	_ = (s[0] << 1) & 0xf
+	_ = s[0]<<2 | s[1]>>4
+	_ = "foo" + s
+	_ = s + "foo"
+	_ = 'a' + 'b'
+	_ = len(s) / 2
+	_ = len(t0.x) / a
+
+	// spaces around expressions of different precedence or expressions containing spaces
+	_ = a + -b
+	_ = a - ^b
+	_ = a / *p
+	_ = a + b*c
+	_ = 1 + b*c
+	_ = a + 2*c
+	_ = a + c*2
+	_ = 1 + 2*3
+	_ = s[1 : 2*3]
+	_ = s[a : b-c]
+	_ = s[0:]
+	_ = s[a+b]
+	_ = s[:b-c]
+	_ = s[a+b:]
+	_ = a[a<<b+1]
+	_ = a[a<<b+1:]
+	_ = s[a+b : len(s)]
+	_ = s[len(s):-a]
+	_ = s[a : len(s)+1]
+	_ = s[a:len(s)+1] + s
+
+	// spaces around operators with equal or lower precedence than comparisons
+	_ = a == b
+	_ = a != b
+	_ = a > b
+	_ = a >= b
+	_ = a < b
+	_ = a <= b
+	_ = a < b && c > d
+	_ = a < b || c > d
+
+	// spaces around "long" operands
+	_ = a + longIdentifier1
+	_ = longIdentifier1 + a
+	_ = longIdentifier1 + longIdentifier2*longIdentifier3
+	_ = s + "a longer string"
+
+	// some selected cases
+	_ = a + t0.x
+	_ = a + t0.x + t1.x*t2.x
+	_ = a + b + c + d + e + 2*3
+	_ = a + b + c + 2*3 + d + e
+	_ = (a + b + c) * 2
+	_ = a - b + c - d + (a + b + c) + d&e
+	_ = under_bar - 1
+	_ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
+	_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+	// test case for issue 8021
+	// want:
+	//  ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+	_ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+
+	// the parser does not restrict expressions that may appear as statements
+	true
+	42
+	"foo"
+	x
+	(x)
+	a + b
+	a + b + c
+	a + (b * c)
+	a + (b / c)
+	1 + a
+	a + 1
+	s[a]
+	x << 1
+	(s[0] << 1) & 0xf
+	"foo" + s
+	x == y
+	x < y || z > 42
+}
+
+// slice expressions with cap
+func _() {
+	_ = x[a:b:c]
+	_ = x[a : b : c+d]
+	_ = x[a : b+d : c]
+	_ = x[a : b+d : c+d]
+	_ = x[a+d : b : c]
+	_ = x[a+d : b : c+d]
+	_ = x[a+d : b+d : c]
+	_ = x[a+d : b+d : c+d]
+
+	_ = x[:b:c]
+	_ = x[: b : c+d]
+	_ = x[: b+d : c]
+	_ = x[: b+d : c+d]
+}
+
+func issue22111() {
+	_ = x[:]
+
+	_ = x[:b]
+	_ = x[:b+1]
+
+	_ = x[a:]
+	_ = x[a+1:]
+
+	_ = x[a:b]
+	_ = x[a+1 : b]
+	_ = x[a : b+1]
+	_ = x[a+1 : b+1]
+
+	_ = x[:b:c]
+	_ = x[: b+1 : c]
+	_ = x[: b : c+1]
+	_ = x[: b+1 : c+1]
+
+	_ = x[a:b:c]
+	_ = x[a+1 : b : c]
+	_ = x[a : b+1 : c]
+	_ = x[a+1 : b+1 : c]
+	_ = x[a : b : c+1]
+	_ = x[a+1 : b : c+1]
+	_ = x[a : b+1 : c+1]
+	_ = x[a+1 : b+1 : c+1]
+}
+
+func _() {
+	_ = a + b
+	_ = a + b + c
+	_ = a + b*c
+	_ = a + (b * c)
+	_ = (a + b) * c
+	_ = a + (b * c * d)
+	_ = a + (b*c + d)
+
+	_ = 1 << x
+	_ = -1 << x
+	_ = 1<<x - 1
+	_ = -1<<x - 1
+
+	_ = f(a + b)
+	_ = f(a + b + c)
+	_ = f(a + b*c)
+	_ = f(a + (b * c))
+	_ = f(1<<x-1, 1<<x-2)
+
+	_ = 1<<d.logWindowSize - 1
+
+	buf = make(x, 2*cap(b.buf)+n)
+
+	dst[i*3+2] = dbuf[0] << 2
+	dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+	b.buf = b.buf[0 : b.off+m+n]
+	b.buf = b.buf[0 : b.off+m*n]
+	f(b.buf[0 : b.off+m+n])
+
+	signed += ' ' * 8
+	tw.octal(header[148:155], chksum)
+
+	_ = x > 0 && i >= 0
+
+	x1, x0 := x>>w2, x&m2
+	z0 = t1<<w2 + t0
+	z1 = (t1 + t0>>w2) >> w2
+	q1, r1 := x1/d1, x1%d1
+	r1 = r1*b2 | x0>>w2
+	x1 = (x1 << z) | (x0 >> (uint(w) - z))
+	x1 = x1<<z | x0>>(uint(w)-z)
+
+	_ = buf[0 : len(buf)+1]
+	_ = buf[0 : n+1]
+
+	a, b = b, a
+	a = b + c
+	a = b*c + d
+	_ = a*b + c
+	_ = a - b - c
+	_ = a - (b - c)
+	_ = a - b*c
+	_ = a - (b * c)
+	_ = a * b / c
+	_ = a / *b
+	_ = x[a|^b]
+	_ = x[a / *b]
+	_ = a & ^b
+	_ = a + +b
+	_ = a - -b
+	_ = x[a*-b]
+	_ = x[a + +b]
+	_ = x ^ y ^ z
+	_ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+	_ = len(longVariableName) * 2
+
+	_ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func f(x int, args ...int) {
+	f(0, args...)
+	f(1, args)
+	f(2, args[0])
+
+	// make sure syntactically legal code remains syntactically legal
+	f(3, 42 ...)	// a blank must remain between 42 and ...
+	f(4, 42....)
+	f(5, 42....)
+	f(6, 42.0...)
+	f(7, 42.0...)
+	f(8, .42...)
+	f(9, .42...)
+	f(10, 42e0...)
+	f(11, 42e0...)
+
+	_ = 42 .x	// a blank must remain between 42 and .x
+	_ = 42..x
+	_ = 42..x
+	_ = 42.0.x
+	_ = 42.0.x
+	_ = .42.x
+	_ = .42.x
+	_ = 42e0.x
+	_ = 42e0.x
+
+	// a blank must remain between the binary operator and the 2nd operand
+	_ = x / *y
+	_ = x < -1
+	_ = x < <-1
+	_ = x + +1
+	_ = x - -1
+	_ = x & &x
+	_ = x & ^x
+
+	_ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
+}
+
+func _() {
+	_ = T{}
+	_ = struct{}{}
+	_ = [10]T{}
+	_ = [...]T{}
+	_ = []T{}
+	_ = map[int]T{}
+}
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+	_ = struct{}{}
+	_ = struct{ x int }{0}
+	_ = struct{ x, y, z int }{0, 1, 2}
+	_ = struct{ int }{0}
+	_ = struct{ s struct{ int } }{struct{ int }{0}}
+
+	_ = (interface{})(nil)
+	_ = (interface{ String() string })(nil)
+	_ = (interface {
+		String() string
+	})(nil)
+	_ = (interface{ fmt.Stringer })(nil)
+	_ = (interface {
+		fmt.Stringer
+	})(nil)
+}
+
+func _() {
+	// do not modify literals
+	_ = "tab1	tab2	tab3	end"	// string contains 3 tabs
+	_ = "tab1 tab2 tab3 end"	// same string with 3 blanks - may be unaligned because editors see tabs in strings
+	_ = ""				// this comment should be aligned with the one on the previous line
+	_ = ``
+	_ = `
+`
+	_ = `foo
+		bar`
+	_ = `three spaces before the end of the line starting here:   
+they must not be removed`
+}
+
+func _() {
+	// smart handling of indentation for multi-line raw strings
+	var _ = ``
+	var _ = `foo`
+	var _ = `foo
+bar`
+
+	var _ = ``
+	var _ = `foo`
+	var _ =
+	// the next line should remain indented
+	`foo
+bar`
+
+	var _ =	// comment
+	``
+	var _ =	// comment
+	`foo`
+	var _ =	// comment
+	// the next line should remain indented
+	`foo
+bar`
+
+	var _ = /* comment */ ``
+	var _ = /* comment */ `foo`
+	var _ = /* comment */ `foo
+bar`
+
+	var _ =	/* comment */
+	``
+	var _ =	/* comment */
+	`foo`
+	var _ =	/* comment */
+	// the next line should remain indented
+	`foo
+bar`
+
+	var board = []int(
+		`...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+	var state = S{
+		"foo",
+		// the next line should remain indented
+		`...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+		"bar",
+	}
+}
+
+func _() {
+	// one-line function literals (body is on a single line)
+	_ = func() {}
+	_ = func() int { return 0 }
+	_ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
+
+	// multi-line function literals (body is not on one line)
+	_ = func() {
+	}
+	_ = func() int {
+		return 0
+	}
+	_ = func(x, y int) bool {
+		m := (x + y) / 2
+		return x < y
+	}
+
+	f(func() {
+	})
+	f(func() int {
+		return 0
+	})
+	f(func(x, y int) bool {
+		m := (x + y) / 2
+		return x < y
+	})
+}
+
+func _() {
+	_ = [][]int{
+		[]int{1},
+		[]int{1, 2},
+		[]int{1, 2, 3},
+	}
+	_ = [][]int{
+		{1},
+		[]int{1, 2},
+		[]int{1, 2, 3},
+	}
+	_ = [][]int{
+		{1},
+		{1, 2},
+		{1, 2, 3},
+	}
+	_ = [][]int{{1}, {1, 2}, {1, 2, 3}}
+}
+
+// various multi-line expressions
+func _() {
+	// do not add extra indentation to multi-line string lists
+	_ = "foo" + "bar"
+	_ = "foo" +
+		"bar" +
+		"bah"
+	_ = []string{
+		"abc" +
+			"def",
+		"foo" +
+			"bar",
+	}
+}
+
+const _ = F1 +
+	`string = "%s";` +
+	`ptr = *;` +
+	`datafmt.T2 = s ["-" p "-"];`
+
+const _ = `datafmt "datafmt";` +
+	`default = "%v";` +
+	`array = *;` +
+	`datafmt.T3 = s  {" " a a / ","};`
+
+const _ = `datafmt "datafmt";` +
+	`default = "%v";` +
+	`array = *;` +
+	`datafmt.T3 = s  {" " a a / ","};`
+
+func _() {
+	_ = F1 +
+		`string = "%s";` +
+		`ptr = *;` +
+		`datafmt.T2 = s ["-" p "-"];`
+
+	_ =
+		`datafmt "datafmt";` +
+			`default = "%v";` +
+			`array = *;` +
+			`datafmt.T3 = s  {" " a a / ","};`
+
+	_ = `datafmt "datafmt";` +
+		`default = "%v";` +
+		`array = *;` +
+		`datafmt.T3 = s  {" " a a / ","};`
+}
+
+func _() {
+	// respect source lines in multi-line expressions
+	_ = a +
+		b +
+		c
+	_ = a < b ||
+		b < a
+	_ = "933262154439441526816992388562667004907159682643816214685929" +
+		"638952175999932299156089414639761565182862536979208272237582" +
+		"51185210916864000000000000000000000000"	// 100!
+	_ = "170141183460469231731687303715884105727"	// prime
+}
+
+// Alignment after overlong lines
+const (
+	_	= "991"
+	_	= "2432902008176640000"	// 20!
+	_	= "933262154439441526816992388562667004907159682643816214685929" +
+		"638952175999932299156089414639761565182862536979208272237582" +
+		"51185210916864000000000000000000000000"	// 100!
+	_	= "170141183460469231731687303715884105727"	// prime
+)
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+	_ = a +	// comment
+		b +	// comment
+		c
+	_ = "a" +
+		"b" +	// comment
+		"c"
+	_ = "ba0408" + "7265717569726564"	// field 71, encoding 2, string "required"
+}
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+	f(1,
+		2,
+		3)
+	f(1,
+		2,
+		3,
+	)
+	f(1,
+		2,
+		3)	// comment
+	f(1,
+		2,
+		3,	// comment
+	)
+	f(1,
+		2,
+		3)	// comment
+	f(1,
+		2,
+		3,	// comment
+	)
+}
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+	draw.Yellow,		// yellow
+	draw.Cyan,		// cyan
+	draw.Green,		// lime green
+	draw.GreyBlue,		// slate
+	draw.Red,		/* red */
+	draw.GreyGreen,		/* olive green */
+	draw.Blue,		/* blue */
+	draw.Color(0xFF55AAFF),	/* pink */
+	draw.Color(0xFFAAFFFF),	/* lavender */
+	draw.Color(0xBB005DFF),	/* maroon */
+}
+
+func same(t, u *Time) bool {
+	// respect source lines in multi-line expressions
+	return t.Year == u.Year &&
+		t.Month == u.Month &&
+		t.Day == u.Day &&
+		t.Hour == u.Hour &&
+		t.Minute == u.Minute &&
+		t.Second == u.Second &&
+		t.Weekday == u.Weekday &&
+		t.ZoneOffset == u.ZoneOffset &&
+		t.Zone == u.Zone
+}
+
+func (p *parser) charClass() {
+	// respect source lines in multi-line expressions
+	if cc.negate && len(cc.ranges) == 2 &&
+		cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+		nl := new(_NotNl)
+		p.re.add(nl)
+	}
+}
+
+func addState(s []state, inst instr, match []int) {
+	// handle comments correctly in multi-line expressions
+	for i := 0; i < l; i++ {
+		if s[i].inst.index() == index &&	// same instruction
+			s[i].match[0] < pos {	// earlier match already going; leftmost wins
+			return s
+		}
+	}
+}
+
+func (self *T) foo(x int) *T	{ return self }
+
+func _()	{ module.Func1().Func2() }
+
+func _() {
+	_ = new(T).
+		foo(1).
+		foo(2).
+		foo(3)
+
+	_ = new(T).
+		foo(1).
+		foo(2).	// inline comments
+		foo(3)
+
+	_ = new(T).foo(1).foo(2).foo(3)
+
+	// handle multiline argument list correctly
+	_ = new(T).
+		foo(
+			1).
+		foo(2)
+
+	_ = new(T).foo(
+		1).foo(2)
+
+	_ = Array[3+
+		4]
+
+	_ = Method(1, 2,
+		3)
+
+	_ = new(T).
+		foo().
+		bar().(*Type)
+
+	_ = new(T).
+		foo().
+		bar().(*Type).
+		baz()
+
+	_ = new(T).
+		foo().
+		bar()["idx"]
+
+	_ = new(T).
+		foo().
+		bar()["idx"].
+		baz()
+
+	_ = new(T).
+		foo().
+		bar()[1:2]
+
+	_ = new(T).
+		foo().
+		bar()[1:2].
+		baz()
+
+	_ = new(T).
+		Field.
+		Array[3+
+		4].
+		Table["foo"].
+		Blob.(*Type).
+		Slices[1:4].
+		Method(1, 2,
+			3).
+		Thingy
+
+	_ = a.b.c
+	_ = a.
+		b.
+		c
+	_ = a.b().c
+	_ = a.
+		b().
+		c
+	_ = a.b[0].c
+	_ = a.
+		b[0].
+		c
+	_ = a.b[0:].c
+	_ = a.
+		b[0:].
+		c
+	_ = a.b.(T).c
+	_ = a.
+		b.(T).
+		c
+}
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+	// os.Open parameters should remain on two lines
+	if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+		os.O_TRUNC, 0666); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+	_ = append(s, a...)
+	_ = append(
+		s, a...)
+	_ = append(s,
+		a...)
+	_ = append(
+		s,
+		a...)
+	_ = append(s, a...,
+	)
+	_ = append(s,
+		a...,
+	)
+	_ = append(
+		s,
+		a...,
+	)
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+	// these conversions should be rewritten to look
+	// the same as the parenthesized conversions below
+	_ = (func())(nil)
+	_ = (func(x int) float)(nil)
+	_ = (func() func() func())(nil)
+
+	_ = (func())(nil)
+	_ = (func(x int) float)(nil)
+	_ = (func() func() func())(nil)
+}
+
+func _() {
+	_ = f().
+		f(func() {
+			f()
+		}).
+		f(map[int]int{
+			1:	2,
+			3:	4,
+		})
+
+	_ = f().
+		f(
+			func() {
+				f()
+			},
+		)
+}
diff --git a/internal/backport/go/printer/testdata/expressions.input b/internal/backport/go/printer/testdata/expressions.input
new file mode 100644
index 0000000..8c523b6
--- /dev/null
+++ b/internal/backport/go/printer/testdata/expressions.input
@@ -0,0 +1,771 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+	x, y, z int
+}
+
+var (
+	a, b, c, d, e int
+	under_bar int
+	longIdentifier1, longIdentifier2, longIdentifier3 int
+	t0, t1, t2 T
+	s string
+	p *int
+)
+
+
+func _() {
+	// no spaces around simple or parenthesized expressions
+	_ = (a+0)
+	_ = a+b
+	_ = a+b+c
+	_ = a+b-c
+	_ = a-b-c
+	_ = a+(b*c)
+	_ = a+(b/c)
+	_ = a-(b%c)
+	_ = 1+a
+	_ = a+1
+	_ = a+b+1
+	_ = s[a]
+	_ = s[a:]
+	_ = s[:b]
+	_ = s[1:2]
+	_ = s[a:b]
+	_ = s[0:len(s)]
+	_ = s[0]<<1
+	_ = (s[0]<<1)&0xf
+	_ = s[0] << 2 | s[1] >> 4
+	_ = "foo"+s
+	_ = s+"foo"
+	_ = 'a'+'b'
+	_ = len(s)/2
+	_ = len(t0.x)/a
+
+	// spaces around expressions of different precedence or expressions containing spaces
+	_ = a + -b
+	_ = a - ^b
+	_ = a / *p
+	_ = a + b*c
+	_ = 1 + b*c
+	_ = a + 2*c
+	_ = a + c*2
+	_ = 1 + 2*3
+	_ = s[1 : 2*3]
+	_ = s[a : b-c]
+	_ = s[0:]
+	_ = s[a+b]
+	_ = s[: b-c]
+	_ = s[a+b :]
+	_ = a[a<<b+1]
+	_ = a[a<<b+1 :]
+	_ = s[a+b : len(s)]
+	_ = s[len(s) : -a]
+	_ = s[a : len(s)+1]
+	_ = s[a : len(s)+1]+s
+
+	// spaces around operators with equal or lower precedence than comparisons
+	_ = a == b
+	_ = a != b
+	_ = a > b
+	_ = a >= b
+	_ = a < b
+	_ = a <= b
+	_ = a < b && c > d
+	_ = a < b || c > d
+
+	// spaces around "long" operands
+	_ = a + longIdentifier1
+	_ = longIdentifier1 + a
+	_ = longIdentifier1 + longIdentifier2 * longIdentifier3
+	_ = s + "a longer string"
+
+	// some selected cases
+	_ = a + t0.x
+	_ = a + t0.x + t1.x * t2.x
+	_ = a + b + c + d + e + 2*3
+	_ = a + b + c + 2*3 + d + e
+	_ = (a+b+c)*2
+	_ = a - b + c - d + (a+b+c) + d&e
+	_ = under_bar-1
+	_ = Open(dpath + "/file", O_WRONLY | O_CREAT, 0666)
+	_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+	// test case for issue 8021
+	// want:
+	//  ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+	_ = ([]bool{})[([]int{})[((1) + (((((1) + (((((((1) * (((((1) + (1))) + (1))))) + (1))) * (1))))) + (1))))]]
+
+	// the parser does not restrict expressions that may appear as statements
+	true
+	42
+	"foo"
+	x
+	(x)
+	a+b
+	a+b+c
+	a+(b*c)
+	a+(b/c)
+	1+a
+	a+1
+	s[a]
+	x<<1
+	(s[0]<<1)&0xf
+	"foo"+s
+	x == y
+	x < y || z > 42
+}
+
+
+// slice expressions with cap
+func _() {
+	_ = x[a:b:c]
+	_ = x[a:b:c+d]
+	_ = x[a:b+d:c]
+	_ = x[a:b+d:c+d]
+	_ = x[a+d:b:c]
+	_ = x[a+d:b:c+d]
+	_ = x[a+d:b+d:c]
+	_ = x[a+d:b+d:c+d]
+
+	_ = x[:b:c]
+	_ = x[:b:c+d]
+	_ = x[:b+d:c]
+	_ = x[:b+d:c+d]
+}
+
+func issue22111() {
+	_ = x[:]
+
+	_ = x[:b]
+	_ = x[:b+1]
+
+	_ = x[a:]
+	_ = x[a+1:]
+
+	_ = x[a:b]
+	_ = x[a+1:b]
+	_ = x[a:b+1]
+	_ = x[a+1:b+1]
+
+	_ = x[:b:c]
+	_ = x[:b+1:c]
+	_ = x[:b:c+1]
+	_ = x[:b+1:c+1]
+
+	_ = x[a:b:c]
+	_ = x[a+1:b:c]
+	_ = x[a:b+1:c]
+	_ = x[a+1:b+1:c]
+	_ = x[a:b:c+1]
+	_ = x[a+1:b:c+1]
+	_ = x[a:b+1:c+1]
+	_ = x[a+1:b+1:c+1]
+}
+
+func _() {
+	_ = a+b
+	_ = a+b+c
+	_ = a+b*c
+	_ = a+(b*c)
+	_ = (a+b)*c
+	_ = a+(b*c*d)
+	_ = a+(b*c+d)
+
+	_ = 1<<x
+	_ = -1<<x
+	_ = 1<<x-1
+	_ = -1<<x-1
+
+	_ = f(a+b)
+	_ = f(a+b+c)
+	_ = f(a+b*c)
+	_ = f(a+(b*c))
+	_ = f(1<<x-1, 1<<x-2)
+
+	_ = 1<<d.logWindowSize-1
+
+	buf = make(x, 2*cap(b.buf) + n)
+
+	dst[i*3+2] = dbuf[0]<<2
+	dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+	b.buf = b.buf[0:b.off+m+n]
+	b.buf = b.buf[0:b.off+m*n]
+	f(b.buf[0:b.off+m+n])
+
+	signed += ' '*8
+	tw.octal(header[148:155], chksum)
+
+	_ = x > 0 && i >= 0
+
+	x1, x0 := x>>w2, x&m2
+	z0 = t1<<w2+t0
+	z1 = (t1+t0>>w2)>>w2
+	q1, r1 := x1/d1, x1%d1
+	r1 = r1*b2 | x0>>w2
+	x1 = (x1<<z)|(x0>>(uint(w)-z))
+	x1 = x1<<z | x0>>(uint(w)-z)
+
+	_ = buf[0:len(buf)+1]
+	_ = buf[0:n+1]
+
+	a,b = b,a
+	a = b+c
+	a = b*c+d
+	_ = a*b+c
+	_ = a-b-c
+	_ = a-(b-c)
+	_ = a-b*c
+	_ = a-(b*c)
+	_ = a*b/c
+	_ = a/ *b
+	_ = x[a|^b]
+	_ = x[a/ *b]
+	_ = a& ^b
+	_ = a+ +b
+	_ = a- -b
+	_ = x[a*-b]
+	_ = x[a+ +b]
+	_ = x^y^z
+	_ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+	_ = len(longVariableName)*2
+
+	_ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+
+func f(x int, args ...int) {
+	f(0, args...)
+	f(1, args)
+	f(2, args[0])
+
+	// make sure syntactically legal code remains syntactically legal
+	f(3, 42 ...) // a blank must remain between 42 and ...
+	f(4, 42. ...)
+	f(5, 42....)
+	f(6, 42.0 ...)
+	f(7, 42.0...)
+	f(8, .42 ...)
+	f(9, .42...)
+	f(10, 42e0 ...)
+	f(11, 42e0...)
+
+	_ = 42 .x // a blank must remain between 42 and .x
+	_ = 42. .x
+	_ = 42..x
+	_ = 42.0 .x
+	_ = 42.0.x
+	_ = .42 .x
+	_ = .42.x
+	_ = 42e0 .x
+	_ = 42e0.x
+
+	// a blank must remain between the binary operator and the 2nd operand
+	_ = x/ *y
+	_ = x< -1
+	_ = x< <-1
+	_ = x+ +1
+	_ = x- -1
+	_ = x& &x
+	_ = x& ^x
+
+	_ = f(x/ *y, x< -1, x< <-1, x+ +1, x- -1, x& &x, x& ^x)
+}
+
+
+func _() {
+	_ = T{}
+	_ = struct{}{}
+	_ = [10]T{}
+	_ = [...]T{}
+	_ = []T{}
+	_ = map[int]T{}
+}
+
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+	_ = struct{}{}
+	_ = struct{ x int }{0}
+	_ = struct{ x, y, z int }{0, 1, 2}
+	_ = struct{ int }{0}
+	_ = struct{ s struct { int } }{struct{ int}{0} }
+
+	_ = (interface{})(nil)
+	_ = (interface{String() string})(nil)
+	_ = (interface{
+		String()    string
+	})(nil)
+	_ = (interface{fmt.Stringer})(nil)
+	_ = (interface{
+		    fmt.Stringer
+	})(nil)
+}
+
+func _() {
+	// do not modify literals
+	_ = "tab1	tab2	tab3	end"  // string contains 3 tabs
+	_ = "tab1 tab2 tab3 end"  // same string with 3 blanks - may be unaligned because editors see tabs in strings
+	_ = ""  // this comment should be aligned with the one on the previous line
+	_ = ``
+	_ = `
+`
+_ = `foo
+		bar`
+	_ = `three spaces before the end of the line starting here:   
+they must not be removed`
+}
+
+
+func _() {
+	// smart handling of indentation for multi-line raw strings
+	var _ = ``
+	var _ = `foo`
+	var _ = `foo
+bar`
+
+
+var _ =
+	``
+var _ =
+	`foo`
+var _ =
+	// the next line should remain indented
+	`foo
+bar`
+
+
+	var _ = // comment
+		``
+	var _ = // comment
+		`foo`
+	var _ = // comment
+		// the next line should remain indented
+		`foo
+bar`
+
+
+var _ = /* comment */ ``
+var _ = /* comment */ `foo`
+var _ = /* comment */ `foo
+bar`
+
+
+	var _ = /* comment */
+		``
+	var _ = /* comment */
+		`foo`
+	var _ = /* comment */
+		// the next line should remain indented
+		`foo
+bar`
+
+
+var board = []int(
+	`...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+
+	var state = S{
+		"foo",
+		// the next line should remain indented
+		`...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+		"bar",
+	}
+}
+
+
+func _() {
+	// one-line function literals (body is on a single line)
+	_ = func() {}
+	_ = func() int { return 0 }
+	_ = func(x, y int) bool { m := (x+y)/2; return m < 0 }
+
+	// multi-line function literals (body is not on one line)
+	_ = func() {
+	}
+	_ = func() int {
+		return 0
+	}
+	_ = func(x, y int) bool {
+		m := (x+y)/2; return x < y }
+
+	f(func() {
+	})
+	f(func() int {
+		return 0
+	})
+	f(func(x, y int) bool {
+		m := (x+y)/2; return x < y })
+}
+
+
+func _() {
+	_ = [][]int {
+		[]int{1},
+		[]int{1, 2},
+		[]int{1, 2, 3},
+	}
+	_ = [][]int {
+		{1},
+		[]int{1, 2},
+		[]int{1, 2, 3},
+	}
+	_ = [][]int {
+		{1},
+		{1, 2},
+		{1, 2, 3},
+	}
+	_ = [][]int {{1}, {1, 2}, {1, 2, 3}}
+}
+
+
+// various multi-line expressions
+func _() {
+	// do not add extra indentation to multi-line string lists
+	_ = "foo" + "bar"
+	_ = "foo" +
+	"bar" +
+	"bah"
+	_ = []string {
+		"abc" +
+		"def",
+		"foo" +
+		"bar",
+	}
+}
+
+
+const _ = F1 +
+	`string = "%s";` +
+	`ptr = *;` +
+	`datafmt.T2 = s ["-" p "-"];`
+
+
+const _ =
+	`datafmt "datafmt";` +
+	`default = "%v";` +
+	`array = *;` +
+	`datafmt.T3 = s  {" " a a / ","};`
+
+
+const _ = `datafmt "datafmt";` +
+`default = "%v";` +
+`array = *;` +
+`datafmt.T3 = s  {" " a a / ","};`
+
+
+func _() {
+	_ = F1 +
+		`string = "%s";` +
+		`ptr = *;` +
+		`datafmt.T2 = s ["-" p "-"];`
+
+	_ =
+		`datafmt "datafmt";` +
+		`default = "%v";` +
+		`array = *;` +
+		`datafmt.T3 = s  {" " a a / ","};`
+
+	_ = `datafmt "datafmt";` +
+	`default = "%v";` +
+	`array = *;` +
+	`datafmt.T3 = s  {" " a a / ","};`
+}
+
+
+func _() {
+	// respect source lines in multi-line expressions
+	_ = a+
+	b+
+	c
+	_ = a < b ||
+		b < a
+	_ = "933262154439441526816992388562667004907159682643816214685929" +
+	"638952175999932299156089414639761565182862536979208272237582" +
+	"51185210916864000000000000000000000000"  // 100!
+	_ = "170141183460469231731687303715884105727"  // prime
+}
+
+
+// Alignment after overlong lines
+const (
+	_ = "991"
+	_ = "2432902008176640000"  // 20!
+	_ = "933262154439441526816992388562667004907159682643816214685929" +
+	"638952175999932299156089414639761565182862536979208272237582" +
+	"51185210916864000000000000000000000000"  // 100!
+	_ = "170141183460469231731687303715884105727"  // prime
+)
+
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+	_ = a +  // comment
+		b +  // comment
+		c
+	_ = "a"	+
+		"b" +	// comment
+		"c"
+	_ = "ba0408" + "7265717569726564"     // field 71, encoding 2, string "required"
+}
+
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+	f(1,
+		2,
+		3)
+	f(1,
+		2,
+		3,
+	)
+	f(1,
+		2,
+		3)  // comment
+	f(1,
+		2,
+		3,  // comment
+	)
+	f(1,
+		2,
+		3)// comment
+	f(1,
+		2,
+		3,// comment
+	)
+}
+
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+	draw.Yellow, // yellow
+	draw.Cyan, // cyan
+	draw.Green, // lime green
+	draw.GreyBlue, // slate
+	draw.Red, /* red */
+	draw.GreyGreen, /* olive green */
+	draw.Blue, /* blue */
+	draw.Color(0xFF55AAFF), /* pink */
+	draw.Color(0xFFAAFFFF), /* lavender */
+	draw.Color(0xBB005DFF), /* maroon */
+}
+
+
+func same(t, u *Time) bool {
+	// respect source lines in multi-line expressions
+	return t.Year == u.Year &&
+		t.Month == u.Month &&
+		t.Day == u.Day &&
+		t.Hour == u.Hour &&
+		t.Minute == u.Minute &&
+		t.Second == u.Second &&
+		t.Weekday == u.Weekday &&
+		t.ZoneOffset == u.ZoneOffset &&
+		t.Zone == u.Zone
+}
+
+
+func (p *parser) charClass() {
+	// respect source lines in multi-line expressions
+	if cc.negate && len(cc.ranges) == 2 &&
+		cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+		nl := new(_NotNl)
+		p.re.add(nl)
+	}
+}
+
+
+func addState(s []state, inst instr, match []int) {
+	// handle comments correctly in multi-line expressions
+	for i := 0; i < l; i++ {
+		if s[i].inst.index() == index && // same instruction
+		   s[i].match[0] < pos {	// earlier match already going; leftmost wins
+		   	return s
+		 }
+	}
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+	_ = new(T).
+		foo(1).
+			foo(2).
+		foo(3)
+
+	_ = new(T).
+	foo(1).
+	foo(2). // inline comments
+	foo(3)
+
+	_ = new(T).foo(1).foo(2).foo(3)
+
+	// handle multiline argument list correctly
+	_ = new(T).
+	foo(
+		1).
+		foo(2)
+
+	_ = new(T).foo(
+		1).foo(2)
+
+	_ = Array[3 +
+4]
+
+	_ = Method(1, 2,
+		3)
+
+	_ = new(T).
+   foo().
+   bar() . (*Type)
+
+	_ = new(T).
+foo().
+bar().(*Type).
+baz()
+
+	_ = new(T).
+	foo().
+	bar()["idx"]
+
+	_ = new(T).
+	foo().
+	bar()["idx"]	.
+	baz()
+
+	_ = new(T).
+	foo().
+	bar()[1:2]
+
+	_ = new(T).
+	foo().
+	bar()[1:2].
+	baz()
+
+	_ = new(T).
+		Field.
+		Array[3+
+       		4].
+		Table ["foo"].
+		Blob. (*Type).
+	Slices[1:4].
+	Method(1, 2,
+	3).
+		Thingy
+
+	_ = a.b.c
+	_ = a.
+	b.
+	c
+	_ = a.b().c
+	_ = a.
+	b().
+	c
+	_ = a.b[0].c
+	_ = a.
+	b[0].
+	c
+	_ = a.b[0:].c
+	_ = a.
+	b[0:].
+	c
+	_ = a.b.(T).c
+	_ = a.
+	b.
+	(T).
+	c
+}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+	// os.Open parameters should remain on two lines
+	if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+		os.O_TRUNC, 0666); err != nil {
+	    log.Fatal(err)
+	}
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+	_ = append(s, a...)
+	_ = append(
+		s, a...)
+	_ = append(s,
+		a...)
+	_ = append(
+		s,
+		a...)
+	_ = append(s, a...,
+	)
+	_ = append(s,
+		a...,
+	)
+	_ = append(
+		s,
+		a...,
+	)
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+	// these conversions should be rewritten to look
+	// the same as the parenthesized conversions below
+	_ = func()()(nil)
+	_ = func(x int)(float)(nil)
+	_ = func() func() func()()(nil)
+
+	_ = (func()())(nil)
+	_ = (func(x int)(float))(nil)
+	_ = (func() func() func()())(nil)
+}
+
+func _() {
+	_ = f().
+	f(func() {
+		f()
+	}).
+	f(map[int]int{
+	1: 2,
+	3: 4,
+})
+
+	_ = f().
+	f(
+	func() {
+		f()
+	},
+	)
+}
diff --git a/internal/backport/go/printer/testdata/expressions.raw b/internal/backport/go/printer/testdata/expressions.raw
new file mode 100644
index 0000000..058fded
--- /dev/null
+++ b/internal/backport/go/printer/testdata/expressions.raw
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+	x, y, z int
+}
+
+var (
+	a, b, c, d, e	int
+	under_bar	int
+	longIdentifier1, longIdentifier2, longIdentifier3	int
+	t0, t1, t2	T
+	s	string
+	p	*int
+)
+
+func _() {
+	// no spaces around simple or parenthesized expressions
+	_ = (a + 0)
+	_ = a + b
+	_ = a + b + c
+	_ = a + b - c
+	_ = a - b - c
+	_ = a + (b * c)
+	_ = a + (b / c)
+	_ = a - (b % c)
+	_ = 1 + a
+	_ = a + 1
+	_ = a + b + 1
+	_ = s[a]
+	_ = s[a:]
+	_ = s[:b]
+	_ = s[1:2]
+	_ = s[a:b]
+	_ = s[0:len(s)]
+	_ = s[0] << 1
+	_ = (s[0] << 1) & 0xf
+	_ = s[0]<<2 | s[1]>>4
+	_ = "foo" + s
+	_ = s + "foo"
+	_ = 'a' + 'b'
+	_ = len(s) / 2
+	_ = len(t0.x) / a
+
+	// spaces around expressions of different precedence or expressions containing spaces
+	_ = a + -b
+	_ = a - ^b
+	_ = a / *p
+	_ = a + b*c
+	_ = 1 + b*c
+	_ = a + 2*c
+	_ = a + c*2
+	_ = 1 + 2*3
+	_ = s[1 : 2*3]
+	_ = s[a : b-c]
+	_ = s[0:]
+	_ = s[a+b]
+	_ = s[:b-c]
+	_ = s[a+b:]
+	_ = a[a<<b+1]
+	_ = a[a<<b+1:]
+	_ = s[a+b : len(s)]
+	_ = s[len(s):-a]
+	_ = s[a : len(s)+1]
+	_ = s[a:len(s)+1] + s
+
+	// spaces around operators with equal or lower precedence than comparisons
+	_ = a == b
+	_ = a != b
+	_ = a > b
+	_ = a >= b
+	_ = a < b
+	_ = a <= b
+	_ = a < b && c > d
+	_ = a < b || c > d
+
+	// spaces around "long" operands
+	_ = a + longIdentifier1
+	_ = longIdentifier1 + a
+	_ = longIdentifier1 + longIdentifier2*longIdentifier3
+	_ = s + "a longer string"
+
+	// some selected cases
+	_ = a + t0.x
+	_ = a + t0.x + t1.x*t2.x
+	_ = a + b + c + d + e + 2*3
+	_ = a + b + c + 2*3 + d + e
+	_ = (a + b + c) * 2
+	_ = a - b + c - d + (a + b + c) + d&e
+	_ = under_bar - 1
+	_ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
+	_ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+	// test case for issue 8021
+	// want:
+	//  ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+	_ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+
+	// the parser does not restrict expressions that may appear as statements
+	true
+	42
+	"foo"
+	x
+	(x)
+	a + b
+	a + b + c
+	a + (b * c)
+	a + (b / c)
+	1 + a
+	a + 1
+	s[a]
+	x << 1
+	(s[0] << 1) & 0xf
+	"foo" + s
+	x == y
+	x < y || z > 42
+}
+
+// slice expressions with cap
+func _() {
+	_ = x[a:b:c]
+	_ = x[a : b : c+d]
+	_ = x[a : b+d : c]
+	_ = x[a : b+d : c+d]
+	_ = x[a+d : b : c]
+	_ = x[a+d : b : c+d]
+	_ = x[a+d : b+d : c]
+	_ = x[a+d : b+d : c+d]
+
+	_ = x[:b:c]
+	_ = x[: b : c+d]
+	_ = x[: b+d : c]
+	_ = x[: b+d : c+d]
+}
+
+func issue22111() {
+	_ = x[:]
+
+	_ = x[:b]
+	_ = x[:b+1]
+
+	_ = x[a:]
+	_ = x[a+1:]
+
+	_ = x[a:b]
+	_ = x[a+1 : b]
+	_ = x[a : b+1]
+	_ = x[a+1 : b+1]
+
+	_ = x[:b:c]
+	_ = x[: b+1 : c]
+	_ = x[: b : c+1]
+	_ = x[: b+1 : c+1]
+
+	_ = x[a:b:c]
+	_ = x[a+1 : b : c]
+	_ = x[a : b+1 : c]
+	_ = x[a+1 : b+1 : c]
+	_ = x[a : b : c+1]
+	_ = x[a+1 : b : c+1]
+	_ = x[a : b+1 : c+1]
+	_ = x[a+1 : b+1 : c+1]
+}
+
+func _() {
+	_ = a + b
+	_ = a + b + c
+	_ = a + b*c
+	_ = a + (b * c)
+	_ = (a + b) * c
+	_ = a + (b * c * d)
+	_ = a + (b*c + d)
+
+	_ = 1 << x
+	_ = -1 << x
+	_ = 1<<x - 1
+	_ = -1<<x - 1
+
+	_ = f(a + b)
+	_ = f(a + b + c)
+	_ = f(a + b*c)
+	_ = f(a + (b * c))
+	_ = f(1<<x-1, 1<<x-2)
+
+	_ = 1<<d.logWindowSize - 1
+
+	buf = make(x, 2*cap(b.buf)+n)
+
+	dst[i*3+2] = dbuf[0] << 2
+	dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+	b.buf = b.buf[0 : b.off+m+n]
+	b.buf = b.buf[0 : b.off+m*n]
+	f(b.buf[0 : b.off+m+n])
+
+	signed += ' ' * 8
+	tw.octal(header[148:155], chksum)
+
+	_ = x > 0 && i >= 0
+
+	x1, x0 := x>>w2, x&m2
+	z0 = t1<<w2 + t0
+	z1 = (t1 + t0>>w2) >> w2
+	q1, r1 := x1/d1, x1%d1
+	r1 = r1*b2 | x0>>w2
+	x1 = (x1 << z) | (x0 >> (uint(w) - z))
+	x1 = x1<<z | x0>>(uint(w)-z)
+
+	_ = buf[0 : len(buf)+1]
+	_ = buf[0 : n+1]
+
+	a, b = b, a
+	a = b + c
+	a = b*c + d
+	_ = a*b + c
+	_ = a - b - c
+	_ = a - (b - c)
+	_ = a - b*c
+	_ = a - (b * c)
+	_ = a * b / c
+	_ = a / *b
+	_ = x[a|^b]
+	_ = x[a / *b]
+	_ = a & ^b
+	_ = a + +b
+	_ = a - -b
+	_ = x[a*-b]
+	_ = x[a + +b]
+	_ = x ^ y ^ z
+	_ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+	_ = len(longVariableName) * 2
+
+	_ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func f(x int, args ...int) {
+	f(0, args...)
+	f(1, args)
+	f(2, args[0])
+
+	// make sure syntactically legal code remains syntactically legal
+	f(3, 42 ...)	// a blank must remain between 42 and ...
+	f(4, 42....)
+	f(5, 42....)
+	f(6, 42.0...)
+	f(7, 42.0...)
+	f(8, .42...)
+	f(9, .42...)
+	f(10, 42e0...)
+	f(11, 42e0...)
+
+	_ = 42 .x	// a blank must remain between 42 and .x
+	_ = 42..x
+	_ = 42..x
+	_ = 42.0.x
+	_ = 42.0.x
+	_ = .42.x
+	_ = .42.x
+	_ = 42e0.x
+	_ = 42e0.x
+
+	// a blank must remain between the binary operator and the 2nd operand
+	_ = x / *y
+	_ = x < -1
+	_ = x < <-1
+	_ = x + +1
+	_ = x - -1
+	_ = x & &x
+	_ = x & ^x
+
+	_ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
+}
+
+func _() {
+	_ = T{}
+	_ = struct{}{}
+	_ = [10]T{}
+	_ = [...]T{}
+	_ = []T{}
+	_ = map[int]T{}
+}
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+	_ = struct{}{}
+	_ = struct{ x int }{0}
+	_ = struct{ x, y, z int }{0, 1, 2}
+	_ = struct{ int }{0}
+	_ = struct{ s struct{ int } }{struct{ int }{0}}
+
+	_ = (interface{})(nil)
+	_ = (interface{ String() string })(nil)
+	_ = (interface {
+		String() string
+	})(nil)
+	_ = (interface{ fmt.Stringer })(nil)
+	_ = (interface {
+		fmt.Stringer
+	})(nil)
+}
+
+func _() {
+	// do not modify literals
+	_ = "tab1	tab2	tab3	end"	// string contains 3 tabs
+	_ = "tab1 tab2 tab3 end"	// same string with 3 blanks - may be unaligned because editors see tabs in strings
+	_ = ""	// this comment should be aligned with the one on the previous line
+	_ = ``
+	_ = `
+`
+	_ = `foo
+		bar`
+	_ = `three spaces before the end of the line starting here:   
+they must not be removed`
+}
+
+func _() {
+	// smart handling of indentation for multi-line raw strings
+	var _ = ``
+	var _ = `foo`
+	var _ = `foo
+bar`
+
+	var _ = ``
+	var _ = `foo`
+	var _ =
+	// the next line should remain indented
+	`foo
+bar`
+
+	var _ =	// comment
+	``
+	var _ =	// comment
+	`foo`
+	var _ =	// comment
+	// the next line should remain indented
+	`foo
+bar`
+
+	var _ = /* comment */ ``
+	var _ = /* comment */ `foo`
+	var _ = /* comment */ `foo
+bar`
+
+	var _ =	/* comment */
+	``
+	var _ =	/* comment */
+	`foo`
+	var _ =	/* comment */
+	// the next line should remain indented
+	`foo
+bar`
+
+	var board = []int(
+		`...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+	var state = S{
+		"foo",
+		// the next line should remain indented
+		`...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+		"bar",
+	}
+}
+
+func _() {
+	// one-line function literals (body is on a single line)
+	_ = func() {}
+	_ = func() int { return 0 }
+	_ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
+
+	// multi-line function literals (body is not on one line)
+	_ = func() {
+	}
+	_ = func() int {
+		return 0
+	}
+	_ = func(x, y int) bool {
+		m := (x + y) / 2
+		return x < y
+	}
+
+	f(func() {
+	})
+	f(func() int {
+		return 0
+	})
+	f(func(x, y int) bool {
+		m := (x + y) / 2
+		return x < y
+	})
+}
+
+func _() {
+	_ = [][]int{
+		[]int{1},
+		[]int{1, 2},
+		[]int{1, 2, 3},
+	}
+	_ = [][]int{
+		{1},
+		[]int{1, 2},
+		[]int{1, 2, 3},
+	}
+	_ = [][]int{
+		{1},
+		{1, 2},
+		{1, 2, 3},
+	}
+	_ = [][]int{{1}, {1, 2}, {1, 2, 3}}
+}
+
+// various multi-line expressions
+func _() {
+	// do not add extra indentation to multi-line string lists
+	_ = "foo" + "bar"
+	_ = "foo" +
+		"bar" +
+		"bah"
+	_ = []string{
+		"abc" +
+			"def",
+		"foo" +
+			"bar",
+	}
+}
+
+const _ = F1 +
+	`string = "%s";` +
+	`ptr = *;` +
+	`datafmt.T2 = s ["-" p "-"];`
+
+const _ = `datafmt "datafmt";` +
+	`default = "%v";` +
+	`array = *;` +
+	`datafmt.T3 = s  {" " a a / ","};`
+
+const _ = `datafmt "datafmt";` +
+	`default = "%v";` +
+	`array = *;` +
+	`datafmt.T3 = s  {" " a a / ","};`
+
+func _() {
+	_ = F1 +
+		`string = "%s";` +
+		`ptr = *;` +
+		`datafmt.T2 = s ["-" p "-"];`
+
+	_ =
+		`datafmt "datafmt";` +
+			`default = "%v";` +
+			`array = *;` +
+			`datafmt.T3 = s  {" " a a / ","};`
+
+	_ = `datafmt "datafmt";` +
+		`default = "%v";` +
+		`array = *;` +
+		`datafmt.T3 = s  {" " a a / ","};`
+}
+
+func _() {
+	// respect source lines in multi-line expressions
+	_ = a +
+		b +
+		c
+	_ = a < b ||
+		b < a
+	_ = "933262154439441526816992388562667004907159682643816214685929" +
+		"638952175999932299156089414639761565182862536979208272237582" +
+		"51185210916864000000000000000000000000"	// 100!
+	_ = "170141183460469231731687303715884105727"	// prime
+}
+
+// Alignment after overlong lines
+const (
+	_	= "991"
+	_	= "2432902008176640000"		// 20!
+	_	= "933262154439441526816992388562667004907159682643816214685929" +
+		"638952175999932299156089414639761565182862536979208272237582" +
+		"51185210916864000000000000000000000000"	// 100!
+	_	= "170141183460469231731687303715884105727"		// prime
+)
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+	_ = a +	// comment
+		b +	// comment
+		c
+	_ = "a" +
+		"b" +	// comment
+		"c"
+	_ = "ba0408" + "7265717569726564"	// field 71, encoding 2, string "required"
+}
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+	f(1,
+		2,
+		3)
+	f(1,
+		2,
+		3,
+	)
+	f(1,
+		2,
+		3)	// comment
+	f(1,
+		2,
+		3,	// comment
+	)
+	f(1,
+		2,
+		3)	// comment
+	f(1,
+		2,
+		3,	// comment
+	)
+}
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+	draw.Yellow,	// yellow
+	draw.Cyan,	// cyan
+	draw.Green,	// lime green
+	draw.GreyBlue,	// slate
+	draw.Red,	/* red */
+	draw.GreyGreen,	/* olive green */
+	draw.Blue,	/* blue */
+	draw.Color(0xFF55AAFF),	/* pink */
+	draw.Color(0xFFAAFFFF),	/* lavender */
+	draw.Color(0xBB005DFF),	/* maroon */
+}
+
+func same(t, u *Time) bool {
+	// respect source lines in multi-line expressions
+	return t.Year == u.Year &&
+		t.Month == u.Month &&
+		t.Day == u.Day &&
+		t.Hour == u.Hour &&
+		t.Minute == u.Minute &&
+		t.Second == u.Second &&
+		t.Weekday == u.Weekday &&
+		t.ZoneOffset == u.ZoneOffset &&
+		t.Zone == u.Zone
+}
+
+func (p *parser) charClass() {
+	// respect source lines in multi-line expressions
+	if cc.negate && len(cc.ranges) == 2 &&
+		cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+		nl := new(_NotNl)
+		p.re.add(nl)
+	}
+}
+
+func addState(s []state, inst instr, match []int) {
+	// handle comments correctly in multi-line expressions
+	for i := 0; i < l; i++ {
+		if s[i].inst.index() == index &&	// same instruction
+			s[i].match[0] < pos {	// earlier match already going; leftmost wins
+			return s
+		}
+	}
+}
+
+func (self *T) foo(x int) *T	{ return self }
+
+func _()	{ module.Func1().Func2() }
+
+func _() {
+	_ = new(T).
+		foo(1).
+		foo(2).
+		foo(3)
+
+	_ = new(T).
+		foo(1).
+		foo(2).	// inline comments
+		foo(3)
+
+	_ = new(T).foo(1).foo(2).foo(3)
+
+	// handle multiline argument list correctly
+	_ = new(T).
+		foo(
+			1).
+		foo(2)
+
+	_ = new(T).foo(
+		1).foo(2)
+
+	_ = Array[3+
+		4]
+
+	_ = Method(1, 2,
+		3)
+
+	_ = new(T).
+		foo().
+		bar().(*Type)
+
+	_ = new(T).
+		foo().
+		bar().(*Type).
+		baz()
+
+	_ = new(T).
+		foo().
+		bar()["idx"]
+
+	_ = new(T).
+		foo().
+		bar()["idx"].
+		baz()
+
+	_ = new(T).
+		foo().
+		bar()[1:2]
+
+	_ = new(T).
+		foo().
+		bar()[1:2].
+		baz()
+
+	_ = new(T).
+		Field.
+		Array[3+
+		4].
+		Table["foo"].
+		Blob.(*Type).
+		Slices[1:4].
+		Method(1, 2,
+			3).
+		Thingy
+
+	_ = a.b.c
+	_ = a.
+		b.
+		c
+	_ = a.b().c
+	_ = a.
+		b().
+		c
+	_ = a.b[0].c
+	_ = a.
+		b[0].
+		c
+	_ = a.b[0:].c
+	_ = a.
+		b[0:].
+		c
+	_ = a.b.(T).c
+	_ = a.
+		b.(T).
+		c
+}
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+	// os.Open parameters should remain on two lines
+	if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+		os.O_TRUNC, 0666); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+	_ = append(s, a...)
+	_ = append(
+		s, a...)
+	_ = append(s,
+		a...)
+	_ = append(
+		s,
+		a...)
+	_ = append(s, a...,
+	)
+	_ = append(s,
+		a...,
+	)
+	_ = append(
+		s,
+		a...,
+	)
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+	// these conversions should be rewritten to look
+	// the same as the parenthesized conversions below
+	_ = (func())(nil)
+	_ = (func(x int) float)(nil)
+	_ = (func() func() func())(nil)
+
+	_ = (func())(nil)
+	_ = (func(x int) float)(nil)
+	_ = (func() func() func())(nil)
+}
+
+func _() {
+	_ = f().
+		f(func() {
+			f()
+		}).
+		f(map[int]int{
+			1:	2,
+			3:	4,
+		})
+
+	_ = f().
+		f(
+			func() {
+				f()
+			},
+		)
+}
diff --git a/internal/backport/go/printer/testdata/generics.golden b/internal/backport/go/printer/testdata/generics.golden
new file mode 100644
index 0000000..c3a7df8
--- /dev/null
+++ b/internal/backport/go/printer/testdata/generics.golden
@@ -0,0 +1,92 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generics
+
+func _[A, B any](a A, b B) int	{}
+func _[T any](x, y T) T
+
+type T[P any] struct{}
+type T[P1, P2, P3 any] struct{}
+
+type T[P C] struct{}
+type T[P1, P2, P3 C] struct{}
+
+type T[P C[P]] struct{}
+type T[P1, P2, P3 C[P1, P2, P3]] struct{}
+
+func f[P any](x P)
+func f[P1, P2, P3 any](x1 P1, x2 P2, x3 P3) struct{}
+
+func f[P interface{}](x P)
+func f[P1, P2, P3 interface {
+	m1(P1)
+	~P2 | ~P3
+}](x1 P1, x2 P2, x3 P3) struct{}
+func f[P any](T1[P], T2[P]) T3[P]
+
+func (x T[P]) m()
+func (T[P]) m(x T[P]) P
+
+func _() {
+	type _ []T[P]
+	var _ []T[P]
+	_ = []T[P]{}
+}
+
+// type constraint literals with elided interfaces
+func _[P ~int, Q int | string]()	{}
+func _[P struct{ f int }, Q *P]()	{}
+
+// various potentially ambiguous type parameter lists (issue #49482)
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P T] struct{}
+type _[P T, _ any] struct{}
+
+type _[P *struct{}] struct{}
+type _[P *struct{}] struct{}
+type _[P []int] struct{}
+
+// array type declarations
+type _ [P(T)]struct{}
+type _ [P((T))]struct{}
+type _ [P * *T]struct{}
+type _ [P * T]struct{}
+type _ [P(*T)]struct{}
+type _ [P(**T)]struct{}
+type _ [P * T]struct{}
+type _ [P*T - T]struct{}
+
+type _[
+	P *T,
+] struct{}
+
+// equivalent test cases for potentially ambiguous type parameter lists, except
+// for function declarations there is no ambiguity (issue #51548)
+func _[P *T]()		{}
+func _[P *T, _ any]()	{}
+func _[P *T]()		{}
+func _[P *T, _ any]()	{}
+func _[P T]()		{}
+func _[P T, _ any]()	{}
+
+func _[P *struct{}]()	{}
+func _[P *struct{}]()	{}
+func _[P []int]()	{}
+
+func _[P T]()	{}
+func _[P T]()	{}
+func _[P **T]()	{}
+func _[P *T]()	{}
+func _[P *T]()	{}
+func _[P **T]()	{}
+func _[P *T]()	{}
+
+func _[
+	P *T,
+]() {
+}
diff --git a/internal/backport/go/printer/testdata/generics.input b/internal/backport/go/printer/testdata/generics.input
new file mode 100644
index 0000000..66e1554
--- /dev/null
+++ b/internal/backport/go/printer/testdata/generics.input
@@ -0,0 +1,88 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generics
+
+func _[A, B any](a A, b B) int {}
+func _[T any](x, y T) T
+
+type T[P any] struct{}
+type T[P1, P2, P3 any] struct{}
+
+type T[P C] struct{}
+type T[P1, P2, P3 C] struct{}
+
+type T[P C[P]] struct{}
+type T[P1, P2, P3 C[P1, P2, P3]] struct{}
+
+func f[P any](x P)
+func f[P1, P2, P3 any](x1 P1, x2 P2, x3 P3) struct{}
+
+func f[P interface{}](x P)
+func f[P1, P2, P3 interface{ m1(P1); ~P2|~P3 }](x1 P1, x2 P2, x3 P3) struct{}
+func f[P any](T1[P], T2[P]) T3[P]
+
+func (x T[P]) m()
+func ((T[P])) m(x T[P]) P
+
+func _() {
+	type _ []T[P]
+	var _ []T[P]
+	_ = []T[P]{}
+}
+
+// type constraint literals with elided interfaces
+func _[P ~int, Q int | string]() {}
+func _[P struct{f int}, Q *P]() {}
+
+// various potentially ambiguous type parameter lists (issue #49482)
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P (*T),] struct{}
+type _[P (*T), _ any] struct{}
+type _[P (T),] struct{}
+type _[P (T), _ any] struct{}
+
+type _[P *struct{}] struct{}
+type _[P (*struct{})] struct{}
+type _[P ([]int)] struct{}
+
+// array type declarations
+type _ [P(T)]struct{}
+type _ [P((T))]struct{}
+type _ [P * *T]struct{}
+type _ [P * T]struct{}
+type _ [P(*T)]struct{}
+type _ [P(**T)]struct{}
+type _ [P * T]struct{}
+type _ [P * T - T]struct{}
+
+type _[
+	P *T,
+] struct{}
+
+// equivalent test cases for potentially ambiguous type parameter lists, except
+// for function declarations there is no ambiguity (issue #51548)
+func _[P *T,]() {}
+func _[P *T, _ any]() {}
+func _[P (*T),]() {}
+func _[P (*T), _ any]() {}
+func _[P (T),]() {}
+func _[P (T), _ any]() {}
+
+func _[P *struct{}] () {}
+func _[P (*struct{})] () {}
+func _[P ([]int)] () {}
+
+func _ [P(T)]() {}
+func _ [P((T))]() {}
+func _ [P * *T]() {}
+func _ [P * T]() {}
+func _ [P(*T)]() {}
+func _ [P(**T)]() {}
+func _ [P * T]() {}
+
+func _[
+	P *T,
+]() {}
diff --git a/internal/backport/go/printer/testdata/go2numbers.golden b/internal/backport/go/printer/testdata/go2numbers.golden
new file mode 100644
index 0000000..3c12049
--- /dev/null
+++ b/internal/backport/go/printer/testdata/go2numbers.golden
@@ -0,0 +1,186 @@
+package p
+
+const (
+	// 0-octals
+	_	= 0
+	_	= 0123
+	_	= 0123456
+
+	_	= 0_123
+	_	= 0123_456
+
+	// decimals
+	_	= 1
+	_	= 1234
+	_	= 1234567
+
+	_	= 1_234
+	_	= 1_234_567
+
+	// hexadecimals
+	_	= 0x0
+	_	= 0x1234
+	_	= 0xcafef00d
+
+	_	= 0X0
+	_	= 0X1234
+	_	= 0XCAFEf00d
+
+	_	= 0X_0
+	_	= 0X_1234
+	_	= 0X_CAFE_f00d
+
+	// octals
+	_	= 0o0
+	_	= 0o1234
+	_	= 0o01234567
+
+	_	= 0O0
+	_	= 0O1234
+	_	= 0O01234567
+
+	_	= 0o_0
+	_	= 0o_1234
+	_	= 0o0123_4567
+
+	_	= 0O_0
+	_	= 0O_1234
+	_	= 0O0123_4567
+
+	// binaries
+	_	= 0b0
+	_	= 0b1011
+	_	= 0b00101101
+
+	_	= 0B0
+	_	= 0B1011
+	_	= 0B00101101
+
+	_	= 0b_0
+	_	= 0b10_11
+	_	= 0b_0010_1101
+
+	// decimal floats
+	_	= 0.
+	_	= 123.
+	_	= 0123.
+
+	_	= .0
+	_	= .123
+	_	= .0123
+
+	_	= 0e0
+	_	= 123e+0
+	_	= 0123E-1
+
+	_	= 0e-0
+	_	= 123E+0
+	_	= 0123E123
+
+	_	= 0.e+1
+	_	= 123.E-10
+	_	= 0123.e123
+
+	_	= .0e-1
+	_	= .123E+10
+	_	= .0123E123
+
+	_	= 0.0
+	_	= 123.123
+	_	= 0123.0123
+
+	_	= 0.0e1
+	_	= 123.123E-10
+	_	= 0123.0123e+456
+
+	_	= 1_2_3.
+	_	= 0_123.
+
+	_	= 0_0e0
+	_	= 1_2_3e0
+	_	= 0_123e0
+
+	_	= 0e-0_0
+	_	= 1_2_3E+0
+	_	= 0123E1_2_3
+
+	_	= 0.e+1
+	_	= 123.E-1_0
+	_	= 01_23.e123
+
+	_	= .0e-1
+	_	= .123E+10
+	_	= .0123E123
+
+	_	= 1_2_3.123
+	_	= 0123.01_23
+
+	// hexadecimal floats
+	_	= 0x0.p+0
+	_	= 0Xdeadcafe.p-10
+	_	= 0x1234.P123
+
+	_	= 0x.1p-0
+	_	= 0X.deadcafep2
+	_	= 0x.1234P+10
+
+	_	= 0x0p0
+	_	= 0Xdeadcafep+1
+	_	= 0x1234P-10
+
+	_	= 0x0.0p0
+	_	= 0Xdead.cafep+1
+	_	= 0x12.34P-10
+
+	_	= 0Xdead_cafep+1
+	_	= 0x_1234P-10
+
+	_	= 0X_dead_cafe.p-10
+	_	= 0x12_34.P1_2_3
+	_	= 0X1_2_3_4.P-1_2_3
+
+	// imaginaries
+	_	= 0i
+	_	= 00i
+	_	= 08i
+	_	= 0000000000i
+	_	= 0123i
+	_	= 0000000123i
+	_	= 0000056789i
+	_	= 1234i
+	_	= 1234567i
+
+	_	= 0i
+	_	= 0_0i
+	_	= 0_8i
+	_	= 0_000_000_000i
+	_	= 0_123i
+	_	= 0_000_000_123i
+	_	= 0_000_056_789i
+	_	= 1_234i
+	_	= 1_234_567i
+
+	_	= 0.i
+	_	= 123.i
+	_	= 0123.i
+	_	= 000123.i
+
+	_	= 0e0i
+	_	= 123e0i
+	_	= 0123E0i
+	_	= 000123E0i
+
+	_	= 0.e+1i
+	_	= 123.E-1_0i
+	_	= 01_23.e123i
+	_	= 00_01_23.e123i
+
+	_	= 0b1010i
+	_	= 0B1010i
+	_	= 0o660i
+	_	= 0O660i
+	_	= 0xabcDEFi
+	_	= 0XabcDEFi
+	_	= 0xabcDEFP0i
+	_	= 0XabcDEFp0i
+)
diff --git a/internal/backport/go/printer/testdata/go2numbers.input b/internal/backport/go/printer/testdata/go2numbers.input
new file mode 100644
index 0000000..f3e7828
--- /dev/null
+++ b/internal/backport/go/printer/testdata/go2numbers.input
@@ -0,0 +1,186 @@
+package p
+
+const (
+	// 0-octals
+	_ = 0
+	_ = 0123
+	_ = 0123456
+
+	_ = 0_123
+	_ = 0123_456
+
+	// decimals
+	_ = 1
+	_ = 1234
+	_ = 1234567
+
+	_ = 1_234
+	_ = 1_234_567
+
+	// hexadecimals
+	_ = 0x0
+	_ = 0x1234
+	_ = 0xcafef00d
+
+	_ = 0X0
+	_ = 0X1234
+	_ = 0XCAFEf00d
+
+	_ = 0X_0
+	_ = 0X_1234
+	_ = 0X_CAFE_f00d
+
+	// octals
+	_ = 0o0
+	_ = 0o1234
+	_ = 0o01234567
+
+	_ = 0O0
+	_ = 0O1234
+	_ = 0O01234567
+
+	_ = 0o_0
+	_ = 0o_1234
+	_ = 0o0123_4567
+
+	_ = 0O_0
+	_ = 0O_1234
+	_ = 0O0123_4567
+
+	// binaries
+	_ = 0b0
+	_ = 0b1011
+	_ = 0b00101101
+
+	_ = 0B0
+	_ = 0B1011
+	_ = 0B00101101
+
+	_ = 0b_0
+	_ = 0b10_11
+	_ = 0b_0010_1101
+
+	// decimal floats
+	_ = 0.
+	_ = 123.
+	_ = 0123.
+
+	_ = .0
+	_ = .123
+	_ = .0123
+
+	_ = 0e0
+	_ = 123e+0
+	_ = 0123E-1
+
+	_ = 0e-0
+	_ = 123E+0
+	_ = 0123E123
+
+	_ = 0.e+1
+	_ = 123.E-10
+	_ = 0123.e123
+
+	_ = .0e-1
+	_ = .123E+10
+	_ = .0123E123
+
+	_ = 0.0
+	_ = 123.123
+	_ = 0123.0123
+
+	_ = 0.0e1
+	_ = 123.123E-10
+	_ = 0123.0123e+456
+
+	_ = 1_2_3.
+	_ = 0_123.
+
+	_ = 0_0e0
+	_ = 1_2_3e0
+	_ = 0_123e0
+
+	_ = 0e-0_0
+	_ = 1_2_3E+0
+	_ = 0123E1_2_3
+
+	_ = 0.e+1
+	_ = 123.E-1_0
+	_ = 01_23.e123
+
+	_ = .0e-1
+	_ = .123E+10
+	_ = .0123E123
+
+	_ = 1_2_3.123
+	_ = 0123.01_23
+
+	// hexadecimal floats
+	_ = 0x0.p+0
+	_ = 0Xdeadcafe.p-10
+	_ = 0x1234.P123
+
+	_ = 0x.1p-0
+	_ = 0X.deadcafep2
+	_ = 0x.1234P+10
+
+	_ = 0x0p0
+	_ = 0Xdeadcafep+1
+	_ = 0x1234P-10
+
+	_ = 0x0.0p0
+	_ = 0Xdead.cafep+1
+	_ = 0x12.34P-10
+
+	_ = 0Xdead_cafep+1
+	_ = 0x_1234P-10
+
+	_ = 0X_dead_cafe.p-10
+	_ = 0x12_34.P1_2_3
+	_ = 0X1_2_3_4.P-1_2_3
+
+	// imaginaries
+	_ = 0i
+	_ = 00i
+	_ = 08i
+	_ = 0000000000i
+	_ = 0123i
+	_ = 0000000123i
+	_ = 0000056789i
+	_ = 1234i
+	_ = 1234567i
+
+	_ = 0i
+	_ = 0_0i
+	_ = 0_8i
+	_ = 0_000_000_000i
+	_ = 0_123i
+	_ = 0_000_000_123i
+	_ = 0_000_056_789i
+	_ = 1_234i
+	_ = 1_234_567i
+
+	_ = 0.i
+	_ = 123.i
+	_ = 0123.i
+	_ = 000123.i
+
+	_ = 0e0i
+	_ = 123e0i
+	_ = 0123E0i
+	_ = 000123E0i
+
+	_ = 0.e+1i
+	_ = 123.E-1_0i
+	_ = 01_23.e123i
+	_ = 00_01_23.e123i
+
+	_ = 0b1010i
+	_ = 0B1010i
+	_ = 0o660i
+	_ = 0O660i
+	_ = 0xabcDEFi
+	_ = 0XabcDEFi
+	_ = 0xabcDEFP0i
+	_ = 0XabcDEFp0i
+)
diff --git a/internal/backport/go/printer/testdata/go2numbers.norm b/internal/backport/go/printer/testdata/go2numbers.norm
new file mode 100644
index 0000000..855f0fc
--- /dev/null
+++ b/internal/backport/go/printer/testdata/go2numbers.norm
@@ -0,0 +1,186 @@
+package p
+
+const (
+	// 0-octals
+	_	= 0
+	_	= 0123
+	_	= 0123456
+
+	_	= 0_123
+	_	= 0123_456
+
+	// decimals
+	_	= 1
+	_	= 1234
+	_	= 1234567
+
+	_	= 1_234
+	_	= 1_234_567
+
+	// hexadecimals
+	_	= 0x0
+	_	= 0x1234
+	_	= 0xcafef00d
+
+	_	= 0x0
+	_	= 0x1234
+	_	= 0xCAFEf00d
+
+	_	= 0x_0
+	_	= 0x_1234
+	_	= 0x_CAFE_f00d
+
+	// octals
+	_	= 0o0
+	_	= 0o1234
+	_	= 0o01234567
+
+	_	= 0o0
+	_	= 0o1234
+	_	= 0o01234567
+
+	_	= 0o_0
+	_	= 0o_1234
+	_	= 0o0123_4567
+
+	_	= 0o_0
+	_	= 0o_1234
+	_	= 0o0123_4567
+
+	// binaries
+	_	= 0b0
+	_	= 0b1011
+	_	= 0b00101101
+
+	_	= 0b0
+	_	= 0b1011
+	_	= 0b00101101
+
+	_	= 0b_0
+	_	= 0b10_11
+	_	= 0b_0010_1101
+
+	// decimal floats
+	_	= 0.
+	_	= 123.
+	_	= 0123.
+
+	_	= .0
+	_	= .123
+	_	= .0123
+
+	_	= 0e0
+	_	= 123e+0
+	_	= 0123e-1
+
+	_	= 0e-0
+	_	= 123e+0
+	_	= 0123e123
+
+	_	= 0.e+1
+	_	= 123.e-10
+	_	= 0123.e123
+
+	_	= .0e-1
+	_	= .123e+10
+	_	= .0123e123
+
+	_	= 0.0
+	_	= 123.123
+	_	= 0123.0123
+
+	_	= 0.0e1
+	_	= 123.123e-10
+	_	= 0123.0123e+456
+
+	_	= 1_2_3.
+	_	= 0_123.
+
+	_	= 0_0e0
+	_	= 1_2_3e0
+	_	= 0_123e0
+
+	_	= 0e-0_0
+	_	= 1_2_3e+0
+	_	= 0123e1_2_3
+
+	_	= 0.e+1
+	_	= 123.e-1_0
+	_	= 01_23.e123
+
+	_	= .0e-1
+	_	= .123e+10
+	_	= .0123e123
+
+	_	= 1_2_3.123
+	_	= 0123.01_23
+
+	// hexadecimal floats
+	_	= 0x0.p+0
+	_	= 0xdeadcafe.p-10
+	_	= 0x1234.p123
+
+	_	= 0x.1p-0
+	_	= 0x.deadcafep2
+	_	= 0x.1234p+10
+
+	_	= 0x0p0
+	_	= 0xdeadcafep+1
+	_	= 0x1234p-10
+
+	_	= 0x0.0p0
+	_	= 0xdead.cafep+1
+	_	= 0x12.34p-10
+
+	_	= 0xdead_cafep+1
+	_	= 0x_1234p-10
+
+	_	= 0x_dead_cafe.p-10
+	_	= 0x12_34.p1_2_3
+	_	= 0x1_2_3_4.p-1_2_3
+
+	// imaginaries
+	_	= 0i
+	_	= 0i
+	_	= 8i
+	_	= 0i
+	_	= 123i
+	_	= 123i
+	_	= 56789i
+	_	= 1234i
+	_	= 1234567i
+
+	_	= 0i
+	_	= 0i
+	_	= 8i
+	_	= 0i
+	_	= 123i
+	_	= 123i
+	_	= 56_789i
+	_	= 1_234i
+	_	= 1_234_567i
+
+	_	= 0.i
+	_	= 123.i
+	_	= 0123.i
+	_	= 000123.i
+
+	_	= 0e0i
+	_	= 123e0i
+	_	= 0123e0i
+	_	= 000123e0i
+
+	_	= 0.e+1i
+	_	= 123.e-1_0i
+	_	= 01_23.e123i
+	_	= 00_01_23.e123i
+
+	_	= 0b1010i
+	_	= 0b1010i
+	_	= 0o660i
+	_	= 0o660i
+	_	= 0xabcDEFi
+	_	= 0xabcDEFi
+	_	= 0xabcDEFp0i
+	_	= 0xabcDEFp0i
+)
diff --git a/internal/backport/go/printer/testdata/gobuild1.golden b/internal/backport/go/printer/testdata/gobuild1.golden
new file mode 100644
index 0000000..649da40
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild1.golden
@@ -0,0 +1,6 @@
+//go:build x
+// +build x
+
+package p
+
+func f()
diff --git a/internal/backport/go/printer/testdata/gobuild1.input b/internal/backport/go/printer/testdata/gobuild1.input
new file mode 100644
index 0000000..6538ee6
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild1.input
@@ -0,0 +1,7 @@
+package p
+
+//go:build x
+
+func f()
+
+// +build y
diff --git a/internal/backport/go/printer/testdata/gobuild2.golden b/internal/backport/go/printer/testdata/gobuild2.golden
new file mode 100644
index 0000000..c46fd34
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild2.golden
@@ -0,0 +1,8 @@
+//go:build x
+// +build x
+
+// other comment
+
+package p
+
+func f()
diff --git a/internal/backport/go/printer/testdata/gobuild2.input b/internal/backport/go/printer/testdata/gobuild2.input
new file mode 100644
index 0000000..f0f772a
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild2.input
@@ -0,0 +1,9 @@
+// +build y
+
+// other comment
+
+package p
+
+func f()
+
+//go:build x
diff --git a/internal/backport/go/printer/testdata/gobuild3.golden b/internal/backport/go/printer/testdata/gobuild3.golden
new file mode 100644
index 0000000..db92c57
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild3.golden
@@ -0,0 +1,10 @@
+// other comment
+
+//go:build x
+// +build x
+
+// yet another comment
+
+package p
+
+func f()
diff --git a/internal/backport/go/printer/testdata/gobuild3.input b/internal/backport/go/printer/testdata/gobuild3.input
new file mode 100644
index 0000000..d0c97b2
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild3.input
@@ -0,0 +1,11 @@
+// other comment
+
+// +build y
+
+// yet another comment
+
+package p
+
+//go:build x
+
+func f()
diff --git a/internal/backport/go/printer/testdata/gobuild4.golden b/internal/backport/go/printer/testdata/gobuild4.golden
new file mode 100644
index 0000000..b16477f
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild4.golden
@@ -0,0 +1,6 @@
+//go:build (x || y) && z
+// +build x y
+// +build z
+
+// doc comment
+package p
diff --git a/internal/backport/go/printer/testdata/gobuild4.input b/internal/backport/go/printer/testdata/gobuild4.input
new file mode 100644
index 0000000..29d5a0a
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild4.input
@@ -0,0 +1,5 @@
+// doc comment
+package p
+
+// +build x y
+// +build z
diff --git a/internal/backport/go/printer/testdata/gobuild5.golden b/internal/backport/go/printer/testdata/gobuild5.golden
new file mode 100644
index 0000000..2808a53
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild5.golden
@@ -0,0 +1,4 @@
+//go:build !(x || y) && z
+// +build !x,!y,z
+
+package p
diff --git a/internal/backport/go/printer/testdata/gobuild5.input b/internal/backport/go/printer/testdata/gobuild5.input
new file mode 100644
index 0000000..ec5815c
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild5.input
@@ -0,0 +1,4 @@
+//go:build !(x || y) && z
+// +build something else
+
+package p
diff --git a/internal/backport/go/printer/testdata/gobuild6.golden b/internal/backport/go/printer/testdata/gobuild6.golden
new file mode 100644
index 0000000..abb1e2a
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild6.golden
@@ -0,0 +1,5 @@
+//go:build !(x || y) && z
+
+// no +build line
+
+package p
diff --git a/internal/backport/go/printer/testdata/gobuild6.input b/internal/backport/go/printer/testdata/gobuild6.input
new file mode 100644
index 0000000..1621897
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild6.input
@@ -0,0 +1,4 @@
+//go:build !(x || y) && z
+// no +build line
+
+package p
diff --git a/internal/backport/go/printer/testdata/gobuild7.golden b/internal/backport/go/printer/testdata/gobuild7.golden
new file mode 100644
index 0000000..bf41dd4
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild7.golden
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support.
+
+//go:build !go1.16
+// +build !go1.16
+
+// Package buildtag defines an Analyzer that checks build tags.
+package buildtag
diff --git a/internal/backport/go/printer/testdata/gobuild7.input b/internal/backport/go/printer/testdata/gobuild7.input
new file mode 100644
index 0000000..bf41dd4
--- /dev/null
+++ b/internal/backport/go/printer/testdata/gobuild7.input
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support.
+
+//go:build !go1.16
+// +build !go1.16
+
+// Package buildtag defines an Analyzer that checks build tags.
+package buildtag
diff --git a/internal/backport/go/printer/testdata/linebreaks.golden b/internal/backport/go/printer/testdata/linebreaks.golden
new file mode 100644
index 0000000..17d2b5c
--- /dev/null
+++ b/internal/backport/go/printer/testdata/linebreaks.golden
@@ -0,0 +1,295 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linebreaks
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+type writerTestEntry struct {
+	header		*Header
+	contents	string
+}
+
+type writerTest struct {
+	file	string	// filename of expected output
+	entries	[]*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+	&writerTest{
+		file:	"testdata/writer.tar",
+		entries: []*writerTestEntry{
+			&writerTestEntry{
+				header: &Header{
+					Name:		"small.txt",
+					Mode:		0640,
+					Uid:		73025,
+					Gid:		5000,
+					Size:		5,
+					Mtime:		1246508266,
+					Typeflag:	'0',
+					Uname:		"dsymonds",
+					Gname:		"eng",
+				},
+				contents:	"Kilts",
+			},
+			&writerTestEntry{
+				header: &Header{
+					Name:		"small2.txt",
+					Mode:		0640,
+					Uid:		73025,
+					Gid:		5000,
+					Size:		11,
+					Mtime:		1245217492,
+					Typeflag:	'0',
+					Uname:		"dsymonds",
+					Gname:		"eng",
+				},
+				contents:	"Google.com\n",
+			},
+		},
+	},
+	// The truncated test file was produced using these commands:
+	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+	&writerTest{
+		file:	"testdata/writer-big.tar",
+		entries: []*writerTestEntry{
+			&writerTestEntry{
+				header: &Header{
+					Name:		"tmp/16gig.txt",
+					Mode:		0640,
+					Uid:		73025,
+					Gid:		5000,
+					Size:		16 << 30,
+					Mtime:		1254699560,
+					Typeflag:	'0',
+					Uname:		"dsymonds",
+					Gname:		"eng",
+				},
+				// no contents
+			},
+		},
+	},
+}
+
+type untarTest struct {
+	file	string
+	headers	[]*Header
+}
+
+var untarTests = []*untarTest{
+	&untarTest{
+		file:	"testdata/gnu.tar",
+		headers: []*Header{
+			&Header{
+				Name:		"small.txt",
+				Mode:		0640,
+				Uid:		73025,
+				Gid:		5000,
+				Size:		5,
+				Mtime:		1244428340,
+				Typeflag:	'0',
+				Uname:		"dsymonds",
+				Gname:		"eng",
+			},
+			&Header{
+				Name:		"small2.txt",
+				Mode:		0640,
+				Uid:		73025,
+				Gid:		5000,
+				Size:		11,
+				Mtime:		1244436044,
+				Typeflag:	'0',
+				Uname:		"dsymonds",
+				Gname:		"eng",
+			},
+		},
+	},
+	&untarTest{
+		file:	"testdata/star.tar",
+		headers: []*Header{
+			&Header{
+				Name:		"small.txt",
+				Mode:		0640,
+				Uid:		73025,
+				Gid:		5000,
+				Size:		5,
+				Mtime:		1244592783,
+				Typeflag:	'0',
+				Uname:		"dsymonds",
+				Gname:		"eng",
+				Atime:		1244592783,
+				Ctime:		1244592783,
+			},
+			&Header{
+				Name:		"small2.txt",
+				Mode:		0640,
+				Uid:		73025,
+				Gid:		5000,
+				Size:		11,
+				Mtime:		1244592783,
+				Typeflag:	'0',
+				Uname:		"dsymonds",
+				Gname:		"eng",
+				Atime:		1244592783,
+				Ctime:		1244592783,
+			},
+		},
+	},
+	&untarTest{
+		file:	"testdata/v7.tar",
+		headers: []*Header{
+			&Header{
+				Name:		"small.txt",
+				Mode:		0444,
+				Uid:		73025,
+				Gid:		5000,
+				Size:		5,
+				Mtime:		1244593104,
+				Typeflag:	'\x00',
+			},
+			&Header{
+				Name:		"small2.txt",
+				Mode:		0444,
+				Uid:		73025,
+				Gid:		5000,
+				Size:		11,
+				Mtime:		1244593104,
+				Typeflag:	'\x00',
+			},
+		},
+	},
+}
+
+var facts = map[int]string{
+	0:	"1",
+	1:	"1",
+	2:	"2",
+	10:	"3628800",
+	20:	"2432902008176640000",
+	100: "933262154439441526816992388562667004907159682643816214685929" +
+		"638952175999932299156089414639761565182862536979208272237582" +
+		"51185210916864000000000000000000000000",
+}
+
+func usage() {
+	fmt.Fprintf(os.Stderr,
+		// TODO(gri): the 2nd string of this string list should not be indented
+		"usage: godoc package [name ...]\n"+
+			"	godoc -http=:6060\n")
+	flag.PrintDefaults()
+	os.Exit(2)
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+	for i, test := range untarTests {
+		f, err := os.Open(test.file, os.O_RDONLY, 0444)
+		if err != nil {
+			t.Errorf("test %d: Unexpected error: %v", i, err)
+			continue
+		}
+		tr := NewReader(f)
+		for j, header := range test.headers {
+			hdr, err := tr.Next()
+			if err != nil || hdr == nil {
+				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+				f.Close()
+				continue testLoop
+			}
+			if !reflect.DeepEqual(hdr, header) {
+				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+					i, j, *hdr, *header)
+			}
+		}
+		hdr, err := tr.Next()
+		if hdr != nil || err != nil {
+			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
+		}
+		f.Close()
+	}
+}
+
+// Respect line breaks in function calls.
+func _() {
+	f(x)
+	f(x,
+		x)
+	f(x,
+		x,
+	)
+	f(
+		x,
+		x)
+	f(
+		x,
+		x,
+	)
+}
+
+// Respect line breaks in function declarations.
+func _(x T)	{}
+func _(x T,
+	y T) {
+}
+func _(x T,
+	y T,
+) {
+}
+func _(
+	x T,
+	y T) {
+}
+func _(
+	x T,
+	y T,
+) {
+}
+
+// Example from issue #2597.
+func ManageStatus0(
+	in <-chan *Status,
+	req <-chan Request,
+	stat chan<- *TargetInfo,
+	TargetHistorySize int) {
+}
+
+func ManageStatus1(
+	in <-chan *Status,
+	req <-chan Request,
+	stat chan<- *TargetInfo,
+	TargetHistorySize int,
+) {
+}
+
+// Example from issue #9064.
+func (y *y) xerrors() error {
+	_ = "xerror.test"	//TODO-
+	_ = []byte(`
+foo bar foo bar foo bar
+`) //TODO-
+}
+
+func _() {
+	_ = "abc"		// foo
+	_ = `abc_0123456789_`	// foo
+}
+
+func _() {
+	_ = "abc"	// foo
+	_ = `abc
+0123456789
+` // foo
+}
+
+// There should be exactly one linebreak after this comment.
diff --git a/internal/backport/go/printer/testdata/linebreaks.input b/internal/backport/go/printer/testdata/linebreaks.input
new file mode 100644
index 0000000..9e714f3
--- /dev/null
+++ b/internal/backport/go/printer/testdata/linebreaks.input
@@ -0,0 +1,291 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linebreaks
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+type writerTestEntry struct {
+	header *Header
+	contents string
+}
+
+type writerTest struct {
+	file string  // filename of expected output
+	entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+	&writerTest{
+		file: "testdata/writer.tar",
+		entries: []*writerTestEntry{
+			&writerTestEntry{
+				header: &Header{
+					Name: "small.txt",
+					Mode: 0640,
+					Uid: 73025,
+					Gid: 5000,
+					Size: 5,
+					Mtime: 1246508266,
+					Typeflag: '0',
+					Uname: "dsymonds",
+					Gname: "eng",
+				},
+				contents: "Kilts",
+			},
+			&writerTestEntry{
+				header: &Header{
+					Name: "small2.txt",
+					Mode: 0640,
+					Uid: 73025,
+					Gid: 5000,
+					Size: 11,
+					Mtime: 1245217492,
+					Typeflag: '0',
+					Uname: "dsymonds",
+					Gname: "eng",
+				},
+				contents: "Google.com\n",
+			},
+		},
+	},
+	// The truncated test file was produced using these commands:
+	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+	&writerTest{
+		file: "testdata/writer-big.tar",
+		entries: []*writerTestEntry{
+			&writerTestEntry{
+				header: &Header{
+					Name: "tmp/16gig.txt",
+					Mode: 0640,
+					Uid: 73025,
+					Gid: 5000,
+					Size: 16 << 30,
+					Mtime: 1254699560,
+					Typeflag: '0',
+					Uname: "dsymonds",
+					Gname: "eng",
+				},
+				// no contents
+			},
+		},
+	},
+}
+
+type untarTest struct {
+	file string
+	headers []*Header
+}
+
+var untarTests = []*untarTest{
+	&untarTest{
+		file: "testdata/gnu.tar",
+		headers: []*Header{
+			&Header{
+				Name: "small.txt",
+				Mode: 0640,
+				Uid: 73025,
+				Gid: 5000,
+				Size: 5,
+				Mtime: 1244428340,
+				Typeflag: '0',
+				Uname: "dsymonds",
+				Gname: "eng",
+			},
+			&Header{
+				Name: "small2.txt",
+				Mode: 0640,
+				Uid: 73025,
+				Gid: 5000,
+				Size: 11,
+				Mtime: 1244436044,
+				Typeflag: '0',
+				Uname: "dsymonds",
+				Gname: "eng",
+			},
+		},
+	},
+	&untarTest{
+		file: "testdata/star.tar",
+		headers: []*Header{
+			&Header{
+				Name: "small.txt",
+				Mode: 0640,
+				Uid: 73025,
+				Gid: 5000,
+				Size: 5,
+				Mtime: 1244592783,
+				Typeflag: '0',
+				Uname: "dsymonds",
+				Gname: "eng",
+				Atime: 1244592783,
+				Ctime: 1244592783,
+			},
+			&Header{
+				Name: "small2.txt",
+				Mode: 0640,
+				Uid: 73025,
+				Gid: 5000,
+				Size: 11,
+				Mtime: 1244592783,
+				Typeflag: '0',
+				Uname: "dsymonds",
+				Gname: "eng",
+				Atime: 1244592783,
+				Ctime: 1244592783,
+			},
+		},
+	},
+	&untarTest{
+		file: "testdata/v7.tar",
+		headers: []*Header{
+			&Header{
+				Name: "small.txt",
+				Mode: 0444,
+				Uid: 73025,
+				Gid: 5000,
+				Size: 5,
+				Mtime: 1244593104,
+				Typeflag: '\x00',
+			},
+			&Header{
+				Name: "small2.txt",
+				Mode: 0444,
+				Uid: 73025,
+				Gid: 5000,
+				Size: 11,
+				Mtime: 1244593104,
+				Typeflag: '\x00',
+			},
+		},
+	},
+}
+
+var facts = map[int] string {
+	0: "1",
+	1: "1",
+	2: "2",
+	10: "3628800",
+	20: "2432902008176640000",
+	100: "933262154439441526816992388562667004907159682643816214685929" +
+		"638952175999932299156089414639761565182862536979208272237582" +
+		"51185210916864000000000000000000000000",
+}
+
+func usage() {
+	fmt.Fprintf(os.Stderr,
+		// TODO(gri): the 2nd string of this string list should not be indented
+		"usage: godoc package [name ...]\n" +
+		"	godoc -http=:6060\n")
+	flag.PrintDefaults()
+	os.Exit(2)
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+	for i, test := range untarTests {
+		f, err := os.Open(test.file, os.O_RDONLY, 0444)
+		if err != nil {
+			t.Errorf("test %d: Unexpected error: %v", i, err)
+			continue
+		}
+		tr := NewReader(f)
+		for j, header := range test.headers {
+			hdr, err := tr.Next()
+			if err != nil || hdr == nil {
+				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+				f.Close()
+				continue testLoop
+			}
+			if !reflect.DeepEqual(hdr, header) {
+				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+					 i, j, *hdr, *header)
+			}
+		}
+		hdr, err := tr.Next()
+		if hdr != nil || err != nil {
+			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
+		}
+		f.Close()
+	}
+}
+
+// Respect line breaks in function calls.
+func _() {
+	f(x)
+	f(x,
+	  x)
+	f(x,
+	  x,
+	)
+	f(
+	  x,
+	  x)
+	f(
+	  x,
+	  x,
+	)
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+       y T) {}
+func _(x T,
+       y T,
+) {}
+func _(
+       x T,
+       y T) {}
+func _(
+       x T,
+       y T,
+) {}
+
+// Example from issue #2597.
+func ManageStatus0(
+	in <-chan *Status,
+	req <-chan Request,
+	stat chan<- *TargetInfo,
+	TargetHistorySize int) {
+}
+    
+func ManageStatus1(
+	in <-chan *Status,
+	req <-chan Request,
+	stat chan<- *TargetInfo,
+	TargetHistorySize int,
+) {
+}
+
+// Example from issue #9064.
+func (y *y) xerrors() error {
+	_ = "xerror.test" //TODO-
+	_ = []byte(`
+foo bar foo bar foo bar
+`) //TODO-
+}
+
+func _() {
+	_ = "abc" // foo
+	_ = `abc_0123456789_` // foo
+}
+
+func _() {
+	_ = "abc" // foo
+	_ = `abc
+0123456789
+` // foo
+}
+
+// There should be exactly one linebreak after this comment.
diff --git a/internal/backport/go/printer/testdata/parser.go b/internal/backport/go/printer/testdata/parser.go
new file mode 100644
index 0000000..762f8ff
--- /dev/null
+++ b/internal/backport/go/printer/testdata/parser.go
@@ -0,0 +1,2149 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parser implements a parser for Go source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the
+// output is an abstract syntax tree (AST) representing the Go source. The
+// parser is invoked through one of the Parse* functions.
+
+package parser
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// The mode parameter to the Parse* functions is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+const (
+	PackageClauseOnly uint = 1 << iota // parsing stops after package clause
+	ImportsOnly                        // parsing stops after import declarations
+	ParseComments                      // parse comments and add them to AST
+	Trace                              // print a trace of parsed productions
+	DeclarationErrors                  // report declaration errors
+)
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+	file *token.File
+	scanner.ErrorVector
+	scanner scanner.Scanner
+
+	// Tracing/debugging
+	mode   uint // parsing mode
+	trace  bool // == (mode & Trace != 0)
+	indent uint // indentation used for tracing output
+
+	// Comments
+	comments    []*ast.CommentGroup
+	leadComment *ast.CommentGroup // last lead comment
+	lineComment *ast.CommentGroup // last line comment
+
+	// Next token
+	pos token.Pos   // token position
+	tok token.Token // one token look-ahead
+	lit string      // token literal
+
+	// Non-syntactic parser control
+	exprLev int // < 0: in control clause, >= 0: in expression
+
+	// Ordinary identifier scopes
+	pkgScope   *ast.Scope        // pkgScope.Outer == nil
+	topScope   *ast.Scope        // top-most scope; may be pkgScope
+	unresolved []*ast.Ident      // unresolved identifiers
+	imports    []*ast.ImportSpec // list of imports
+
+	// Label scope
+	// (maintained by open/close LabelScope)
+	labelScope  *ast.Scope     // label scope for current function
+	targetStack [][]*ast.Ident // stack of unresolved labels
+}
+
+// scannerMode returns the scanner mode bits given the parser's mode bits.
+func scannerMode(mode uint) uint {
+	var m uint = scanner.InsertSemis
+	if mode&ParseComments != 0 {
+		m |= scanner.ScanComments
+	}
+	return m
+}
+
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
+	p.file = fset.AddFile(filename, fset.Base(), len(src))
+	p.scanner.Init(p.file, src, p, scannerMode(mode))
+
+	p.mode = mode
+	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+
+	p.next()
+
+	// set up the pkgScope here (as opposed to in parseFile) because
+	// there are other parser entry points (ParseExpr, etc.)
+	p.openScope()
+	p.pkgScope = p.topScope
+
+	// for the same reason, set up a label scope
+	p.openLabelScope()
+}
+
+// ----------------------------------------------------------------------------
+// Scoping support
+
+func (p *parser) openScope() {
+	p.topScope = ast.NewScope(p.topScope)
+}
+
+func (p *parser) closeScope() {
+	p.topScope = p.topScope.Outer
+}
+
+func (p *parser) openLabelScope() {
+	p.labelScope = ast.NewScope(p.labelScope)
+	p.targetStack = append(p.targetStack, nil)
+}
+
+func (p *parser) closeLabelScope() {
+	// resolve labels
+	n := len(p.targetStack) - 1
+	scope := p.labelScope
+	for _, ident := range p.targetStack[n] {
+		ident.Obj = scope.Lookup(ident.Name)
+		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
+			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+		}
+	}
+	// pop label scope
+	p.targetStack = p.targetStack[0:n]
+	p.labelScope = p.labelScope.Outer
+}
+
+func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+	for _, ident := range idents {
+		assert(ident.Obj == nil, "identifier already declared or resolved")
+		if ident.Name != "_" {
+			obj := ast.NewObj(kind, ident.Name)
+			// remember the corresponding declaration for redeclaration
+			// errors and global variable resolution/typechecking phase
+			obj.Decl = decl
+			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
+				prevDecl := ""
+				if pos := alt.Pos(); pos.IsValid() {
+					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
+				}
+				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+			}
+			ident.Obj = obj
+		}
+	}
+}
+
+func (p *parser) shortVarDecl(idents []*ast.Ident) {
+	// Go spec: A short variable declaration may redeclare variables
+	// provided they were originally declared in the same block with
+	// the same type, and at least one of the non-blank variables is new.
+	n := 0 // number of new variables
+	for _, ident := range idents {
+		assert(ident.Obj == nil, "identifier already declared or resolved")
+		if ident.Name != "_" {
+			obj := ast.NewObj(ast.Var, ident.Name)
+			// short var declarations cannot have redeclaration errors
+			// and are not global => no need to remember the respective
+			// declaration
+			alt := p.topScope.Insert(obj)
+			if alt == nil {
+				n++ // new declaration
+				alt = obj
+			}
+			ident.Obj = alt
+		}
+	}
+	if n == 0 && p.mode&DeclarationErrors != 0 {
+		p.error(idents[0].Pos(), "no new variables on left side of :=")
+	}
+}
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+func (p *parser) resolve(x ast.Expr) {
+	// nothing to do if x is not an identifier or the blank identifier
+	ident, _ := x.(*ast.Ident)
+	if ident == nil {
+		return
+	}
+	assert(ident.Obj == nil, "identifier already declared or resolved")
+	if ident.Name == "_" {
+		return
+	}
+	// try to resolve the identifier
+	for s := p.topScope; s != nil; s = s.Outer {
+		if obj := s.Lookup(ident.Name); obj != nil {
+			ident.Obj = obj
+			return
+		}
+	}
+	// all local scopes are known, so any unresolved identifier
+	// must be found either in the file scope, package scope
+	// (perhaps in another file), or universe scope --- collect
+	// them so that they can be resolved later
+	ident.Obj = unresolved
+	p.unresolved = append(p.unresolved, ident)
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
+		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = uint(len(dots))
+	pos := p.file.Position(p.pos)
+	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+	i := 2 * p.indent
+	for ; i > n; i -= n {
+		fmt.Print(dots)
+	}
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."));
+func un(p *parser) {
+	p.indent--
+	p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+	// Because of one-token look-ahead, print the previous token
+	// when tracing as it provides a more readable output. The
+	// very first token (!p.pos.IsValid()) is not initialized
+	// (it is token.ILLEGAL), so don't print it.
+	if p.trace && p.pos.IsValid() {
+		s := p.tok.String()
+		switch {
+		case p.tok.IsLiteral():
+			p.printTrace(s, p.lit)
+		case p.tok.IsOperator(), p.tok.IsKeyword():
+			p.printTrace("\"" + s + "\"")
+		default:
+			p.printTrace(s)
+		}
+	}
+
+	p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+	// /*-style comments may end on a different line than where they start.
+	// Scan the comment for '\n' chars and adjust endline accordingly.
+	endline = p.file.Line(p.pos)
+	if p.lit[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.lit); i++ {
+			if p.lit[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{p.pos, p.lit}
+	p.next0()
+
+	return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. An empty line or non-comment
+// token terminates a comment group.
+func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.file.Line(p.pos)
+	for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add comment group to the comments list
+	comments = &ast.CommentGroup{list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+func (p *parser) next() {
+	p.leadComment = nil
+	p.lineComment = nil
+	line := p.file.Line(p.pos) // current line
+	p.next0()
+
+	if p.tok == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		if p.file.Line(p.pos) == line {
+			// The comment is on same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup()
+			if p.file.Line(p.pos) != endline {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				p.lineComment = comment
+			}
+		}
+
+		// consume successor comments, if any
+		endline = -1
+		for p.tok == token.COMMENT {
+			comment, endline = p.consumeCommentGroup()
+		}
+
+		if endline+1 == p.file.Line(p.pos) {
+			// The next token is following on the line immediately after the
+			// comment group, thus the last comment group is a lead comment.
+			p.leadComment = comment
+		}
+	}
+}
+
+func (p *parser) error(pos token.Pos, msg string) {
+	p.Error(p.file.Position(pos), msg)
+}
+
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+	msg = "expected " + msg
+	if pos == p.pos {
+		// the error happened at the current position;
+		// make the error message more specific
+		if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
+			msg += ", found newline"
+		} else {
+			msg += ", found '" + p.tok.String() + "'"
+			if p.tok.IsLiteral() {
+				msg += " " + p.lit
+			}
+		}
+	}
+	p.error(pos, msg)
+}
+
+func (p *parser) expect(tok token.Token) token.Pos {
+	pos := p.pos
+	if p.tok != tok {
+		p.errorExpected(pos, "'"+tok.String()+"'")
+	}
+	p.next() // make progress
+	return pos
+}
+
+func (p *parser) expectSemi() {
+	if p.tok != token.RPAREN && p.tok != token.RBRACE {
+		p.expect(token.SEMICOLON)
+	}
+}
+
+func assert(cond bool, msg string) {
+	if !cond {
+		panic("go/parser internal error: " + msg)
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+	pos := p.pos
+	name := "_"
+	if p.tok == token.IDENT {
+		name = p.lit
+		p.next()
+	} else {
+		p.expect(token.IDENT) // use expect() error handling
+	}
+	return &ast.Ident{pos, name, nil}
+}
+
+func (p *parser) parseIdentList() (list []*ast.Ident) {
+	if p.trace {
+		defer un(trace(p, "IdentList"))
+	}
+
+	list = append(list, p.parseIdent())
+	for p.tok == token.COMMA {
+		p.next()
+		list = append(list, p.parseIdent())
+	}
+
+	return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ExpressionList"))
+	}
+
+	list = append(list, p.parseExpr(lhs))
+	for p.tok == token.COMMA {
+		p.next()
+		list = append(list, p.parseExpr(lhs))
+	}
+
+	return
+}
+
+func (p *parser) parseLhsList() []ast.Expr {
+	list := p.parseExprList(true)
+	switch p.tok {
+	case token.DEFINE:
+		// lhs of a short variable declaration
+		p.shortVarDecl(p.makeIdentList(list))
+	case token.COLON:
+		// lhs of a label declaration or a communication clause of a select
+		// statement (parseLhsList is not called when parsing the case clause
+		// of a switch statement):
+		// - labels are declared by the caller of parseLhsList
+		// - for communication clauses, if there is a stand-alone identifier
+		//   followed by a colon, we have a syntax error; there is no need
+		//   to resolve the identifier in that case
+	default:
+		// identifiers must be declared elsewhere
+		for _, x := range list {
+			p.resolve(x)
+		}
+	}
+	return list
+}
+
+func (p *parser) parseRhsList() []ast.Expr {
+	return p.parseExprList(false)
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) parseType() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Type"))
+	}
+
+	typ := p.tryType()
+
+	if typ == nil {
+		pos := p.pos
+		p.errorExpected(pos, "type")
+		p.next() // make progress
+		return &ast.BadExpr{pos, p.pos}
+	}
+
+	return typ
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "TypeName"))
+	}
+
+	ident := p.parseIdent()
+	// don't resolve ident yet - it may be a parameter or field name
+
+	if p.tok == token.PERIOD {
+		// ident is a package name
+		p.next()
+		p.resolve(ident)
+		sel := p.parseIdent()
+		return &ast.SelectorExpr{ident, sel}
+	}
+
+	return ident
+}
+
+func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "ArrayType"))
+	}
+
+	lbrack := p.expect(token.LBRACK)
+	var len ast.Expr
+	if ellipsisOk && p.tok == token.ELLIPSIS {
+		len = &ast.Ellipsis{p.pos, nil}
+		p.next()
+	} else if p.tok != token.RBRACK {
+		len = p.parseRhs()
+	}
+	p.expect(token.RBRACK)
+	elt := p.parseType()
+
+	return &ast.ArrayType{lbrack, len, elt}
+}
+
+func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
+	idents := make([]*ast.Ident, len(list))
+	for i, x := range list {
+		ident, isIdent := x.(*ast.Ident)
+		if !isIdent {
+			pos := x.(ast.Expr).Pos()
+			p.errorExpected(pos, "identifier")
+			ident = &ast.Ident{pos, "_", nil}
+		}
+		idents[i] = ident
+	}
+	return idents
+}
+
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
+	if p.trace {
+		defer un(trace(p, "FieldDecl"))
+	}
+
+	doc := p.leadComment
+
+	// fields
+	list, typ := p.parseVarList(false)
+
+	// optional tag
+	var tag *ast.BasicLit
+	if p.tok == token.STRING {
+		tag = &ast.BasicLit{p.pos, p.tok, p.lit}
+		p.next()
+	}
+
+	// analyze case
+	var idents []*ast.Ident
+	if typ != nil {
+		// IdentifierList Type
+		idents = p.makeIdentList(list)
+	} else {
+		// ["*"] TypeName (AnonymousField)
+		typ = list[0] // we always have at least one element
+		p.resolve(typ)
+		if n := len(list); n > 1 || !isTypeName(deref(typ)) {
+			pos := typ.Pos()
+			p.errorExpected(pos, "anonymous field")
+			typ = &ast.BadExpr{pos, list[n-1].End()}
+		}
+	}
+
+	p.expectSemi() // call before accessing p.linecomment
+
+	field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+	p.declare(field, scope, ast.Var, idents...)
+
+	return field
+}
+
+func (p *parser) parseStructType() *ast.StructType {
+	if p.trace {
+		defer un(trace(p, "StructType"))
+	}
+
+	pos := p.expect(token.STRUCT)
+	lbrace := p.expect(token.LBRACE)
+	scope := ast.NewScope(nil) // struct scope
+	var list []*ast.Field
+	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
+		// a field declaration cannot start with a '(' but we accept
+		// it here for more robust parsing and better error messages
+		// (parseFieldDecl will check and complain if necessary)
+		list = append(list, p.parseFieldDecl(scope))
+	}
+	rbrace := p.expect(token.RBRACE)
+
+	// TODO(gri): store struct scope in AST
+	return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+func (p *parser) parsePointerType() *ast.StarExpr {
+	if p.trace {
+		defer un(trace(p, "PointerType"))
+	}
+
+	star := p.expect(token.MUL)
+	base := p.parseType()
+
+	return &ast.StarExpr{star, base}
+}
+
+func (p *parser) tryVarType(isParam bool) ast.Expr {
+	if isParam && p.tok == token.ELLIPSIS {
+		pos := p.pos
+		p.next()
+		typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
+		if typ == nil {
+			p.error(pos, "'...' parameter is missing type")
+			typ = &ast.BadExpr{pos, p.pos}
+		}
+		if p.tok != token.RPAREN {
+			p.error(pos, "can use '...' with last parameter type only")
+		}
+		return &ast.Ellipsis{pos, typ}
+	}
+	return p.tryIdentOrType(false)
+}
+
+func (p *parser) parseVarType(isParam bool) ast.Expr {
+	typ := p.tryVarType(isParam)
+	if typ == nil {
+		pos := p.pos
+		p.errorExpected(pos, "type")
+		p.next() // make progress
+		typ = &ast.BadExpr{pos, p.pos}
+	}
+	return typ
+}
+
+func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "VarList"))
+	}
+
+	// a list of identifiers looks like a list of type names
+	for {
+		// parseVarType accepts any type (including parenthesized ones)
+		// even though the syntax does not permit them here: we
+		// accept them all for more robust parsing and complain
+		// afterwards
+		list = append(list, p.parseVarType(isParam))
+		if p.tok != token.COMMA {
+			break
+		}
+		p.next()
+	}
+
+	// if we had a list of identifiers, it must be followed by a type
+	typ = p.tryVarType(isParam)
+	if typ != nil {
+		p.resolve(typ)
+	}
+
+	return
+}
+
+func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
+	if p.trace {
+		defer un(trace(p, "ParameterList"))
+	}
+
+	list, typ := p.parseVarList(ellipsisOk)
+	if typ != nil {
+		// IdentifierList Type
+		idents := p.makeIdentList(list)
+		field := &ast.Field{nil, idents, typ, nil, nil}
+		params = append(params, field)
+		// Go spec: The scope of an identifier denoting a function
+		// parameter or result variable is the function body.
+		p.declare(field, scope, ast.Var, idents...)
+		if p.tok == token.COMMA {
+			p.next()
+		}
+
+		for p.tok != token.RPAREN && p.tok != token.EOF {
+			idents := p.parseIdentList()
+			typ := p.parseVarType(ellipsisOk)
+			field := &ast.Field{nil, idents, typ, nil, nil}
+			params = append(params, field)
+			// Go spec: The scope of an identifier denoting a function
+			// parameter or result variable is the function body.
+			p.declare(field, scope, ast.Var, idents...)
+			if p.tok != token.COMMA {
+				break
+			}
+			p.next()
+		}
+
+	} else {
+		// Type { "," Type } (anonymous parameters)
+		params = make([]*ast.Field, len(list))
+		for i, x := range list {
+			p.resolve(x)
+			params[i] = &ast.Field{Type: x}
+		}
+	}
+
+	return
+}
+
+func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
+	if p.trace {
+		defer un(trace(p, "Parameters"))
+	}
+
+	var params []*ast.Field
+	lparen := p.expect(token.LPAREN)
+	if p.tok != token.RPAREN {
+		params = p.parseParameterList(scope, ellipsisOk)
+	}
+	rparen := p.expect(token.RPAREN)
+
+	return &ast.FieldList{lparen, params, rparen}
+}
+
+func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
+	if p.trace {
+		defer un(trace(p, "Result"))
+	}
+
+	if p.tok == token.LPAREN {
+		return p.parseParameters(scope, false)
+	}
+
+	typ := p.tryType()
+	if typ != nil {
+		list := make([]*ast.Field, 1)
+		list[0] = &ast.Field{Type: typ}
+		return &ast.FieldList{List: list}
+	}
+
+	return nil
+}
+
+func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
+	if p.trace {
+		defer un(trace(p, "Signature"))
+	}
+
+	params = p.parseParameters(scope, true)
+	results = p.parseResult(scope)
+
+	return
+}
+
+func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
+	if p.trace {
+		defer un(trace(p, "FuncType"))
+	}
+
+	pos := p.expect(token.FUNC)
+	scope := ast.NewScope(p.topScope) // function scope
+	params, results := p.parseSignature(scope)
+
+	return &ast.FuncType{pos, params, results}, scope
+}
+
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
+	if p.trace {
+		defer un(trace(p, "MethodSpec"))
+	}
+
+	doc := p.leadComment
+	var idents []*ast.Ident
+	var typ ast.Expr
+	x := p.parseTypeName()
+	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
+		// method
+		idents = []*ast.Ident{ident}
+		scope := ast.NewScope(nil) // method scope
+		params, results := p.parseSignature(scope)
+		typ = &ast.FuncType{token.NoPos, params, results}
+	} else {
+		// embedded interface
+		typ = x
+	}
+	p.expectSemi() // call before accessing p.linecomment
+
+	spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+	p.declare(spec, scope, ast.Fun, idents...)
+
+	return spec
+}
+
+func (p *parser) parseInterfaceType() *ast.InterfaceType {
+	if p.trace {
+		defer un(trace(p, "InterfaceType"))
+	}
+
+	pos := p.expect(token.INTERFACE)
+	lbrace := p.expect(token.LBRACE)
+	scope := ast.NewScope(nil) // interface scope
+	var list []*ast.Field
+	for p.tok == token.IDENT {
+		list = append(list, p.parseMethodSpec(scope))
+	}
+	rbrace := p.expect(token.RBRACE)
+
+	// TODO(gri): store interface scope in AST
+	return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+func (p *parser) parseMapType() *ast.MapType {
+	if p.trace {
+		defer un(trace(p, "MapType"))
+	}
+
+	pos := p.expect(token.MAP)
+	p.expect(token.LBRACK)
+	key := p.parseType()
+	p.expect(token.RBRACK)
+	value := p.parseType()
+
+	return &ast.MapType{pos, key, value}
+}
+
+func (p *parser) parseChanType() *ast.ChanType {
+	if p.trace {
+		defer un(trace(p, "ChanType"))
+	}
+
+	pos := p.pos
+	dir := ast.SEND | ast.RECV
+	if p.tok == token.CHAN {
+		p.next()
+		if p.tok == token.ARROW {
+			p.next()
+			dir = ast.SEND
+		}
+	} else {
+		p.expect(token.ARROW)
+		p.expect(token.CHAN)
+		dir = ast.RECV
+	}
+	value := p.parseType()
+
+	return &ast.ChanType{pos, dir, value}
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
+	switch p.tok {
+	case token.IDENT:
+		return p.parseTypeName()
+	case token.LBRACK:
+		return p.parseArrayType(ellipsisOk)
+	case token.STRUCT:
+		return p.parseStructType()
+	case token.MUL:
+		return p.parsePointerType()
+	case token.FUNC:
+		typ, _ := p.parseFuncType()
+		return typ
+	case token.INTERFACE:
+		return p.parseInterfaceType()
+	case token.MAP:
+		return p.parseMapType()
+	case token.CHAN, token.ARROW:
+		return p.parseChanType()
+	case token.LPAREN:
+		lparen := p.pos
+		p.next()
+		typ := p.parseType()
+		rparen := p.expect(token.RPAREN)
+		return &ast.ParenExpr{lparen, typ, rparen}
+	}
+
+	// no type found
+	return nil
+}
+
+func (p *parser) tryType() ast.Expr {
+	typ := p.tryIdentOrType(false)
+	if typ != nil {
+		p.resolve(typ)
+	}
+	return typ
+}
+
+// ----------------------------------------------------------------------------
+// Blocks
+
+func (p *parser) parseStmtList() (list []ast.Stmt) {
+	if p.trace {
+		defer un(trace(p, "StatementList"))
+	}
+
+	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
+		list = append(list, p.parseStmt())
+	}
+
+	return
+}
+
+func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
+	if p.trace {
+		defer un(trace(p, "Body"))
+	}
+
+	lbrace := p.expect(token.LBRACE)
+	p.topScope = scope // open function scope
+	p.openLabelScope()
+	list := p.parseStmtList()
+	p.closeLabelScope()
+	p.closeScope()
+	rbrace := p.expect(token.RBRACE)
+
+	return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+func (p *parser) parseBlockStmt() *ast.BlockStmt {
+	if p.trace {
+		defer un(trace(p, "BlockStmt"))
+	}
+
+	lbrace := p.expect(token.LBRACE)
+	p.openScope()
+	list := p.parseStmtList()
+	p.closeScope()
+	rbrace := p.expect(token.RBRACE)
+
+	return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) parseFuncTypeOrLit() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "FuncTypeOrLit"))
+	}
+
+	typ, scope := p.parseFuncType()
+	if p.tok != token.LBRACE {
+		// function type only
+		return typ
+	}
+
+	p.exprLev++
+	body := p.parseBody(scope)
+	p.exprLev--
+
+	return &ast.FuncLit{typ, body}
+}
+
+// parseOperand may return an expression or a raw type (incl. array
+// types of the form [...]T. Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseOperand(lhs bool) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Operand"))
+	}
+
+	switch p.tok {
+	case token.IDENT:
+		x := p.parseIdent()
+		if !lhs {
+			p.resolve(x)
+		}
+		return x
+
+	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
+		x := &ast.BasicLit{p.pos, p.tok, p.lit}
+		p.next()
+		return x
+
+	case token.LPAREN:
+		lparen := p.pos
+		p.next()
+		p.exprLev++
+		x := p.parseRhs()
+		p.exprLev--
+		rparen := p.expect(token.RPAREN)
+		return &ast.ParenExpr{lparen, x, rparen}
+
+	case token.FUNC:
+		return p.parseFuncTypeOrLit()
+
+	default:
+		if typ := p.tryIdentOrType(true); typ != nil {
+			// could be type for composite literal or conversion
+			_, isIdent := typ.(*ast.Ident)
+			assert(!isIdent, "type cannot be identifier")
+			return typ
+		}
+	}
+
+	pos := p.pos
+	p.errorExpected(pos, "operand")
+	p.next() // make progress
+	return &ast.BadExpr{pos, p.pos}
+}
+
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Selector"))
+	}
+
+	sel := p.parseIdent()
+
+	return &ast.SelectorExpr{x, sel}
+}
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "TypeAssertion"))
+	}
+
+	p.expect(token.LPAREN)
+	var typ ast.Expr
+	if p.tok == token.TYPE {
+		// type switch: typ == nil
+		p.next()
+	} else {
+		typ = p.parseType()
+	}
+	p.expect(token.RPAREN)
+
+	return &ast.TypeAssertExpr{x, typ}
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "IndexOrSlice"))
+	}
+
+	lbrack := p.expect(token.LBRACK)
+	p.exprLev++
+	var low, high ast.Expr
+	isSlice := false
+	if p.tok != token.COLON {
+		low = p.parseRhs()
+	}
+	if p.tok == token.COLON {
+		isSlice = true
+		p.next()
+		if p.tok != token.RBRACK {
+			high = p.parseRhs()
+		}
+	}
+	p.exprLev--
+	rbrack := p.expect(token.RBRACK)
+
+	if isSlice {
+		return &ast.SliceExpr{x, lbrack, low, high, rbrack}
+	}
+	return &ast.IndexExpr{x, lbrack, low, rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
+	if p.trace {
+		defer un(trace(p, "CallOrConversion"))
+	}
+
+	lparen := p.expect(token.LPAREN)
+	p.exprLev++
+	var list []ast.Expr
+	var ellipsis token.Pos
+	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
+		list = append(list, p.parseRhs())
+		if p.tok == token.ELLIPSIS {
+			ellipsis = p.pos
+			p.next()
+		}
+		if p.tok != token.COMMA {
+			break
+		}
+		p.next()
+	}
+	p.exprLev--
+	rparen := p.expect(token.RPAREN)
+
+	return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
+}
+
+func (p *parser) parseElement(keyOk bool) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Element"))
+	}
+
+	if p.tok == token.LBRACE {
+		return p.parseLiteralValue(nil)
+	}
+
+	x := p.parseExpr(keyOk) // don't resolve if map key
+	if keyOk {
+		if p.tok == token.COLON {
+			colon := p.pos
+			p.next()
+			return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+		}
+		p.resolve(x) // not a map key
+	}
+
+	return x
+}
+
+func (p *parser) parseElementList() (list []ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ElementList"))
+	}
+
+	for p.tok != token.RBRACE && p.tok != token.EOF {
+		list = append(list, p.parseElement(true))
+		if p.tok != token.COMMA {
+			break
+		}
+		p.next()
+	}
+
+	return
+}
+
+func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "LiteralValue"))
+	}
+
+	lbrace := p.expect(token.LBRACE)
+	var elts []ast.Expr
+	p.exprLev++
+	if p.tok != token.RBRACE {
+		elts = p.parseElementList()
+	}
+	p.exprLev--
+	rbrace := p.expect(token.RBRACE)
+	return &ast.CompositeLit{typ, lbrace, elts, rbrace}
+}
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+	switch t := unparen(x).(type) {
+	case *ast.BadExpr:
+	case *ast.Ident:
+	case *ast.BasicLit:
+	case *ast.FuncLit:
+	case *ast.CompositeLit:
+	case *ast.ParenExpr:
+		panic("unreachable")
+	case *ast.SelectorExpr:
+	case *ast.IndexExpr:
+	case *ast.SliceExpr:
+	case *ast.TypeAssertExpr:
+		if t.Type == nil {
+			// the form X.(type) is only allowed in type switch expressions
+			p.errorExpected(x.Pos(), "expression")
+			x = &ast.BadExpr{x.Pos(), x.End()}
+		}
+	case *ast.CallExpr:
+	case *ast.StarExpr:
+	case *ast.UnaryExpr:
+		if t.Op == token.RANGE {
+			// the range operator is only allowed at the top of a for statement
+			p.errorExpected(x.Pos(), "expression")
+			x = &ast.BadExpr{x.Pos(), x.End()}
+		}
+	case *ast.BinaryExpr:
+	default:
+		// all other nodes are not proper expressions
+		p.errorExpected(x.Pos(), "expression")
+		x = &ast.BadExpr{x.Pos(), x.End()}
+	}
+	return x
+}
+
+// isTypeName reports whether x is a (qualified) TypeName.
+func isTypeName(x ast.Expr) bool {
+	switch t := x.(type) {
+	case *ast.BadExpr:
+	case *ast.Ident:
+	case *ast.SelectorExpr:
+		_, isIdent := t.X.(*ast.Ident)
+		return isIdent
+	default:
+		return false // all other nodes are not type names
+	}
+	return true
+}
+
+// isLiteralType reports whether x is a legal composite literal type.
+func isLiteralType(x ast.Expr) bool {
+	switch t := x.(type) {
+	case *ast.BadExpr:
+	case *ast.Ident:
+	case *ast.SelectorExpr:
+		_, isIdent := t.X.(*ast.Ident)
+		return isIdent
+	case *ast.ArrayType:
+	case *ast.StructType:
+	case *ast.MapType:
+	default:
+		return false // all other nodes are not legal composite literal types
+	}
+	return true
+}
+
+// If x is of the form *T, deref returns T, otherwise it returns x.
+func deref(x ast.Expr) ast.Expr {
+	if p, isPtr := x.(*ast.StarExpr); isPtr {
+		x = p.X
+	}
+	return x
+}
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+	if p, isParen := x.(*ast.ParenExpr); isParen {
+		x = unparen(p.X)
+	}
+	return x
+}
+
+// checkExprOrType checks that x is an expression or a type
+// (and not a raw type such as [...]T).
+func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
+	switch t := unparen(x).(type) {
+	case *ast.ParenExpr:
+		panic("unreachable")
+	case *ast.UnaryExpr:
+		if t.Op == token.RANGE {
+			// the range operator is only allowed at the top of a for statement
+			p.errorExpected(x.Pos(), "expression")
+			x = &ast.BadExpr{x.Pos(), x.End()}
+		}
+	case *ast.ArrayType:
+		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
+			p.error(len.Pos(), "expected array length, found '...'")
+			x = &ast.BadExpr{x.Pos(), x.End()}
+		}
+	}
+
+	// all other nodes are expressions or types
+	return x
+}
+
// If lhs is set and the result is an identifier, it is not resolved.
// parsePrimaryExpr parses an operand followed by any number of
// suffixes: selectors (x.f), type assertions (x.(T)), index/slice
// expressions (x[i]), calls (x(args)), and composite literals (T{...}).
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}

	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExpr(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.next() // make progress
				p.errorExpected(pos, "selector or type assertion")
				x = &ast.BadExpr{pos, p.pos}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// Composite literal: only if x is a legal literal type, and
			// only for non-type-name forms when inside a control-clause
			// header (p.exprLev < 0), where T{...} would be ambiguous
			// with the statement's opening brace.
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}

	return x
}
+
// If lhs is set and the result is an identifier, it is not resolved.
// parseUnaryExpr parses a unary expression: a prefix operator
// (+ - ! ^ & range), a receive operation (<-x) or receive-only
// channel type (<-chan T), a pointer type or dereference (*x),
// or otherwise a primary expression.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}

	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{pos, op, p.checkExpr(x)}

	case token.ARROW:
		// channel type or receive expression
		pos := p.pos
		p.next()
		if p.tok == token.CHAN {
			p.next()
			value := p.parseType()
			return &ast.ChanType{pos, ast.RECV, value}
		}

		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}

	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.StarExpr{pos, p.checkExprOrType(x)}
	}

	return p.parsePrimaryExpr(lhs)
}

// If lhs is set and the result is an identifier, it is not resolved.
// parseBinaryExpr parses a binary expression by precedence climbing:
// it consumes operators of precedence >= prec1, recursing with
// prec+1 for the right operand so tighter-binding operators group
// first.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}

	x := p.parseUnaryExpr(lhs)
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok
			p.next()
			if lhs {
				p.resolve(x)
				lhs = false
			}
			y := p.parseBinaryExpr(false, prec+1)
			x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
		}
	}

	return x
}

// If lhs is set and the result is an identifier, it is not resolved.
// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
//
//	should reject when a type/raw type is obviously not allowed
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}

	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}

// parseRhs parses a right-hand-side expression; identifiers in it
// are resolved immediately (lhs == false).
func (p *parser) parseRhs() ast.Expr {
	return p.parseExpr(false)
}
+
// ----------------------------------------------------------------------------
// Statements

// parseSimpleStmt parses a simple statement: an assignment or short
// variable declaration, a labeled statement (only if labelOk is set),
// a send statement, an increment/decrement, or a plain expression
// statement.
func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}

	x := p.parseLhsList()

	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement
		pos, tok := p.pos, p.tok
		p.next()
		y := p.parseRhsList()
		return &ast.AssignStmt{x, pos, tok, y}
	}

	// everything below operates on a single expression
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}

	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
			p.declare(stmt, p.labelScope, ast.Lbl, label)
			return stmt
		}
		p.error(x[0].Pos(), "illegal label declaration")
		return &ast.BadStmt{x[0].Pos(), colon + 1}

	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next() // consume "<-"
		y := p.parseRhs()
		return &ast.SendStmt{x[0], arrow, y}

	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{x[0], p.pos, p.tok}
		p.next() // consume "++" or "--"
		return s
	}

	// expression
	return &ast.ExprStmt{x[0]}
}
+
// parseCallExpr parses the expression following a go or defer keyword
// and checks that it is a function or method call; it reports an
// error and returns nil otherwise.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseRhs()
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	p.errorExpected(x.Pos(), "function/method call")
	return nil
}

// parseGoStmt parses a go statement. If the operand is not a call, a
// BadStmt spanning the "go" keyword is returned instead.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}

	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{pos, pos + 2} // len("go")
	}

	return &ast.GoStmt{pos, call}
}

// parseDeferStmt parses a defer statement. If the operand is not a
// call, a BadStmt spanning the "defer" keyword is returned instead.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}

	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{pos, pos + 5} // len("defer")
	}

	return &ast.DeferStmt{pos, call}
}

// parseReturnStmt parses a return statement; the result list is
// optional and taken to be absent when the next token is ";" or "}".
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}

	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseRhsList()
	}
	p.expectSemi()

	return &ast.ReturnStmt{pos, x}
}

// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement. An optional label is allowed for all but fallthrough;
// labels are collected in targetStack for later resolution.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}

	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		n := len(p.targetStack) - 1
		p.targetStack[n] = append(p.targetStack[n], label)
	}
	p.expectSemi()

	return &ast.BranchStmt{pos, tok, label}
}
+
// makeExpr converts the statement s into the condition expression it
// carries: nil stays nil, an ExprStmt yields its (checked) expression,
// and any other statement is reported as an error and replaced by a
// BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X)
	}
	p.error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{s.Pos(), s.End()}
}

// parseIfStmt parses an if statement with an optional init statement
// ("if init; cond { ... }") and an optional else branch.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}

	pos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()

	var s ast.Stmt
	var x ast.Expr
	{
		// p.exprLev < 0 while parsing the header keeps bare composite
		// literals from being consumed as the statement's block.
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok == token.SEMICOLON {
			p.next()
			x = p.parseRhs()
		} else {
			s = p.parseSimpleStmt(false)
			if p.tok == token.SEMICOLON {
				p.next()
				x = p.parseRhs()
			} else {
				// no init statement: what was parsed is the condition
				x = p.makeExpr(s)
				s = nil
			}
		}
		p.exprLev = prevLev
	}

	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}

	return &ast.IfStmt{pos, s, x, body, else_}
}
+
// parseTypeList parses a comma-separated list of types, as used in
// type switch case clauses.
func (p *parser) parseTypeList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}

	list = append(list, p.parseType())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseType())
	}

	return
}

// parseCaseClause parses one "case x, y:" or "default:" clause of a
// switch statement; exprSwitch selects between an expression list and
// a type list for the case values.
func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}

	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		if exprSwitch {
			list = p.parseRhsList()
		} else {
			list = p.parseTypeList()
		}
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()

	return &ast.CaseClause{pos, list, colon, body}
}
+
+func isExprSwitch(s ast.Stmt) bool {
+	if s == nil {
+		return true
+	}
+	if e, ok := s.(*ast.ExprStmt); ok {
+		if a, ok := e.X.(*ast.TypeAssertExpr); ok {
+			return a.Type != nil // regular type assertion
+		}
+		return true
+	}
+	return false
+}
+
// parseSwitchStmt parses an expression or type switch statement with
// an optional init statement and guard; isExprSwitch applied to the
// guard decides which AST node is built.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}

	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()

	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		// header: "init; guard", "init;", ";guard", or just "guard"
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2 = p.parseSimpleStmt(false)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				s2 = p.parseSimpleStmt(false)
			}
		}
		p.exprLev = prevLev
	}

	exprSwitch := isExprSwitch(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(exprSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{lbrace, list, rbrace}

	if exprSwitch {
		return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
	}
	// type switch
	// TODO(gri): do all the checks!
	return &ast.TypeSwitchStmt{pos, s1, s2, body}
}

// parseCommClause parses one communication clause of a select
// statement: "case ch <- x:", "case x := <-ch:", "case <-ch:", or
// "default:".
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}

	p.openScope()
	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{lhs[0], arrow, rhs}
		} else {
			// RecvStmt
			pos := p.pos
			tok := p.tok
			var rhs ast.Expr
			if tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				p.next()
				rhs = p.parseRhs()
			} else {
				// rhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				rhs = lhs[0]
				lhs = nil // there is no lhs
			}
			if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
				p.errorExpected(rhs.Pos(), "send or receive operation")
				rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
			}
			if lhs != nil {
				comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
			} else {
				comm = &ast.ExprStmt{rhs}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()

	return &ast.CommClause{pos, comm, colon, body}
}

// parseSelectStmt parses a select statement as a sequence of
// communication clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}

	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{lbrace, list, rbrace}

	return &ast.SelectStmt{pos, body}
}
+
// parseForStmt parses a for statement in any of its forms: bare
// condition, full init/cond/post header, or range clause. A range
// clause arrives from parseSimpleStmt as an assignment whose single
// rhs is a unary range expression; it is rewritten into a RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}

	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()

	var s1, s2, s3 ast.Stmt
	if p.tok != token.LBRACE {
		// header: "cond" or "init; cond; post" (each part optional)
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2 = p.parseSimpleStmt(false)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2 = p.parseSimpleStmt(false)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3 = p.parseSimpleStmt(false)
			}
		}
		p.exprLev = prevLev
	}

	body := p.parseBlockStmt()
	p.expectSemi()

	if as, isAssign := s2.(*ast.AssignStmt); isAssign {
		// possibly a for statement with a range clause; check assignment operator
		if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
			p.errorExpected(as.TokPos, "'=' or ':='")
			return &ast.BadStmt{pos, body.End()}
		}
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		case 1:
			key = as.Lhs[0]
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{pos, body.End()}
		}
		// check rhs
		if len(as.Rhs) != 1 {
			p.errorExpected(as.Rhs[0].Pos(), "1 expression")
			return &ast.BadStmt{pos, body.End()}
		}
		if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
			// rhs is range expression
			// (any short variable declaration was handled by parseSimpleStat above)
			return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
		}
		p.errorExpected(s2.Pos(), "range clause")
		return &ast.BadStmt{pos, body.End()}
	}

	// regular for statement
	return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
}
+
// parseStmt parses a single statement, dispatching on the current
// token; on an unexpected token it reports an error, consumes the
// token to make progress, and returns a BadStmt.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}

	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{p.parseDecl()}
	case
		// tokens that may start a top-level expression
		token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
		token.LBRACK, token.STRUCT, // composite type
		token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
		s = p.parseSimpleStmt(true)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		s = &ast.EmptyStmt{p.pos}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{p.pos}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		p.next() // make progress
		s = &ast.BadStmt{pos, p.pos}
	}

	return
}
+
// ----------------------------------------------------------------------------
// Declarations

// parseSpecFunction parses one specification of a const, type, var,
// or import declaration; iota is the index of the spec within a
// parenthesized group (0 for an unparenthesized declaration).
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec

// parseImportSpec parses a single import spec with an optional
// rename ("." or an identifier) and records it in p.imports.
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		ident = &ast.Ident{p.pos, ".", nil}
		p.next()
	case token.IDENT:
		ident = p.parseIdent()
	}

	var path *ast.BasicLit
	if p.tok == token.STRING {
		path = &ast.BasicLit{p.pos, p.tok, p.lit}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.lineComment

	// collect imports
	spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
	p.imports = append(p.imports, spec)

	return spec
}

// parseConstSpec parses a single const spec. The "= values" part is
// required for the first spec of a group (iota == 0) and whenever an
// explicit type is given; later specs may implicitly repeat the
// previous values.
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ConstSpec"))
	}

	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	if typ != nil || p.tok == token.ASSIGN || iota == 0 {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.lineComment

	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
	p.declare(spec, p.topScope, ast.Con, idents...)

	return spec
}

// parseTypeSpec parses a single type spec. The name is declared
// before its type is parsed so the type may refer to itself.
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}

	ident := p.parseIdent()

	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{doc, ident, nil, nil}
	p.declare(spec, p.topScope, ast.Typ, ident)

	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.lineComment
	spec.Comment = p.lineComment

	return spec
}

// parseVarSpec parses a single var spec; an initializer is required
// exactly when no type is given, and optional otherwise.
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "VarSpec"))
	}

	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	if typ == nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.lineComment

	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
	p.declare(spec, p.topScope, ast.Var, idents...)

	return spec
}
+
// parseGenDecl parses a general declaration: the keyword followed
// either by a single spec or by a parenthesized, semicolon-separated
// group of specs, each parsed by f.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}

	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p, p.leadComment, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		list = append(list, f(p, nil, 0))
	}

	return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
}

// parseReceiver parses a method receiver and checks that it is a
// single parameter whose base type (after stripping one "*") is an
// unqualified identifier; offending receivers are replaced by a
// BadExpr field so parsing can continue.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}

	pos := p.pos
	par := p.parseParameters(scope, false)

	// must have exactly one receiver
	if par.NumFields() != 1 {
		p.errorExpected(pos, "exactly one receiver")
		// TODO determine a better range for BadExpr below
		par.List = []*ast.Field{{Type: &ast.BadExpr{pos, pos}}}
		return par
	}

	// recv type must be of the form ["*"] identifier
	recv := par.List[0]
	base := deref(recv.Type)
	if _, isIdent := base.(*ast.Ident); !isIdent {
		p.errorExpected(base.Pos(), "(unqualified) identifier")
		par.List = []*ast.Field{{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
	}

	return par
}

// parseFuncDecl parses a function or method declaration; functions
// other than init are declared in the package scope.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}

	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope

	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		recv = p.parseReceiver(scope)
	}

	ident := p.parseIdent()

	params, results := p.parseSignature(scope)

	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBody(scope)
	}
	p.expectSemi()

	decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, p.pkgScope, ast.Fun, ident)
		}
	}

	return decl
}
+
// parseDecl parses a single top-level declaration (const, type, var,
// or func); on any other token it reports an error, consumes the
// token to make progress, and returns a BadDecl.
func (p *parser) parseDecl() ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}

	var f parseSpecFunction
	switch p.tok {
	case token.CONST:
		f = parseConstSpec

	case token.TYPE:
		f = parseTypeSpec

	case token.VAR:
		f = parseVarSpec

	case token.FUNC:
		return p.parseFuncDecl()

	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		p.next() // make progress
		decl := &ast.BadDecl{pos, p.pos}
		return decl
	}

	return p.parseGenDecl(p.tok, f)
}

// parseDeclList parses declarations until the end of the file.
func (p *parser) parseDeclList() (list []ast.Decl) {
	if p.trace {
		defer un(trace(p, "DeclList"))
	}

	for p.tok != token.EOF {
		list = append(list, p.parseDecl())
	}

	return
}
+
// ----------------------------------------------------------------------------
// Source files

// parseFile parses the package clause and, subject to p.mode
// (PackageClauseOnly, ImportsOnly), the import declarations and the
// remaining top-level declarations. Identifiers that could not be
// resolved in the package scope are kept in File.Unresolved.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()

	var decls []ast.Decl

	// Don't bother parsing the rest if we had errors already.
	// Likely not a Go source file at all.

	if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
		}

		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl())
			}
		}
	}

	assert(p.topScope == p.pkgScope, "imbalanced scopes")

	// resolve global identifiers within the same file;
	// identifiers still unresolved afterwards are compacted
	// into the front of p.unresolved.
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			p.unresolved[i] = ident
			i++
		}
	}

	// TODO(gri): store p.imports in AST
	return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
}
diff --git a/internal/backport/go/printer/testdata/slow.golden b/internal/backport/go/printer/testdata/slow.golden
new file mode 100644
index 0000000..43a15cb
--- /dev/null
+++ b/internal/backport/go/printer/testdata/slow.golden
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+	"testing"
+	"google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+	if err := deepequal.Check(nil, nil); err != nil {
+		t.Errorf("expected nil, saw %v", err)
+	}
+}
+
+type Foo struct {
+	bar	*Bar
+	bang	*Bar
+}
+
+type Bar struct {
+	baz	*Baz
+	foo	[]*Foo
+}
+
+type Baz struct {
+	entries		map[int]interface{}
+	whatever	string
+}
+
+func newFoo() *Foo {
+	return &Foo{bar: &Bar{baz: &Baz{
+		entries: map[int]interface{}{
+			42:	&Foo{},
+			21:	&Bar{},
+			11:	&Baz{whatever: "it's just a test"}}}},
+		bang: &Bar{foo: []*Foo{
+			&Foo{bar: &Bar{baz: &Baz{
+				entries: map[int]interface{}{
+					43:	&Foo{},
+					22:	&Bar{},
+					13:	&Baz{whatever: "this is nuts"}}}},
+				bang: &Bar{foo: []*Foo{
+					&Foo{bar: &Bar{baz: &Baz{
+						entries: map[int]interface{}{
+							61:	&Foo{},
+							71:	&Bar{},
+							11:	&Baz{whatever: "no, it's Go"}}}},
+						bang: &Bar{foo: []*Foo{
+							&Foo{bar: &Bar{baz: &Baz{
+								entries: map[int]interface{}{
+									0:	&Foo{},
+									-2:	&Bar{},
+									-11:	&Baz{whatever: "we need to go deeper"}}}},
+								bang: &Bar{foo: []*Foo{
+									&Foo{bar: &Bar{baz: &Baz{
+										entries: map[int]interface{}{
+											-2:	&Foo{},
+											-5:	&Bar{},
+											-7:	&Baz{whatever: "are you serious?"}}}},
+										bang:	&Bar{foo: []*Foo{}}},
+									&Foo{bar: &Bar{baz: &Baz{
+										entries: map[int]interface{}{
+											-100:	&Foo{},
+											50:	&Bar{},
+											20:	&Baz{whatever: "na, not really ..."}}}},
+										bang:	&Bar{foo: []*Foo{}}}}}}}}},
+					&Foo{bar: &Bar{baz: &Baz{
+						entries: map[int]interface{}{
+							2:	&Foo{},
+							1:	&Bar{},
+							-1:	&Baz{whatever: "... it's just a test."}}}},
+						bang:	&Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+	a := newFoo()
+	b := newFoo()
+
+	if err := deepequal.Check(a, b); err != nil {
+		t.Errorf("expected nil, saw %v", err)
+	}
+}
diff --git a/internal/backport/go/printer/testdata/slow.input b/internal/backport/go/printer/testdata/slow.input
new file mode 100644
index 0000000..0e5a23d
--- /dev/null
+++ b/internal/backport/go/printer/testdata/slow.input
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+        "testing"
+        "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+        if err := deepequal.Check(nil, nil); err != nil {
+                t.Errorf("expected nil, saw %v", err)
+        }
+}
+
+type Foo struct {
+        bar *Bar
+        bang *Bar
+}
+
+type Bar struct {
+        baz *Baz
+        foo []*Foo
+}
+
+type Baz struct {
+        entries  map[int]interface{}
+        whatever string
+}
+
+func newFoo() (*Foo) {
+return &Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+42: &Foo{},
+21: &Bar{},
+11: &Baz{ whatever: "it's just a test" }}}},
+        bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+43: &Foo{},
+22: &Bar{},
+13: &Baz{ whatever: "this is nuts" }}}},
+        bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+61: &Foo{},
+71: &Bar{},
+11: &Baz{ whatever: "no, it's Go" }}}},
+        bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+0: &Foo{},
+-2: &Bar{},
+-11: &Baz{ whatever: "we need to go deeper" }}}},
+        bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-2: &Foo{},
+-5: &Bar{},
+-7: &Baz{ whatever: "are you serious?" }}}},
+        bang: &Bar{foo: []*Foo{}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-100: &Foo{},
+50: &Bar{},
+20: &Baz{ whatever: "na, not really ..." }}}},
+        bang: &Bar{foo: []*Foo{}}}}}}}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+2: &Foo{},
+1: &Bar{},
+-1: &Baz{ whatever: "... it's just a test." }}}},
+        bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+        a := newFoo()
+        b := newFoo()
+
+        if err := deepequal.Check(a, b); err != nil {
+                t.Errorf("expected nil, saw %v", err)
+        }
+}
diff --git a/internal/backport/go/printer/testdata/statements.golden b/internal/backport/go/printer/testdata/statements.golden
new file mode 100644
index 0000000..4b13460
--- /dev/null
+++ b/internal/backport/go/printer/testdata/statements.golden
@@ -0,0 +1,644 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+var expr bool
+
+func use(x interface{})	{}
+
+// Formatting of multi-line return statements.
+func _f() {
+	return
+	return x, y, z
+	return T{}
+	return T{1, 2, 3},
+		x, y, z
+	return T{1, 2, 3},
+		x, y,
+		z
+	return T{1,
+		2,
+		3}
+	return T{1,
+		2,
+		3,
+	}
+	return T{
+		1,
+		2,
+		3}
+	return T{
+		1,
+		2,
+		3,
+	}
+	return T{
+		1,
+		T{1, 2, 3},
+		3,
+	}
+	return T{
+		1,
+		T{1,
+			2, 3},
+		3,
+	}
+	return T{
+		1,
+		T{1,
+			2,
+			3},
+		3,
+	}
+	return T{
+		1,
+		2,
+	}, nil
+	return T{
+			1,
+			2,
+		},
+		T{
+			x:	3,
+			y:	4,
+		}, nil
+	return T{
+			1,
+			2,
+		},
+		nil
+	return T{
+			1,
+			2,
+		},
+		T{
+			x:	3,
+			y:	4,
+		},
+		nil
+	return x + y +
+		z
+	return func() {}
+	return func() {
+			_ = 0
+		}, T{
+			1, 2,
+		}
+	return func() {
+		_ = 0
+	}
+	return func() T {
+		return T{
+			1, 2,
+		}
+	}
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+	return &T{
+			X:	1,
+			Y:	2,
+		},
+		nil
+}
+
+func G() (*T, *T, os.Error) {
+	return &T{
+			X:	1,
+			Y:	2,
+		},
+		&T{
+			X:	3,
+			Y:	4,
+		},
+		nil
+}
+
+func _() interface{} {
+	return &fileStat{
+		name:		basename(file.name),
+		size:		mkSize(d.FileSizeHigh, d.FileSizeLow),
+		modTime:	mkModTime(d.LastWriteTime),
+		mode:		mkMode(d.FileAttributes),
+		sys:		mkSysFromFI(&d),
+	}, nil
+}
+
+// Formatting of if-statement headers.
+func _() {
+	if true {
+	}
+	if true {
+	}	// no semicolon printed
+	if expr {
+	}
+	if expr {
+	}	// no semicolon printed
+	if expr {
+	}	// no parens printed
+	if expr {
+	}	// no semicolon and parens printed
+	if x := expr; true {
+		use(x)
+	}
+	if x := expr; expr {
+		use(x)
+	}
+}
+
+// Formatting of switch-statement headers.
+func _() {
+	switch {
+	}
+	switch {
+	}	// no semicolon printed
+	switch expr {
+	}
+	switch expr {
+	}	// no semicolon printed
+	switch expr {
+	}	// no parens printed
+	switch expr {
+	}	// no semicolon and parens printed
+	switch x := expr; {
+	default:
+		use(
+			x)
+	}
+	switch x := expr; expr {
+	default:
+		use(x)
+	}
+}
+
+// Formatting of switch statement bodies.
+func _() {
+	switch {
+	}
+
+	switch x := 0; x {
+	case 1:
+		use(x)
+		use(x)	// followed by an empty line
+
+	case 2:	// followed by an empty line
+
+		use(x)	// followed by an empty line
+
+	case 3:	// no empty lines
+		use(x)
+		use(x)
+	}
+
+	switch x {
+	case 0:
+		use(x)
+	case 1:	// this comment should have no effect on the previous or next line
+		use(x)
+	}
+
+	switch x := 0; x {
+	case 1:
+		x = 0
+		// this comment should be indented
+	case 2:
+		x = 0
+	// this comment should not be indented, it is aligned with the next case
+	case 3:
+		x = 0
+		/* indented comment
+		   aligned
+		   aligned
+		*/
+		// bla
+		/* and more */
+	case 4:
+		x = 0
+	/* not indented comment
+	   aligned
+	   aligned
+	*/
+	// bla
+	/* and more */
+	case 5:
+	}
+}
+
+// Formatting of selected select statements.
+func _() {
+	select {}
+	select { /* this comment should not be tab-aligned because the closing } is on the same line */
+	}
+	select {	/* this comment should be tab-aligned */
+	}
+	select {	// this comment should be tab-aligned
+	}
+	select {
+	case <-c:
+	}
+}
+
+// Formatting of for-statement headers for single-line for-loops.
+func _() {
+	for {
+	}
+	for expr {
+	}
+	for expr {
+	}	// no parens printed
+	for {
+	}	// no semicolons printed
+	for x := expr; ; {
+		use(x)
+	}
+	for expr {
+	}	// no semicolons printed
+	for expr {
+	}	// no semicolons and parens printed
+	for ; ; expr = false {
+	}
+	for x := expr; expr; {
+		use(x)
+	}
+	for x := expr; ; expr = false {
+		use(x)
+	}
+	for ; expr; expr = false {
+	}
+	for x := expr; expr; expr = false {
+		use(x)
+	}
+	for x := range []int{} {
+		use(x)
+	}
+	for x := range []int{} {
+		use(x)
+	}	// no parens printed
+}
+
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+	for {
+	}
+	for expr {
+	}
+	for expr {
+	}	// no parens printed
+	for {
+	}	// no semicolons printed
+	for x := expr; ; {
+		use(x)
+	}
+	for expr {
+	}	// no semicolons printed
+	for expr {
+	}	// no semicolons and parens printed
+	for ; ; expr = false {
+	}
+	for x := expr; expr; {
+		use(x)
+	}
+	for x := expr; ; expr = false {
+		use(x)
+	}
+	for ; expr; expr = false {
+	}
+	for x := expr; expr; expr = false {
+		use(x)
+	}
+	for range []int{} {
+		println("foo")
+	}
+	for x := range []int{} {
+		use(x)
+	}
+	for x := range []int{} {
+		use(x)
+	}	// no parens printed
+}
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+	if cond {
+	}
+	if cond {
+	}	// multiple lines
+	if cond {
+	} else {
+	}	// else clause always requires multiple lines
+
+	for {
+	}
+	for i := 0; i < len(a); 1++ {
+	}
+	for i := 0; i < len(a); 1++ {
+		a[i] = i
+	}
+	for i := 0; i < len(a); 1++ {
+		a[i] = i
+	}	// multiple lines
+
+	for range a {
+	}
+	for _ = range a {
+	}
+	for _, _ = range a {
+	}
+	for i := range a {
+	}
+	for i := range a {
+		a[i] = i
+	}
+	for i := range a {
+		a[i] = i
+	}	// multiple lines
+
+	go func() {
+		for {
+			a <- <-b
+		}
+	}()
+	defer func() {
+		if x := recover(); x != nil {
+			err = fmt.Sprintf("error: %s", x.msg)
+		}
+	}()
+}
+
+// Don't remove mandatory parentheses around composite literals in control clauses.
+func _() {
+	// strip parentheses - no composite literals or composite literals don't start with a type name
+	if x {
+	}
+	if x {
+	}
+	if []T{} {
+	}
+	if []T{} {
+	}
+	if []T{} {
+	}
+
+	for x {
+	}
+	for x {
+	}
+	for []T{} {
+	}
+	for []T{} {
+	}
+	for []T{} {
+	}
+
+	switch x {
+	}
+	switch x {
+	}
+	switch []T{} {
+	}
+	switch []T{} {
+	}
+
+	for _ = range []T{T{42}} {
+	}
+
+	// leave parentheses - composite literals start with a type name
+	if (T{}) {
+	}
+	if (T{}) {
+	}
+	if (T{}) {
+	}
+
+	for (T{}) {
+	}
+	for (T{}) {
+	}
+	for (T{}) {
+	}
+
+	switch (T{}) {
+	}
+	switch (T{}) {
+	}
+
+	for _ = range (T1{T{42}}) {
+	}
+
+	if x == (T{42}[0]) {
+	}
+	if (x == T{42}[0]) {
+	}
+	if x == (T{42}[0]) {
+	}
+	if x == (T{42}[0]) {
+	}
+	if x == (T{42}[0]) {
+	}
+	if x == a+b*(T{42}[0]) {
+	}
+	if (x == a+b*T{42}[0]) {
+	}
+	if x == a+b*(T{42}[0]) {
+	}
+	if x == a+(b*(T{42}[0])) {
+	}
+	if x == a+b*(T{42}[0]) {
+	}
+	if (a + b*(T{42}[0])) == x {
+	}
+	if (a + b*(T{42}[0])) == x {
+	}
+
+	if struct{ x bool }{false}.x {
+	}
+	if (struct{ x bool }{false}.x) == false {
+	}
+	if struct{ x bool }{false}.x == false {
+	}
+}
+
+// Extra empty lines inside functions. Do respect source code line
+// breaks between statement boundaries but print at most one empty
+// line at a time.
+func _() {
+
+	const _ = 0
+
+	const _ = 1
+	type _ int
+	type _ float
+
+	var _ = 0
+	var x = 1
+
+	// Each use(x) call below should have at most one empty line before and after.
+	// Known bug: The first use call may have more than one empty line before
+	//            (see go/printer/nodes.go, func linebreak).
+
+	use(x)
+
+	if x < x {
+
+		use(x)
+
+	} else {
+
+		use(x)
+
+	}
+}
+
+// Formatting around labels.
+func _() {
+L:
+}
+
+func _() {
+	// this comment should be indented
+L:	// no semicolon needed
+}
+
+func _() {
+	switch 0 {
+	case 0:
+	L0:
+		;	// semicolon required
+	case 1:
+	L1:
+		;	// semicolon required
+	default:
+	L2:	// no semicolon needed
+	}
+}
+
+func _() {
+	f()
+L1:
+	f()
+L2:
+	;
+L3:
+}
+
+func _() {
+	// this comment should be indented
+L:
+}
+
+func _() {
+L:
+	_ = 0
+}
+
+func _() {
+	// this comment should be indented
+L:
+	_ = 0
+}
+
+func _() {
+	for {
+	L1:
+		_ = 0
+	L2:
+		_ = 0
+	}
+}
+
+func _() {
+	// this comment should be indented
+	for {
+	L1:
+		_ = 0
+	L2:
+		_ = 0
+	}
+}
+
+func _() {
+	if true {
+		_ = 0
+	}
+	_ = 0	// the indentation here should not be affected by the long label name
+AnOverlongLabel:
+	_ = 0
+
+	if true {
+		_ = 0
+	}
+	_ = 0
+
+L:
+	_ = 0
+}
+
+func _() {
+	for {
+		goto L
+	}
+L:
+
+	MoreCode()
+}
+
+func _() {
+	for {
+		goto L
+	}
+L:	// A comment on the same line as the label, followed by a single empty line.
+	// Known bug: There may be more than one empty line before MoreCode()
+	//            (see go/printer/nodes.go, func linebreak).
+
+	MoreCode()
+}
+
+func _() {
+	for {
+		goto L
+	}
+L:
+
+	// There should be a single empty line before this comment.
+	MoreCode()
+}
+
+func _() {
+	for {
+		goto AVeryLongLabelThatShouldNotAffectFormatting
+	}
+AVeryLongLabelThatShouldNotAffectFormatting:
+	// There should be a single empty line after this comment.
+
+	// There should be a single empty line before this comment.
+	MoreCode()
+}
+
+// Formatting of empty statements.
+func _() {
+
+}
+
+func _() {
+}
+
+func _() {
+}
+
+func _() {
+	f()
+}
+
+func _() {
+L:
+	;
+}
+
+func _() {
+L:
+	;
+	f()
+}
diff --git a/internal/backport/go/printer/testdata/statements.input b/internal/backport/go/printer/testdata/statements.input
new file mode 100644
index 0000000..cade157
--- /dev/null
+++ b/internal/backport/go/printer/testdata/statements.input
@@ -0,0 +1,555 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+var expr bool
+
+func use(x interface{}) {}
+
+// Formatting of multi-line return statements.
+func _f() {
+	return
+	return x, y, z
+	return T{}
+	return T{1, 2, 3},
+		x, y, z
+	return T{1, 2, 3},
+		x, y,
+		z
+	return T{1,
+		2,
+		3}
+	return T{1,
+		2,
+		3,
+	}
+	return T{
+		1,
+		2,
+		3}
+	return T{
+		1,
+		2,
+		3,
+	}
+	return T{
+		1,
+		T{1, 2, 3},
+		3,
+	}
+	return T{
+		1,
+		T{1,
+			2, 3},
+		3,
+	}
+	return T{
+		1,
+		T{1,
+			2,
+			3},
+		3,
+	}
+	return T{
+			1,
+			2,
+		}, nil
+	return T{
+			1,
+			2,
+		},
+		T{
+			x: 3,
+			y: 4,
+		}, nil
+	return T{
+			1,
+			2,
+		},
+		nil
+	return T{
+			1,
+			2,
+		},
+		T{
+			x: 3,
+			y: 4,
+		},
+		nil
+	return x + y +
+		z
+	return func() {}
+	return func() {
+		_ = 0
+	}, T{
+		1, 2,
+	}
+	return func() {
+		_ = 0
+	}
+	return func() T {
+		return T {
+			1, 2,
+		}
+	}
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+       return &T{
+               X: 1,
+               Y: 2,
+       },
+               nil
+}
+
+func G() (*T, *T, os.Error) {
+       return &T{
+               X: 1,
+               Y: 2,
+       },
+               &T{
+                       X: 3,
+                       Y: 4,
+               },
+               nil
+}
+
+func _() interface{} {
+	return &fileStat{
+			name:    basename(file.name),
+			size:    mkSize(d.FileSizeHigh, d.FileSizeLow),
+			modTime: mkModTime(d.LastWriteTime),
+			mode:    mkMode(d.FileAttributes),
+			sys:     mkSysFromFI(&d),
+		}, nil
+}
+
+// Formatting of if-statement headers.
+func _() {
+	if true {}
+	if; true {}  // no semicolon printed
+	if expr{}
+	if;expr{}  // no semicolon printed
+	if (expr){}  // no parens printed
+	if;((expr)){}  // no semicolon and parens printed
+	if x:=expr;true{
+	use(x)}
+	if x:=expr; expr {use(x)}
+}
+
+
+// Formatting of switch-statement headers.
+func _() {
+	switch {}
+	switch;{}  // no semicolon printed
+	switch expr {}
+	switch;expr{}  // no semicolon printed
+	switch (expr) {}  // no parens printed
+	switch;((expr)){}  // no semicolon and parens printed
+	switch x := expr; { default:use(
+x)
+	}
+	switch x := expr; expr {default:use(x)}
+}
+
+
+// Formatting of switch statement bodies.
+func _() {
+	switch {
+	}
+
+	switch x := 0; x {
+	case 1:
+		use(x)
+		use(x)  // followed by an empty line
+
+	case 2:  // followed by an empty line
+
+		use(x)  // followed by an empty line
+
+	case 3:  // no empty lines
+		use(x)
+		use(x)
+	}
+
+	switch x {
+	case 0:
+		use(x)
+	case 1:  // this comment should have no effect on the previous or next line
+		use(x)
+	}
+
+	switch x := 0; x {
+	case 1:
+		x = 0
+		// this comment should be indented
+	case 2:
+		x = 0
+	// this comment should not be indented, it is aligned with the next case
+	case 3:
+		x = 0
+		/* indented comment
+		   aligned
+		   aligned
+		*/
+		// bla
+		/* and more */
+	case 4:
+		x = 0
+	/* not indented comment
+	   aligned
+	   aligned
+	*/
+	// bla
+	/* and more */
+	case 5:
+	}
+}
+
+
+// Formatting of selected select statements.
+func _() {
+	select {
+	}
+	select { /* this comment should not be tab-aligned because the closing } is on the same line */ }
+	select { /* this comment should be tab-aligned */
+	}
+	select { // this comment should be tab-aligned
+	}
+	select { case <-c: }
+}
+
+
+// Formatting of for-statement headers for single-line for-loops.
+func _() {
+	for{}
+	for expr {}
+	for (expr) {}  // no parens printed
+	for;;{}  // no semicolons printed
+	for x :=expr;; {use( x)}
+	for; expr;{}  // no semicolons printed
+	for; ((expr));{}  // no semicolons and parens printed
+	for; ; expr = false {}
+	for x :=expr; expr; {use(x)}
+	for x := expr;; expr=false {use(x)}
+	for;expr;expr =false {}
+	for x := expr;expr;expr = false { use(x) }
+	for x := range []int{} { use(x) }
+	for x := range (([]int{})) { use(x) }  // no parens printed
+}
+
+
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+	for{
+	}
+	for expr {
+	}
+	for (expr) {
+	}  // no parens printed
+	for;;{
+	}  // no semicolons printed
+	for x :=expr;; {use( x)
+	}
+	for; expr;{
+	}  // no semicolons printed
+	for; ((expr));{
+	}  // no semicolons and parens printed
+	for; ; expr = false {
+	}
+	for x :=expr; expr; {use(x)
+	}
+	for x := expr;; expr=false {use(x)
+	}
+	for;expr;expr =false {
+	}
+	for x := expr;expr;expr = false {
+	use(x)
+	}
+	for range []int{} {
+	println("foo")}
+	for x := range []int{} {
+	use(x) }
+	for x := range (([]int{})) {
+	use(x) }  // no parens printed
+}
+
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+	if cond {}
+	if cond {
+	} // multiple lines
+	if cond {} else {} // else clause always requires multiple lines
+
+	for {}
+	for i := 0; i < len(a); 1++ {}
+	for i := 0; i < len(a); 1++ { a[i] = i }
+	for i := 0; i < len(a); 1++ { a[i] = i
+	} // multiple lines
+
+	for range a{}
+	for _ = range a{}
+	for _, _ = range a{}
+	for i := range a {}
+	for i := range a { a[i] = i }
+	for i := range a { a[i] = i
+	} // multiple lines
+
+	go func() { for { a <- <-b } }()
+	defer func() { if x := recover(); x != nil { err = fmt.Sprintf("error: %s", x.msg) } }()
+}
+
+
+// Don't remove mandatory parentheses around composite literals in control clauses.
+func _() {
+	// strip parentheses - no composite literals or composite literals don't start with a type name
+	if (x) {}
+	if (((x))) {}
+	if ([]T{}) {}
+	if (([]T{})) {}
+	if ; (((([]T{})))) {}
+
+	for (x) {}
+	for (((x))) {}
+	for ([]T{}) {}
+	for (([]T{})) {}
+	for ; (((([]T{})))) ; {}
+
+	switch (x) {}
+	switch (((x))) {}
+	switch ([]T{}) {}
+	switch ; (((([]T{})))) {}
+
+	for _ = range ((([]T{T{42}}))) {}
+
+	// leave parentheses - composite literals start with a type name
+	if (T{}) {}
+	if ((T{})) {}
+	if ; ((((T{})))) {}
+
+	for (T{}) {}
+	for ((T{})) {}
+	for ; ((((T{})))) ; {}
+
+	switch (T{}) {}
+	switch ; ((((T{})))) {}
+
+	for _ = range (((T1{T{42}}))) {}
+
+	if x == (T{42}[0]) {}
+	if (x == T{42}[0]) {}
+	if (x == (T{42}[0])) {}
+	if (x == (((T{42}[0])))) {}
+	if (((x == (T{42}[0])))) {}
+	if x == a + b*(T{42}[0]) {}
+	if (x == a + b*T{42}[0]) {}
+	if (x == a + b*(T{42}[0])) {}
+	if (x == a + ((b * (T{42}[0])))) {}
+	if (((x == a + b * (T{42}[0])))) {}
+	if (((a + b * (T{42}[0])) == x)) {}
+	if (((a + b * (T{42}[0])))) == x {}
+
+	if (struct{x bool}{false}.x) {}
+	if (struct{x bool}{false}.x) == false {}
+	if (struct{x bool}{false}.x == false) {}
+}
+
+
+// Extra empty lines inside functions. Do respect source code line
+// breaks between statement boundaries but print at most one empty
+// line at a time.
+func _() {
+
+	const _ = 0
+
+	const _ = 1
+	type _ int
+	type _ float
+
+	var _ = 0
+	var x = 1
+
+	// Each use(x) call below should have at most one empty line before and after.
+	// Known bug: The first use call may have more than one empty line before
+	//            (see go/printer/nodes.go, func linebreak).
+
+
+
+	use(x)
+
+	if x < x {
+
+		use(x)
+
+	} else {
+
+		use(x)
+
+	}
+}
+
+
+// Formatting around labels.
+func _() {
+	L:
+}
+
+
+func _() {
+	// this comment should be indented
+	L: ;  // no semicolon needed
+}
+
+
+func _() {
+	switch 0 {
+	case 0:
+		L0: ;  // semicolon required
+	case 1:
+		L1: ;  // semicolon required
+	default:
+		L2: ;  // no semicolon needed
+	}
+}
+
+
+func _() {
+	f()
+L1:
+	f()
+L2:
+	;
+L3:
+}
+
+
+func _() {
+	// this comment should be indented
+	L:
+}
+
+
+func _() {
+	L: _ = 0
+}
+
+
+func _() {
+	// this comment should be indented
+	L: _ = 0
+}
+
+
+func _() {
+	for {
+	L1: _ = 0
+	L2:
+		_ = 0
+	}
+}
+
+
+func _() {
+		// this comment should be indented
+	for {
+	L1: _ = 0
+	L2:
+		_ = 0
+	}
+}
+
+
+func _() {
+	if true {
+		_ = 0
+	}
+	_ = 0  // the indentation here should not be affected by the long label name
+AnOverlongLabel:
+	_ = 0
+	
+	if true {
+		_ = 0
+	}
+	_ = 0
+
+L:	_ = 0
+}
+
+
+func _() {
+	for {
+		goto L
+	}
+L:
+
+	MoreCode()
+}
+
+
+func _() {
+	for {
+		goto L
+	}
+L:	// A comment on the same line as the label, followed by a single empty line.
+	// Known bug: There may be more than one empty line before MoreCode()
+	//            (see go/printer/nodes.go, func linebreak).
+
+
+
+
+	MoreCode()
+}
+
+
+func _() {
+	for {
+		goto L
+	}
+L:
+
+
+
+
+	// There should be a single empty line before this comment.
+	MoreCode()
+}
+
+
+func _() {
+	for {
+		goto AVeryLongLabelThatShouldNotAffectFormatting
+	}
+AVeryLongLabelThatShouldNotAffectFormatting:
+	// There should be a single empty line after this comment.
+
+	// There should be a single empty line before this comment.
+	MoreCode()
+}
+
+
+// Formatting of empty statements.
+func _() {
+	;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;}
+
+func _() {
+f();;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+	f()
+}
diff --git a/internal/backport/go/scanner/errors.go b/internal/backport/go/scanner/errors.go
new file mode 100644
index 0000000..539bbfd
--- /dev/null
+++ b/internal/backport/go/scanner/errors.go
@@ -0,0 +1,120 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/token"
+	"io"
+	"sort"
+)
+
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
+type Error struct {
+	Pos token.Position
+	Msg string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Pos.Filename != "" || e.Pos.IsValid() {
+		// don't print "<unknown position>"
+		// TODO(gri) reconsider the semantics of Position.IsValid
+		return e.Pos.String() + ": " + e.Msg
+	}
+	return e.Msg
+}
+
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+type ErrorList []*Error
+
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+	*p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] }
+
+// ErrorList implements the sort Interface.
+func (p ErrorList) Len() int      { return len(p) }
+func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p ErrorList) Less(i, j int) bool {
+	e := &p[i].Pos
+	f := &p[j].Pos
+	// Note that it is not sufficient to simply compare file offsets because
+	// the offsets do not reflect modified line information (through //line
+	// comments).
+	if e.Filename != f.Filename {
+		return e.Filename < f.Filename
+	}
+	if e.Line != f.Line {
+		return e.Line < f.Line
+	}
+	if e.Column != f.Column {
+		return e.Column < f.Column
+	}
+	return p[i].Msg < p[j].Msg
+}
+
+// Sort sorts an ErrorList. *Error entries are sorted by position,
+// other errors are sorted by error message, and before any *Error
+// entry.
+func (p ErrorList) Sort() {
+	sort.Sort(p)
+}
+
+// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
+func (p *ErrorList) RemoveMultiples() {
+	sort.Sort(p)
+	var last token.Position // initial last.Line is != any legal error line
+	i := 0
+	for _, e := range *p {
+		if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
+			last = e.Pos
+			(*p)[i] = e
+			i++
+		}
+	}
+	*p = (*p)[0:i]
+}
+
+// An ErrorList implements the error interface.
+func (p ErrorList) Error() string {
+	switch len(p) {
+	case 0:
+		return "no errors"
+	case 1:
+		return p[0].Error()
+	}
+	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
+}
+
+// Err returns an error equivalent to this error list.
+// If the list is empty, Err returns nil.
+func (p ErrorList) Err() error {
+	if len(p) == 0 {
+		return nil
+	}
+	return p
+}
+
+// PrintError is a utility function that prints a list of errors to w,
+// one error per line, if the err parameter is an ErrorList. Otherwise
+// it prints the err string.
+func PrintError(w io.Writer, err error) {
+	if list, ok := err.(ErrorList); ok {
+		for _, e := range list {
+			fmt.Fprintf(w, "%s\n", e)
+		}
+	} else if err != nil {
+		fmt.Fprintf(w, "%s\n", err)
+	}
+}
diff --git a/internal/backport/go/scanner/example_test.go b/internal/backport/go/scanner/example_test.go
new file mode 100644
index 0000000..e909237
--- /dev/null
+++ b/internal/backport/go/scanner/example_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner_test
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+func ExampleScanner_Scan() {
+	// src is the input that we want to tokenize.
+	src := []byte("cos(x) + 1i*sin(x) // Euler")
+
+	// Initialize the scanner.
+	var s scanner.Scanner
+	fset := token.NewFileSet()                      // positions are relative to fset
+	file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
+	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
+
+	// Repeated calls to Scan yield the token sequence found in the input.
+	for {
+		pos, tok, lit := s.Scan()
+		if tok == token.EOF {
+			break
+		}
+		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
+	}
+
+	// output:
+	// 1:1	IDENT	"cos"
+	// 1:4	(	""
+	// 1:5	IDENT	"x"
+	// 1:6	)	""
+	// 1:8	+	""
+	// 1:10	IMAG	"1i"
+	// 1:12	*	""
+	// 1:13	IDENT	"sin"
+	// 1:16	(	""
+	// 1:17	IDENT	"x"
+	// 1:18	)	""
+	// 1:20	;	"\n"
+	// 1:20	COMMENT	"// Euler"
+}
diff --git a/internal/backport/go/scanner/scanner.go b/internal/backport/go/scanner/scanner.go
new file mode 100644
index 0000000..f96569e
--- /dev/null
+++ b/internal/backport/go/scanner/scanner.go
@@ -0,0 +1,983 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner implements a scanner for Go source text.
+// It takes a []byte as source which can then be tokenized
+// through repeated calls to the Scan method.
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"golang.org/x/website/internal/backport/go/token"
+	"path/filepath"
+	"strconv"
+	"unicode"
+	"unicode/utf8"
+)
+
+// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
+// encountered and a handler was installed, the handler is called with a
+// position and an error message. The position points to the beginning of
+// the offending token.
+type ErrorHandler func(pos token.Position, msg string)
+
+// A Scanner holds the scanner's internal state while processing
+// a given text. It can be allocated as part of another data
+// structure but must be initialized via Init before use.
+type Scanner struct {
+	// immutable state
+	file *token.File  // source file handle
+	dir  string       // directory portion of file.Name()
+	src  []byte       // source
+	err  ErrorHandler // error reporting; or nil
+	mode Mode         // scanning mode
+
+	// scanning state
+	ch         rune // current character
+	offset     int  // character offset
+	rdOffset   int  // reading offset (position after current character)
+	lineOffset int  // current line offset
+	insertSemi bool // insert a semicolon before next newline
+
+	// public state - ok to modify
+	ErrorCount int // number of errors encountered
+}
+
+const (
+	bom = 0xFEFF // byte order mark, only permitted as very first character
+	eof = -1     // end of file
+)
+
+// Read the next Unicode char into s.ch.
+// s.ch < 0 means end-of-file.
+//
+// For optimization, there is some overlap between this method and
+// s.scanIdentifier.
+func (s *Scanner) next() {
+	if s.rdOffset < len(s.src) {
+		s.offset = s.rdOffset
+		if s.ch == '\n' {
+			s.lineOffset = s.offset
+			s.file.AddLine(s.offset)
+		}
+		r, w := rune(s.src[s.rdOffset]), 1
+		switch {
+		case r == 0:
+			s.error(s.offset, "illegal character NUL")
+		case r >= utf8.RuneSelf:
+			// not ASCII
+			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
+			if r == utf8.RuneError && w == 1 {
+				s.error(s.offset, "illegal UTF-8 encoding")
+			} else if r == bom && s.offset > 0 {
+				s.error(s.offset, "illegal byte order mark")
+			}
+		}
+		s.rdOffset += w
+		s.ch = r
+	} else {
+		s.offset = len(s.src)
+		if s.ch == '\n' {
+			s.lineOffset = s.offset
+			s.file.AddLine(s.offset)
+		}
+		s.ch = eof
+	}
+}
+
+// peek returns the byte following the most recently read character without
+// advancing the scanner. If the scanner is at EOF, peek returns 0.
+func (s *Scanner) peek() byte {
+	if s.rdOffset < len(s.src) {
+		return s.src[s.rdOffset]
+	}
+	return 0
+}
+
+// A mode value is a set of flags (or 0).
+// They control scanner behavior.
+type Mode uint
+
+const (
+	ScanComments    Mode = 1 << iota // return comments as COMMENT tokens
+	dontInsertSemis                  // do not automatically insert semicolons - for testing only
+)
+
+// Init prepares the scanner s to tokenize the text src by setting the
+// scanner at the beginning of src. The scanner uses the file set file
+// for position information and it adds line information for each line.
+// It is ok to re-use the same file when re-scanning the same file as
+// line information which is already present is ignored. Init causes a
+// panic if the file size does not match the src size.
+//
+// Calls to Scan will invoke the error handler err if they encounter a
+// syntax error and err is not nil. Also, for each error encountered,
+// the Scanner field ErrorCount is incremented by one. The mode parameter
+// determines how comments are handled.
+//
+// Note that Init may call err if there is an error in the first character
+// of the file.
+func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
+	// Explicitly initialize all fields since a scanner may be reused.
+	if file.Size() != len(src) {
+		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
+	}
+	s.file = file
+	s.dir, _ = filepath.Split(file.Name())
+	s.src = src
+	s.err = err
+	s.mode = mode
+
+	s.ch = ' '
+	s.offset = 0
+	s.rdOffset = 0
+	s.lineOffset = 0
+	s.insertSemi = false
+	s.ErrorCount = 0
+
+	s.next()
+	if s.ch == bom {
+		s.next() // ignore BOM at file beginning
+	}
+}
+
+func (s *Scanner) error(offs int, msg string) {
+	if s.err != nil {
+		s.err(s.file.Position(s.file.Pos(offs)), msg)
+	}
+	s.ErrorCount++
+}
+
+func (s *Scanner) errorf(offs int, format string, args ...interface{}) {
+	s.error(offs, fmt.Sprintf(format, args...))
+}
+
+func (s *Scanner) scanComment() string {
+	// initial '/' already consumed; s.ch == '/' || s.ch == '*'
+	offs := s.offset - 1 // position of initial '/'
+	next := -1           // position immediately following the comment; < 0 means invalid comment
+	numCR := 0
+
+	if s.ch == '/' {
+		//-style comment
+		// (the final '\n' is not considered part of the comment)
+		s.next()
+		for s.ch != '\n' && s.ch >= 0 {
+			if s.ch == '\r' {
+				numCR++
+			}
+			s.next()
+		}
+		// if we are at '\n', the position following the comment is afterwards
+		next = s.offset
+		if s.ch == '\n' {
+			next++
+		}
+		goto exit
+	}
+
+	/*-style comment */
+	s.next()
+	for s.ch >= 0 {
+		ch := s.ch
+		if ch == '\r' {
+			numCR++
+		}
+		s.next()
+		if ch == '*' && s.ch == '/' {
+			s.next()
+			next = s.offset
+			goto exit
+		}
+	}
+
+	s.error(offs, "comment not terminated")
+
+exit:
+	lit := s.src[offs:s.offset]
+
+	// On Windows, a (//-comment) line may end in "\r\n".
+	// Remove the final '\r' before analyzing the text for
+	// line directives (matching the compiler). Remove any
+	// other '\r' afterwards (matching the pre-existing be-
+	// havior of the scanner).
+	if numCR > 0 && len(lit) >= 2 && lit[1] == '/' && lit[len(lit)-1] == '\r' {
+		lit = lit[:len(lit)-1]
+		numCR--
+	}
+
+	// interpret line directives
+	// (//line directives must start at the beginning of the current line)
+	if next >= 0 /* implies valid comment */ && (lit[1] == '*' || offs == s.lineOffset) && bytes.HasPrefix(lit[2:], prefix) {
+		s.updateLineInfo(next, offs, lit)
+	}
+
+	if numCR > 0 {
+		lit = stripCR(lit, lit[1] == '*')
+	}
+
+	return string(lit)
+}
+
+var prefix = []byte("line ")
+
+// updateLineInfo parses the incoming comment text at offset offs
+// as a line directive. If successful, it updates the line info table
+// for the position next per the line directive.
+func (s *Scanner) updateLineInfo(next, offs int, text []byte) {
+	// extract comment text
+	if text[1] == '*' {
+		text = text[:len(text)-2] // lop off trailing "*/"
+	}
+	text = text[7:] // lop off leading "//line " or "/*line "
+	offs += 7
+
+	i, n, ok := trailingDigits(text)
+	if i == 0 {
+		return // ignore (not a line directive)
+	}
+	// i > 0
+
+	if !ok {
+		// text has a suffix :xxx but xxx is not a number
+		s.error(offs+i, "invalid line number: "+string(text[i:]))
+		return
+	}
+
+	var line, col int
+	i2, n2, ok2 := trailingDigits(text[:i-1])
+	if ok2 {
+		//line filename:line:col
+		i, i2 = i2, i
+		line, col = n2, n
+		if col == 0 {
+			s.error(offs+i2, "invalid column number: "+string(text[i2:]))
+			return
+		}
+		text = text[:i2-1] // lop off ":col"
+	} else {
+		//line filename:line
+		line = n
+	}
+
+	if line == 0 {
+		s.error(offs+i, "invalid line number: "+string(text[i:]))
+		return
+	}
+
+	// If we have a column (//line filename:line:col form),
+	// an empty filename means to use the previous filename.
+	filename := string(text[:i-1]) // lop off ":line", and trim white space
+	if filename == "" && ok2 {
+		filename = s.file.Position(s.file.Pos(offs)).Filename
+	} else if filename != "" {
+		// Put a relative filename in the current directory.
+		// This is for compatibility with earlier releases.
+		// See issue 26671.
+		filename = filepath.Clean(filename)
+		if !filepath.IsAbs(filename) {
+			filename = filepath.Join(s.dir, filename)
+		}
+	}
+
+	s.file.AddLineColumnInfo(next, filename, line, col)
+}
+
+func trailingDigits(text []byte) (int, int, bool) {
+	i := bytes.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':')
+	if i < 0 {
+		return 0, 0, false // no ":"
+	}
+	// i >= 0
+	n, err := strconv.ParseUint(string(text[i+1:]), 10, 0)
+	return i + 1, int(n), err == nil
+}
+
+func (s *Scanner) findLineEnd() bool {
+	// initial '/' already consumed
+
+	defer func(offs int) {
+		// reset scanner state to where it was upon calling findLineEnd
+		s.ch = '/'
+		s.offset = offs
+		s.rdOffset = offs + 1
+		s.next() // consume initial '/' again
+	}(s.offset - 1)
+
+	// read ahead until a newline, EOF, or non-comment token is found
+	for s.ch == '/' || s.ch == '*' {
+		if s.ch == '/' {
+			//-style comment always contains a newline
+			return true
+		}
+		/*-style comment: look for newline */
+		s.next()
+		for s.ch >= 0 {
+			ch := s.ch
+			if ch == '\n' {
+				return true
+			}
+			s.next()
+			if ch == '*' && s.ch == '/' {
+				s.next()
+				break
+			}
+		}
+		s.skipWhitespace() // s.insertSemi is set
+		if s.ch < 0 || s.ch == '\n' {
+			return true
+		}
+		if s.ch != '/' {
+			// non-comment token
+			return false
+		}
+		s.next() // consume '/'
+	}
+
+	return false
+}
+
+func isLetter(ch rune) bool {
+	return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+func isDigit(ch rune) bool {
+	return isDecimal(ch) || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
+}
+
+// scanIdentifier reads the string of valid identifier characters at s.offset.
+// It must only be called when s.ch is known to be a valid letter.
+//
+// Be careful when making changes to this function: it is optimized and affects
+// scanning performance significantly.
+func (s *Scanner) scanIdentifier() string {
+	offs := s.offset
+
+	// Optimize for the common case of an ASCII identifier.
+	//
+	// Ranging over s.src[s.rdOffset:] lets us avoid some bounds checks, and
+	// avoids conversions to runes.
+	//
+	// In case we encounter a non-ASCII character, fall back on the slower path
+	// of calling into s.next().
+	for rdOffset, b := range s.src[s.rdOffset:] {
+		if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || b == '_' || '0' <= b && b <= '9' {
+			// Avoid assigning a rune for the common case of an ascii character.
+			continue
+		}
+		s.rdOffset += rdOffset
+		if 0 < b && b < utf8.RuneSelf {
+			// Optimization: we've encountered an ASCII character that's not a letter
+			// or number. Avoid the call into s.next() and corresponding set up.
+			//
+			// Note that s.next() does some line accounting if s.ch is '\n', so this
+			// shortcut is only possible because we know that the preceding character
+			// is not '\n'.
+			s.ch = rune(b)
+			s.offset = s.rdOffset
+			s.rdOffset++
+			goto exit
+		}
+		// We know that the preceding character is valid for an identifier because
+		// scanIdentifier is only called when s.ch is a letter, so calling s.next()
+		// at s.rdOffset resets the scanner state.
+		s.next()
+		for isLetter(s.ch) || isDigit(s.ch) {
+			s.next()
+		}
+		goto exit
+	}
+	s.offset = len(s.src)
+	s.rdOffset = len(s.src)
+	s.ch = eof
+
+exit:
+	return string(s.src[offs:s.offset])
+}
+
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= lower(ch) && lower(ch) <= 'f':
+		return int(lower(ch) - 'a' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
+
+func lower(ch rune) rune     { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+func isHex(ch rune) bool     { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
+
+// digits accepts the sequence { digit | '_' }.
+// If base <= 10, digits accepts any decimal digit but records
+// the offset (relative to the source start) of a digit >= base
+// in *invalid, if *invalid < 0.
+// digits returns a bitset describing whether the sequence contained
+// digits (bit 0 is set), or separators '_' (bit 1 is set).
+func (s *Scanner) digits(base int, invalid *int) (digsep int) {
+	if base <= 10 {
+		max := rune('0' + base)
+		for isDecimal(s.ch) || s.ch == '_' {
+			ds := 1
+			if s.ch == '_' {
+				ds = 2
+			} else if s.ch >= max && *invalid < 0 {
+				*invalid = s.offset // record invalid rune offset
+			}
+			digsep |= ds
+			s.next()
+		}
+	} else {
+		for isHex(s.ch) || s.ch == '_' {
+			ds := 1
+			if s.ch == '_' {
+				ds = 2
+			}
+			digsep |= ds
+			s.next()
+		}
+	}
+	return
+}
+
+// scanNumber scans an integer, floating-point, or imaginary literal starting
+// at the current character and returns the token kind (token.INT, token.FLOAT,
+// token.IMAG, or token.ILLEGAL on error) together with the literal text.
+// Errors (invalid digits, missing digits, bad exponents, misplaced '_'
+// separators) are reported via s.error/s.errorf but scanning continues so a
+// best-effort literal is still returned.
+func (s *Scanner) scanNumber() (token.Token, string) {
+	offs := s.offset
+	tok := token.ILLEGAL
+
+	base := 10        // number base
+	prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
+	digsep := 0       // bit 0: digit present, bit 1: '_' present
+	invalid := -1     // index of invalid digit in literal, or < 0
+
+	// integer part
+	if s.ch != '.' {
+		tok = token.INT
+		if s.ch == '0' {
+			s.next()
+			switch lower(s.ch) {
+			case 'x':
+				s.next()
+				base, prefix = 16, 'x'
+			case 'o':
+				s.next()
+				base, prefix = 8, 'o'
+			case 'b':
+				s.next()
+				base, prefix = 2, 'b'
+			default:
+				base, prefix = 8, '0'
+				digsep = 1 // leading 0
+			}
+		}
+		digsep |= s.digits(base, &invalid)
+	}
+
+	// fractional part
+	if s.ch == '.' {
+		tok = token.FLOAT
+		if prefix == 'o' || prefix == 'b' {
+			s.error(s.offset, "invalid radix point in "+litname(prefix))
+		}
+		s.next()
+		digsep |= s.digits(base, &invalid)
+	}
+
+	// digsep bit 0 must be set: a literal with no digits at all is an error.
+	if digsep&1 == 0 {
+		s.error(s.offset, litname(prefix)+" has no digits")
+	}
+
+	// exponent
+	if e := lower(s.ch); e == 'e' || e == 'p' {
+		switch {
+		case e == 'e' && prefix != 0 && prefix != '0':
+			s.errorf(s.offset, "%q exponent requires decimal mantissa", s.ch)
+		case e == 'p' && prefix != 'x':
+			s.errorf(s.offset, "%q exponent requires hexadecimal mantissa", s.ch)
+		}
+		s.next()
+		tok = token.FLOAT
+		if s.ch == '+' || s.ch == '-' {
+			s.next()
+		}
+		ds := s.digits(10, nil)
+		digsep |= ds
+		if ds&1 == 0 {
+			s.error(s.offset, "exponent has no digits")
+		}
+	} else if prefix == 'x' && tok == token.FLOAT {
+		s.error(s.offset, "hexadecimal mantissa requires a 'p' exponent")
+	}
+
+	// suffix 'i'
+	if s.ch == 'i' {
+		tok = token.IMAG
+		s.next()
+	}
+
+	lit := string(s.src[offs:s.offset])
+	if tok == token.INT && invalid >= 0 {
+		s.errorf(invalid, "invalid digit %q in %s", lit[invalid-offs], litname(prefix))
+	}
+	// If any '_' separators were seen, validate their placement now that the
+	// whole literal text is available.
+	if digsep&2 != 0 {
+		if i := invalidSep(lit); i >= 0 {
+			s.error(offs+i, "'_' must separate successive digits")
+		}
+	}
+
+	return tok, lit
+}
+
+// litname returns a user-facing description for the kind of numeric literal
+// indicated by prefix: 'x' (hex), 'o' or '0' (octal), 'b' (binary), or
+// anything else (decimal). Used to build error messages in scanNumber.
+func litname(prefix rune) string {
+	switch prefix {
+	case 'x':
+		return "hexadecimal literal"
+	case 'o', '0':
+		return "octal literal"
+	case 'b':
+		return "binary literal"
+	}
+	return "decimal literal"
+}
+
+// invalidSep returns the index of the first invalid separator in x, or -1.
+// A '_' separator is valid only between two successive digits; a '0x'/'0o'/'0b'
+// prefix counts as a digit for this purpose.
+func invalidSep(x string) int {
+	x1 := ' ' // prefix char, we only care if it's 'x'
+	d := '.'  // digit, one of '_', '0' (a digit), or '.' (anything else)
+	i := 0
+
+	// a prefix counts as a digit
+	if len(x) >= 2 && x[0] == '0' {
+		x1 = lower(rune(x[1]))
+		if x1 == 'x' || x1 == 'o' || x1 == 'b' {
+			d = '0'
+			i = 2
+		}
+	}
+
+	// mantissa and exponent
+	for ; i < len(x); i++ {
+		p := d // previous digit
+		d = rune(x[i])
+		switch {
+		case d == '_':
+			// '_' must directly follow a digit.
+			if p != '0' {
+				return i
+			}
+		case isDecimal(d) || x1 == 'x' && isHex(d):
+			d = '0'
+		default:
+			// Non-digit (e.g. '.', 'e', 'p'): a preceding '_' was trailing.
+			if p == '_' {
+				return i - 1
+			}
+			d = '.'
+		}
+	}
+	// A literal ending in '_' has a trailing separator.
+	if d == '_' {
+		return len(x) - 1
+	}
+
+	return -1
+}
+
+// scanEscape parses an escape sequence where rune is the accepted
+// escaped quote. In case of a syntax error, it stops at the offending
+// character (without consuming it) and returns false. Otherwise
+// it returns true.
+func (s *Scanner) scanEscape(quote rune) bool {
+	offs := s.offset
+
+	// n: number of digits expected; base: their radix; max: largest
+	// permitted resulting value.
+	var n int
+	var base, max uint32
+	switch s.ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+		// Single-character escapes need no further validation.
+		s.next()
+		return true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		n, base, max = 3, 8, 255
+	case 'x':
+		s.next()
+		n, base, max = 2, 16, 255
+	case 'u':
+		s.next()
+		n, base, max = 4, 16, unicode.MaxRune
+	case 'U':
+		s.next()
+		n, base, max = 8, 16, unicode.MaxRune
+	default:
+		msg := "unknown escape sequence"
+		if s.ch < 0 {
+			msg = "escape sequence not terminated"
+		}
+		s.error(offs, msg)
+		return false
+	}
+
+	// Accumulate the n-digit value and check each digit against the base.
+	var x uint32
+	for n > 0 {
+		d := uint32(digitVal(s.ch))
+		if d >= base {
+			msg := fmt.Sprintf("illegal character %#U in escape sequence", s.ch)
+			if s.ch < 0 {
+				msg = "escape sequence not terminated"
+			}
+			s.error(s.offset, msg)
+			return false
+		}
+		x = x*base + d
+		s.next()
+		n--
+	}
+
+	// Reject values above max and surrogate halves (U+D800..U+DFFF).
+	if x > max || 0xD800 <= x && x < 0xE000 {
+		s.error(offs, "escape sequence is invalid Unicode code point")
+		return false
+	}
+
+	return true
+}
+
+// scanRune scans a rune literal after the opening '\'' has been consumed and
+// returns the full literal text including both quotes. It reports an error
+// for unterminated literals and for literals not containing exactly one rune.
+func (s *Scanner) scanRune() string {
+	// '\'' opening already consumed
+	offs := s.offset - 1
+
+	valid := true
+	n := 0 // number of runes between the quotes
+	for {
+		ch := s.ch
+		if ch == '\n' || ch < 0 {
+			// only report error if we don't have one already
+			if valid {
+				s.error(offs, "rune literal not terminated")
+				valid = false
+			}
+			break
+		}
+		s.next()
+		if ch == '\'' {
+			break
+		}
+		n++
+		if ch == '\\' {
+			if !s.scanEscape('\'') {
+				valid = false
+			}
+			// continue to read to closing quote
+		}
+	}
+
+	if valid && n != 1 {
+		s.error(offs, "illegal rune literal")
+	}
+
+	return string(s.src[offs:s.offset])
+}
+
+// scanString scans an interpreted string literal after the opening '"' has
+// been consumed and returns the full literal text including both quotes.
+// Newlines and EOF terminate the literal with an error.
+func (s *Scanner) scanString() string {
+	// '"' opening already consumed
+	offs := s.offset - 1
+
+	for {
+		ch := s.ch
+		if ch == '\n' || ch < 0 {
+			s.error(offs, "string literal not terminated")
+			break
+		}
+		s.next()
+		if ch == '"' {
+			break
+		}
+		if ch == '\\' {
+			s.scanEscape('"')
+		}
+	}
+
+	return string(s.src[offs:s.offset])
+}
+
+// stripCR returns a copy of b with carriage returns removed. If comment is
+// set, b is a /*-style comment and a \r that would merge '*' and '/' into a
+// premature "*/" is kept (see the inline note below).
+func stripCR(b []byte, comment bool) []byte {
+	c := make([]byte, len(b))
+	i := 0
+	for j, ch := range b {
+		// In a /*-style comment, don't strip \r from *\r/ (incl.
+		// sequences of \r from *\r\r...\r/) since the resulting
+		// */ would terminate the comment too early unless the \r
+		// is immediately following the opening /* in which case
+		// it's ok because /*/ is not closed yet (issue #11151).
+		if ch != '\r' || comment && i > len("/*") && c[i-1] == '*' && j+1 < len(b) && b[j+1] == '/' {
+			c[i] = ch
+			i++
+		}
+	}
+	return c[:i]
+}
+
+// scanRawString scans a raw string literal after the opening '`' has been
+// consumed and returns the literal text including both backquotes, with any
+// carriage returns stripped (raw string values never contain \r).
+func (s *Scanner) scanRawString() string {
+	// '`' opening already consumed
+	offs := s.offset - 1
+
+	hasCR := false
+	for {
+		ch := s.ch
+		if ch < 0 {
+			s.error(offs, "raw string literal not terminated")
+			break
+		}
+		s.next()
+		if ch == '`' {
+			break
+		}
+		if ch == '\r' {
+			hasCR = true
+		}
+	}
+
+	lit := s.src[offs:s.offset]
+	if hasCR {
+		lit = stripCR(lit, false)
+	}
+
+	return string(lit)
+}
+
+// skipWhitespace advances past spaces, tabs, and carriage returns. Newlines
+// are skipped only when no automatic semicolon is pending (s.insertSemi), so
+// that Scan can turn the newline into a SEMICOLON token.
+func (s *Scanner) skipWhitespace() {
+	for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi || s.ch == '\r' {
+		s.next()
+	}
+}
+
+// Helper functions for scanning multi-byte tokens such as >> += >>= .
+// Different routines recognize different length tok_i based on matches
+// of ch_i. If a token ends in '=', the result is tok1 or tok3
+// respectively. Otherwise, the result is tok0 if there was no other
+// matching character, or tok2 if the matching character was ch2.
+
+// switch2 distinguishes e.g. '*' (tok0) from '*=' (tok1).
+func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
+	if s.ch == '=' {
+		s.next()
+		return tok1
+	}
+	return tok0
+}
+
+// switch3 distinguishes e.g. '+' (tok0), '+=' (tok1), and '++' (tok2).
+func (s *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token {
+	if s.ch == '=' {
+		s.next()
+		return tok1
+	}
+	if s.ch == ch2 {
+		s.next()
+		return tok2
+	}
+	return tok0
+}
+
+// switch4 distinguishes e.g. '<' (tok0), '<=' (tok1), '<<' (tok2),
+// and '<<=' (tok3).
+func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token {
+	if s.ch == '=' {
+		s.next()
+		return tok1
+	}
+	if s.ch == ch2 {
+		s.next()
+		if s.ch == '=' {
+			s.next()
+			return tok3
+		}
+		return tok2
+	}
+	return tok0
+}
+
+// Scan scans the next token and returns the token position, the token,
+// and its literal string if applicable. The source end is indicated by
+// token.EOF.
+//
+// If the returned token is a literal (token.IDENT, token.INT, token.FLOAT,
+// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string
+// has the corresponding value.
+//
+// If the returned token is a keyword, the literal string is the keyword.
+//
+// If the returned token is token.SEMICOLON, the corresponding
+// literal string is ";" if the semicolon was present in the source,
+// and "\n" if the semicolon was inserted because of a newline or
+// at EOF.
+//
+// If the returned token is token.ILLEGAL, the literal string is the
+// offending character.
+//
+// In all other cases, Scan returns an empty literal string.
+//
+// For more tolerant parsing, Scan will return a valid token if
+// possible even if a syntax error was encountered. Thus, even
+// if the resulting token sequence contains no illegal tokens,
+// a client may not assume that no error occurred. Instead it
+// must check the scanner's ErrorCount or the number of calls
+// of the error handler, if there was one installed.
+//
+// Scan adds line information to the file added to the file
+// set with Init. Token positions are relative to that file
+// and thus relative to the file set.
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
+scanAgain:
+	s.skipWhitespace()
+
+	// current token start
+	pos = s.file.Pos(s.offset)
+
+	// determine token value
+	// insertSemi records whether a following newline should be turned into
+	// an automatic semicolon; it is committed to s.insertSemi at the end.
+	insertSemi := false
+	switch ch := s.ch; {
+	case isLetter(ch):
+		lit = s.scanIdentifier()
+		if len(lit) > 1 {
+			// keywords are longer than one letter - avoid lookup otherwise
+			tok = token.Lookup(lit)
+			switch tok {
+			case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
+				insertSemi = true
+			}
+		} else {
+			insertSemi = true
+			tok = token.IDENT
+		}
+	case isDecimal(ch) || ch == '.' && isDecimal(rune(s.peek())):
+		insertSemi = true
+		tok, lit = s.scanNumber()
+	default:
+		s.next() // always make progress
+		switch ch {
+		case -1:
+			// ch < 0 indicates end of source.
+			if s.insertSemi {
+				s.insertSemi = false // EOF consumed
+				return pos, token.SEMICOLON, "\n"
+			}
+			tok = token.EOF
+		case '\n':
+			// we only reach here if s.insertSemi was
+			// set in the first place and exited early
+			// from s.skipWhitespace()
+			s.insertSemi = false // newline consumed
+			return pos, token.SEMICOLON, "\n"
+		case '"':
+			insertSemi = true
+			tok = token.STRING
+			lit = s.scanString()
+		case '\'':
+			insertSemi = true
+			tok = token.CHAR
+			lit = s.scanRune()
+		case '`':
+			insertSemi = true
+			tok = token.STRING
+			lit = s.scanRawString()
+		case ':':
+			tok = s.switch2(token.COLON, token.DEFINE)
+		case '.':
+			// fractions starting with a '.' are handled by outer switch
+			tok = token.PERIOD
+			if s.ch == '.' && s.peek() == '.' {
+				s.next()
+				s.next() // consume last '.'
+				tok = token.ELLIPSIS
+			}
+		case ',':
+			tok = token.COMMA
+		case ';':
+			tok = token.SEMICOLON
+			lit = ";"
+		case '(':
+			tok = token.LPAREN
+		case ')':
+			insertSemi = true
+			tok = token.RPAREN
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			insertSemi = true
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			insertSemi = true
+			tok = token.RBRACE
+		case '+':
+			tok = s.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
+			if tok == token.INC {
+				insertSemi = true
+			}
+		case '-':
+			tok = s.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
+			if tok == token.DEC {
+				insertSemi = true
+			}
+		case '*':
+			tok = s.switch2(token.MUL, token.MUL_ASSIGN)
+		case '/':
+			if s.ch == '/' || s.ch == '*' {
+				// comment
+				if s.insertSemi && s.findLineEnd() {
+					// reset position to the beginning of the comment
+					s.ch = '/'
+					s.offset = s.file.Offset(pos)
+					s.rdOffset = s.offset + 1
+					s.insertSemi = false // newline consumed
+					return pos, token.SEMICOLON, "\n"
+				}
+				comment := s.scanComment()
+				if s.mode&ScanComments == 0 {
+					// skip comment
+					s.insertSemi = false // newline consumed
+					goto scanAgain
+				}
+				tok = token.COMMENT
+				lit = comment
+			} else {
+				tok = s.switch2(token.QUO, token.QUO_ASSIGN)
+			}
+		case '%':
+			tok = s.switch2(token.REM, token.REM_ASSIGN)
+		case '^':
+			tok = s.switch2(token.XOR, token.XOR_ASSIGN)
+		case '<':
+			if s.ch == '-' {
+				s.next()
+				tok = token.ARROW
+			} else {
+				tok = s.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
+			}
+		case '>':
+			tok = s.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
+		case '=':
+			tok = s.switch2(token.ASSIGN, token.EQL)
+		case '!':
+			tok = s.switch2(token.NOT, token.NEQ)
+		case '&':
+			if s.ch == '^' {
+				s.next()
+				tok = s.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
+			} else {
+				tok = s.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
+			}
+		case '|':
+			tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
+		case '~':
+			tok = token.TILDE
+		default:
+			// next reports unexpected BOMs - don't repeat
+			if ch != bom {
+				s.errorf(s.file.Offset(pos), "illegal character %#U", ch)
+			}
+			insertSemi = s.insertSemi // preserve insertSemi info
+			tok = token.ILLEGAL
+			lit = string(ch)
+		}
+	}
+	if s.mode&dontInsertSemis == 0 {
+		s.insertSemi = insertSemi
+	}
+
+	return
+}
diff --git a/internal/backport/go/scanner/scanner_test.go b/internal/backport/go/scanner/scanner_test.go
new file mode 100644
index 0000000..d56abcb
--- /dev/null
+++ b/internal/backport/go/scanner/scanner_test.go
@@ -0,0 +1,1127 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+	"golang.org/x/website/internal/backport/go/token"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+// fset is the file set shared by all tests in this file.
+var fset = token.NewFileSet()
+
+// Token classes used to categorize expected tokens in the tables below.
+const /* class */ (
+	special = iota
+	literal
+	operator
+	keyword
+)
+
+// tokenclass maps a token to one of the classes above
+// (literal, operator, keyword, or special).
+func tokenclass(tok token.Token) int {
+	switch {
+	case tok.IsLiteral():
+		return literal
+	case tok.IsOperator():
+		return operator
+	case tok.IsKeyword():
+		return keyword
+	}
+	return special
+}
+
+// elt describes one expected scan result: the token kind, its literal
+// text, and its class.
+type elt struct {
+	tok   token.Token
+	lit   string
+	class int
+}
+
+// tokens lists every token the scanner must recognize, grouped by class;
+// TestScan concatenates the literals (separated by whitespace) into the
+// test source.
+var tokens = []elt{
+	// Special tokens
+	{token.COMMENT, "/* a comment */", special},
+	{token.COMMENT, "// a comment \n", special},
+	{token.COMMENT, "/*\r*/", special},
+	{token.COMMENT, "/**\r/*/", special}, // issue 11151
+	{token.COMMENT, "/**\r\r/*/", special},
+	{token.COMMENT, "//\r\n", special},
+
+	// Identifiers and basic type literals
+	{token.IDENT, "foobar", literal},
+	{token.IDENT, "a۰۱۸", literal},
+	{token.IDENT, "foo६४", literal},
+	{token.IDENT, "bar9876", literal},
+	{token.IDENT, "ŝ", literal},    // was bug (issue 4000)
+	{token.IDENT, "ŝfoo", literal}, // was bug (issue 4000)
+	{token.INT, "0", literal},
+	{token.INT, "1", literal},
+	{token.INT, "123456789012345678890", literal},
+	{token.INT, "01234567", literal},
+	{token.INT, "0xcafebabe", literal},
+	{token.FLOAT, "0.", literal},
+	{token.FLOAT, ".0", literal},
+	{token.FLOAT, "3.14159265", literal},
+	{token.FLOAT, "1e0", literal},
+	{token.FLOAT, "1e+100", literal},
+	{token.FLOAT, "1e-100", literal},
+	{token.FLOAT, "2.71828e-1000", literal},
+	{token.IMAG, "0i", literal},
+	{token.IMAG, "1i", literal},
+	{token.IMAG, "012345678901234567889i", literal},
+	{token.IMAG, "123456789012345678890i", literal},
+	{token.IMAG, "0.i", literal},
+	{token.IMAG, ".0i", literal},
+	{token.IMAG, "3.14159265i", literal},
+	{token.IMAG, "1e0i", literal},
+	{token.IMAG, "1e+100i", literal},
+	{token.IMAG, "1e-100i", literal},
+	{token.IMAG, "2.71828e-1000i", literal},
+	{token.CHAR, "'a'", literal},
+	{token.CHAR, "'\\000'", literal},
+	{token.CHAR, "'\\xFF'", literal},
+	{token.CHAR, "'\\uff16'", literal},
+	{token.CHAR, "'\\U0000ff16'", literal},
+	{token.STRING, "`foobar`", literal},
+	{token.STRING, "`" + `foo
+	                        bar` +
+		"`",
+		literal,
+	},
+	{token.STRING, "`\r`", literal},
+	{token.STRING, "`foo\r\nbar`", literal},
+
+	// Operators and delimiters
+	{token.ADD, "+", operator},
+	{token.SUB, "-", operator},
+	{token.MUL, "*", operator},
+	{token.QUO, "/", operator},
+	{token.REM, "%", operator},
+
+	{token.AND, "&", operator},
+	{token.OR, "|", operator},
+	{token.XOR, "^", operator},
+	{token.SHL, "<<", operator},
+	{token.SHR, ">>", operator},
+	{token.AND_NOT, "&^", operator},
+
+	{token.ADD_ASSIGN, "+=", operator},
+	{token.SUB_ASSIGN, "-=", operator},
+	{token.MUL_ASSIGN, "*=", operator},
+	{token.QUO_ASSIGN, "/=", operator},
+	{token.REM_ASSIGN, "%=", operator},
+
+	{token.AND_ASSIGN, "&=", operator},
+	{token.OR_ASSIGN, "|=", operator},
+	{token.XOR_ASSIGN, "^=", operator},
+	{token.SHL_ASSIGN, "<<=", operator},
+	{token.SHR_ASSIGN, ">>=", operator},
+	{token.AND_NOT_ASSIGN, "&^=", operator},
+
+	{token.LAND, "&&", operator},
+	{token.LOR, "||", operator},
+	{token.ARROW, "<-", operator},
+	{token.INC, "++", operator},
+	{token.DEC, "--", operator},
+
+	{token.EQL, "==", operator},
+	{token.LSS, "<", operator},
+	{token.GTR, ">", operator},
+	{token.ASSIGN, "=", operator},
+	{token.NOT, "!", operator},
+
+	{token.NEQ, "!=", operator},
+	{token.LEQ, "<=", operator},
+	{token.GEQ, ">=", operator},
+	{token.DEFINE, ":=", operator},
+	{token.ELLIPSIS, "...", operator},
+
+	{token.LPAREN, "(", operator},
+	{token.LBRACK, "[", operator},
+	{token.LBRACE, "{", operator},
+	{token.COMMA, ",", operator},
+	{token.PERIOD, ".", operator},
+
+	{token.RPAREN, ")", operator},
+	{token.RBRACK, "]", operator},
+	{token.RBRACE, "}", operator},
+	{token.SEMICOLON, ";", operator},
+	{token.COLON, ":", operator},
+	{token.TILDE, "~", operator},
+
+	// Keywords
+	{token.BREAK, "break", keyword},
+	{token.CASE, "case", keyword},
+	{token.CHAN, "chan", keyword},
+	{token.CONST, "const", keyword},
+	{token.CONTINUE, "continue", keyword},
+
+	{token.DEFAULT, "default", keyword},
+	{token.DEFER, "defer", keyword},
+	{token.ELSE, "else", keyword},
+	{token.FALLTHROUGH, "fallthrough", keyword},
+	{token.FOR, "for", keyword},
+
+	{token.FUNC, "func", keyword},
+	{token.GO, "go", keyword},
+	{token.GOTO, "goto", keyword},
+	{token.IF, "if", keyword},
+	{token.IMPORT, "import", keyword},
+
+	{token.INTERFACE, "interface", keyword},
+	{token.MAP, "map", keyword},
+	{token.PACKAGE, "package", keyword},
+	{token.RANGE, "range", keyword},
+	{token.RETURN, "return", keyword},
+
+	{token.SELECT, "select", keyword},
+	{token.STRUCT, "struct", keyword},
+	{token.SWITCH, "switch", keyword},
+	{token.TYPE, "type", keyword},
+	{token.VAR, "var", keyword},
+}
+
+const whitespace = "  \t  \n\n\n" // to separate tokens
+
+// source is the test input: every literal from tokens, each followed
+// by the whitespace separator.
+var source = func() []byte {
+	var src []byte
+	for _, t := range tokens {
+		src = append(src, t.lit...)
+		src = append(src, whitespace...)
+	}
+	return src
+}()
+
+// newlineCount returns the number of '\n' bytes in s.
+func newlineCount(s string) int {
+	n := 0
+	for i := 0; i < len(s); i++ {
+		if s[i] == '\n' {
+			n++
+		}
+	}
+	return n
+}
+
+// checkPos reports a test error if the position of p (resolved through fset)
+// differs from expected in filename, offset, line, or column.
+func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
+	pos := fset.Position(p)
+	// Check cleaned filenames so that we don't have to worry about
+	// different os.PathSeparator values.
+	if pos.Filename != expected.Filename && filepath.Clean(pos.Filename) != filepath.Clean(expected.Filename) {
+		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
+	}
+	if pos.Offset != expected.Offset {
+		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
+	}
+	if pos.Line != expected.Line {
+		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
+	}
+	if pos.Column != expected.Column {
+		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
+	}
+}
+
+// Verify that calling Scan() provides the correct results.
+func TestScan(t *testing.T) {
+	whitespace_linecount := newlineCount(whitespace)
+
+	// error handler
+	eh := func(_ token.Position, msg string) {
+		t.Errorf("error handler called (msg = %s)", msg)
+	}
+
+	// verify scan
+	var s Scanner
+	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)
+
+	// set up expected position
+	epos := token.Position{
+		Filename: "",
+		Offset:   0,
+		Line:     1,
+		Column:   1,
+	}
+
+	index := 0
+	for {
+		pos, tok, lit := s.Scan()
+
+		// check position
+		if tok == token.EOF {
+			// correction for EOF
+			epos.Line = newlineCount(string(source))
+			epos.Column = 2
+		}
+		checkPos(t, lit, pos, epos)
+
+		// check token
+		e := elt{token.EOF, "", special}
+		if index < len(tokens) {
+			e = tokens[index]
+			index++
+		}
+		if tok != e.tok {
+			t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
+		}
+
+		// check token class
+		if tokenclass(tok) != e.class {
+			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
+		}
+
+		// check literal
+		elit := ""
+		switch e.tok {
+		case token.COMMENT:
+			// no CRs in comments
+			elit = string(stripCR([]byte(e.lit), e.lit[1] == '*'))
+			//-style comment literal doesn't contain newline
+			if elit[1] == '/' {
+				elit = elit[0 : len(elit)-1]
+			}
+		case token.IDENT:
+			elit = e.lit
+		case token.SEMICOLON:
+			elit = ";"
+		default:
+			if e.tok.IsLiteral() {
+				// no CRs in raw string literals
+				elit = e.lit
+				if elit[0] == '`' {
+					elit = string(stripCR([]byte(elit), false))
+				}
+			} else if e.tok.IsKeyword() {
+				elit = e.lit
+			}
+		}
+		if lit != elit {
+			t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
+		}
+
+		if tok == token.EOF {
+			break
+		}
+
+		// update position
+		epos.Offset += len(e.lit) + len(whitespace)
+		epos.Line += newlineCount(e.lit) + whitespace_linecount
+
+	}
+
+	if s.ErrorCount != 0 {
+		t.Errorf("found %d errors", s.ErrorCount)
+	}
+}
+
+// TestStripCR verifies stripCR's carriage-return removal, including the
+// special case that keeps \r before a closing */ in /*-style comments.
+func TestStripCR(t *testing.T) {
+	for _, test := range []struct{ have, want string }{
+		{"//\n", "//\n"},
+		{"//\r\n", "//\n"},
+		{"//\r\r\r\n", "//\n"},
+		{"//\r*\r/\r\n", "//*/\n"},
+		{"/**/", "/**/"},
+		{"/*\r/*/", "/*/*/"},
+		{"/*\r*/", "/**/"},
+		{"/**\r/*/", "/**\r/*/"},
+		{"/*\r/\r*\r/*/", "/*/*\r/*/"},
+		{"/*\r\r\r\r*/", "/**/"},
+	} {
+		got := string(stripCR([]byte(test.have), len(test.have) >= 2 && test.have[1] == '*'))
+		if got != test.want {
+			t.Errorf("stripCR(%q) = %q; want %q", test.have, got, test.want)
+		}
+	}
+}
+
+// checkSemi scans line and verifies semicolon insertion: each ILLEGAL marker
+// token ('#' or '$' in the lines table) must be followed by a SEMICOLON whose
+// literal is ";" for '#' (present in source) and "\n" for '$' (inserted).
+// No other semicolons may appear.
+func checkSemi(t *testing.T, line string, mode Mode) {
+	var S Scanner
+	file := fset.AddFile("TestSemis", fset.Base(), len(line))
+	S.Init(file, []byte(line), nil, mode)
+	pos, tok, lit := S.Scan()
+	for tok != token.EOF {
+		if tok == token.ILLEGAL {
+			// the illegal token literal indicates what
+			// kind of semicolon literal to expect
+			semiLit := "\n"
+			if lit[0] == '#' {
+				semiLit = ";"
+			}
+			// next token must be a semicolon
+			semiPos := file.Position(pos)
+			semiPos.Offset++
+			semiPos.Column++
+			pos, tok, lit = S.Scan()
+			if tok == token.SEMICOLON {
+				if lit != semiLit {
+					t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
+				}
+				checkPos(t, line, pos, semiPos)
+			} else {
+				t.Errorf("bad token for %q: got %s, expected ;", line, tok)
+			}
+		} else if tok == token.SEMICOLON {
+			t.Errorf("bad token for %q: got ;, expected no ;", line)
+		}
+		pos, tok, lit = S.Scan()
+	}
+}
+
+// lines lists semicolon-insertion test cases for checkSemi.
+var lines = []string{
+	// # indicates a semicolon present in the source
+	// $ indicates an automatically inserted semicolon
+	"",
+	"\ufeff#;", // first BOM is ignored
+	"#;",
+	"foo$\n",
+	"123$\n",
+	"1.2$\n",
+	"'x'$\n",
+	`"x"` + "$\n",
+	"`x`$\n",
+
+	"+\n",
+	"-\n",
+	"*\n",
+	"/\n",
+	"%\n",
+
+	"&\n",
+	"|\n",
+	"^\n",
+	"<<\n",
+	">>\n",
+	"&^\n",
+
+	"+=\n",
+	"-=\n",
+	"*=\n",
+	"/=\n",
+	"%=\n",
+
+	"&=\n",
+	"|=\n",
+	"^=\n",
+	"<<=\n",
+	">>=\n",
+	"&^=\n",
+
+	"&&\n",
+	"||\n",
+	"<-\n",
+	"++$\n",
+	"--$\n",
+
+	"==\n",
+	"<\n",
+	">\n",
+	"=\n",
+	"!\n",
+
+	"!=\n",
+	"<=\n",
+	">=\n",
+	":=\n",
+	"...\n",
+
+	"(\n",
+	"[\n",
+	"{\n",
+	",\n",
+	".\n",
+
+	")$\n",
+	"]$\n",
+	"}$\n",
+	"#;\n",
+	":\n",
+
+	"break$\n",
+	"case\n",
+	"chan\n",
+	"const\n",
+	"continue$\n",
+
+	"default\n",
+	"defer\n",
+	"else\n",
+	"fallthrough$\n",
+	"for\n",
+
+	"func\n",
+	"go\n",
+	"goto\n",
+	"if\n",
+	"import\n",
+
+	"interface\n",
+	"map\n",
+	"package\n",
+	"range\n",
+	"return$\n",
+
+	"select\n",
+	"struct\n",
+	"switch\n",
+	"type\n",
+	"var\n",
+
+	"foo$//comment\n",
+	"foo$//comment",
+	"foo$/*comment*/\n",
+	"foo$/*\n*/",
+	"foo$/*comment*/    \n",
+	"foo$/*\n*/    ",
+
+	"foo    $// comment\n",
+	"foo    $// comment",
+	"foo    $/*comment*/\n",
+	"foo    $/*\n*/",
+	"foo    $/*  */ /* \n */ bar$/**/\n",
+	"foo    $/*0*/ /*1*/ /*2*/\n",
+
+	"foo    $/*comment*/    \n",
+	"foo    $/*0*/ /*1*/ /*2*/    \n",
+	"foo	$/**/ /*-------------*/       /*----\n*/bar       $/*  \n*/baa$\n",
+	"foo    $/* an EOF terminates a line */",
+	"foo    $/* an EOF terminates a line */ /*",
+	"foo    $/* an EOF terminates a line */ //",
+
+	"package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n",
+	"package main$",
+}
+
+// TestSemis runs checkSemi over the lines table, both with and without
+// ScanComments, and also with trailing newlines stripped.
+func TestSemis(t *testing.T) {
+	for _, line := range lines {
+		checkSemi(t, line, 0)
+		checkSemi(t, line, ScanComments)
+
+		// if the input ended in newlines, the input must tokenize the
+		// same with or without those newlines
+		for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
+			checkSemi(t, line[0:i], 0)
+			checkSemi(t, line[0:i], ScanComments)
+		}
+	}
+}
+
+// segment is one source fragment plus the filename/line/column expected for
+// the token it contains (used by the line-directive tests).
+type segment struct {
+	srcline      string // a line of source text
+	filename     string // filename for current token; error message for invalid line directives
+	line, column int    // line and column for current token; error position for invalid line directives
+}
+
+// segments covers both old-style and new-style //line and /*line*/ directives.
+var segments = []segment{
+	// exactly one token per line since the test consumes one token per segment
+	{"  line1", "TestLineDirectives", 1, 3},
+	{"\nline2", "TestLineDirectives", 2, 1},
+	{"\nline3  //line File1.go:100", "TestLineDirectives", 3, 1}, // bad line comment, ignored
+	{"\nline4", "TestLineDirectives", 4, 1},
+	{"\n//line File1.go:100\n  line100", "File1.go", 100, 0},
+	{"\n//line  \t :42\n  line1", " \t ", 42, 0},
+	{"\n//line File2.go:200\n  line200", "File2.go", 200, 0},
+	{"\n//line foo\t:42\n  line42", "foo\t", 42, 0},
+	{"\n //line foo:42\n  line43", "foo\t", 44, 0}, // bad line comment, ignored (use existing, prior filename)
+	{"\n//line foo 42\n  line44", "foo\t", 46, 0},  // bad line comment, ignored (use existing, prior filename)
+	{"\n//line /bar:42\n  line45", "/bar", 42, 0},
+	{"\n//line ./foo:42\n  line46", "foo", 42, 0},
+	{"\n//line a/b/c/File1.go:100\n  line100", "a/b/c/File1.go", 100, 0},
+	{"\n//line c:\\bar:42\n  line200", "c:\\bar", 42, 0},
+	{"\n//line c:\\dir\\File1.go:100\n  line201", "c:\\dir\\File1.go", 100, 0},
+
+	// tests for new line directive syntax
+	{"\n//line :100\na1", "", 100, 0}, // missing filename means empty filename
+	{"\n//line bar:100\nb1", "bar", 100, 0},
+	{"\n//line :100:10\nc1", "bar", 100, 10}, // missing filename means current filename
+	{"\n//line foo:100:10\nd1", "foo", 100, 10},
+
+	{"\n/*line :100*/a2", "", 100, 0}, // missing filename means empty filename
+	{"\n/*line bar:100*/b2", "bar", 100, 0},
+	{"\n/*line :100:10*/c2", "bar", 100, 10}, // missing filename means current filename
+	{"\n/*line foo:100:10*/d2", "foo", 100, 10},
+	{"\n/*line foo:100:10*/    e2", "foo", 100, 14}, // line-directive relative column
+	{"\n/*line foo:100:10*/\n\nf2", "foo", 102, 1},  // absolute column since on new line
+}
+
+// dirsegments exercises relative //line filenames when the scanned file
+// itself lives in a directory.
+var dirsegments = []segment{
+	// exactly one token per line since the test consumes one token per segment
+	{"  line1", "TestLineDir/TestLineDirectives", 1, 3},
+	{"\n//line File1.go:100\n  line100", "TestLineDir/File1.go", 100, 0},
+}
+
+// dirUnixSegments and dirWindowsSegments exercise absolute paths in //line
+// directives; only the variant matching the host OS is run.
+var dirUnixSegments = []segment{
+	{"\n//line /bar:42\n  line42", "/bar", 42, 0},
+}
+
+var dirWindowsSegments = []segment{
+	{"\n//line c:\\bar:42\n  line42", "c:\\bar", 42, 0},
+}
+
+// Verify that line directives are interpreted correctly.
+func TestLineDirectives(t *testing.T) {
+	testSegments(t, segments, "TestLineDirectives")
+	testSegments(t, dirsegments, "TestLineDir/TestLineDirectives")
+	// Absolute-path directives are OS-specific: pick the matching variant.
+	if runtime.GOOS == "windows" {
+		testSegments(t, dirWindowsSegments, "TestLineDir/TestLineDirectives")
+	} else {
+		testSegments(t, dirUnixSegments, "TestLineDir/TestLineDirectives")
+	}
+}
+
+// testSegments concatenates the segments into one source, scans it (one token
+// per segment), and checks each token's reported filename, line, and column.
+func testSegments(t *testing.T, segments []segment, filename string) {
+	var src string
+	for _, e := range segments {
+		src += e.srcline
+	}
+
+	// verify scan
+	var S Scanner
+	file := fset.AddFile(filename, fset.Base(), len(src))
+	S.Init(file, []byte(src), func(pos token.Position, msg string) { t.Error(Error{pos, msg}) }, dontInsertSemis)
+	for _, s := range segments {
+		p, _, lit := S.Scan()
+		pos := file.Position(p)
+		checkPos(t, lit, p, token.Position{
+			Filename: s.filename,
+			Offset:   pos.Offset,
+			Line:     s.line,
+			Column:   s.column,
+		})
+	}
+
+	if S.ErrorCount != 0 {
+		t.Errorf("got %d errors", S.ErrorCount)
+	}
+}
+
+// The filename is used for the error message in these test cases.
+// The first line directive is valid and used to control the expected error line.
+var invalidSegments = []segment{
+	{"\n//line :1:1\n//line foo:42 extra text\ndummy", "invalid line number: 42 extra text", 1, 12},
+	{"\n//line :2:1\n//line foobar:\ndummy", "invalid line number: ", 2, 15},
+	{"\n//line :5:1\n//line :0\ndummy", "invalid line number: 0", 5, 9},
+	{"\n//line :10:1\n//line :1:0\ndummy", "invalid column number: 0", 10, 11},
+	{"\n//line :1:1\n//line :foo:0\ndummy", "invalid line number: 0", 1, 13}, // foo is considered part of the filename
+}
+
+// Verify that invalid line directives get the correct error message.
+func TestInvalidLineDirectives(t *testing.T) {
+	// make source
+	var src string
+	for _, e := range invalidSegments {
+		src += e.srcline
+	}
+
+	// verify scan
+	var S Scanner
+	var s segment // current segment
+	file := fset.AddFile(filepath.Join("dir", "TestInvalidLineDirectives"), fset.Base(), len(src))
+	S.Init(file, []byte(src), func(pos token.Position, msg string) {
+		if msg != s.filename {
+			t.Errorf("got error %q; want %q", msg, s.filename)
+		}
+		if pos.Line != s.line || pos.Column != s.column {
+			t.Errorf("got position %d:%d; want %d:%d", pos.Line, pos.Column, s.line, s.column)
+		}
+	}, dontInsertSemis)
+	for _, s = range invalidSegments {
+		S.Scan()
+	}
+
+	if S.ErrorCount != len(invalidSegments) {
+		t.Errorf("got %d errors; want %d", S.ErrorCount, len(invalidSegments))
+	}
+}
+
+// Verify that initializing the same scanner more than once works correctly.
+func TestInit(t *testing.T) {
+	var s Scanner
+
+	// 1st init
+	src1 := "if true { }"
+	f1 := fset.AddFile("src1", fset.Base(), len(src1))
+	s.Init(f1, []byte(src1), nil, dontInsertSemis)
+	if f1.Size() != len(src1) {
+		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
+	}
+	s.Scan()              // if
+	s.Scan()              // true
+	_, tok, _ := s.Scan() // {
+	if tok != token.LBRACE {
+		t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
+	}
+
+	// 2nd init: reusing s must fully reset its state
+	src2 := "go true { ]"
+	f2 := fset.AddFile("src2", fset.Base(), len(src2))
+	s.Init(f2, []byte(src2), nil, dontInsertSemis)
+	if f2.Size() != len(src2) {
+		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
+	}
+	_, tok, _ = s.Scan() // go
+	if tok != token.GO {
+		t.Errorf("bad token: got %s, expected %s", tok, token.GO)
+	}
+
+	if s.ErrorCount != 0 {
+		t.Errorf("found %d errors", s.ErrorCount)
+	}
+}
+
+// TestStdErrorHander verifies ErrorList collection, sorting, and duplicate
+// removal for errors produced while scanning. (The name's "Hander" spelling
+// matches the upstream go/scanner test this file was backported from.)
+func TestStdErrorHander(t *testing.T) {
+	const src = "@\n" + // illegal character, cause an error
+		"@ @\n" + // two errors on the same line
+		"//line File2:20\n" +
+		"@\n" + // different file, but same line
+		"//line File2:1\n" +
+		"@ @\n" + // same file, decreasing line number
+		"//line File1:1\n" +
+		"@ @ @" // original file, line 1 again
+
+	var list ErrorList
+	eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
+
+	var s Scanner
+	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, dontInsertSemis)
+	for {
+		if _, tok, _ := s.Scan(); tok == token.EOF {
+			break
+		}
+	}
+
+	if len(list) != s.ErrorCount {
+		t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
+	}
+
+	if len(list) != 9 {
+		t.Errorf("found %d raw errors, expected 9", len(list))
+		PrintError(os.Stderr, list)
+	}
+
+	list.Sort()
+	if len(list) != 9 {
+		t.Errorf("found %d sorted errors, expected 9", len(list))
+		PrintError(os.Stderr, list)
+	}
+
+	list.RemoveMultiples()
+	if len(list) != 4 {
+		t.Errorf("found %d one-per-line errors, expected 4", len(list))
+		PrintError(os.Stderr, list)
+	}
+}
+
+// errorCollector records the error-handler calls made during a scan.
+type errorCollector struct {
+	cnt int            // number of errors encountered
+	msg string         // last error message encountered
+	pos token.Position // last error position encountered
+}
+
+// checkError scans src and verifies that the first token, its literal, and
+// the (single) reported error message and offset match expectations; err == ""
+// means no error is expected.
+func checkError(t *testing.T, src string, tok token.Token, pos int, lit, err string) {
+	var s Scanner
+	var h errorCollector
+	eh := func(pos token.Position, msg string) {
+		h.cnt++
+		h.msg = msg
+		h.pos = pos
+	}
+	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments|dontInsertSemis)
+	_, tok0, lit0 := s.Scan()
+	if tok0 != tok {
+		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
+	}
+	if tok0 != token.ILLEGAL && lit0 != lit {
+		t.Errorf("%q: got literal %q, expected %q", src, lit0, lit)
+	}
+	cnt := 0
+	if err != "" {
+		cnt = 1
+	}
+	if h.cnt != cnt {
+		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
+	}
+	if h.msg != err {
+		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
+	}
+	if h.pos.Offset != pos {
+		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
+	}
+}
+
+// errors lists scan inputs together with the expected first token (tok),
+// the byte offset at which an error is reported (pos), the expected literal
+// (lit), and the expected error message (err; "" means no error).
+var errors = []struct {
+	src string
+	tok token.Token
+	pos int
+	lit string
+	err string
+}{
+	{"\a", token.ILLEGAL, 0, "", "illegal character U+0007"},
+	{`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"},
+	{`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"},
+	{"..", token.PERIOD, 0, "", ""}, // two periods, not invalid token (issue #28112)
+	{`' '`, token.CHAR, 0, `' '`, ""},
+	{`''`, token.CHAR, 0, `''`, "illegal rune literal"},
+	{`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"},
+	{`'123'`, token.CHAR, 0, `'123'`, "illegal rune literal"},
+	{`'\0'`, token.CHAR, 3, `'\0'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\07'`, token.CHAR, 4, `'\07'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\8'`, token.CHAR, 2, `'\8'`, "unknown escape sequence"},
+	{`'\08'`, token.CHAR, 3, `'\08'`, "illegal character U+0038 '8' in escape sequence"},
+	{`'\x'`, token.CHAR, 3, `'\x'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\x0'`, token.CHAR, 4, `'\x0'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\x0g'`, token.CHAR, 4, `'\x0g'`, "illegal character U+0067 'g' in escape sequence"},
+	{`'\u'`, token.CHAR, 3, `'\u'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\u0'`, token.CHAR, 4, `'\u0'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\u00'`, token.CHAR, 5, `'\u00'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\u000'`, token.CHAR, 6, `'\u000'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\u000`, token.CHAR, 6, `'\u000`, "escape sequence not terminated"},
+	{`'\u0000'`, token.CHAR, 0, `'\u0000'`, ""},
+	{`'\U'`, token.CHAR, 3, `'\U'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U0'`, token.CHAR, 4, `'\U0'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U00'`, token.CHAR, 5, `'\U00'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U000'`, token.CHAR, 6, `'\U000'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U0000'`, token.CHAR, 7, `'\U0000'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U00000'`, token.CHAR, 8, `'\U00000'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U000000'`, token.CHAR, 9, `'\U000000'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U0000000'`, token.CHAR, 10, `'\U0000000'`, "illegal character U+0027 ''' in escape sequence"},
+	{`'\U0000000`, token.CHAR, 10, `'\U0000000`, "escape sequence not terminated"},
+	{`'\U00000000'`, token.CHAR, 0, `'\U00000000'`, ""},
+	{`'\Uffffffff'`, token.CHAR, 2, `'\Uffffffff'`, "escape sequence is invalid Unicode code point"},
+	{`'`, token.CHAR, 0, `'`, "rune literal not terminated"},
+	{`'\`, token.CHAR, 2, `'\`, "escape sequence not terminated"},
+	{"'\n", token.CHAR, 0, "'", "rune literal not terminated"},
+	{"'\n   ", token.CHAR, 0, "'", "rune literal not terminated"},
+	{`""`, token.STRING, 0, `""`, ""},
+	{`"abc`, token.STRING, 0, `"abc`, "string literal not terminated"},
+	{"\"abc\n", token.STRING, 0, `"abc`, "string literal not terminated"},
+	{"\"abc\n   ", token.STRING, 0, `"abc`, "string literal not terminated"},
+	{"``", token.STRING, 0, "``", ""},
+	{"`", token.STRING, 0, "`", "raw string literal not terminated"},
+	{"/**/", token.COMMENT, 0, "/**/", ""},
+	{"/*", token.COMMENT, 0, "/*", "comment not terminated"},
+	{"077", token.INT, 0, "077", ""},
+	{"078.", token.FLOAT, 0, "078.", ""},
+	{"07801234567.", token.FLOAT, 0, "07801234567.", ""},
+	{"078e0", token.FLOAT, 0, "078e0", ""},
+	{"0E", token.FLOAT, 2, "0E", "exponent has no digits"}, // issue 17621
+	{"078", token.INT, 2, "078", "invalid digit '8' in octal literal"},
+	{"07090000008", token.INT, 3, "07090000008", "invalid digit '9' in octal literal"},
+	{"0x", token.INT, 2, "0x", "hexadecimal literal has no digits"},
+	{"\"abc\x00def\"", token.STRING, 4, "\"abc\x00def\"", "illegal character NUL"},
+	{"\"abc\x80def\"", token.STRING, 4, "\"abc\x80def\"", "illegal UTF-8 encoding"},
+	{"\ufeff\ufeff", token.ILLEGAL, 3, "\ufeff\ufeff", "illegal byte order mark"},                        // only first BOM is ignored
+	{"//\ufeff", token.COMMENT, 2, "//\ufeff", "illegal byte order mark"},                                // only first BOM is ignored
+	{"'\ufeff" + `'`, token.CHAR, 1, "'\ufeff" + `'`, "illegal byte order mark"},                         // only first BOM is ignored
+	{`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored
+	{"abc\x00def", token.IDENT, 3, "abc", "illegal character NUL"},
+	{"abc\x00", token.IDENT, 3, "abc", "illegal character NUL"},
+}
+
+// TestScanErrors runs checkError over every entry of the errors table above.
+func TestScanErrors(t *testing.T) {
+	for _, e := range errors {
+		checkError(t, e.src, e.tok, e.pos, e.lit, e.err)
+	}
+}
+
+// Verify that no comments show up as literal values when skipping comments.
+func TestIssue10213(t *testing.T) {
+	const src = `
+		var (
+			A = 1 // foo
+		)
+
+		var (
+			B = 2
+			// foo
+		)
+
+		var C = 3 // foo
+
+		var D = 4
+		// foo
+
+		func anycode() {
+		// foo
+		}
+	`
+	var s Scanner
+	// mode 0: comments are skipped, so none of the "// foo" comments above
+	// may surface as a token literal.
+	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
+	for {
+		pos, tok, lit := s.Scan()
+		class := tokenclass(tok)
+		if lit != "" && class != keyword && class != literal && tok != token.SEMICOLON {
+			t.Errorf("%s: tok = %s, lit = %q", fset.Position(pos), tok, lit)
+		}
+		if tok <= token.EOF {
+			break
+		}
+	}
+}
+
+// TestIssue28112 checks that stand-alone ".." scans as two PERIOD tokens
+// rather than an invalid token, including immediately before EOF.
+func TestIssue28112(t *testing.T) {
+	const src = "... .. 0.. .." // make sure to have stand-alone ".." immediately before EOF to test EOF behavior
+	tokens := []token.Token{token.ELLIPSIS, token.PERIOD, token.PERIOD, token.FLOAT, token.PERIOD, token.PERIOD, token.PERIOD, token.EOF}
+	var s Scanner
+	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0)
+	for _, want := range tokens {
+		pos, got, lit := s.Scan()
+		if got != want {
+			t.Errorf("%s: got %s, want %s", fset.Position(pos), got, want)
+		}
+		// literals expect to have a (non-empty) literal string and we don't care about other tokens for this test
+		if tokenclass(got) == literal && lit == "" {
+			t.Errorf("%s: for %s got empty literal string", fset.Position(pos), got)
+		}
+	}
+}
+
+// BenchmarkScan measures scanning the package-level source sample,
+// including comments (ScanComments). Setup is kept outside the timed region.
+func BenchmarkScan(b *testing.B) {
+	b.StopTimer()
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(source))
+	var s Scanner
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		s.Init(file, source, nil, ScanComments)
+		for {
+			_, tok, _ := s.Scan()
+			if tok == token.EOF {
+				break
+			}
+		}
+	}
+}
+
+func BenchmarkScanFiles(b *testing.B) {
+	// Scan a few arbitrary large files, and one small one, to provide some
+	// variety in benchmarks.
+	for _, p := range []string{
+		"golang.org/x/website/internal/backport/go/types/expr.go",
+		"golang.org/x/website/internal/backport/go/parser/parser.go",
+		"net/http/server.go",
+		"golang.org/x/website/internal/backport/go/scanner/errors.go",
+	} {
+		b.Run(p, func(b *testing.B) {
+			b.StopTimer()
+			// Paths are resolved relative to this package's directory.
+			filename := filepath.Join("..", "..", filepath.FromSlash(p))
+			src, err := os.ReadFile(filename)
+			if err != nil {
+				b.Fatal(err)
+			}
+			fset := token.NewFileSet()
+			file := fset.AddFile(filename, fset.Base(), len(src))
+			b.SetBytes(int64(len(src)))
+			var s Scanner
+			b.StartTimer()
+			for i := 0; i < b.N; i++ {
+				s.Init(file, src, nil, ScanComments)
+				for {
+					_, tok, _ := s.Scan()
+					if tok == token.EOF {
+						break
+					}
+				}
+			}
+		})
+	}
+}
+
+// TestNumbers exercises numeric literal scanning. Each case lists the
+// expected token kind of the first literal, the source text, the expected
+// sequence of scanned literals (space-separated; a source may split into
+// several tokens), and the expected error message for the first token
+// ("" means no error).
+func TestNumbers(t *testing.T) {
+	for _, test := range []struct {
+		tok              token.Token
+		src, tokens, err string
+	}{
+		// binaries
+		{token.INT, "0b0", "0b0", ""},
+		{token.INT, "0b1010", "0b1010", ""},
+		{token.INT, "0B1110", "0B1110", ""},
+
+		{token.INT, "0b", "0b", "binary literal has no digits"},
+		{token.INT, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
+		{token.INT, "0b01a0", "0b01 a0", ""}, // only accept 0-9
+
+		{token.FLOAT, "0b.", "0b.", "invalid radix point in binary literal"},
+		{token.FLOAT, "0b.1", "0b.1", "invalid radix point in binary literal"},
+		{token.FLOAT, "0b1.0", "0b1.0", "invalid radix point in binary literal"},
+		{token.FLOAT, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
+		{token.FLOAT, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},
+
+		{token.IMAG, "0b10i", "0b10i", ""},
+		{token.IMAG, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"},
+
+		// octals
+		{token.INT, "0o0", "0o0", ""},
+		{token.INT, "0o1234", "0o1234", ""},
+		{token.INT, "0O1234", "0O1234", ""},
+
+		{token.INT, "0o", "0o", "octal literal has no digits"},
+		{token.INT, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
+		{token.INT, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
+		{token.INT, "0o12a3", "0o12 a3", ""}, // only accept 0-9
+
+		{token.FLOAT, "0o.", "0o.", "invalid radix point in octal literal"},
+		{token.FLOAT, "0o.2", "0o.2", "invalid radix point in octal literal"},
+		{token.FLOAT, "0o1.2", "0o1.2", "invalid radix point in octal literal"},
+		{token.FLOAT, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
+		{token.FLOAT, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},
+
+		{token.IMAG, "0o10i", "0o10i", ""},
+		{token.IMAG, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"},
+
+		// 0-octals
+		{token.INT, "0", "0", ""},
+		{token.INT, "0123", "0123", ""},
+
+		{token.INT, "08123", "08123", "invalid digit '8' in octal literal"},
+		{token.INT, "01293", "01293", "invalid digit '9' in octal literal"},
+		{token.INT, "0F.", "0 F .", ""}, // only accept 0-9
+		{token.INT, "0123F.", "0123 F .", ""},
+		{token.INT, "0123456x", "0123456 x", ""},
+
+		// decimals
+		{token.INT, "1", "1", ""},
+		{token.INT, "1234", "1234", ""},
+
+		{token.INT, "1f", "1 f", ""}, // only accept 0-9
+
+		{token.IMAG, "0i", "0i", ""},
+		{token.IMAG, "0678i", "0678i", ""},
+
+		// decimal floats
+		{token.FLOAT, "0.", "0.", ""},
+		{token.FLOAT, "123.", "123.", ""},
+		{token.FLOAT, "0123.", "0123.", ""},
+
+		{token.FLOAT, ".0", ".0", ""},
+		{token.FLOAT, ".123", ".123", ""},
+		{token.FLOAT, ".0123", ".0123", ""},
+
+		{token.FLOAT, "0.0", "0.0", ""},
+		{token.FLOAT, "123.123", "123.123", ""},
+		{token.FLOAT, "0123.0123", "0123.0123", ""},
+
+		{token.FLOAT, "0e0", "0e0", ""},
+		{token.FLOAT, "123e+0", "123e+0", ""},
+		{token.FLOAT, "0123E-1", "0123E-1", ""},
+
+		{token.FLOAT, "0.e+1", "0.e+1", ""},
+		{token.FLOAT, "123.E-10", "123.E-10", ""},
+		{token.FLOAT, "0123.e123", "0123.e123", ""},
+
+		{token.FLOAT, ".0e-1", ".0e-1", ""},
+		{token.FLOAT, ".123E+10", ".123E+10", ""},
+		{token.FLOAT, ".0123E123", ".0123E123", ""},
+
+		{token.FLOAT, "0.0e1", "0.0e1", ""},
+		{token.FLOAT, "123.123E-10", "123.123E-10", ""},
+		{token.FLOAT, "0123.0123e+456", "0123.0123e+456", ""},
+
+		{token.FLOAT, "0e", "0e", "exponent has no digits"},
+		{token.FLOAT, "0E+", "0E+", "exponent has no digits"},
+		{token.FLOAT, "1e+f", "1e+ f", "exponent has no digits"},
+		{token.FLOAT, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"},
+		{token.FLOAT, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"},
+
+		{token.IMAG, "0.i", "0.i", ""},
+		{token.IMAG, ".123i", ".123i", ""},
+		{token.IMAG, "123.123i", "123.123i", ""},
+		{token.IMAG, "123e+0i", "123e+0i", ""},
+		{token.IMAG, "123.E-10i", "123.E-10i", ""},
+		{token.IMAG, ".123E+10i", ".123E+10i", ""},
+
+		// hexadecimals
+		{token.INT, "0x0", "0x0", ""},
+		{token.INT, "0x1234", "0x1234", ""},
+		{token.INT, "0xcafef00d", "0xcafef00d", ""},
+		{token.INT, "0XCAFEF00D", "0XCAFEF00D", ""},
+
+		{token.INT, "0x", "0x", "hexadecimal literal has no digits"},
+		{token.INT, "0x1g", "0x1 g", ""},
+
+		{token.IMAG, "0xf00i", "0xf00i", ""},
+
+		// hexadecimal floats
+		{token.FLOAT, "0x0p0", "0x0p0", ""},
+		{token.FLOAT, "0x12efp-123", "0x12efp-123", ""},
+		{token.FLOAT, "0xABCD.p+0", "0xABCD.p+0", ""},
+		{token.FLOAT, "0x.0189P-0", "0x.0189P-0", ""},
+		{token.FLOAT, "0x1.ffffp+1023", "0x1.ffffp+1023", ""},
+
+		{token.FLOAT, "0x.", "0x.", "hexadecimal literal has no digits"},
+		{token.FLOAT, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"},
+		{token.FLOAT, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"},
+		{token.FLOAT, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"},
+		{token.FLOAT, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"},
+		{token.FLOAT, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"},
+		{token.FLOAT, "0x0p", "0x0p", "exponent has no digits"},
+		{token.FLOAT, "0xeP-", "0xeP-", "exponent has no digits"},
+		{token.FLOAT, "0x1234PAB", "0x1234P AB", "exponent has no digits"},
+		{token.FLOAT, "0x1.2p1a", "0x1.2p1 a", ""},
+
+		{token.IMAG, "0xf00.bap+12i", "0xf00.bap+12i", ""},
+
+		// separators
+		{token.INT, "0b_1000_0001", "0b_1000_0001", ""},
+		{token.INT, "0o_600", "0o_600", ""},
+		{token.INT, "0_466", "0_466", ""},
+		{token.INT, "1_000", "1_000", ""},
+		{token.FLOAT, "1_000.000_1", "1_000.000_1", ""},
+		{token.IMAG, "10e+1_2_3i", "10e+1_2_3i", ""},
+		{token.INT, "0x_f00d", "0x_f00d", ""},
+		{token.FLOAT, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""},
+
+		{token.INT, "0b__1000", "0b__1000", "'_' must separate successive digits"},
+		{token.INT, "0o60___0", "0o60___0", "'_' must separate successive digits"},
+		{token.INT, "0466_", "0466_", "'_' must separate successive digits"},
+		{token.FLOAT, "1_.", "1_.", "'_' must separate successive digits"},
+		{token.FLOAT, "0._1", "0._1", "'_' must separate successive digits"},
+		{token.FLOAT, "2.7_e0", "2.7_e0", "'_' must separate successive digits"},
+		{token.IMAG, "10e+12_i", "10e+12_i", "'_' must separate successive digits"},
+		{token.INT, "0x___0", "0x___0", "'_' must separate successive digits"},
+		{token.FLOAT, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"},
+	} {
+		var s Scanner
+		var err string
+		// The handler keeps only the first error reported per token.
+		s.Init(fset.AddFile("", fset.Base(), len(test.src)), []byte(test.src), func(_ token.Position, msg string) {
+			if err == "" {
+				err = msg
+			}
+		}, 0)
+		for i, want := range strings.Split(test.tokens, " ") {
+			err = ""
+			_, tok, lit := s.Scan()
+
+			// compute lit for tokens where lit is not defined
+			switch tok {
+			case token.PERIOD:
+				lit = "."
+			case token.ADD:
+				lit = "+"
+			case token.SUB:
+				lit = "-"
+			}
+
+			// token kind and error are only checked for the first token
+			if i == 0 {
+				if tok != test.tok {
+					t.Errorf("%q: got token %s; want %s", test.src, tok, test.tok)
+				}
+				if err != test.err {
+					t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
+				}
+			}
+
+			if lit != want {
+				t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, tok, want)
+			}
+		}
+
+		// make sure we read all
+		_, tok, _ := s.Scan()
+		if tok == token.SEMICOLON {
+			_, tok, _ = s.Scan()
+		}
+		if tok != token.EOF {
+			t.Errorf("%q: got %s; want EOF", test.src, tok)
+		}
+	}
+}
diff --git a/internal/backport/go/token/example_test.go b/internal/backport/go/token/example_test.go
new file mode 100644
index 0000000..e41a958
--- /dev/null
+++ b/internal/backport/go/token/example_test.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token_test
+
+import (
+	"fmt"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
+)
+
+// Example_retrievePositionInfo shows how to translate Pos values back into
+// file/line/column information, both adjusted for //line directives
+// (Position) and unadjusted (PositionFor with adjusted=false).
+func Example_retrievePositionInfo() {
+	fset := token.NewFileSet()
+
+	const src = `package main
+
+import "fmt"
+
+import "go/token"
+
+//line :1:5
+type p = token.Pos
+
+const bad = token.NoPos
+
+//line fake.go:42:11
+func ok(pos p) bool {
+	return pos != bad
+}
+
+/*line :7:9*/func main() {
+	fmt.Println(ok(bad) == bad.IsValid())
+}
+`
+
+	f, err := parser.ParseFile(fset, "main.go", src, 0)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Print the location and kind of each declaration in f.
+	for _, decl := range f.Decls {
+		// Get the filename, line, and column back via the file set.
+		// We get both the relative and absolute position.
+		// The relative position is relative to the last line directive.
+		// The absolute position is the exact position in the source.
+		pos := decl.Pos()
+		relPosition := fset.Position(pos)
+		absPosition := fset.PositionFor(pos, false)
+
+		// Either a FuncDecl or GenDecl, since we exit on error.
+		kind := "func"
+		if gen, ok := decl.(*ast.GenDecl); ok {
+			kind = gen.Tok.String()
+		}
+
+		// If the relative and absolute positions differ, show both.
+		fmtPosition := relPosition.String()
+		if relPosition != absPosition {
+			fmtPosition += "[" + absPosition.String() + "]"
+		}
+
+		fmt.Printf("%s: %s\n", fmtPosition, kind)
+	}
+
+	//Output:
+	//
+	// main.go:3:1: import
+	// main.go:5:1: import
+	// main.go:1:5[main.go:8:1]: type
+	// main.go:3:1[main.go:10:1]: const
+	// fake.go:42:11[main.go:13:1]: func
+	// fake.go:7:9[main.go:17:14]: func
+}
diff --git a/internal/backport/go/token/position.go b/internal/backport/go/token/position.go
new file mode 100644
index 0000000..00f2453
--- /dev/null
+++ b/internal/backport/go/token/position.go
@@ -0,0 +1,529 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+// -----------------------------------------------------------------------------
+// Positions
+
+// Position describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Position struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (byte count)
+}
+
+// IsValid reports whether the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	file:line           valid position with file name but no column (column == 0)
+//	line:column         valid position without file name
+//	line                valid position without file name and no column (column == 0)
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (pos Position) String() string {
+	s := pos.Filename
+	if pos.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d", pos.Line)
+		if pos.Column != 0 {
+			s += fmt.Sprintf(":%d", pos.Column)
+		}
+	}
+	// invalid position without a file name => "-"
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Pos is a compact encoding of a source position within a file set.
+// It can be converted into a Position for a more convenient, but much
+// larger, representation.
+//
+// The Pos value for a given file is a number in the range [base, base+size],
+// where base and size are specified when a file is added to the file set.
+// The difference between a Pos value and the corresponding file base
+// corresponds to the byte offset of that position (represented by the Pos value)
+// from the beginning of the file. Thus, the file base offset is the Pos value
+// representing the first byte in the file.
+//
+// To create the Pos value for a specific source offset (measured in bytes),
+// first add the respective file to the current file set using FileSet.AddFile
+// and then call File.Pos(offset) for that file. Given a Pos value p
+// for a specific file set fset, the corresponding Position value is
+// obtained by calling fset.Position(p).
+//
+// Pos values can be compared directly with the usual comparison operators:
+// If two Pos values p and q are in the same file, comparing p and q is
+// equivalent to comparing the respective source file offsets. If p and q
+// are in different files, p < q is true if the file implied by p was added
+// to the respective file set before the file implied by q.
+type Pos int
+
+// The zero value for Pos is NoPos; there is no file and line information
+// associated with it, and NoPos.IsValid() is false. NoPos is always
+// smaller than any other Pos value. The corresponding Position value
+// for NoPos is the zero value for Position.
+const NoPos Pos = 0
+
+// IsValid reports whether the position is valid.
+func (p Pos) IsValid() bool {
+	return p != NoPos
+}
+
+// -----------------------------------------------------------------------------
+// File
+
+// A File is a handle for a file belonging to a FileSet.
+// A File has a name, size, and line offset table.
+type File struct {
+	set  *FileSet
+	name string // file name as provided to AddFile
+	base int    // Pos value range for this file is [base...base+size]
+	size int    // file size as provided to AddFile
+
+	// lines and infos are protected by mutex
+	mutex sync.Mutex
+	lines []int // lines contains the offset of the first character for each line (the first entry is always 0)
+	infos []lineInfo
+}
+
+// Name returns the file name of file f as registered with AddFile.
+func (f *File) Name() string {
+	return f.name
+}
+
+// Base returns the base offset of file f as registered with AddFile.
+func (f *File) Base() int {
+	return f.base
+}
+
+// Size returns the size of file f as registered with AddFile.
+func (f *File) Size() int {
+	return f.size
+}
+
+// LineCount returns the number of lines in file f.
+func (f *File) LineCount() int {
+	f.mutex.Lock()
+	n := len(f.lines)
+	f.mutex.Unlock()
+	return n
+}
+
+// AddLine adds the line offset for a new line.
+// The line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise the line offset is ignored.
+func (f *File) AddLine(offset int) {
+	f.mutex.Lock()
+	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
+		f.lines = append(f.lines, offset)
+	}
+	f.mutex.Unlock()
+}
+
+// MergeLine merges a line with the following line. It is akin to replacing
+// the newline character at the end of the line with a space (to not change the
+// remaining offsets). To obtain the line number, consult e.g. Position.Line.
+// MergeLine will panic if given an invalid line number.
+func (f *File) MergeLine(line int) {
+	if line < 1 {
+		panic(fmt.Sprintf("invalid line number %d (should be >= 1)", line))
+	}
+	f.mutex.Lock()
+	defer f.mutex.Unlock()
+	if line >= len(f.lines) {
+		panic(fmt.Sprintf("invalid line number %d (should be < %d)", line, len(f.lines)))
+	}
+	// To merge the line numbered <line> with the line numbered <line+1>,
+	// we need to remove the entry in lines corresponding to the line
+	// numbered <line+1>. The entry in lines corresponding to the line
+	// numbered <line+1> is located at index <line>, since indices in lines
+	// are 0-based and line numbers are 1-based.
+	copy(f.lines[line:], f.lines[line+1:])
+	f.lines = f.lines[:len(f.lines)-1]
+}
+
+// SetLines sets the line offsets for a file and reports whether it succeeded.
+// The line offsets are the offsets of the first character of each line;
+// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
+// An empty file has an empty line offset table.
+// Each line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise SetLines fails and returns
+// false.
+// Callers must not mutate the provided slice after SetLines returns.
+func (f *File) SetLines(lines []int) bool {
+	// verify validity of lines table
+	size := f.size
+	for i, offset := range lines {
+		if i > 0 && offset <= lines[i-1] || size <= offset {
+			return false
+		}
+	}
+
+	// set lines table
+	f.mutex.Lock()
+	f.lines = lines
+	f.mutex.Unlock()
+	return true
+}
+
+// SetLinesForContent sets the line offsets for the given file content.
+// It ignores position-altering //line comments.
+func (f *File) SetLinesForContent(content []byte) {
+	var lines []int
+	line := 0
+	// line holds the offset of the start of the next line, or -1 once
+	// that offset has been recorded.
+	for offset, b := range content {
+		if line >= 0 {
+			lines = append(lines, line)
+		}
+		line = -1
+		if b == '\n' {
+			line = offset + 1
+		}
+	}
+
+	// set lines table
+	f.mutex.Lock()
+	f.lines = lines
+	f.mutex.Unlock()
+}
+
+// LineStart returns the Pos value of the start of the specified line.
+// It ignores any alternative positions set using AddLineColumnInfo.
+// LineStart panics if the 1-based line number is invalid.
+func (f *File) LineStart(line int) Pos {
+	if line < 1 {
+		panic(fmt.Sprintf("invalid line number %d (should be >= 1)", line))
+	}
+	f.mutex.Lock()
+	defer f.mutex.Unlock()
+	// NOTE(review): the panic message says "< %d" but the check permits
+	// line == len(f.lines); wording matches upstream go1.18 and is kept.
+	if line > len(f.lines) {
+		panic(fmt.Sprintf("invalid line number %d (should be < %d)", line, len(f.lines)))
+	}
+	return Pos(f.base + f.lines[line-1])
+}
+
+// A lineInfo object describes alternative file, line, and column
+// number information (such as provided via a //line directive)
+// for a given file offset.
+type lineInfo struct {
+	// fields are exported to make them accessible to gob
+	Offset       int
+	Filename     string
+	Line, Column int
+}
+
+// AddLineInfo is like AddLineColumnInfo with a column = 1 argument.
+// It is here for backward-compatibility for code prior to Go 1.11.
+func (f *File) AddLineInfo(offset int, filename string, line int) {
+	f.AddLineColumnInfo(offset, filename, line, 1)
+}
+
+// AddLineColumnInfo adds alternative file, line, and column number
+// information for a given file offset. The offset must be larger
+// than the offset for the previously added alternative line info
+// and smaller than the file size; otherwise the information is
+// ignored.
+//
+// AddLineColumnInfo is typically used to register alternative position
+// information for line directives such as //line filename:line:column.
+func (f *File) AddLineColumnInfo(offset int, filename string, line, column int) {
+	f.mutex.Lock()
+	// NOTE(review): because && binds tighter than ||, the offset < f.size
+	// check is skipped when infos is empty (i == 0) — unlike AddLine, which
+	// parenthesizes. This matches upstream go1.18; confirm before changing.
+	if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
+		f.infos = append(f.infos, lineInfo{offset, filename, line, column})
+	}
+	f.mutex.Unlock()
+}
+
+// Pos returns the Pos value for the given file offset;
+// the offset must be <= f.Size().
+// f.Pos(f.Offset(p)) == p.
+func (f *File) Pos(offset int) Pos {
+	if offset > f.size {
+		panic(fmt.Sprintf("invalid file offset %d (should be <= %d)", offset, f.size))
+	}
+	return Pos(f.base + offset)
+}
+
+// Offset returns the offset for the given file position p;
+// p must be a valid Pos value in that file.
+// f.Offset(f.Pos(offset)) == offset.
+func (f *File) Offset(p Pos) int {
+	if int(p) < f.base || int(p) > f.base+f.size {
+		panic(fmt.Sprintf("invalid Pos value %d (should be in [%d, %d])", p, f.base, f.base+f.size))
+	}
+	return int(p) - f.base
+}
+
+// Line returns the line number for the given file position p;
+// p must be a Pos value in that file or NoPos.
+func (f *File) Line(p Pos) int {
+	return f.Position(p).Line
+}
+
+// searchLineInfos returns the index of the last entry in a whose Offset
+// is <= x, or -1 if there is no such entry.
+func searchLineInfos(a []lineInfo, x int) int {
+	return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
+}
+
+// unpack returns the filename and line and column number for a file offset.
+// If adjusted is set, unpack will return the filename and line information
+// possibly adjusted by //line comments; otherwise those comments are ignored.
+func (f *File) unpack(offset int, adjusted bool) (filename string, line, column int) {
+	f.mutex.Lock()
+	defer f.mutex.Unlock()
+	filename = f.name
+	if i := searchInts(f.lines, offset); i >= 0 {
+		line, column = i+1, offset-f.lines[i]+1
+	}
+	if adjusted && len(f.infos) > 0 {
+		// few files have extra line infos
+		if i := searchLineInfos(f.infos, offset); i >= 0 {
+			alt := &f.infos[i]
+			filename = alt.Filename
+			if i := searchInts(f.lines, alt.Offset); i >= 0 {
+				// i+1 is the line at which the alternative position was recorded
+				d := line - (i + 1) // line distance from alternative position base
+				line = alt.Line + d
+				if alt.Column == 0 {
+					// alternative column is unknown => relative column is unknown
+					// (the current specification for line directives requires
+					// this to apply until the next PosBase/line directive,
+					// not just until the new newline)
+					column = 0
+				} else if d == 0 {
+					// the alternative position base is on the current line
+					// => column is relative to alternative column
+					column = alt.Column + (offset - alt.Offset)
+				}
+			}
+		}
+	}
+	return
+}
+
+// position converts p to a Position, consulting //line adjustments
+// only when adjusted is set.
+func (f *File) position(p Pos, adjusted bool) (pos Position) {
+	offset := int(p) - f.base
+	pos.Offset = offset
+	pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted)
+	return
+}
+
+// PositionFor returns the Position value for the given file position p.
+// If adjusted is set, the position may be adjusted by position-altering
+// //line comments; otherwise those comments are ignored.
+// p must be a Pos value in f or NoPos.
+func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) {
+	if p != NoPos {
+		if int(p) < f.base || int(p) > f.base+f.size {
+			panic(fmt.Sprintf("invalid Pos value %d (should be in [%d, %d])", p, f.base, f.base+f.size))
+		}
+		pos = f.position(p, adjusted)
+	}
+	return
+}
+
+// Position returns the Position value for the given file position p.
+// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true).
+func (f *File) Position(p Pos) (pos Position) {
+	return f.PositionFor(p, true)
+}
+
+// -----------------------------------------------------------------------------
+// FileSet
+
+// A FileSet represents a set of source files.
+// Methods of file sets are synchronized; multiple goroutines
+// may invoke them concurrently.
+//
+// The byte offsets for each file in a file set are mapped into
+// distinct (integer) intervals, one interval [base, base+size]
+// per file. Base represents the first byte in the file, and size
+// is the corresponding file size. A Pos value is a value in such
+// an interval. By determining the interval a Pos value belongs
+// to, the file, its file base, and thus the byte offset (position)
+// the Pos value is representing can be computed.
+//
+// When adding a new file, a file base must be provided. That can
+// be any integer value that is past the end of any interval of any
+// file already in the file set. For convenience, FileSet.Base provides
+// such a value, which is simply the end of the Pos interval of the most
+// recently added file, plus one. Unless there is a need to extend an
+// interval later, using the FileSet.Base should be used as argument
+// for FileSet.AddFile.
+type FileSet struct {
+	mutex sync.RWMutex // protects the file set
+	base  int          // base offset for the next file
+	files []*File      // list of files in the order added to the set
+	last  *File        // cache of last file looked up
+}
+
+// NewFileSet creates a new file set.
+func NewFileSet() *FileSet {
+	return &FileSet{
+		base: 1, // 0 == NoPos
+	}
+}
+
+// Base returns the minimum base offset that must be provided to
+// AddFile when adding the next file.
+func (s *FileSet) Base() int {
+	s.mutex.RLock()
+	b := s.base
+	s.mutex.RUnlock()
+	return b
+
+}
+
+// AddFile adds a new file with a given filename, base offset, and file size
+// to the file set s and returns the file. Multiple files may have the same
+// name. The base offset must not be smaller than the FileSet's Base(), and
+// size must not be negative. As a special case, if a negative base is provided,
+// the current value of the FileSet's Base() is used instead.
+//
+// Adding the file will set the file set's Base() value to base + size + 1
+// as the minimum base value for the next file. The following relationship
+// exists between a Pos value p for a given file offset offs:
+//
+//	int(p) = base + offs
+//
+// with offs in the range [0, size] and thus p in the range [base, base+size].
+// For convenience, File.Pos may be used to create file-specific position
+// values from a file offset.
+func (s *FileSet) AddFile(filename string, base, size int) *File {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	// A negative base selects the next available base automatically.
+	if base < 0 {
+		base = s.base
+	}
+	if base < s.base {
+		panic(fmt.Sprintf("invalid base %d (should be >= %d)", base, s.base))
+	}
+	if size < 0 {
+		panic(fmt.Sprintf("invalid size %d (should be >= 0)", size))
+	}
+	// base >= s.base && size >= 0
+	f := &File{set: s, name: filename, base: base, size: size, lines: []int{0}}
+	base += size + 1 // +1 because EOF also has a position
+	// Pos values are ints; wrapping past the int range would corrupt lookups.
+	if base < 0 {
+		panic("token.Pos offset overflow (> 2G of source code in file set)")
+	}
+	// add the file to the file set
+	s.base = base
+	s.files = append(s.files, f)
+	s.last = f
+	return f
+}
+
+// Iterate calls f for the files in the file set in the order they were added
+// until f returns false.
+func (s *FileSet) Iterate(f func(*File) bool) {
+	for i := 0; ; i++ {
+		var file *File
+		// Hold the lock only while indexing, never while calling f,
+		// so that f may itself use the file set without deadlocking.
+		s.mutex.RLock()
+		if i < len(s.files) {
+			file = s.files[i]
+		}
+		s.mutex.RUnlock()
+		if file == nil || !f(file) {
+			break
+		}
+	}
+}
+
+// searchFiles returns the index of the last file in a whose base is <= x,
+// or -1 if there is no such file; a must be sorted by increasing base.
+func searchFiles(a []*File, x int) int {
+	return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
+}
+
+// file returns the file whose Pos interval contains p, or nil if there is
+// no such file. On a successful search it also updates the last-file cache.
+func (s *FileSet) file(p Pos) *File {
+	s.mutex.RLock()
+	// common case: p is in last file
+	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+		s.mutex.RUnlock()
+		return f
+	}
+	// p is not in last file - search all files
+	if i := searchFiles(s.files, int(p)); i >= 0 {
+		f := s.files[i]
+		// f.base <= int(p) by definition of searchFiles
+		if int(p) <= f.base+f.size {
+			// Upgrade from the read lock to the write lock to refresh the
+			// cache; the unlocked gap in between is harmless because s.last
+			// is only a cache.
+			s.mutex.RUnlock()
+			s.mutex.Lock()
+			s.last = f // race is ok - s.last is only a cache
+			s.mutex.Unlock()
+			return f
+		}
+	}
+	s.mutex.RUnlock()
+	return nil
+}
+
+// File returns the file that contains the position p.
+// If no such file is found (for instance for p == NoPos),
+// the result is nil.
+func (s *FileSet) File(p Pos) (f *File) {
+	// NoPos belongs to no file; skip the lookup entirely.
+	if p != NoPos {
+		f = s.file(p)
+	}
+	return
+}
+
+// PositionFor converts a Pos p in the fileset into a Position value.
+// If adjusted is set, the position may be adjusted by position-altering
+// //line comments; otherwise those comments are ignored.
+// p must be a Pos value in s or NoPos.
+func (s *FileSet) PositionFor(p Pos, adjusted bool) (pos Position) {
+	if p != NoPos {
+		if f := s.file(p); f != nil {
+			return f.position(p, adjusted)
+		}
+	}
+	// The zero Position is returned for NoPos or a p outside any file.
+	return
+}
+
+// Position converts a Pos p in the fileset into a Position value.
+// Calling s.Position(p) is equivalent to calling s.PositionFor(p, true).
+// That is, //line comments are taken into account.
+func (s *FileSet) Position(p Pos) (pos Position) {
+	return s.PositionFor(p, true)
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+// searchInts returns the index of the last element of a that is <= x,
+// or -1 if there is no such element; a must be sorted in ascending order.
+func searchInts(a []int, x int) int {
+	// This function body is a manually inlined version of:
+	//
+	//   return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+	//
+	// With better compiler optimizations, this may not be needed in the
+	// future, but at the moment this change improves the go/printer
+	// benchmark performance by ~30%. This has a direct impact on the
+	// speed of gofmt and thus seems worthwhile (2011-04-29).
+	// TODO(gri): Remove this when compilers have caught up.
+	i, j := 0, len(a)
+	for i < j {
+		h := int(uint(i+j) >> 1) // avoid overflow when computing h
+		// i ≤ h < j
+		if a[h] <= x {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	return i - 1
+}
diff --git a/internal/backport/go/token/position_bench_test.go b/internal/backport/go/token/position_bench_test.go
new file mode 100644
index 0000000..41be728
--- /dev/null
+++ b/internal/backport/go/token/position_bench_test.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+	"testing"
+)
+
+// BenchmarkSearchInts measures searchInts on a 10000-element sorted slice,
+// sanity-checking the result once before the timed loop.
+func BenchmarkSearchInts(b *testing.B) {
+	data := make([]int, 10000)
+	for i := 0; i < 10000; i++ {
+		data[i] = i
+	}
+	const x = 8
+	if r := searchInts(data, x); r != x {
+		b.Errorf("got index = %d; want %d", r, x)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		searchInts(data, x)
+	}
+}
diff --git a/internal/backport/go/token/position_test.go b/internal/backport/go/token/position_test.go
new file mode 100644
index 0000000..7d465df
--- /dev/null
+++ b/internal/backport/go/token/position_test.go
@@ -0,0 +1,341 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"testing"
+)
+
+// checkPos reports a test error for every field of got that differs from want.
+func checkPos(t *testing.T, msg string, got, want Position) {
+	if got.Filename != want.Filename {
+		t.Errorf("%s: got filename = %q; want %q", msg, got.Filename, want.Filename)
+	}
+	if got.Offset != want.Offset {
+		t.Errorf("%s: got offset = %d; want %d", msg, got.Offset, want.Offset)
+	}
+	if got.Line != want.Line {
+		t.Errorf("%s: got line = %d; want %d", msg, got.Line, want.Line)
+	}
+	if got.Column != want.Column {
+		t.Errorf("%s: got column = %d; want %d", msg, got.Column, want.Column)
+	}
+}
+
+// TestNoPos verifies that NoPos is invalid and maps to the zero Position,
+// even when resolved through a nil *FileSet.
+func TestNoPos(t *testing.T) {
+	if NoPos.IsValid() {
+		t.Errorf("NoPos should not be valid")
+	}
+	var fset *FileSet
+	checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
+	fset = NewFileSet()
+	checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
+}
+
+// tests is the shared table of test files: name, optional source bytes,
+// expected size, and expected line offsets. A nil source is synthesized
+// from size and lines by makeTestSource.
+var tests = []struct {
+	filename string
+	source   []byte // may be nil
+	size     int
+	lines    []int
+}{
+	{"a", []byte{}, 0, []int{}},
+	{"b", []byte("01234"), 5, []int{0}},
+	{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
+	{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
+	{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
+	{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
+	{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
+	{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
+}
+
+// linecol computes the expected 1-based line and column for offset offs,
+// given the sorted slice of line-start offsets.
+func linecol(lines []int, offs int) (int, int) {
+	prevLineOffs := 0
+	for line, lineOffs := range lines {
+		if offs < lineOffs {
+			return line, offs - prevLineOffs + 1
+		}
+		prevLineOffs = lineOffs
+	}
+	return len(lines), offs - prevLineOffs + 1
+}
+
+// verifyPositions checks, for every byte offset of f, that Pos/Offset
+// round-trip and that both File.Position and FileSet.Position agree with
+// the line/column computed independently by linecol.
+func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
+	for offs := 0; offs < f.Size(); offs++ {
+		p := f.Pos(offs)
+		offs2 := f.Offset(p)
+		if offs2 != offs {
+			t.Errorf("%s, Offset: got offset %d; want %d", f.Name(), offs2, offs)
+		}
+		line, col := linecol(lines, offs)
+		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
+		checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
+		checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
+	}
+}
+
+// makeTestSource builds a synthetic source of the given size whose newline
+// bytes sit exactly one position before each line-start offset in lines.
+func makeTestSource(size int, lines []int) []byte {
+	src := make([]byte, size)
+	for _, offs := range lines {
+		if offs > 0 {
+			src[offs-1] = '\n'
+		}
+	}
+	return src
+}
+
+// TestPositions exercises AddFile, AddLine, SetLines, and SetLinesForContent
+// for every entry of the tests table, verifying all resulting positions.
+func TestPositions(t *testing.T) {
+	const delta = 7 // a non-zero base offset increment
+	fset := NewFileSet()
+	for _, test := range tests {
+		// verify consistency of test case
+		if test.source != nil && len(test.source) != test.size {
+			t.Errorf("%s: inconsistent test case: got file size %d; want %d", test.filename, len(test.source), test.size)
+		}
+
+		// add file and verify name and size
+		f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
+		if f.Name() != test.filename {
+			t.Errorf("got filename %q; want %q", f.Name(), test.filename)
+		}
+		if f.Size() != test.size {
+			t.Errorf("%s: got file size %d; want %d", f.Name(), f.Size(), test.size)
+		}
+		if fset.File(f.Pos(0)) != f {
+			t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
+		}
+
+		// add lines individually and verify all positions
+		for i, offset := range test.lines {
+			f.AddLine(offset)
+			if f.LineCount() != i+1 {
+				t.Errorf("%s, AddLine: got line count %d; want %d", f.Name(), f.LineCount(), i+1)
+			}
+			// adding the same offset again should be ignored
+			f.AddLine(offset)
+			if f.LineCount() != i+1 {
+				t.Errorf("%s, AddLine: got unchanged line count %d; want %d", f.Name(), f.LineCount(), i+1)
+			}
+			verifyPositions(t, fset, f, test.lines[0:i+1])
+		}
+
+		// add lines with SetLines and verify all positions
+		if ok := f.SetLines(test.lines); !ok {
+			t.Errorf("%s: SetLines failed", f.Name())
+		}
+		if f.LineCount() != len(test.lines) {
+			t.Errorf("%s, SetLines: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines))
+		}
+		verifyPositions(t, fset, f, test.lines)
+
+		// add lines with SetLinesForContent and verify all positions
+		src := test.source
+		if src == nil {
+			// no test source available - create one from scratch
+			src = makeTestSource(test.size, test.lines)
+		}
+		f.SetLinesForContent(src)
+		if f.LineCount() != len(test.lines) {
+			t.Errorf("%s, SetLinesForContent: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines))
+		}
+		verifyPositions(t, fset, f, test.lines)
+	}
+}
+
+// TestLineInfo checks that AddLineInfo overrides the reported filename and
+// line ("bar", 42) for every position at or after each registered offset.
+func TestLineInfo(t *testing.T) {
+	fset := NewFileSet()
+	f := fset.AddFile("foo", fset.Base(), 500)
+	lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
+	// add lines individually and provide alternative line information
+	for _, offs := range lines {
+		f.AddLine(offs)
+		f.AddLineInfo(offs, "bar", 42)
+	}
+	// verify positions for all offsets
+	for offs := 0; offs <= f.Size(); offs++ {
+		p := f.Pos(offs)
+		_, col := linecol(lines, offs)
+		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
+		checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
+		checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
+	}
+}
+
+// TestFiles checks that Iterate visits files in insertion order and that
+// explicit and negative (auto) base offsets behave the same.
+func TestFiles(t *testing.T) {
+	fset := NewFileSet()
+	for i, test := range tests {
+		base := fset.Base()
+		if i%2 == 1 {
+			// Setting a negative base is equivalent to
+			// fset.Base(), so test some of each.
+			base = -1
+		}
+		fset.AddFile(test.filename, base, test.size)
+		j := 0
+		fset.Iterate(func(f *File) bool {
+			if f.Name() != tests[j].filename {
+				t.Errorf("got filename = %s; want %s", f.Name(), tests[j].filename)
+			}
+			j++
+			return true
+		})
+		if j != i+1 {
+			t.Errorf("got %d files; want %d", j, i+1)
+		}
+	}
+}
+
+// FileSet.File should return nil if Pos is past the end of the FileSet.
+// (fset.Base() is the first offset beyond the last added file.)
+func TestFileSetPastEnd(t *testing.T) {
+	fset := NewFileSet()
+	for _, test := range tests {
+		fset.AddFile(test.filename, fset.Base(), test.size)
+	}
+	if f := fset.File(Pos(fset.Base())); f != nil {
+		t.Errorf("got %v, want nil", f)
+	}
+}
+
+// TestFileSetCacheUnlikely looks files up in map (i.e. random) order, so
+// most lookups miss the last-file cache and exercise the search path.
+func TestFileSetCacheUnlikely(t *testing.T) {
+	fset := NewFileSet()
+	offsets := make(map[string]int)
+	for _, test := range tests {
+		offsets[test.filename] = fset.Base()
+		fset.AddFile(test.filename, fset.Base(), test.size)
+	}
+	for file, pos := range offsets {
+		f := fset.File(Pos(pos))
+		if f.Name() != file {
+			t.Errorf("got %q at position %d, want %q", f.Name(), pos, file)
+		}
+	}
+}
+
+// issue 4345. Test that concurrent use of FileSet.Pos does not trigger a
+// race in the FileSet position cache.
+// (Meaningful mainly when the tests are run with -race.)
+func TestFileSetRace(t *testing.T) {
+	fset := NewFileSet()
+	for i := 0; i < 100; i++ {
+		fset.AddFile(fmt.Sprintf("file-%d", i), fset.Base(), 1031)
+	}
+	max := int32(fset.Base())
+	var stop sync.WaitGroup
+	r := rand.New(rand.NewSource(7))
+	for i := 0; i < 2; i++ {
+		r := rand.New(rand.NewSource(r.Int63()))
+		stop.Add(1)
+		go func() {
+			for i := 0; i < 1000; i++ {
+				fset.Position(Pos(r.Int31n(max)))
+			}
+			stop.Done()
+		}()
+	}
+	stop.Wait()
+}
+
+// issue 16548. Test that concurrent use of File.AddLine and FileSet.PositionFor
+// does not trigger a race in the FileSet position cache.
+// One goroutine mutates the file's line table while the other resolves positions.
+func TestFileSetRace2(t *testing.T) {
+	const N = 1e3
+	var (
+		fset = NewFileSet()
+		file = fset.AddFile("", -1, N)
+		ch   = make(chan int, 2)
+	)
+
+	go func() {
+		for i := 0; i < N; i++ {
+			file.AddLine(i)
+		}
+		ch <- 1
+	}()
+
+	go func() {
+		pos := file.Pos(0)
+		for i := 0; i < N; i++ {
+			fset.PositionFor(pos, false)
+		}
+		ch <- 1
+	}()
+
+	<-ch
+	<-ch
+}
+
+// TestPositionFor checks that PositionFor honors (adjusted=true) or ignores
+// (adjusted=false) //line directives, first via SetLinesForContent on source
+// containing directives, then via manually added AddLineInfo entries.
+func TestPositionFor(t *testing.T) {
+	src := []byte(`
+foo
+b
+ar
+//line :100
+foobar
+//line bar:3
+done
+`)
+
+	const filename = "foo"
+	fset := NewFileSet()
+	f := fset.AddFile(filename, fset.Base(), len(src))
+	f.SetLinesForContent(src)
+
+	// verify position info
+	for i, offs := range f.lines {
+		got1 := f.PositionFor(f.Pos(offs), false)
+		got2 := f.PositionFor(f.Pos(offs), true)
+		got3 := f.Position(f.Pos(offs))
+		want := Position{filename, offs, i + 1, 1}
+		checkPos(t, "1. PositionFor unadjusted", got1, want)
+		checkPos(t, "1. PositionFor adjusted", got2, want)
+		checkPos(t, "1. Position", got3, want)
+	}
+
+	// manually add //line info on lines l1, l2
+	const l1, l2 = 5, 7
+	f.AddLineInfo(f.lines[l1-1], "", 100)
+	f.AddLineInfo(f.lines[l2-1], "bar", 3)
+
+	// unadjusted position info must remain unchanged
+	for i, offs := range f.lines {
+		got1 := f.PositionFor(f.Pos(offs), false)
+		want := Position{filename, offs, i + 1, 1}
+		checkPos(t, "2. PositionFor unadjusted", got1, want)
+	}
+
+	// adjusted position info should have changed
+	for i, offs := range f.lines {
+		got2 := f.PositionFor(f.Pos(offs), true)
+		got3 := f.Position(f.Pos(offs))
+		want := Position{filename, offs, i + 1, 1}
+		// manually compute wanted filename and line
+		line := want.Line
+		if i+1 >= l1 {
+			want.Filename = ""
+			want.Line = line - l1 + 100
+		}
+		if i+1 >= l2 {
+			want.Filename = "bar"
+			want.Line = line - l2 + 3
+		}
+		checkPos(t, "3. PositionFor adjusted", got2, want)
+		checkPos(t, "3. Position", got3, want)
+	}
+}
+
+// TestLineStart checks that LineStart returns a Pos at column 1 of each line.
+func TestLineStart(t *testing.T) {
+	const src = "one\ntwo\nthree\n"
+	fset := NewFileSet()
+	f := fset.AddFile("input", -1, len(src))
+	f.SetLinesForContent([]byte(src))
+
+	for line := 1; line <= 3; line++ {
+		pos := f.LineStart(line)
+		position := fset.Position(pos)
+		if position.Line != line || position.Column != 1 {
+			t.Errorf("LineStart(%d) returned wrong pos %d: %s", line, pos, position)
+		}
+	}
+}
diff --git a/internal/backport/go/token/serialize.go b/internal/backport/go/token/serialize.go
new file mode 100644
index 0000000..d0ea345
--- /dev/null
+++ b/internal/backport/go/token/serialize.go
@@ -0,0 +1,71 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+// serializedFile is the gob-friendly (exported-field) mirror of File.
+type serializedFile struct {
+	// fields correspond 1:1 to fields with same (lower-case) name in File
+	Name  string
+	Base  int
+	Size  int
+	Lines []int
+	Infos []lineInfo
+}
+
+// serializedFileSet mirrors FileSet.base and FileSet.files for encoding;
+// the *File pointers are flattened into serializedFile values.
+type serializedFileSet struct {
+	Base  int
+	Files []serializedFile
+}
+
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
+	var ss serializedFileSet
+	// Decode outside the lock; only installing the state needs exclusion.
+	if err := decode(&ss); err != nil {
+		return err
+	}
+
+	s.mutex.Lock()
+	s.base = ss.Base
+	files := make([]*File, len(ss.Files))
+	for i := 0; i < len(ss.Files); i++ {
+		f := &ss.Files[i]
+		files[i] = &File{
+			set:   s,
+			name:  f.Name,
+			base:  f.Base,
+			size:  f.Size,
+			lines: f.Lines,
+			infos: f.Infos,
+		}
+	}
+	s.files = files
+	// Reset the cache: any previous entry refers to the replaced files.
+	s.last = nil
+	s.mutex.Unlock()
+
+	return nil
+}
+
+// Write calls encode to serialize the file set s.
+func (s *FileSet) Write(encode func(interface{}) error) error {
+	var ss serializedFileSet
+
+	s.mutex.Lock()
+	ss.Base = s.base
+	files := make([]serializedFile, len(s.files))
+	for i, f := range s.files {
+		// Snapshot lines/infos under f's lock so encode, which runs after
+		// all locks are released, does not alias concurrently mutated slices.
+		f.mutex.Lock()
+		files[i] = serializedFile{
+			Name:  f.name,
+			Base:  f.base,
+			Size:  f.size,
+			Lines: append([]int(nil), f.lines...),
+			Infos: append([]lineInfo(nil), f.infos...),
+		}
+		f.mutex.Unlock()
+	}
+	ss.Files = files
+	s.mutex.Unlock()
+
+	return encode(ss)
+}
diff --git a/internal/backport/go/token/serialize_test.go b/internal/backport/go/token/serialize_test.go
new file mode 100644
index 0000000..4e925ad
--- /dev/null
+++ b/internal/backport/go/token/serialize_test.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"testing"
+)
+
+// equal returns nil if p and q describe the same file set;
+// otherwise it returns an error describing the discrepancy.
+// NOTE(review): equal always locks p before q, so all callers must pass
+// arguments in a consistent order to avoid deadlock (test-only helper).
+func equal(p, q *FileSet) error {
+	if p == q {
+		// avoid deadlock if p == q
+		return nil
+	}
+
+	// not strictly needed for the test
+	p.mutex.Lock()
+	q.mutex.Lock()
+	defer q.mutex.Unlock()
+	defer p.mutex.Unlock()
+
+	if p.base != q.base {
+		return fmt.Errorf("different bases: %d != %d", p.base, q.base)
+	}
+
+	if len(p.files) != len(q.files) {
+		return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
+	}
+
+	for i, f := range p.files {
+		g := q.files[i]
+		if f.set != p {
+			return fmt.Errorf("wrong fileset for %q", f.name)
+		}
+		if g.set != q {
+			return fmt.Errorf("wrong fileset for %q", g.name)
+		}
+		if f.name != g.name {
+			return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
+		}
+		if f.base != g.base {
+			return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
+		}
+		if f.size != g.size {
+			return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
+		}
+		for j, l := range f.lines {
+			m := g.lines[j]
+			if l != m {
+				return fmt.Errorf("different offsets for %q", f.name)
+			}
+		}
+		for j, l := range f.infos {
+			m := g.infos[j]
+			if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
+				return fmt.Errorf("different infos for %q", f.name)
+			}
+		}
+	}
+
+	// we don't care about .last - it's just a cache
+	return nil
+}
+
+// checkSerialize gob-encodes p, decodes the bytes into a fresh FileSet,
+// and reports an error unless the round trip preserved the set exactly.
+func checkSerialize(t *testing.T, p *FileSet) {
+	var buf bytes.Buffer
+	encode := func(x interface{}) error {
+		return gob.NewEncoder(&buf).Encode(x)
+	}
+	if err := p.Write(encode); err != nil {
+		t.Errorf("writing fileset failed: %s", err)
+		return
+	}
+	q := NewFileSet()
+	decode := func(x interface{}) error {
+		return gob.NewDecoder(&buf).Decode(x)
+	}
+	if err := q.Read(decode); err != nil {
+		t.Errorf("reading fileset failed: %s", err)
+		return
+	}
+	if err := equal(p, q); err != nil {
+		t.Errorf("filesets not identical: %s", err)
+	}
+}
+
+// TestSerialization round-trips the file set after each mutation step:
+// empty, after each AddFile, and after adding lines and line infos.
+func TestSerialization(t *testing.T) {
+	p := NewFileSet()
+	checkSerialize(t, p)
+	// add some files
+	for i := 0; i < 10; i++ {
+		f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
+		checkSerialize(t, p)
+		// add some lines and alternative file infos
+		line := 1000
+		for offs := 0; offs < f.Size(); offs += 40 + i {
+			f.AddLine(offs)
+			if offs%7 == 0 {
+				f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
+				line += 33
+			}
+		}
+		checkSerialize(t, p)
+	}
+}
diff --git a/internal/backport/go/token/token.go b/internal/backport/go/token/token.go
new file mode 100644
index 0000000..db4e955
--- /dev/null
+++ b/internal/backport/go/token/token.go
@@ -0,0 +1,338 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package token defines constants representing the lexical tokens of the Go
+// programming language and basic operations on tokens (printing, predicates).
+package token
+
+import (
+	"strconv"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Token is the set of lexical tokens of the Go programming language.
+type Token int
+
+// The list of tokens.
+const (
+	// Special tokens
+	ILLEGAL Token = iota
+	EOF
+	COMMENT
+
+	// The unexported *_beg/*_end values delimit token classes; they are
+	// used by the IsLiteral, IsOperator, and IsKeyword predicates below.
+	literal_beg
+	// Identifiers and basic type literals
+	// (these tokens stand for classes of literals)
+	IDENT  // main
+	INT    // 12345
+	FLOAT  // 123.45
+	IMAG   // 123.45i
+	CHAR   // 'a'
+	STRING // "abc"
+	literal_end
+
+	operator_beg
+	// Operators and delimiters
+	ADD // +
+	SUB // -
+	MUL // *
+	QUO // /
+	REM // %
+
+	AND     // &
+	OR      // |
+	XOR     // ^
+	SHL     // <<
+	SHR     // >>
+	AND_NOT // &^
+
+	ADD_ASSIGN // +=
+	SUB_ASSIGN // -=
+	MUL_ASSIGN // *=
+	QUO_ASSIGN // /=
+	REM_ASSIGN // %=
+
+	AND_ASSIGN     // &=
+	OR_ASSIGN      // |=
+	XOR_ASSIGN     // ^=
+	SHL_ASSIGN     // <<=
+	SHR_ASSIGN     // >>=
+	AND_NOT_ASSIGN // &^=
+
+	LAND  // &&
+	LOR   // ||
+	ARROW // <-
+	INC   // ++
+	DEC   // --
+
+	EQL    // ==
+	LSS    // <
+	GTR    // >
+	ASSIGN // =
+	NOT    // !
+
+	NEQ      // !=
+	LEQ      // <=
+	GEQ      // >=
+	DEFINE   // :=
+	ELLIPSIS // ...
+
+	LPAREN // (
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+
+	RPAREN    // )
+	RBRACK    // ]
+	RBRACE    // }
+	SEMICOLON // ;
+	COLON     // :
+	operator_end
+
+	keyword_beg
+	// Keywords
+	BREAK
+	CASE
+	CHAN
+	CONST
+	CONTINUE
+
+	DEFAULT
+	DEFER
+	ELSE
+	FALLTHROUGH
+	FOR
+
+	FUNC
+	GO
+	GOTO
+	IF
+	IMPORT
+
+	INTERFACE
+	MAP
+	PACKAGE
+	RANGE
+	RETURN
+
+	SELECT
+	STRUCT
+	SWITCH
+	TYPE
+	VAR
+	keyword_end
+
+	additional_beg
+	// additional tokens, handled in an ad-hoc manner
+	TILDE
+	additional_end
+)
+
+// tokens maps each Token to its string representation: the operator or
+// keyword spelling, or the constant name for special and literal tokens.
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF:     "EOF",
+	COMMENT: "COMMENT",
+
+	IDENT:  "IDENT",
+	INT:    "INT",
+	FLOAT:  "FLOAT",
+	IMAG:   "IMAG",
+	CHAR:   "CHAR",
+	STRING: "STRING",
+
+	ADD: "+",
+	SUB: "-",
+	MUL: "*",
+	QUO: "/",
+	REM: "%",
+
+	AND:     "&",
+	OR:      "|",
+	XOR:     "^",
+	SHL:     "<<",
+	SHR:     ">>",
+	AND_NOT: "&^",
+
+	ADD_ASSIGN: "+=",
+	SUB_ASSIGN: "-=",
+	MUL_ASSIGN: "*=",
+	QUO_ASSIGN: "/=",
+	REM_ASSIGN: "%=",
+
+	AND_ASSIGN:     "&=",
+	OR_ASSIGN:      "|=",
+	XOR_ASSIGN:     "^=",
+	SHL_ASSIGN:     "<<=",
+	SHR_ASSIGN:     ">>=",
+	AND_NOT_ASSIGN: "&^=",
+
+	LAND:  "&&",
+	LOR:   "||",
+	ARROW: "<-",
+	INC:   "++",
+	DEC:   "--",
+
+	EQL:    "==",
+	LSS:    "<",
+	GTR:    ">",
+	ASSIGN: "=",
+	NOT:    "!",
+
+	NEQ:      "!=",
+	LEQ:      "<=",
+	GEQ:      ">=",
+	DEFINE:   ":=",
+	ELLIPSIS: "...",
+
+	LPAREN: "(",
+	LBRACK: "[",
+	LBRACE: "{",
+	COMMA:  ",",
+	PERIOD: ".",
+
+	RPAREN:    ")",
+	RBRACK:    "]",
+	RBRACE:    "}",
+	SEMICOLON: ";",
+	COLON:     ":",
+
+	BREAK:    "break",
+	CASE:     "case",
+	CHAN:     "chan",
+	CONST:    "const",
+	CONTINUE: "continue",
+
+	DEFAULT:     "default",
+	DEFER:       "defer",
+	ELSE:        "else",
+	FALLTHROUGH: "fallthrough",
+	FOR:         "for",
+
+	FUNC:   "func",
+	GO:     "go",
+	GOTO:   "goto",
+	IF:     "if",
+	IMPORT: "import",
+
+	INTERFACE: "interface",
+	MAP:       "map",
+	PACKAGE:   "package",
+	RANGE:     "range",
+	RETURN:    "return",
+
+	SELECT: "select",
+	STRUCT: "struct",
+	SWITCH: "switch",
+	TYPE:   "type",
+	VAR:    "var",
+
+	TILDE: "~",
+}
+
+// String returns the string corresponding to the token tok.
+// For operators, delimiters, and keywords the string is the actual
+// token character sequence (e.g., for the token ADD, the string is
+// "+"). For all other tokens the string corresponds to the token
+// constant name (e.g. for the token IDENT, the string is "IDENT").
+func (tok Token) String() string {
+	s := ""
+	if 0 <= tok && tok < Token(len(tokens)) {
+		s = tokens[tok]
+	}
+	// Fall back to a numeric form for out-of-range or unnamed token values
+	// (e.g. the unexported *_beg/*_end markers have empty table entries).
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(tok)) + ")"
+	}
+	return s
+}
+
+// A set of constants for precedence-based expression parsing.
+// Non-operators have lowest precedence, followed by operators
+// starting with precedence 1 up to unary operators. The highest
+// precedence serves as "catch-all" precedence for selector,
+// indexing, and other operator and delimiter tokens.
+const (
+	LowestPrec  = 0 // non-operators
+	// UnaryPrec is the precedence of unary operators.
+	UnaryPrec   = 6
+	HighestPrec = 7
+)
+
+// Precedence returns the operator precedence of the binary
+// operator op. If op is not a binary operator, the result
+// is LowestPrecedence.
+func (op Token) Precedence() int {
+	// Levels 1-5 correspond to ||, &&, comparisons, additive, and
+	// multiplicative operators, in order of increasing binding strength.
+	switch op {
+	case LOR:
+		return 1
+	case LAND:
+		return 2
+	case EQL, NEQ, LSS, LEQ, GTR, GEQ:
+		return 3
+	case ADD, SUB, OR, XOR:
+		return 4
+	case MUL, QUO, REM, SHL, SHR, AND, AND_NOT:
+		return 5
+	}
+	return LowestPrec
+}
+
+// keywords maps each keyword's source spelling to its token.
+// It is populated once at package init time and read-only afterwards.
+var keywords map[string]Token
+
+func init() {
+	keywords = make(map[string]Token)
+	for i := keyword_beg + 1; i < keyword_end; i++ {
+		keywords[tokens[i]] = i
+	}
+}
+
+// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
+func Lookup(ident string) Token {
+	if tok, is_keyword := keywords[ident]; is_keyword {
+		return tok
+	}
+	// Anything that is not a keyword is an identifier.
+	return IDENT
+}
+
+// Predicates
+
+// IsLiteral returns true for tokens corresponding to identifiers
+// and basic type literals; it returns false otherwise.
+func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+// TILDE lives outside the operator_beg/operator_end range (it is in the
+// "additional" section) and is therefore special-cased here.
+func (tok Token) IsOperator() bool {
+	return (operator_beg < tok && tok < operator_end) || tok == TILDE
+}
+
+// IsKeyword returns true for tokens corresponding to keywords;
+// it returns false otherwise.
+func (tok Token) IsKeyword() bool { return keyword_beg < tok && tok < keyword_end }
+
+// IsExported reports whether name starts with an upper-case letter.
+func IsExported(name string) bool {
+	// Only the first rune matters; an empty name decodes to RuneError,
+	// which is not upper case.
+	ch, _ := utf8.DecodeRuneInString(name)
+	return unicode.IsUpper(ch)
+}
+
+// IsKeyword reports whether name is a Go keyword, such as "func" or "return".
+// Unlike the Token.IsKeyword method, this checks a string, not a Token.
+func IsKeyword(name string) bool {
+	// TODO: opt: use a perfect hash function instead of a global map.
+	_, ok := keywords[name]
+	return ok
+}
+
+// IsIdentifier reports whether name is a Go identifier, that is, a non-empty
+// string made up of letters, digits, and underscores, where the first character
+// is not a digit. Keywords are not identifiers.
+func IsIdentifier(name string) bool {
+	for i, c := range name {
+		// A character is invalid if it is not a letter or underscore,
+		// and is either the first character or not a digit.
+		if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) {
+			return false
+		}
+	}
+	return name != "" && !IsKeyword(name)
+}
diff --git a/internal/backport/go/token/token_test.go b/internal/backport/go/token/token_test.go
new file mode 100644
index 0000000..eff38cc
--- /dev/null
+++ b/internal/backport/go/token/token_test.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import "testing"
+
+// TestIsIdentifier covers empty strings, whitespace, digits, keywords,
+// and valid ASCII/Unicode identifiers.
+func TestIsIdentifier(t *testing.T) {
+	tests := []struct {
+		name string
+		in   string
+		want bool
+	}{
+		{"Empty", "", false},
+		{"Space", " ", false},
+		{"SpaceSuffix", "foo ", false},
+		{"Number", "123", false},
+		{"Keyword", "func", false},
+
+		{"LettersASCII", "foo", true},
+		{"MixedASCII", "_bar123", true},
+		{"UppercaseKeyword", "Func", true},
+		{"LettersUnicode", "fóö", true},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if got := IsIdentifier(test.in); got != test.want {
+				t.Fatalf("IsIdentifier(%q) = %t, want %v", test.in, got, test.want)
+			}
+		})
+	}
+}
diff --git a/internal/pkgdoc/dir.go b/internal/pkgdoc/dir.go
index 2d71230..5e75e16 100644
--- a/internal/pkgdoc/dir.go
+++ b/internal/pkgdoc/dir.go
@@ -8,10 +8,10 @@
 
 import (
 	"bytes"
-	"go/ast"
-	"go/doc"
-	"go/parser"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/doc"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
 	"io/fs"
 	"log"
 	"path"
diff --git a/internal/pkgdoc/dir_test.go b/internal/pkgdoc/dir_test.go
index 1b110ff..09e0cfd 100644
--- a/internal/pkgdoc/dir_test.go
+++ b/internal/pkgdoc/dir_test.go
@@ -5,7 +5,7 @@
 package pkgdoc
 
 import (
-	"go/token"
+	"golang.org/x/website/internal/backport/go/token"
 	"os"
 	"runtime"
 	"sort"
diff --git a/internal/pkgdoc/doc.go b/internal/pkgdoc/doc.go
index 97bfd21..9058ffe 100644
--- a/internal/pkgdoc/doc.go
+++ b/internal/pkgdoc/doc.go
@@ -11,10 +11,9 @@
 
 import (
 	"bytes"
-	"go/ast"
-	"go/build"
-	"go/doc"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/doc"
+	"golang.org/x/website/internal/backport/go/token"
 	"io"
 	"io/fs"
 	"io/ioutil"
@@ -30,6 +29,7 @@
 	"unicode/utf8"
 
 	"golang.org/x/website/internal/api"
+	"golang.org/x/website/internal/backport/go/build"
 	"golang.org/x/website/internal/web"
 )
 
@@ -294,7 +294,6 @@
 // (as is the convention for packages). This is sufficient
 // to resolve package identifiers without doing an actual
 // import. It never returns an error.
-//
 func simpleImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
 	pkg := imports[path]
 	if pkg == nil {
@@ -310,7 +309,6 @@
 // which correctly updates each package file's comment list.
 // (The ast.PackageExports signature is frozen, hence the local
 // implementation).
-//
 func packageExports(fset *token.FileSet, pkg *ast.Package) {
 	for _, src := range pkg.Files {
 		cmap := ast.NewCommentMap(fset, src, src.Comments)
diff --git a/internal/pkgdoc/funcs.go b/internal/pkgdoc/funcs.go
index 01305d0..e5b8571 100644
--- a/internal/pkgdoc/funcs.go
+++ b/internal/pkgdoc/funcs.go
@@ -8,11 +8,11 @@
 	"bufio"
 	"bytes"
 	"fmt"
-	"go/ast"
-	"go/doc"
-	"go/format"
-	"go/printer"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/doc"
+	"golang.org/x/website/internal/backport/go/format"
+	"golang.org/x/website/internal/backport/go/printer"
+	"golang.org/x/website/internal/backport/go/token"
 	"io"
 	"log"
 	"path"
diff --git a/internal/pkgdoc/html_test.go b/internal/pkgdoc/html_test.go
index af540ed..1a98420 100644
--- a/internal/pkgdoc/html_test.go
+++ b/internal/pkgdoc/html_test.go
@@ -7,8 +7,8 @@
 import (
 	"bytes"
 	"fmt"
-	"go/parser"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/token"
 	"strings"
 	"testing"
 
diff --git a/internal/texthtml/ast.go b/internal/texthtml/ast.go
index c7e0560..7a9bb32 100644
--- a/internal/texthtml/ast.go
+++ b/internal/texthtml/ast.go
@@ -7,9 +7,9 @@
 import (
 	"bytes"
 	"fmt"
-	"go/ast"
-	"go/doc"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/doc"
+	"golang.org/x/website/internal/backport/go/token"
 	"strconv"
 	"unicode"
 	"unicode/utf8"
diff --git a/internal/texthtml/texthtml.go b/internal/texthtml/texthtml.go
index d3b1f16..90cc448 100644
--- a/internal/texthtml/texthtml.go
+++ b/internal/texthtml/texthtml.go
@@ -8,9 +8,9 @@
 import (
 	"bytes"
 	"fmt"
-	"go/ast"
-	"go/scanner"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/scanner"
+	"golang.org/x/website/internal/backport/go/token"
 	"io"
 	"regexp"
 	"text/template"
@@ -367,7 +367,6 @@
 // bit 0: comments
 // bit 1: highlights
 // bit 2: selections
-//
 var startTags = [][]byte{
 	/* 000 */ []byte(``),
 	/* 001 */ []byte(`<span class="comment">`),
diff --git a/internal/tour/fmt.go b/internal/tour/fmt.go
index 3010895..e126fb4 100644
--- a/internal/tour/fmt.go
+++ b/internal/tour/fmt.go
@@ -7,10 +7,10 @@
 import (
 	"bytes"
 	"encoding/json"
-	"go/ast"
-	"go/parser"
-	"go/printer"
-	"go/token"
+	"golang.org/x/website/internal/backport/go/ast"
+	"golang.org/x/website/internal/backport/go/parser"
+	"golang.org/x/website/internal/backport/go/printer"
+	"golang.org/x/website/internal/backport/go/token"
 	"net/http"
 
 	"golang.org/x/tools/imports"